hip_filename | hip_content | cuda_filename | cuda_content
---|---|---|---|
f9f4a375a1983afa9126e7bf5778b88b7a74a6f5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gpu.h"
float *g_Vx0;
// float *g_Vx0_out;
float *g_Vz0;
float *g_sigmaxx0;
float *g_sigmazz0;
float *g_sigmaxz0;
float *g_m1_x;
float *g_m1_z;
float *g_aux_m2_c;
float *g_aux_m3_c;
float *g_aux_m2m3_c;
//void setup_cuda(int ngpus, int argc, char **argv){
//insert from Bob's Born
// ;
//}
//void process_error( const hipError_t &error, char *string=0, bool verbose=false ){
//insert from Bob's Born
// ;
//}
extern "C" void rtm_gpu_init(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
//set cuda devices and put all data onto gpu memory
hipError_t cuda_ret;
hipError_t err;
//Set Device
cuda_ret = hipSetDevice(0);
if(cuda_ret != hipSuccess){
fprintf(stderr, "Failed to Set The cuda Device !\n");
}
else{
fprintf(stderr, "GPU Device Set ====> OK\n");
}
// data init
hipMalloc(&g_Vx0,sizeof(float)*nx*nz*nt);
// hipMalloc(&g_Vx0_out,sizeof(float)*nx*nz*nt);
// hipMemset(g_Vx0_out, 0, sizeof(float)*nx*nz*nt);
hipMalloc(&g_Vz0,sizeof(float)*nx*nz*nt);
hipMalloc(&g_sigmaxx0,sizeof(float)*nx*nz*nt);
hipMalloc(&g_sigmazz0,sizeof(float)*nx*nz*nt);
hipMalloc(&g_sigmaxz0,sizeof(float)*nx*nz*nt);
hipMalloc(&g_m1_x,sizeof(float)*nx*nz);
hipMalloc(&g_m1_z,sizeof(float)*nx*nz);
hipMalloc(&g_aux_m2_c,sizeof(float)*nx*nz);
hipMalloc(&g_aux_m3_c,sizeof(float)*nx*nz);
hipMalloc(&g_aux_m2m3_c,sizeof(float)*nx*nz);
fprintf(stderr,"GPU Data Init ====> OK\n");
// data copy
// hipMemcpy(g_Vx0, Vx0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
// hipMemcpy(g_Vz0, Vz0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
// hipMemcpy(g_sigmaxx0, sigmaxx0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
// hipMemcpy(g_sigmaxz0, sigmaxz0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
// hipMemcpy(g_sigmazz0, sigmazz0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
// hipMemcpy(g_m1_x, m1_x, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
// hipMemcpy(g_m1_z, m1_z, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
// hipMemcpy(g_aux_m2_c, aux_m2_c, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
// hipMemcpy(g_aux_m3_c, aux_m3_c, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
// hipMemcpy(g_aux_m2m3_c, aux_m2m3_c, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
// fprintf(stderr,"Data Copy To GPU OK\n");
}
extern "C" void rtm_gpu_copy_in(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
// data copy
hipMemcpy(g_Vx0, Vx0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
hipMemcpy(g_Vz0, Vz0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
hipMemcpy(g_sigmaxx0, sigmaxx0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
hipMemcpy(g_sigmaxz0, sigmaxz0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
hipMemcpy(g_sigmazz0, sigmazz0, sizeof(float)*nx*nz*nt, hipMemcpyHostToDevice);
hipMemcpy(g_m1_x, m1_x, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
hipMemcpy(g_m1_z, m1_z, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
hipMemcpy(g_aux_m2_c, aux_m2_c, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
hipMemcpy(g_aux_m3_c, aux_m3_c, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
hipMemcpy(g_aux_m2m3_c, aux_m2m3_c, sizeof(float)*nx*nz, hipMemcpyHostToDevice);
fprintf(stderr,"Data Copy To GPU ====> OK\n");
}
extern "C" void rtm_gpu_copy_out(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0)//, //(nz, nx, nt)
//float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
// data copy back from GPU mem
hipMemcpy(Vx0, g_Vx0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
hipMemcpy(Vz0, g_Vz0,sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
hipMemcpy(sigmaxx0, g_sigmaxx0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
hipMemcpy(sigmaxz0, g_sigmaxz0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
hipMemcpy(sigmazz0, g_sigmazz0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
// hipMemcpy(m1_x, g_m1_x, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
// hipMemcpy(m1_z, g_m1_z, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
// hipMemcpy(aux_m2_c, g_aux_m2_c, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
// hipMemcpy(aux_m3_c, g_aux_m3_c, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
// hipMemcpy(aux_m2m3_c, g_aux_m2m3_c, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
fprintf(stderr,"Data Copy To CPU ====> OK\n");
}
extern "C" void rtm_gpu_final(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
// data copy back from GPU mem
// hipMemcpy(Vx0, g_Vx0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
// hipMemcpy( Vz0, g_Vz0,sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
// hipMemcpy(sigmaxx0, g_sigmaxx0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
// hipMemcpy(sigmaxz0, g_sigmaxz0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
// hipMemcpy(sigmazz0, g_sigmazz0, sizeof(float)*nx*nz*nt, hipMemcpyDeviceToHost);
// hipMemcpy(m1_x, g_m1_x, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
// hipMemcpy(m1_z, g_m1_z, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
// hipMemcpy(aux_m2_c, g_aux_m2_c, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
// hipMemcpy(aux_m3_c, g_aux_m3_c, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
// hipMemcpy(aux_m2m3_c, g_aux_m2m3_c, sizeof(float)*nx*nz, hipMemcpyDeviceToHost);
// fprintf(stderr,"Data Copy To CPU OK\n");
hipFree(g_Vx0);
/// hipFree(g_Vx0_out);
hipFree(g_Vz0);
hipFree(g_sigmaxx0);
hipFree(g_sigmazz0);
hipFree(g_sigmaxz0);
hipFree(g_m1_x);
hipFree(g_m1_z);
hipFree(g_aux_m2_c);
hipFree(g_aux_m3_c);
hipFree(g_aux_m2m3_c);
fprintf(stderr,"GPU Mem Released ====> OK\n");
}
__global__ void rtm_gpu_kernel(int it,int nt, int nz, int nx,
float * g_Vx0, float * g_Vz0, float * g_sigmaxx0, float * g_sigmazz0, float * g_sigmaxz0, //(nz, nx, nt)
float * g_m1_x,float * g_m1_z,float * g_aux_m2_c, float * g_aux_m3_c, float * g_aux_m2m3_c)//(nz, nx)
{
float c1=35.0/294912.0,c2=-405.0/229376.0,c3=567.0/40960.0,c4=-735.0/8192.0,c5=19845.0/16384.0;
//GPU thread index
int iz, ix;
iz = blockIdx.x*blockDim.x + threadIdx.x;
ix = blockIdx.y*blockDim.y + threadIdx.y;
//gt = it;
// gt = blockIdx.z*blockDim.y + threadIdx.z;
// g_Vx0[index3d(gz, gx, gt)] = g_Vx0[index3d(gz, gx, gt)] + g_Vx0[index3d(gz, gx, gt+2)];
// g_Vz0[index3d(gz, gx, gt)] = g_Vz0[index3d(gz, gx, gt)] + g_Vz0[index3d(gz, gx, gt+2)];
// g_sigmaxx0[index3d(gz, gx, gt)] = g_sigmaxx0[index3d(gz, gx, gt)] + g_sigmaxx0[index3d(gz, gx, gt+2)];
// g_sigmazz0[index3d(gz, gx, gt)] = g_sigmazz0[index3d(gz, gx, gt)] + g_sigmazz0[index3d(gz, gx, gt+2)];
// g_sigmaxz0[index3d(gz, gx, gt)] = g_sigmaxz0[index3d(gz, gx, gt)] + g_sigmaxz0[index3d(gz, gx, gt+2)];
if(ix>=9 && ix<(nx-9) && iz>=4 && iz<(nz-5)){
g_Vx0[index3d(iz,ix ,it)] = g_Vx0[index3d(iz,ix ,it)];// + g_aux_m2m3_c[index(iz,ix-5)]*c1*g_sigmaxx0[index3d(iz,ix-5,it+1)];
// + g_aux_m2m3_c[index(iz,ix-4)]*c2*g_sigmaxx0[index3d(iz,ix-4,it+1)];
// + g_aux_m2m3_c[index(iz,ix-3)]*c3*g_sigmaxx0[index3d(iz,ix-3,it+1)]
// + g_aux_m2m3_c[index(iz,ix-2)]*c4*g_sigmaxx0[index3d(iz,ix-2,it+1)]
// + g_aux_m2m3_c[index(iz,ix-1)]*c5*g_sigmaxx0[index3d(iz,ix-1,it+1)]
// - g_aux_m2m3_c[index(iz,ix)] *c5*g_sigmaxx0[index3d(iz,ix,it+1)]
// - g_aux_m2m3_c[index(iz,ix+1)]*c4*g_sigmaxx0[index3d(iz,ix+1,it+1)]
// - g_aux_m2m3_c[index(iz,ix+2)]*c3*g_sigmaxx0[index3d(iz,ix+2,it+1)]
// - g_aux_m2m3_c[index(iz,ix+3)]*c2*g_sigmaxx0[index3d(iz,ix+3,it+1)]
// - g_aux_m2m3_c[index(iz,ix+4)]*c1*g_sigmaxx0[index3d(iz,ix+4,it+1)] ;
}
__syncthreads();
// g_Vx0[index3d(iz,ix+5,it)] = g_Vx0[index3d(iz,ix+5,it)] + g_aux_m2m3_c[index(iz,ix)]*c1*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix+4,it)] = g_Vx0[index3d(iz,ix+4,it)] + g_aux_m2m3_c[index(iz,ix)]*c2*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix+3,it)] = g_Vx0[index3d(iz,ix+3,it)] + g_aux_m2m3_c[index(iz,ix)]*c3*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix+2,it)] = g_Vx0[index3d(iz,ix+2,it)] + g_aux_m2m3_c[index(iz,ix)]*c4*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix+1,it)] = g_Vx0[index3d(iz,ix+1,it)] + g_aux_m2m3_c[index(iz,ix)]*c5*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix ,it)] = g_Vx0[index3d(iz,ix ,it)] - g_aux_m2m3_c[index(iz,ix)]*c5*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix-1,it)] = g_Vx0[index3d(iz,ix-1,it)] - g_aux_m2m3_c[index(iz,ix)]*c4*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix-2,it)] = g_Vx0[index3d(iz,ix-2,it)] - g_aux_m2m3_c[index(iz,ix)]*c3*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix-3,it)] = g_Vx0[index3d(iz,ix-3,it)] - g_aux_m2m3_c[index(iz,ix)]*c2*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix-4,it)] = g_Vx0[index3d(iz,ix-4,it)] - g_aux_m2m3_c[index(iz,ix)]*c1*g_sigmaxx0[index3d(iz,ix,it+1)];
// }
}
extern "C" void rtm_gpu_func(int it, int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
hipError_t err;
hipEvent_t start, stop;
float elapsedTime = 0.0f;
//time record
//rtm_gpu_init(nt, nz, nx, Vx0, Vz0, sigmaxx0, sigmazz0, sigmaxz0, m1_x, m1_z, aux_m2_c, aux_m3_c, aux_m2m3_c);
err = hipGetLastError();
if(hipSuccess != err){
fprintf(stderr, "Cuda error0: %s.\n", hipGetErrorString(err));
}
rtm_gpu_copy_in(nt, nz, nx, Vx0, Vz0, sigmaxx0, sigmazz0, sigmaxz0, m1_x, m1_z, aux_m2_c, aux_m3_c, aux_m2m3_c);
err = hipGetLastError();
if(hipSuccess != err){
fprintf(stderr, "Cuda error1: %s.\n", hipGetErrorString(err));
}
dim3 dimGrid(nz/TZ, nx/TX);
dim3 dimBlock(TZ, TX);
//RTM kernel
//hipEventRecord(start, 0);
fprintf(stderr,"GPU Computing...(NZ=%d, NX=%d, TZ=%d, TX=%d)\n", nz, nx, TZ, TX);
hipLaunchKernelGGL(( rtm_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, it,nt, nz, nx, g_Vx0, g_Vz0, g_sigmaxx0, g_sigmazz0, g_sigmaxz0, g_m1_x, g_m1_z, g_aux_m2_c, g_aux_m3_c, g_aux_m2m3_c);
hipDeviceSynchronize();
err = hipGetLastError();
if(hipSuccess != err){
fprintf(stderr, "Cuda error2: %s.\n", hipGetErrorString(err));
}
rtm_gpu_copy_out(nt, nz, nx, Vx0, Vz0, sigmaxx0, sigmazz0, sigmaxz0);//, m1_x, m1_z, aux_m2_c, aux_m3_c, aux_m2m3_c);
hipDeviceSynchronize();
err = hipGetLastError();
if(hipSuccess != err){
fprintf(stderr, "Cuda error3: %s.\n", hipGetErrorString(err));
}
// rtm_gpu_final(nt, nz, nx, Vx0, Vz0, sigmaxx0, sigmazz0, sigmaxz0, m1_x, m1_z, aux_m2_c, aux_m3_c, aux_m2m3_c);
err = hipGetLastError();
if(hipSuccess != err){
fprintf(stderr, "Cuda error4: %s.\n", hipGetErrorString(err));
}
//hipEventRecord(stop, 0);
//hipEventSynchronize(stop);
//hipEventElapsedTime(&elapsedTime, start, stop);
//fprintf(stderr,"GPU Computational Elapsed Time: %.4f\n",elapsedTime);
}
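/* Usage sketch (illustrative only): one plausible host-side driver for the
 * wrappers defined above -- allocate once, run the kernel per time step, then
 * release. The loop bound and the assumption that the caller owns the host
 * arrays are guesses, not taken from the original driver code. */
#if 0
static void rtm_gpu_run_all_steps(int nt, int nz, int nx,
    float *Vx0, float *Vz0, float *sigmaxx0, float *sigmazz0, float *sigmaxz0,
    float *m1_x, float *m1_z, float *aux_m2_c, float *aux_m3_c, float *aux_m2m3_c)
{
    // Allocate device buffers once for the whole run
    rtm_gpu_init(nt, nz, nx, Vx0, Vz0, sigmaxx0, sigmazz0, sigmaxz0,
                 m1_x, m1_z, aux_m2_c, aux_m3_c, aux_m2m3_c);
    // The (commented-out) stencil reads time level it+1, so stop one step early
    for (int it = 0; it < nt - 1; it++) {
        rtm_gpu_func(it, nt, nz, nx, Vx0, Vz0, sigmaxx0, sigmazz0, sigmaxz0,
                     m1_x, m1_z, aux_m2_c, aux_m3_c, aux_m2m3_c);
    }
    // Release device buffers
    rtm_gpu_final(nt, nz, nx, Vx0, Vz0, sigmaxx0, sigmazz0, sigmaxz0,
                  m1_x, m1_z, aux_m2_c, aux_m3_c, aux_m2m3_c);
}
#endif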
|
f9f4a375a1983afa9126e7bf5778b88b7a74a6f5.cu
|
#include <stdio.h>
#include "gpu.h"
float *g_Vx0;
// float *g_Vx0_out;
float *g_Vz0;
float *g_sigmaxx0;
float *g_sigmazz0;
float *g_sigmaxz0;
float *g_m1_x;
float *g_m1_z;
float *g_aux_m2_c;
float *g_aux_m3_c;
float *g_aux_m2m3_c;
//void setup_cuda(int ngpus, int argc, char **argv){
//insert from Bob's Born
// ;
//}
//void process_error( const cudaError_t &error, char *string=0, bool verbose=false ){
//insert from Bob's Born
// ;
//}
extern "C" void rtm_gpu_init(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
//set cuda devices and put all data onto gpu memory
cudaError_t cuda_ret;
cudaError_t err;
//Set Device
cuda_ret = cudaSetDevice(0);
if(cuda_ret != cudaSuccess){
fprintf(stderr, "Failed to Set The cuda Device !\n");
}
else{
fprintf(stderr, "GPU Device Set ====> OK\n");
}
// data init
cudaMalloc(&g_Vx0,sizeof(float)*nx*nz*nt);
// cudaMalloc(&g_Vx0_out,sizeof(float)*nx*nz*nt);
// cudaMemset(g_Vx0_out, 0, sizeof(float)*nx*nz*nt);
cudaMalloc(&g_Vz0,sizeof(float)*nx*nz*nt);
cudaMalloc(&g_sigmaxx0,sizeof(float)*nx*nz*nt);
cudaMalloc(&g_sigmazz0,sizeof(float)*nx*nz*nt);
cudaMalloc(&g_sigmaxz0,sizeof(float)*nx*nz*nt);
cudaMalloc(&g_m1_x,sizeof(float)*nx*nz);
cudaMalloc(&g_m1_z,sizeof(float)*nx*nz);
cudaMalloc(&g_aux_m2_c,sizeof(float)*nx*nz);
cudaMalloc(&g_aux_m3_c,sizeof(float)*nx*nz);
cudaMalloc(&g_aux_m2m3_c,sizeof(float)*nx*nz);
fprintf(stderr,"GPU Data Init ====> OK\n");
// data copy
// cudaMemcpy(g_Vx0, Vx0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
// cudaMemcpy(g_Vz0, Vz0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
// cudaMemcpy(g_sigmaxx0, sigmaxx0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
// cudaMemcpy(g_sigmaxz0, sigmaxz0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
// cudaMemcpy(g_sigmazz0, sigmazz0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
// cudaMemcpy(g_m1_x, m1_x, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
// cudaMemcpy(g_m1_z, m1_z, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
// cudaMemcpy(g_aux_m2_c, aux_m2_c, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
// cudaMemcpy(g_aux_m3_c, aux_m3_c, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
// cudaMemcpy(g_aux_m2m3_c, aux_m2m3_c, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
// fprintf(stderr,"Data Copy To GPU OK\n");
}
extern "C" void rtm_gpu_copy_in(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
// data copy
cudaMemcpy(g_Vx0, Vx0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
cudaMemcpy(g_Vz0, Vz0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
cudaMemcpy(g_sigmaxx0, sigmaxx0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
cudaMemcpy(g_sigmaxz0, sigmaxz0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
cudaMemcpy(g_sigmazz0, sigmazz0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
cudaMemcpy(g_m1_x, m1_x, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
cudaMemcpy(g_m1_z, m1_z, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
cudaMemcpy(g_aux_m2_c, aux_m2_c, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
cudaMemcpy(g_aux_m3_c, aux_m3_c, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
cudaMemcpy(g_aux_m2m3_c, aux_m2m3_c, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
fprintf(stderr,"Data Copy To GPU ====> OK\n");
}
extern "C" void rtm_gpu_copy_out(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0)//, //(nz, nx, nt)
//float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
// data copy back from GPU mem
cudaMemcpy(Vx0, g_Vx0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
cudaMemcpy(Vz0, g_Vz0,sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
cudaMemcpy(sigmaxx0, g_sigmaxx0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
cudaMemcpy(sigmaxz0, g_sigmaxz0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
cudaMemcpy(sigmazz0, g_sigmazz0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
// cudaMemcpy(m1_x, g_m1_x, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
// cudaMemcpy(m1_z, g_m1_z, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
// cudaMemcpy(aux_m2_c, g_aux_m2_c, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
// cudaMemcpy(aux_m3_c, g_aux_m3_c, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
// cudaMemcpy(aux_m2m3_c, g_aux_m2m3_c, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
fprintf(stderr,"Data Copy To CPU ====> OK\n");
}
extern "C" void rtm_gpu_final(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
// data copy back from GPU mem
// cudaMemcpy(Vx0, g_Vx0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
// cudaMemcpy( Vz0, g_Vz0,sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
// cudaMemcpy(sigmaxx0, g_sigmaxx0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
// cudaMemcpy(sigmaxz0, g_sigmaxz0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
// cudaMemcpy(sigmazz0, g_sigmazz0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
// cudaMemcpy(m1_x, g_m1_x, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
// cudaMemcpy(m1_z, g_m1_z, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
// cudaMemcpy(aux_m2_c, g_aux_m2_c, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
// cudaMemcpy(aux_m3_c, g_aux_m3_c, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
// cudaMemcpy(aux_m2m3_c, g_aux_m2m3_c, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
// fprintf(stderr,"Data Copy To CPU OK\n");
cudaFree(g_Vx0);
/// cudaFree(g_Vx0_out);
cudaFree(g_Vz0);
cudaFree(g_sigmaxx0);
cudaFree(g_sigmazz0);
cudaFree(g_sigmaxz0);
cudaFree(g_m1_x);
cudaFree(g_m1_z);
cudaFree(g_aux_m2_c);
cudaFree(g_aux_m3_c);
cudaFree(g_aux_m2m3_c);
fprintf(stderr,"GPU Mem Released ====> OK\n");
}
__global__ void rtm_gpu_kernel(int it,int nt, int nz, int nx,
float * g_Vx0, float * g_Vz0, float * g_sigmaxx0, float * g_sigmazz0, float * g_sigmaxz0, //(nz, nx, nt)
float * g_m1_x,float * g_m1_z,float * g_aux_m2_c, float * g_aux_m3_c, float * g_aux_m2m3_c)//(nz, nx)
{
float c1=35.0/294912.0,c2=-405.0/229376.0,c3=567.0/40960.0,c4=-735.0/8192.0,c5=19845.0/16384.0;
//GPU thread index
int iz, ix;
iz = blockIdx.x*blockDim.x + threadIdx.x;
ix = blockIdx.y*blockDim.y + threadIdx.y;
//gt = it;
// gt = blockIdx.z*blockDim.y + threadIdx.z;
// g_Vx0[index3d(gz, gx, gt)] = g_Vx0[index3d(gz, gx, gt)] + g_Vx0[index3d(gz, gx, gt+2)];
// g_Vz0[index3d(gz, gx, gt)] = g_Vz0[index3d(gz, gx, gt)] + g_Vz0[index3d(gz, gx, gt+2)];
// g_sigmaxx0[index3d(gz, gx, gt)] = g_sigmaxx0[index3d(gz, gx, gt)] + g_sigmaxx0[index3d(gz, gx, gt+2)];
// g_sigmazz0[index3d(gz, gx, gt)] = g_sigmazz0[index3d(gz, gx, gt)] + g_sigmazz0[index3d(gz, gx, gt+2)];
// g_sigmaxz0[index3d(gz, gx, gt)] = g_sigmaxz0[index3d(gz, gx, gt)] + g_sigmaxz0[index3d(gz, gx, gt+2)];
if(ix>=9 && ix<(nx-9) && iz>=4 && iz<(nz-5)){
g_Vx0[index3d(iz,ix ,it)] = g_Vx0[index3d(iz,ix ,it)];// + g_aux_m2m3_c[index(iz,ix-5)]*c1*g_sigmaxx0[index3d(iz,ix-5,it+1)];
// + g_aux_m2m3_c[index(iz,ix-4)]*c2*g_sigmaxx0[index3d(iz,ix-4,it+1)];
// + g_aux_m2m3_c[index(iz,ix-3)]*c3*g_sigmaxx0[index3d(iz,ix-3,it+1)]
// + g_aux_m2m3_c[index(iz,ix-2)]*c4*g_sigmaxx0[index3d(iz,ix-2,it+1)]
// + g_aux_m2m3_c[index(iz,ix-1)]*c5*g_sigmaxx0[index3d(iz,ix-1,it+1)]
// - g_aux_m2m3_c[index(iz,ix)] *c5*g_sigmaxx0[index3d(iz,ix,it+1)]
// - g_aux_m2m3_c[index(iz,ix+1)]*c4*g_sigmaxx0[index3d(iz,ix+1,it+1)]
// - g_aux_m2m3_c[index(iz,ix+2)]*c3*g_sigmaxx0[index3d(iz,ix+2,it+1)]
// - g_aux_m2m3_c[index(iz,ix+3)]*c2*g_sigmaxx0[index3d(iz,ix+3,it+1)]
// - g_aux_m2m3_c[index(iz,ix+4)]*c1*g_sigmaxx0[index3d(iz,ix+4,it+1)] ;
}
__syncthreads();
// g_Vx0[index3d(iz,ix+5,it)] = g_Vx0[index3d(iz,ix+5,it)] + g_aux_m2m3_c[index(iz,ix)]*c1*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix+4,it)] = g_Vx0[index3d(iz,ix+4,it)] + g_aux_m2m3_c[index(iz,ix)]*c2*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix+3,it)] = g_Vx0[index3d(iz,ix+3,it)] + g_aux_m2m3_c[index(iz,ix)]*c3*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix+2,it)] = g_Vx0[index3d(iz,ix+2,it)] + g_aux_m2m3_c[index(iz,ix)]*c4*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix+1,it)] = g_Vx0[index3d(iz,ix+1,it)] + g_aux_m2m3_c[index(iz,ix)]*c5*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix ,it)] = g_Vx0[index3d(iz,ix ,it)] - g_aux_m2m3_c[index(iz,ix)]*c5*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix-1,it)] = g_Vx0[index3d(iz,ix-1,it)] - g_aux_m2m3_c[index(iz,ix)]*c4*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix-2,it)] = g_Vx0[index3d(iz,ix-2,it)] - g_aux_m2m3_c[index(iz,ix)]*c3*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix-3,it)] = g_Vx0[index3d(iz,ix-3,it)] - g_aux_m2m3_c[index(iz,ix)]*c2*g_sigmaxx0[index3d(iz,ix,it+1)];
// g_Vx0[index3d(iz,ix-4,it)] = g_Vx0[index3d(iz,ix-4,it)] - g_aux_m2m3_c[index(iz,ix)]*c1*g_sigmaxx0[index3d(iz,ix,it+1)];
// }
}
extern "C" void rtm_gpu_func(int it, int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)//(nz, nx)
{
cudaError_t err;
cudaEvent_t start, stop;
float elapsedTime = 0.0f;
//time record
//rtm_gpu_init(nt, nz, nx, Vx0, Vz0, sigmaxx0, sigmazz0, sigmaxz0, m1_x, m1_z, aux_m2_c, aux_m3_c, aux_m2m3_c);
err = cudaGetLastError();
if(cudaSuccess != err){
fprintf(stderr, "Cuda error0: %s.\n", cudaGetErrorString(err));
}
rtm_gpu_copy_in(nt, nz, nx, Vx0, Vz0, sigmaxx0, sigmazz0, sigmaxz0, m1_x, m1_z, aux_m2_c, aux_m3_c, aux_m2m3_c);
err = cudaGetLastError();
if(cudaSuccess != err){
fprintf(stderr, "Cuda error1: %s.\n", cudaGetErrorString(err));
}
dim3 dimGrid(nz/TZ, nx/TX);
dim3 dimBlock(TZ, TX);
//RTM kernel
//cudaEventRecord(start, 0);
fprintf(stderr,"GPU Computing...(NZ=%d, NX=%d, TZ=%d, TX=%d)\n", nz, nx, TZ, TX);
rtm_gpu_kernel<<<dimGrid, dimBlock>>>(it,nt, nz, nx, g_Vx0, g_Vz0, g_sigmaxx0, g_sigmazz0, g_sigmaxz0, g_m1_x, g_m1_z, g_aux_m2_c, g_aux_m3_c, g_aux_m2m3_c);
cudaDeviceSynchronize();
err = cudaGetLastError();
if(cudaSuccess != err){
fprintf(stderr, "Cuda error2: %s.\n", cudaGetErrorString(err));
}
rtm_gpu_copy_out(nt, nz, nx, Vx0, Vz0, sigmaxx0, sigmazz0, sigmaxz0);//, m1_x, m1_z, aux_m2_c, aux_m3_c, aux_m2m3_c);
cudaDeviceSynchronize();
err = cudaGetLastError();
if(cudaSuccess != err){
fprintf(stderr, "Cuda error3: %s.\n", cudaGetErrorString(err));
}
// rtm_gpu_final(nt, nz, nx, Vx0, Vz0, sigmaxx0, sigmazz0, sigmaxz0, m1_x, m1_z, aux_m2_c, aux_m3_c, aux_m2m3_c);
err = cudaGetLastError();
if(cudaSuccess != err){
fprintf(stderr, "Cuda error4: %s.\n", cudaGetErrorString(err));
}
//cudaEventRecord(stop, 0);
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&elapsedTime, start, stop);
//fprintf(stderr,"GPU Computational Elapsed Time: %.4f\n",elapsedTime);
}
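/* Usage sketch (illustrative only): one plausible host-side driver for the
 * wrappers defined above -- allocate once, run the kernel per time step, then
 * release. The loop bound and the assumption that the caller owns the host
 * arrays are guesses, not taken from the original driver code. */
#if 0
static void rtm_gpu_run_all_steps(int nt, int nz, int nx,
    float *Vx0, float *Vz0, float *sigmaxx0, float *sigmazz0, float *sigmaxz0,
    float *m1_x, float *m1_z, float *aux_m2_c, float *aux_m3_c, float *aux_m2m3_c)
{
    // Allocate device buffers once for the whole run
    rtm_gpu_init(nt, nz, nx, Vx0, Vz0, sigmaxx0, sigmazz0, sigmaxz0,
                 m1_x, m1_z, aux_m2_c, aux_m3_c, aux_m2m3_c);
    // The (commented-out) stencil reads time level it+1, so stop one step early
    for (int it = 0; it < nt - 1; it++) {
        rtm_gpu_func(it, nt, nz, nx, Vx0, Vz0, sigmaxx0, sigmazz0, sigmaxz0,
                     m1_x, m1_z, aux_m2_c, aux_m3_c, aux_m2m3_c);
    }
    // Release device buffers
    rtm_gpu_final(nt, nz, nx, Vx0, Vz0, sigmaxx0, sigmazz0, sigmaxz0,
                  m1_x, m1_z, aux_m2_c, aux_m3_c, aux_m2m3_c);
}
#endif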
|
98dabc15c281683f86ac164ab4ae199c7359b30a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "cudaCommon.h"
#include "cudaSourceRotatingFrame.h"
#define BLOCKDIMX 16
#define BLOCKDIMY 16
/* THIS FUNCTION
This function is used in source/source.m and introduces the fictitious forces which
result from a rotating coordinate frame. The rotation axis is fixed at +Z-hat to
reduce computational burden; The frame equations are given at the start of the
kernel itself.
*/
__global__ void cukern_sourceRotatingFrame(double *rho, double *E, double *px, double *py, double *xyvector);
//__global__ void cukern_sourceRotatingFrame(double *rho, double *E, double *px, double *py, double *Rx, double *Ry, int3 arraysize);
__constant__ __device__ double devLambda[2];
__constant__ __device__ int devIntParams[3];
__global__ void cukern_FetchPartitionSubset1D(double *in, int nodeN, double *out, int partX0, int partNX);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if ((nrhs!=7) || (nlhs != 0)) mexErrMsgTxt("Wrong number of arguments: need cudaSourceRotatingFrame(rho, E, px, py, omega, dt, [xvector yvector])\n");
CHECK_CUDA_ERROR("entering cudaSourceRotatingFrame");
// Get source array info and create destination arrays
MGArray fluid[4];
int worked = MGA_accessMatlabArrays(prhs, 0, 3, &fluid[0]);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to access fluid arrays."); }
/* FIXME: accept this as a matlab array instead
* FIXME: Transfer appropriate segments to __constant__ memory
* FIXME: that seems the only reasonable way to avoid partitioning hell
*/
MGArray xyvec;
worked = MGA_accessMatlabArrays(prhs, 6, 6, &xyvec);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to access X-Y vector."); }
dim3 gridsize, blocksize;
int3 arraysize;
double omega = *mxGetPr(prhs[4]);
double dt = *mxGetPr(prhs[5]);
worked = sourcefunction_RotatingFrame(&fluid[0], &xyvec, omega, dt);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to apply rotating frame source terms."); }
}
int sourcefunction_RotatingFrame(MGArray *fluidXY, MGArray *XYVectors, double omega, double dt)
{
dim3 gridsize, blocksize;
int3 arraysize;
double lambda[4];
lambda[0] = omega;
lambda[1] = dt;
int i;
int worked;
double *devXYset[fluidXY->nGPUs];
int sub[6];
for(i = 0; i < fluidXY->nGPUs; i++) {
hipSetDevice(fluidXY->deviceID[i]);
worked = CHECK_CUDA_ERROR("hipSetDevice");
if(worked != SUCCESSFUL) break;
// Upload rotation parameters
hipMemcpyToSymbol(devLambda, &lambda[0], 2*sizeof(double), 0, hipMemcpyHostToDevice);
worked = CHECK_CUDA_ERROR("memcpy to symbol");
if(worked != SUCCESSFUL) break;
// Upload partition size
calcPartitionExtent(fluidXY, i, &sub[0]);
hipMemcpyToSymbol(devIntParams, &sub[3], 3*sizeof(int), 0, hipMemcpyHostToDevice);
worked = CHECK_CUDA_ERROR("memcpy to symbol");
if(worked != SUCCESSFUL) break;
// Swipe the needed subsegments of the X/Y vectors from the supplied node-wide array
hipMalloc((void **)&devXYset[i], (sub[3]+sub[4])*sizeof(double));
worked = CHECK_CUDA_ERROR("hipMalloc");
if(worked != SUCCESSFUL) break;
blocksize = makeDim3(128, 1, 1);
gridsize.x = ROUNDUPTO(sub[3], 128) / 128;
gridsize.y = gridsize.z = 1;
hipLaunchKernelGGL(( cukern_FetchPartitionSubset1D), dim3(gridsize), dim3(blocksize), 0, 0, XYVectors->devicePtr[i], fluidXY->dim[0], devXYset[i], sub[0], sub[3]);
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, XYVectors, i, "cukern_FetchPartitionSubset1D, X");
if(worked != SUCCESSFUL) break;
gridsize.x = ROUNDUPTO(sub[4], 128) / 128;
hipLaunchKernelGGL(( cukern_FetchPartitionSubset1D), dim3(gridsize), dim3(blocksize), 0, 0, XYVectors->devicePtr[i] + fluidXY->dim[0], fluidXY->dim[1], devXYset[i]+sub[3], sub[1], sub[4]);
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, XYVectors, i, "cukern_FetchPartitionSubset1D, Y");
if(worked != SUCCESSFUL) break;
arraysize.x = sub[3]; arraysize.y = sub[4]; arraysize.z = sub[5];
blocksize = makeDim3(BLOCKDIMX, BLOCKDIMY, 1);
gridsize.x = ROUNDUPTO(arraysize.x, blocksize.x) / blocksize.x;
gridsize.y = arraysize.z;
gridsize.z = 1;
hipLaunchKernelGGL(( cukern_sourceRotatingFrame), dim3(gridsize), dim3(blocksize), 0, 0,
fluidXY[0].devicePtr[i],
fluidXY[1].devicePtr[i],
fluidXY[2].devicePtr[i],
fluidXY[3].devicePtr[i],
devXYset[i]);
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, fluidXY, i, "applyScalarPotential");
if(worked != SUCCESSFUL) break;
}
if(worked == SUCCESSFUL) {
for(i = 0; i < fluidXY->nGPUs; i++) {
hipFree(devXYset[i]);
worked = CHECK_CUDA_ERROR("hipFree");
}
}
// Differencing has corrupted the energy and momentum halos: Restore them
if(worked == SUCCESSFUL)
worked = MGA_exchangeLocalHalos(fluidXY + 1, 3);
return CHECK_IMOGEN_ERROR(worked);
}
/*
* a = -[2 w X v + w X (w X r) ]
* dv = -[2 w X v + w X (w X r) ] dt
* dp = -rho dv = -rho [[2 w X v + w X (w X r) ] dt
* dp = -[2 w X p + rho w X (w X r) ] dt
*
* w X p = |I J K | = <-w py, w px, 0> = u
* |0 0 w |
* |px py pz|
*
* w X r = <-w y, w x, 0> = s;
* w X s = |I J K| = <-w^2 x, -w^2 y, 0> = -w^2<x,y,0> = b
* |0 0 w|
* |-wy wx 0|
* dp = -[2 u + rho b] dt
* = -[2 w<-py, px, 0> - rho w^2 <x, y, 0>] dt
* = w dt [2<py, -px> + rho w <x, y>] in going to static frame
*
* dE = -v dot dp
*/
/* rho, E, Px, Py, Pz: arraysize-sized arrays
omega: scalar
Rx: [nx 1 1] sized array
Ry: [ny 1 1] sized array */
#define NTH (BLOCKDIMX*BLOCKDIMY)
#define OMEGA devLambda[0]
#define DT devLambda[1]
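/* Illustrative per-cell sketch of the update implied by the derivation above:
 * dp = w dt [2<py, -px> + rho w <x, y>], dE = change in p^2/(2 rho).
 * The real kernel below adds shared-memory tiling and a half-step predictor;
 * this simplified, never-called helper is a sketch under those assumptions. */
#if 0
__device__ void sourceRotatingFrame_cellSketch(double x, double y,
        double *rho, double *E, double *px, double *py)
{
    double dpx = DT*OMEGA*( 2.0*(*py) + OMEGA*x*(*rho)); // w dt [ 2 py + rho w x ]
    double dpy = DT*OMEGA*(-2.0*(*px) + OMEGA*y*(*rho)); // w dt [-2 px + rho w y ]
    // Work done by the fictitious force equals the change in kinetic energy
    *E  += ((*px) + 0.5*dpx)*dpx/(*rho) + ((*py) + 0.5*dpy)*dpy/(*rho);
    *px += dpx;
    *py += dpy;
}
#endif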
__global__ void cukern_sourceRotatingFrame(double *rho, double *E, double *px, double *py, double *Rvector)
{
__shared__ double shar[4*BLOCKDIMX*BLOCKDIMY];
//__shared__ double pxhf[BLOCKDIMX*BLOCKDIMY], pyhf[BLOCKDIMX*BLOCKDIMY];
//__shared__ double px0[BLOCKDIMX*BLOCKDIMY], py0[BLOCKDIMX*BLOCKDIMY];
/* strategy: XY files, fill in X direction, step in Y direction; griddim.y = Nz */
int myx = threadIdx.x + BLOCKDIMX*blockIdx.x;
int myy = threadIdx.y;
int myz = blockIdx.y;
int nx = devIntParams[0]; int ny = devIntParams[1];
if(myx >= devIntParams[0]) return;
// int globaddr = myx + nx*(myy + ny*myz);
int tileaddr = myx + nx*(myy + ny*myz);
rho += tileaddr; E += tileaddr; px += tileaddr; py += tileaddr;
tileaddr = threadIdx.x + BLOCKDIMX*threadIdx.y;
double locX = Rvector[myx];
Rvector += nx; // Advances this to the Y array for below
double locY;
double locRho;
double dmom; double dener;
// double locMom[2];
for(; myy < ny; myy += BLOCKDIMY) {
locY = Rvector[myy];
// Load original values to register
//locRho = rho[globaddr];
//locMom[0] = px[globaddr];
//locMom[1] = py[globaddr];
locRho = *rho;
shar[tileaddr] = *px;
shar[tileaddr+NTH] = *py;
// Predict momenta at half-timestep using 1st order method
dmom = DT*OMEGA*(shar[tileaddr+NTH] + OMEGA*locX*locRho/2.0); // dmom = delta px
// dener = (shar[tileaddr]+dmom/2)*dmom/locRho;
shar[tileaddr+2*NTH] = shar[tileaddr] + dmom;
dmom = DT*OMEGA*(-shar[tileaddr] + OMEGA*locY*locRho/2.0); // dmom = delta py
// dener += (shar[tileaddr]+dmom/2)*dmom/locRho;
shar[tileaddr+3*NTH] = shar[tileaddr+NTH] + dmom;
// Now make full timestep update: Evaluate f' using f(t_half)
dmom = DT*OMEGA*(2*shar[tileaddr+3*NTH] + OMEGA*locX*locRho);
dener = (shar[tileaddr]+dmom/2)*dmom/locRho;
*px = shar[tileaddr] + dmom;
dmom = DT*OMEGA*(-2*shar[tileaddr+2*NTH] + OMEGA*locY*locRho);
dener += (shar[tileaddr+NTH]+dmom/2)*dmom/locRho;
*py = shar[tileaddr+NTH] + dmom;
// Change in energy is exactly the work done by force
// Is exactly (p^2 / 2 rho) after minus before
*E += dener;
rho += nx*BLOCKDIMY;
E += nx*BLOCKDIMY;
px += nx*BLOCKDIMY;
py += nx*BLOCKDIMY;
}
}
/* Simple kernel:
* Given in[0 ... (nodeN-1)], copies the segment in[partX0 ... (partX0 + partNX -1)] to out[0 ... (partNX-1)]
* and helpfully wraps addresses circularly
* invoke with gridDim.x * blockDim.x >= partNX
*/
__global__ void cukern_FetchPartitionSubset1D(double *in, int nodeN, double *out, int partX0, int partNX)
{
// calculate output address
int addrOut = threadIdx.x + blockDim.x * blockIdx.x;
if(addrOut >= partNX) return;
// Affine map back to input address
int addrIn = addrOut + partX0;
if(addrIn < 0) addrIn += partNX;
out[addrOut] = in[addrIn];
}
|
98dabc15c281683f86ac164ab4ae199c7359b30a.cu
|
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"
#include "cudaCommon.h"
#include "cudaSourceRotatingFrame.h"
#define BLOCKDIMX 16
#define BLOCKDIMY 16
/* THIS FUNCTION
This function is used in source/source.m and introduces the fictitious forces which
result from a rotating coordinate frame. The rotation axis is fixed at +Z-hat to
reduce computational burden; The frame equations are given at the start of the
kernel itself.
*/
__global__ void cukern_sourceRotatingFrame(double *rho, double *E, double *px, double *py, double *xyvector);
//__global__ void cukern_sourceRotatingFrame(double *rho, double *E, double *px, double *py, double *Rx, double *Ry, int3 arraysize);
__constant__ __device__ double devLambda[2];
__constant__ __device__ int devIntParams[3];
__global__ void cukern_FetchPartitionSubset1D(double *in, int nodeN, double *out, int partX0, int partNX);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if ((nrhs!=7) || (nlhs != 0)) mexErrMsgTxt("Wrong number of arguments: need cudaSourceRotatingFrame(rho, E, px, py, omega, dt, [xvector yvector])\n");
CHECK_CUDA_ERROR("entering cudaSourceRotatingFrame");
// Get source array info and create destination arrays
MGArray fluid[4];
int worked = MGA_accessMatlabArrays(prhs, 0, 3, &fluid[0]);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to access fluid arrays."); }
/* FIXME: accept this as a matlab array instead
* FIXME: Transfer appropriate segments to __constant__ memory
* FIXME: that seems the only reasonable way to avoid partitioning hell
*/
MGArray xyvec;
worked = MGA_accessMatlabArrays(prhs, 6, 6, &xyvec);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to access X-Y vector."); }
dim3 gridsize, blocksize;
int3 arraysize;
double omega = *mxGetPr(prhs[4]);
double dt = *mxGetPr(prhs[5]);
worked = sourcefunction_RotatingFrame(&fluid[0], &xyvec, omega, dt);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to apply rotating frame source terms."); }
}
int sourcefunction_RotatingFrame(MGArray *fluidXY, MGArray *XYVectors, double omega, double dt)
{
dim3 gridsize, blocksize;
int3 arraysize;
double lambda[4];
lambda[0] = omega;
lambda[1] = dt;
int i;
int worked;
double *devXYset[fluidXY->nGPUs];
int sub[6];
for(i = 0; i < fluidXY->nGPUs; i++) {
cudaSetDevice(fluidXY->deviceID[i]);
worked = CHECK_CUDA_ERROR("cudaSetDevice");
if(worked != SUCCESSFUL) break;
// Upload rotation parameters
cudaMemcpyToSymbol(devLambda, &lambda[0], 2*sizeof(double), 0, cudaMemcpyHostToDevice);
worked = CHECK_CUDA_ERROR("memcpy to symbol");
if(worked != SUCCESSFUL) break;
// Upload partition size
calcPartitionExtent(fluidXY, i, &sub[0]);
cudaMemcpyToSymbol(devIntParams, &sub[3], 3*sizeof(int), 0, cudaMemcpyHostToDevice);
worked = CHECK_CUDA_ERROR("memcpy to symbol");
if(worked != SUCCESSFUL) break;
// Swipe the needed subsegments of the X/Y vectors from the supplied node-wide array
cudaMalloc((void **)&devXYset[i], (sub[3]+sub[4])*sizeof(double));
worked = CHECK_CUDA_ERROR("cudaMalloc");
if(worked != SUCCESSFUL) break;
blocksize = makeDim3(128, 1, 1);
gridsize.x = ROUNDUPTO(sub[3], 128) / 128;
gridsize.y = gridsize.z = 1;
cukern_FetchPartitionSubset1D<<<gridsize, blocksize>>>(XYVectors->devicePtr[i], fluidXY->dim[0], devXYset[i], sub[0], sub[3]);
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, XYVectors, i, "cukern_FetchPartitionSubset1D, X");
if(worked != SUCCESSFUL) break;
gridsize.x = ROUNDUPTO(sub[4], 128) / 128;
cukern_FetchPartitionSubset1D<<<gridsize, blocksize>>>(XYVectors->devicePtr[i] + fluidXY->dim[0], fluidXY->dim[1], devXYset[i]+sub[3], sub[1], sub[4]);
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, XYVectors, i, "cukern_FetchPartitionSubset1D, Y");
if(worked != SUCCESSFUL) break;
arraysize.x = sub[3]; arraysize.y = sub[4]; arraysize.z = sub[5];
blocksize = makeDim3(BLOCKDIMX, BLOCKDIMY, 1);
gridsize.x = ROUNDUPTO(arraysize.x, blocksize.x) / blocksize.x;
gridsize.y = arraysize.z;
gridsize.z = 1;
cukern_sourceRotatingFrame<<<gridsize, blocksize>>>(
fluidXY[0].devicePtr[i],
fluidXY[1].devicePtr[i],
fluidXY[2].devicePtr[i],
fluidXY[3].devicePtr[i],
devXYset[i]);
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, fluidXY, i, "applyScalarPotential");
if(worked != SUCCESSFUL) break;
}
if(worked == SUCCESSFUL) {
for(i = 0; i < fluidXY->nGPUs; i++) {
cudaFree(devXYset[i]);
worked = CHECK_CUDA_ERROR("cudaFree");
}
}
// Differencing has corrupted the energy and momentum halos: Restore them
if(worked == SUCCESSFUL)
worked = MGA_exchangeLocalHalos(fluidXY + 1, 3);
return CHECK_IMOGEN_ERROR(worked);
}
/*
* a = -[2 w X v + w X (w X r) ]
* dv = -[2 w X v + w X (w X r) ] dt
* dp = -rho dv = -rho [[2 w X v + w X (w X r) ] dt
* dp = -[2 w X p + rho w X (w X r) ] dt
*
* w X p = |I J K | = <-w py, w px, 0> = u
* |0 0 w |
* |px py pz|
*
* w X r = <-w y, w x, 0> = s;
* w X s = |I J K| = <-w^2 x, -w^2 y, 0> = -w^2<x,y,0> = b
* |0 0 w|
* |-wy wx 0|
* dp = -[2 u + rho b] dt
* = -[2 w<-py, px, 0> - rho w^2 <x, y, 0>] dt
* = w dt [2<py, -px> + rho w <x, y>] in going to static frame
*
* dE = -v dot dp
*/
/* rho, E, Px, Py, Pz: arraysize-sized arrays
omega: scalar
Rx: [nx 1 1] sized array
Ry: [ny 1 1] sized array */
#define NTH (BLOCKDIMX*BLOCKDIMY)
#define OMEGA devLambda[0]
#define DT devLambda[1]
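/* Illustrative per-cell sketch of the update implied by the derivation above:
 * dp = w dt [2<py, -px> + rho w <x, y>], dE = change in p^2/(2 rho).
 * The real kernel below adds shared-memory tiling and a half-step predictor;
 * this simplified, never-called helper is a sketch under those assumptions. */
#if 0
__device__ void sourceRotatingFrame_cellSketch(double x, double y,
        double *rho, double *E, double *px, double *py)
{
    double dpx = DT*OMEGA*( 2.0*(*py) + OMEGA*x*(*rho)); // w dt [ 2 py + rho w x ]
    double dpy = DT*OMEGA*(-2.0*(*px) + OMEGA*y*(*rho)); // w dt [-2 px + rho w y ]
    // Work done by the fictitious force equals the change in kinetic energy
    *E  += ((*px) + 0.5*dpx)*dpx/(*rho) + ((*py) + 0.5*dpy)*dpy/(*rho);
    *px += dpx;
    *py += dpy;
}
#endif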
__global__ void cukern_sourceRotatingFrame(double *rho, double *E, double *px, double *py, double *Rvector)
{
__shared__ double shar[4*BLOCKDIMX*BLOCKDIMY];
//__shared__ double pxhf[BLOCKDIMX*BLOCKDIMY], pyhf[BLOCKDIMX*BLOCKDIMY];
//__shared__ double px0[BLOCKDIMX*BLOCKDIMY], py0[BLOCKDIMX*BLOCKDIMY];
/* strategy: XY files, fill in X direction, step in Y direction; griddim.y = Nz */
int myx = threadIdx.x + BLOCKDIMX*blockIdx.x;
int myy = threadIdx.y;
int myz = blockIdx.y;
int nx = devIntParams[0]; int ny = devIntParams[1];
if(myx >= devIntParams[0]) return;
// int globaddr = myx + nx*(myy + ny*myz);
int tileaddr = myx + nx*(myy + ny*myz);
rho += tileaddr; E += tileaddr; px += tileaddr; py += tileaddr;
tileaddr = threadIdx.x + BLOCKDIMX*threadIdx.y;
double locX = Rvector[myx];
Rvector += nx; // Advances this to the Y array for below
double locY;
double locRho;
double dmom; double dener;
// double locMom[2];
for(; myy < ny; myy += BLOCKDIMY) {
locY = Rvector[myy];
// Load original values to register
//locRho = rho[globaddr];
//locMom[0] = px[globaddr];
//locMom[1] = py[globaddr];
locRho = *rho;
shar[tileaddr] = *px;
shar[tileaddr+NTH] = *py;
// Predict momenta at half-timestep using 1st order method
dmom = DT*OMEGA*(shar[tileaddr+NTH] + OMEGA*locX*locRho/2.0); // dmom = delta px
// dener = (shar[tileaddr]+dmom/2)*dmom/locRho;
shar[tileaddr+2*NTH] = shar[tileaddr] + dmom;
dmom = DT*OMEGA*(-shar[tileaddr] + OMEGA*locY*locRho/2.0); // dmom = delta py
// dener += (shar[tileaddr]+dmom/2)*dmom/locRho;
shar[tileaddr+3*NTH] = shar[tileaddr+NTH] + dmom;
// Now make full timestep update: Evaluate f' using f(t_half)
dmom = DT*OMEGA*(2*shar[tileaddr+3*NTH] + OMEGA*locX*locRho);
dener = (shar[tileaddr]+dmom/2)*dmom/locRho;
*px = shar[tileaddr] + dmom;
dmom = DT*OMEGA*(-2*shar[tileaddr+2*NTH] + OMEGA*locY*locRho);
dener += (shar[tileaddr+NTH]+dmom/2)*dmom/locRho;
*py = shar[tileaddr+NTH] + dmom;
// Change in energy is exactly the work done by force
// Is exactly (p^2 / 2 rho) after minus before
*E += dener;
rho += nx*BLOCKDIMY;
E += nx*BLOCKDIMY;
px += nx*BLOCKDIMY;
py += nx*BLOCKDIMY;
}
}
/* Simple kernel:
* Given in[0 ... (nodeN-1)], copies the segment in[partX0 ... (partX0 + partNX -1)] to out[0 ... (partNX-1)]
* and helpfully wraps addresses circularly
* invoke with gridDim.x * blockDim.x >= partNX
*/
__global__ void cukern_FetchPartitionSubset1D(double *in, int nodeN, double *out, int partX0, int partNX)
{
// calculate output address
int addrOut = threadIdx.x + blockDim.x * blockIdx.x;
if(addrOut >= partNX) return;
// Affine map back to input address
int addrIn = addrOut + partX0;
if(addrIn < 0) addrIn += partNX;
out[addrOut] = in[addrIn];
}
|
d50299b8522aea384f9b9bceb2ffb2316ede9a18.hip
|
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <stdio.h>
#include <stdexcept>
#include <hip/hip_runtime.h>
#include <math.h>
#include <device_launch_parameters.h>
#include "relulayer.h"
__global__ void ReluLayer_Forward_cu(double *previousLayerForward, double *out)
{
if (previousLayerForward[blockIdx.x] < 0)
{
out[blockIdx.x] = 0;
}
else
{
out[blockIdx.x] = previousLayerForward[blockIdx.x];
}
}
__global__ void ReluLayer_Backward_cu(double *forward, double* nextlayerBackward, double *out, double learnRate)
{
if (forward[blockIdx.x] <= 0)
{
out[blockIdx.x] = 0;
}
else
{
out[blockIdx.x] = nextlayerBackward[blockIdx.x];
}
}
void ReluLayer_Forward(double *previousLayerForward, double *output, int nodeCount)
{
hipLaunchKernelGGL(( ReluLayer_Forward_cu) , dim3(nodeCount), dim3(1) , 0, 0, previousLayerForward, output);
LayerSynchronize();
}
void ReluLayer_Backward(double *forward, double* nextlayerBackward, double *output, int nodeCount, double learnRate)
{
hipLaunchKernelGGL(( ReluLayer_Backward_cu) , dim3(nodeCount), dim3(1) , 0, 0, forward, nextlayerBackward, output, learnRate);
LayerSynchronize();
}
|
d50299b8522aea384f9b9bceb2ffb2316ede9a18.cu
|
#pragma once
#include <stdio.h>
#include <stdexcept>
#include <cuda_runtime.h>
#include <math.h>
#include <device_launch_parameters.h>
#include "relulayer.h"
__global__ void ReluLayer_Forward_cu(double *previousLayerForward, double *out)
{
if (previousLayerForward[blockIdx.x] < 0)
{
out[blockIdx.x] = 0;
}
else
{
out[blockIdx.x] = previousLayerForward[blockIdx.x];
}
}
__global__ void ReluLayer_Backward_cu(double *forward, double* nextlayerBackward, double *out, double learnRate)
{
if (forward[blockIdx.x] <= 0)
{
out[blockIdx.x] = 0;
}
else
{
out[blockIdx.x] = nextlayerBackward[blockIdx.x];
}
}
void ReluLayer_Forward(double *previousLayerForward, double *output, int nodeCount)
{
ReluLayer_Forward_cu <<<nodeCount, 1 >>>(previousLayerForward, output);
LayerSynchronize();
}
void ReluLayer_Backward(double *forward, double* nextlayerBackward, double *output, int nodeCount, double learnRate)
{
ReluLayer_Backward_cu <<<nodeCount, 1 >>>(forward, nextlayerBackward, output, learnRate);
LayerSynchronize();
}
|
fa1cac5e814cf80e206166bb912693c4c3268bed.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// -*- LSST-C++ -*-
/*
* LSST Data Management System
* Copyright 2008, 2009, 2010 LSST Corporation.
*
* This product includes software developed by the
* LSST Project (http://www.lsst.org/).
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the LSST License Statement and
* the GNU General Public License along with this program. If not,
* see <http://www.lsstcorp.org/LegalNotices/>.
*/
/**
* @file
*
* @brief GPU convolution code
*
* @author Kresimir Cosic
*
* @ingroup afw
*/
#define NVCC_COMPILING
#include <assert.h>
#include "lsst/afw/image/LsstImageTypes.h"
#include "lsst/afw/math/detail/convCUDA.h"
namespace lsst {
namespace afw {
namespace math {
namespace detail {
namespace gpu {
typedef unsigned char uint8;
extern __shared__ uint8 smem[];
typedef KerPixel dfloat;
namespace {
__host__ __device__
int CeilDivide(int num, int divisor) {
return (num + divisor - 1) / divisor;
}
/**
Loads a part of image to shared memory.
Can handle edges of image. Data outside the edge is not copied to shared memory.
@arg smemImg - pointer to destination in shared memory
@arg img - pointer to source in global memory
@arg imgW - width of img
@arg imgH - height of img
@arg x - x coordinate in img, top left corner of rectangle to be copied
@arg y - y coordinate in img, top left corner of rectangle to be copied
@arg simgPitchX - width of box to be copied
@arg simgPitchY - height of box to be copied
*/
template <typename InPixelT, typename ArithPixelT>
__device__ void LoadImageToSmem(ArithPixelT* smemImg, InPixelT* img, int imgW, int imgH,
int x, int y, int simgPitchX, int simgPitchY)
{
if (x + simgPitchX <= imgW && y + simgPitchY <= imgH) {
for (int i = 0; i < simgPitchY; i++) {
for (int j = threadIdx.x; j < simgPitchX; j += blockDim.x) {
smemImg[i*simgPitchX+j] = img[(i+y)*imgW+j+x];
}
}
} else {
for (int i = 0; i < simgPitchY; i++) {
for (int j = threadIdx.x; j < simgPitchX; j += blockDim.x) {
if ((i + y)*imgW + j + x < imgW * imgH) {
smemImg[i*simgPitchX+j] = img[(i+y)*imgW+j+x];
}
}
}
}
}
} //local namespace ends
/* =============================================================================
*
* SPATIAL FUNCTIONS: CHEBYSHEV AND POLYNOMIAL
*
*/
/// Calculates values of chebyshev function at location given by yCoeffs and y.
/// yCoeffs must contain values for given row
__device__ double ChebyshevLineY (
int order,
double y,
double* yCoeffs
)
{
if (order < 2)
return yCoeffs[0] + order * yCoeffs[1] * y ;
double CshM2 = yCoeffs[order];
double CshM1 = 2 * y * yCoeffs[order] + yCoeffs[order-1];
for (int i = order - 2; i > 0; i--) {
double CshNext = 2 * y * CshM1 + yCoeffs[i] - CshM2;
CshM2 = CshM1;
CshM1 = CshNext;
}
return y * CshM1 + yCoeffs[0] - CshM2;
}
/// Calculates values of polynomial function at location given by yCoeffs and y.
/// yCoeffs must contain values for given row
__device__ double PolynomialLineY (
int order,
double y,
double* yCoeffs
)
{
double retVal = yCoeffs[order];
for (int yCoeffInd = order - 1; yCoeffInd >= 0; --yCoeffInd) {
retVal = (retVal * y) + yCoeffs[yCoeffInd];
}
return retVal;
}
/// Calculates _yCoeffs for row given in xPrime.
/// xCheby must point to memory for temporary storage
__device__ void Chebyshev_T_of_X (
const int order,
double xPrime,
double* _params,
double* _yCoeffs, //output
double* _xCheby //temporary
)
{
if (threadIdx.x >= blockSizeX) return;
int paramN = (order + 1) * (order + 2) / 2;
/* Solve as follows:
- f(x,y) = Cy0 T0(y') + Cy1 T1(y') + Cy2 T2(y') + Cy3 T3(y') + ...
where:
Cy0 = P0 T0(x') + P1 T1(x') + P3 T2(x') + P6 T3(x') + ...
Cy1 = P2 T0(x') + P4 T1(x') + P7 T2(x') + ...
Cy2 = P5 T0(x') + P8 T1(x') + ...
Cy3 = P9 T0(x') + ...
...
First compute Tn(x') for each n
Then use that to compute Cy0, Cy1, ...Cyn
*/
// Compute _xCheby[i] = Ti(x') using the standard recurrence relationship;
_xCheby[0] = 1;
// note that _initialize already set _xCheby[0] = 1.
if (order > 0) {
_xCheby[1] = xPrime;
}
for (int xInd = 2; xInd <= order; ++xInd) {
_xCheby[xInd] = (2 * xPrime * _xCheby[xInd-1]) - _xCheby[xInd-2];
}
// Initialize _yCoeffs to right-hand terms of equation shown in documentation block
int paramInd = paramN - 1;
for (int yCoeffInd = order, xChebyInd = 0; yCoeffInd >= 0;
--yCoeffInd, ++xChebyInd, --paramInd) {
_yCoeffs[yCoeffInd] = _params[paramInd] * _xCheby[xChebyInd];
}
// Add the remaining terms to _yCoeffs (starting from _order-1 because _yCoeffs[_order] is done)
for (int startYCoeffInd = order - 1, yCoeffInd = startYCoeffInd, xChebyInd = 0;
paramInd >= 0; --paramInd) {
_yCoeffs[yCoeffInd] += _params[paramInd] * _xCheby[xChebyInd];
if (yCoeffInd == 0) {
--startYCoeffInd;
yCoeffInd = startYCoeffInd;
xChebyInd = 0;
} else {
--yCoeffInd;
++xChebyInd;
}
}
}
/// Calculates _yCoeffs for row given in xPrime.
/// xCheby must point to memory for temporary storage
__device__ void Polynomial_T_of_X (
const int order,
double x,
double* _params,
double* _yCoeffs //output
)
{
if (threadIdx.x >= blockSizeX) return;
int paramN = (order + 1) * (order + 2) / 2;
/* Solve as follows:
- f(x,y) = Cy0 + Cy1 y + Cy2 y^2 + Cy3 y^3 + ...
where:
Cy0 = P0 + P1 x + P3 x^2 + P6 x^3 + ...
Cy1 = P2 + P4 x + P7 x2 + ...
Cy2 = P5 + P8 x + ...
Cy3 = P9 + ...
...
First compute Cy0, Cy1...Cyn by solving 1-d polynomials in x in the usual way.
Then compute f(x,y) by solving the 1-d polynomial in y in the usual way.
*/
const int maxYCoeffInd = order;
int paramInd = paramN - 1;
// initialize the y coefficients
for (int yCoeffInd = maxYCoeffInd; yCoeffInd >= 0; --yCoeffInd, --paramInd) {
_yCoeffs[yCoeffInd] = _params[paramInd];
}
// finish computing the y coefficients
for (int startYCoeffInd = maxYCoeffInd - 1, yCoeffInd = startYCoeffInd;
paramInd >= 0; --paramInd) {
_yCoeffs[yCoeffInd] = (_yCoeffs[yCoeffInd] * x) + _params[paramInd];
if (yCoeffInd == 0) {
--startYCoeffInd;
yCoeffInd = startYCoeffInd;
} else {
--yCoeffInd;
}
}
}
__global__ void ChebyshevImageValues(
double* out, int outW, int outH,
int order,
double* params,
double* rowPos,
double* colPos,
double minX, double minY, double maxX, double maxY
)
{
const double scaleX = 2.0 / (maxX - minX);
const double scaleY = 2.0 / (maxY - minY);
const double offsetX = -(minX + maxX) * 0.5;
const double offsetY = -(minY + maxY) * 0.5;
const int coeffN = order + 1;
double* smemDbl = (double*)smem;
const int coeffPadding = coeffN + 1 - (coeffN % 2);
double* yCoeffsAll = smemDbl ;
double* xChebyAll = yCoeffsAll + (coeffPadding * blockSizeX);
double* smemParams = xChebyAll + (coeffPadding * blockSizeX);
int yLineGroup = threadIdx.x % blockSizeX;
double* yCoeffs = yCoeffsAll + (coeffPadding * yLineGroup);
double* xCheby = xChebyAll + (coeffPadding * yLineGroup);
//load params to shared memory
int paramN = (order + 1) * (order + 2) / 2;
for(int i = threadIdx.x; i < paramN; i += blockDim.x)
smemParams[i] = params[i];
int totalBlocks = CeilDivide(outW, blockSizeX);
int totalPixelsInBlock = blockSizeX * outH;
for (int blkI = blockIdx.x; blkI < totalBlocks; blkI += gridDim.x)
{
int blkX = blkI * blockSizeX;
int blkY = 0;
int curPixelX = threadIdx.x % blockSizeX;
int curPixelY = threadIdx.x / blockSizeX;
int outPixelX = blkX + curPixelX;
const int x = colPos[outPixelX];
const double xPrime = (x + offsetX) * scaleX;
__syncthreads();
Chebyshev_T_of_X(order, xPrime, smemParams, yCoeffs, xCheby);
__syncthreads();
for(int curPixel = threadIdx.x; curPixel < totalPixelsInBlock; curPixel += blockDim.x)
{
int outPixelY = blkY + curPixelY;
if (outPixelX >= outW || outPixelY >= outH) continue;
const int y = rowPos[outPixelY];
const double yPrime = (y + offsetY) * scaleY;
out[outPixelY*outW + outPixelX] = ChebyshevLineY(order, yPrime, yCoeffs);
curPixelY += blockDim.x / blockSizeX;
}
}
}
__global__ void PolynomialImageValues(
double* out, int outW, int outH,
int order,
double* params,
double* rowPos,
double* colPos
)
{
const int coeffN = order + 1;
double* smemDbl = (double*)smem;
const int coeffPadding = coeffN + 1 - (coeffN % 2);
double* yCoeffsAll = smemDbl ;
double* smemParams = yCoeffsAll + (coeffPadding * blockSizeX);
int yLineGroup = threadIdx.x % blockSizeX;
double* yCoeffs = yCoeffsAll + (coeffPadding * yLineGroup);
//load params to shared memory
int paramN = (order + 1) * (order + 2) / 2;
for(int i = threadIdx.x; i < paramN; i += blockDim.x)
smemParams[i] = params[i];
int totalBlocks = CeilDivide(outW, blockSizeX);
int totalPixelsInBlock = blockSizeX * outH;
for (int blkI = blockIdx.x; blkI < totalBlocks; blkI += gridDim.x)
{
int blkX = blkI * blockSizeX;
int blkY = 0;
int curPixelX = threadIdx.x % blockSizeX;
int curPixelY = threadIdx.x / blockSizeX;
int outPixelX = blkX + curPixelX;
const int x = colPos[outPixelX];
__syncthreads();
Polynomial_T_of_X(order, x, smemParams, yCoeffs);
__syncthreads();
for(int curPixel = threadIdx.x; curPixel < totalPixelsInBlock; curPixel += blockDim.x)
{
int outPixelY = blkY + curPixelY;
if (outPixelX >= outW || outPixelY >= outH) continue;
const int y = rowPos[outPixelY];
out[outPixelY*outW + outPixelX] = PolynomialLineY(order, y, yCoeffs);
curPixelY += blockDim.x / blockSizeX;
}
}
}
/// calculates values of Chebyshev function for every pixel in the image
void Call_ChebyshevImageValues(
double* out, int outW, int outH,
int order,
double* params,
double* rowPos,
double* colPos,
double minX, double minY, double maxX, double maxY,
int sharedMemorySize
)
{
dim3 block(256);
dim3 grid(CeilDivide(outW, blockSizeX));
hipLaunchKernelGGL(( ChebyshevImageValues) , dim3(grid), dim3(block), sharedMemorySize , 0,
out, outW, outH, order, params,
rowPos, colPos,
minX, minY, maxX, maxY
);
}
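/* Sizing sketch (illustrative): the sharedMemorySize argument must cover the
 * kernel's shared-memory layout above -- yCoeffsAll and xChebyAll (coeffPadding
 * doubles per y-line group, blockSizeX groups) plus the paramN spatial
 * parameters. This helper restates that layout; it is an inference from the
 * kernel, not part of the original API. */
#if 0
int ChebyshevSharedMemorySizeSketch(int order)
{
    int coeffN = order + 1;
    int coeffPadding = coeffN + 1 - (coeffN % 2);   // pad coefficient rows to an even count
    int paramN = (order + 1) * (order + 2) / 2;     // triangular number of spatial parameters
    return (2 * coeffPadding * blockSizeX + paramN) * (int)sizeof(double);
}
#endif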
/// calculates values of polynomial function for every pixel in the image
void Call_PolynomialImageValues(
double* out, int outW, int outH,
int order,
double* params,
double* rowPos,
double* colPos,
int sharedMemorySize
)
{
dim3 block(256);
dim3 grid(CeilDivide(outW, blockSizeX));
hipLaunchKernelGGL(( PolynomialImageValues) , dim3(grid), dim3(block), sharedMemorySize , 0,
out, outW, outH, order, params,
rowPos, colPos
);
}
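/* Sizing note (illustrative, inferred from PolynomialImageValues above): the
 * polynomial kernel keeps only yCoeffsAll and smemParams in shared memory, so
 * sharedMemorySize must be at least
 * (coeffPadding*blockSizeX + paramN)*sizeof(double), with coeffPadding and
 * paramN computed exactly as in the Chebyshev sizing sketch above. */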
__global__ void NormalizationImageValues(
double* out, int outW, int outH,
double** sFn, int n,
double* kernelSum, bool* isDivideByZeroGPU
)
{
double* smemDbl = (double*)smem;
double* smemKernelSum = smemDbl ;
double** smemSfnPtr = (double**) (smemKernelSum + n);
//load kernel sums into shared memory
for(int i = threadIdx.x; i < n; i += blockDim.x) {
smemKernelSum[i] = kernelSum[i];
}
//load pointers to sFn values into shared memory
for(int i = threadIdx.x; i < n; i += blockDim.x) {
smemSfnPtr[i] = sFn[i];
}
__syncthreads();
int totalPixels = outW * outH;
for(int curPixel = threadIdx.x; curPixel < totalPixels; curPixel += blockDim.x)
{
//int outPixelX=curPixel%outW;
//int outPixelY=curPixel/outW;
double sum = 0;
for (int i = 0; i < n; i++) {
sum += smemSfnPtr[i][curPixel] * smemKernelSum[i];
}
if (sum == 0) *isDivideByZeroGPU = true;
else out[curPixel] = 1.0 / sum;
}
}
/// calculates normalization values for every pixel in the image
void Call_NormalizationImageValues(
double* out, int outW, int outH,
double** sFn, int n,
double* kernelSum,
bool* isDivideByZeroGPU,
int blockN,
int sharedMemorySize
)
{
dim3 block(256);
dim3 grid(blockN);
hipLaunchKernelGGL(( NormalizationImageValues) , dim3(grid), dim3(block), sharedMemorySize , 0,
out, outW, outH,
sFn, n,
kernelSum,
isDivideByZeroGPU
);
}
/* =============================================================================
*
* MULTIPLE SPATIALLY INVARIANT KERNELS
* (single input image, multiple kernels, multiple output images)
*
* USED FOR:
* - spatially invariant kernel (image and variance planes)
* - linear combination kernel (image plane)
*
*/
//#define SumUpPixelProduct(n) sum+=pixLine[n] * filtLine[n];
#define SumUpPixelProduct(n) if (filtLine[n]!=0) sum+=pixLine[n] * filtLine[n];
#define SumUpPixelProductX4(n) \
SumUpPixelProduct(n) \
SumUpPixelProduct(n+1) \
SumUpPixelProduct(n+2) \
SumUpPixelProduct(n+3)
#if 0 //simpler but slower version of ApplyFilterOnce (without unrolling)
/**
Convolves filter in smemfilt with part of image loaded at start of shared memory.
Convolves only one pixel, given by curPixelX and curPixelY, of image in shared memory.
*/
__device__ dfloat ApplyFilterOnce(
dfloat* smemFilt, int filtW, int filtH,
int curPixelX, int curPixelY, int simgPitchX
)
{
dfloat* smemImg = (dfloat*)smem;
dfloat totalSum = 0;
dfloat* pixLine = &smemImg[curPixelY*simgPitchX+curPixelX];
dfloat* filtLine = smemFilt;
int pixLineAdd = simgPitchX - filtW;
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
#pragma unroll 4
for (int x = 0; x < filtW; x++) {
if (*filtLine != 0) {
sum += *pixLine * *filtLine;
}
pixLine++;
filtLine++;
}
pixLine += pixLineAdd;
totalSum += sum;
}
return totalSum;
}
#else //unrolled version of ApplyFilterOnce
/**
Convolves filter in smemfilt with part of image loaded at start of shared memory.
Convolves only one pixel, given by curPixelX and curPixelY, of image in shared memory.
*/
__device__ dfloat ApplyFilterOnce(
dfloat* smemFilt, int filtW, int filtH,
int curPixelX, int curPixelY, int simgPitchX
)
{
dfloat* smemImg = (dfloat*)smem;
dfloat totalSum = 0;
dfloat* pixLineOrig = &smemImg[curPixelY*simgPitchX+curPixelX];
dfloat* filtLineOrig = smemFilt;
int remainingFiltW = filtW;
int pixLineAdd = simgPitchX;
int procWidth;
if (remainingFiltW >= 12) {
procWidth = 24;
while (remainingFiltW >= procWidth) {
dfloat* pixLine = pixLineOrig;
dfloat* filtLine = filtLineOrig;
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProductX4(4)
SumUpPixelProductX4(8)
SumUpPixelProductX4(12)
SumUpPixelProductX4(16)
SumUpPixelProductX4(20)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
remainingFiltW -= procWidth;
pixLineOrig += procWidth;
filtLineOrig += procWidth;
}
procWidth = 12;
if (remainingFiltW >= procWidth) {
dfloat* pixLine = pixLineOrig;
dfloat* filtLine = filtLineOrig;
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProductX4(4)
SumUpPixelProductX4(8)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
remainingFiltW -= procWidth;
pixLineOrig += procWidth;
filtLineOrig += procWidth;
}
if (remainingFiltW == 0) return totalSum;
}
dfloat* pixLine = pixLineOrig;
dfloat* filtLine = filtLineOrig;
if (remainingFiltW < 4) {
if (remainingFiltW == 1)
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProduct(0)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
else if (remainingFiltW == 2)
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProduct(0)
SumUpPixelProduct(1)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
else if (remainingFiltW == 3)
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProduct(0)
SumUpPixelProduct(1)
SumUpPixelProduct(2)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
return totalSum;
}
if (remainingFiltW < 9) {
if (remainingFiltW == 4) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
} else if (remainingFiltW == 5) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProduct(4)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
} else if (remainingFiltW == 6) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProduct(4)
SumUpPixelProduct(5)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
} else if (remainingFiltW == 7) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProduct(4)
SumUpPixelProduct(5)
SumUpPixelProduct(6)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
} else if (remainingFiltW == 8) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProductX4(4)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
}
return totalSum;
}
if (remainingFiltW == 9) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProductX4(4)
SumUpPixelProduct(8)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
} else if (remainingFiltW == 10) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProductX4(4)
SumUpPixelProduct(8)
SumUpPixelProduct(9)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
} else if (remainingFiltW == 11) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProductX4(4)
SumUpPixelProduct(8)
SumUpPixelProduct(9)
SumUpPixelProduct(10)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
}
return totalSum;
}
#endif
template <typename OutPixelT, typename InPixelT>
__global__ void SpatiallyInvariantImgConvolutionKernel(
InPixelT* img, int imgW, int imgH,
dfloat* allFilt, int filtN,
int filtW, int filtH,
OutPixelT** result
)
{
const int outW = imgW - filtW + 1;
const int outH = imgH - filtH + 1;
int simgPitchX = blockSizeX + filtW - 1;
int simgPitchY = blockSizeY + filtH - 1;
/*int simgSize=simgPitchX*simgPitchY;
dfloat* smemFiltBeg=(dfloat*)smem + simgSize;
for (int filtI=filtStart; filtI<filtStart+filtN; filtI++) {
dfloat* smemFilt=smemFiltBeg + (filtW*filtH)*(filtI-filtStart);
for(int i=threadIdx.x; i<filtW*filtH; i+=blockDim.x)
smemFilt[i]=filt[filtI][i];
}*/
int blockNX = CeilDivide(outW, blockSizeX);
int blockNY = CeilDivide(outH, blockSizeY);
int totalBlocks = blockNX * blockNY;
int totalPixelsInBlock = blockSizeX * blockSizeY;
for (int blkI = blockIdx.x; blkI < totalBlocks; blkI += gridDim.x)
{
int blkX = blkI % blockNX;
int blkY = blkI / blockNX;
int x = blkX * blockSizeX;
int y = blkY * blockSizeY;
__syncthreads();
LoadImageToSmem((dfloat*) smem, img, imgW, imgH, x, y, simgPitchX, simgPitchY);
__syncthreads();
int curPixelX = threadIdx.x % blockSizeX;
int curPixelY = threadIdx.x / blockSizeX;
for(int curPixel = threadIdx.x; curPixel < totalPixelsInBlock; curPixel += blockDim.x)
{
int outPixelX = x + curPixelX;
int outPixelY = y + curPixelY;
if (outPixelX >= outW || outPixelY >= outH) continue;
for (int filtI = 0; filtI < filtN; filtI++) {
dfloat* filtPtr = &allFilt[filtI*filtW*filtH];
//dfloat* smemFilt=smemFiltBeg + (filtW*filtH)*(filtI-filtStart);
//dfloat sum = ApplyFilterOnce(smemFilt, filtW, filtH, curPixelX, curPixelY, simgPitchX);
dfloat sum = ApplyFilterOnce(filtPtr, filtW, filtH, curPixelX, curPixelY, simgPitchX);
OutPixelT* curResultImg = result[filtI];
curResultImg[outPixelY*outW + outPixelX] = OutPixelT(sum);
}
curPixelX += blockDim.x;
while (curPixelX >= blockSizeX) {
curPixelX -= blockSizeX;
curPixelY++;
}
}
}
}
template <typename OutPixelT, typename InPixelT>
void Call_SpatiallyInvariantImageConvolutionKernel(
InPixelT* inImageGPU, int inImageWidth, int inImageHeight,
KerPixel* allKernelsGPU, int kernelTotalN,
int kernelW, int kernelH,
OutPixelT* outImageGPU[],
int blockN,
int sharedMemorySize
)
{
dim3 block(256);
dim3 grid(blockN);
hipLaunchKernelGGL(( SpatiallyInvariantImgConvolutionKernel<OutPixelT, InPixelT>) , dim3(grid), dim3(block), sharedMemorySize , 0,
inImageGPU, inImageWidth, inImageHeight,
allKernelsGPU, kernelTotalN,
kernelW, kernelH,
outImageGPU
);
}
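/* Illustrative sketch of how this wrapper might be invoked. The buffer names
   (inImgGPU, basisKernelsGPU, outImgPtrsGPU) and the value of blockN are hypothetical;
   blockSizeX/blockSizeY come from convCUDA.h. The kernel caches an image tile of
   (blockSizeX + kernelW - 1) x (blockSizeY + kernelH - 1) dfloat values in shared
   memory, so sharedMemorySize must cover at least that:

       int simgPitchX = blockSizeX + kernelW - 1;
       int simgPitchY = blockSizeY + kernelH - 1;
       int sharedMemorySize = simgPitchX * simgPitchY * sizeof(dfloat);
       Call_SpatiallyInvariantImageConvolutionKernel<float, float>(
           inImgGPU, imgW, imgH,        // input image already on the GPU
           basisKernelsGPU, kernelN,    // kernelN kernels packed one after another
           kernelW, kernelH,
           outImgPtrsGPU,               // device array of kernelN output-image pointers
           blockN, sharedMemorySize);
*/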
#define INSTANTIATE_SpatiallyInvariantImageConvolutionKernel(OutPixelT,InPixelT) \
template void Call_SpatiallyInvariantImageConvolutionKernel<OutPixelT,InPixelT>( \
InPixelT* inImageGPU, int inImageWidth, int inImageHeight, \
KerPixel* allKernelsGPU, int kernelTotalN, \
int kernelW, int kernelH, \
OutPixelT* outImageGPU[], \
int blockN, \
int sharedMemorySize \
);
/* =============================================================================
*
* LINEAR COMBINATION KERNEL - IMAGE
*
*/
namespace {
/**
Convolves the calculated filter with the part of the image loaded at the start of shared memory.
Convolves only one pixel, given by curPixelX and curPixelY, of the image in shared memory.
The input image block should be placed at the start of shared memory.
@arg allFilt - pointer to all basis filters (placed sequentially)
@arg filtN - number of filters
@arg sfVal - values of the spatial functions for each filter at the current pixel
@arg normVal - normalization coefficient
@arg simgPitchX - row pitch (width) of the image block in shared memory
@return result of convolving the image data
*/
__device__ dfloat ApplyFilterOnceLCImg(
dfloat* allFilt, int filtN, int filtW, int filtH,
double sfVal[],
double normVal,
int curPixelX, int curPixelY, int simgPitchX
)
{
dfloat* smemImg = (dfloat*)smem;
dfloat totalSum = 0;
dfloat* pixLine = &smemImg[curPixelY*simgPitchX+curPixelX];
int pixLineAdd = simgPitchX - filtW;
int kernelSize = filtW * filtH;
int filtRemainder = filtN % 3;
for (int y = 0; y < filtH; y++) {
dfloat sum = 0;
for (int x = 0; x < filtW; x++) {
dfloat* filtLine = allFilt + y * filtW + x;
dfloat filtVal;
filtVal = *filtLine * sfVal[0];
filtLine += kernelSize;
int filtI = 1;
if (filtRemainder == 2) {
filtVal += *filtLine * sfVal[1];
filtLine += kernelSize;
filtI = 2;
} else if (filtRemainder == 0) {
filtVal += *filtLine * sfVal[1];
filtLine += kernelSize;
filtVal += *filtLine * sfVal[2];
filtLine += kernelSize;
filtI = 3;
}
while(filtI < filtN) {
filtVal += *filtLine * sfVal[filtI];
filtLine += kernelSize;
filtI++;
filtVal += *filtLine * sfVal[filtI];
filtLine += kernelSize;
filtI++;
filtVal += *filtLine * sfVal[filtI];
filtLine += kernelSize;
filtI++;
}
filtVal *= normVal;
if (filtVal != 0) {
sum += *pixLine * filtVal;
}
pixLine++;
}
pixLine += pixLineAdd;
totalSum += sum;
}
return totalSum;
}
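/* Note: for every kernel tap (x, y) the loop above evaluates the spatially varying
   filter value on the fly as
       filtVal(x, y) = normVal * sum_i sfVal[i] * allFilt[i*filtW*filtH + y*filtW + x]
   i.e. a linear combination of the basis kernels weighted by the spatial-function
   values at the output pixel; the filtRemainder bookkeeping only unrolls that sum
   in steps of three. */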
} //local namespace ends
template <typename OutPixelT, typename InPixelT>
__global__ void ConvolutionKernel_LC_Img(
InPixelT* img, int imgW, int imgH,
dfloat* filt, int filtN,
int filtW, int filtH,
double** sfValImg,
double* norm,
OutPixelT* out
)
{
//Asserts that : blockDim.x is divisible by blockSizeX
const int outW = imgW - filtW + 1;
const int outH = imgH - filtH + 1;
int simgPitchX = blockSizeX + filtW - 1;
int simgPitchY = blockSizeY + filtH - 1;
/*int simgSize=simgPitchX*simgPitchY;
dfloat* smemFiltBeg=(dfloat*)smem + simgSize;
for (int filtI=filtStart; filtI<filtStart+filtN; filtI++) {
dfloat* smemFilt=smemFiltBeg + (filtW*filtH)*(filtI-filtStart);
for(int i=threadIdx.x; i<filtW*filtH; i+=blockDim.x)
smemFilt[i]=filt[filtI][i];
}*/
int blockNX = CeilDivide(outW, blockSizeX);
int blockNY = CeilDivide(outH, blockSizeY);
int totalBlocks = blockNX * blockNY;
int totalPixelsInBlock = blockSizeX * blockSizeY;
for (int blkI = blockIdx.x; blkI < totalBlocks; blkI += gridDim.x)
{
int blkX = blkI % blockNX;
int blkY = blkI / blockNX;
int x = blkX * blockSizeX;
int y = blkY * blockSizeY;
__syncthreads();
LoadImageToSmem((dfloat*) smem, img, imgW, imgH, x, y, simgPitchX, simgPitchY);
__syncthreads();
int curPixelX = threadIdx.x % blockSizeX;
int curPixelY = threadIdx.x / blockSizeX;
for(int curPixel = threadIdx.x; curPixel < totalPixelsInBlock; curPixel += blockDim.x)
{
int outPixelX = x + curPixelX;
int outPixelY = y + curPixelY;
int outAddr = outPixelY * outW + outPixelX;
if (outPixelX >= outW || outPixelY >= outH) continue;
double sfVal[maxGpuSfCount];
for (int filtI = 0; filtI < filtN; filtI++)
sfVal[filtI] = sfValImg[filtI][outAddr];
double normVal = 1;
if (norm != NULL)
normVal = norm[outAddr];
double sum = ApplyFilterOnceLCImg(filt, filtN, filtW, filtH,
sfVal, normVal, curPixelX, curPixelY, simgPitchX);
out[outAddr] = OutPixelT(sum);
curPixelY += blockDim.x / blockSizeX;
}
}
}
template <typename OutPixelT, typename InPixelT>
void Call_ConvolutionKernel_LC_Img(
InPixelT* inImageGPU, int inImageWidth, int inImageHeight,
KerPixel* kernelGPU, int kernelTotalN,
int kernelW, int kernelH,
double* sfValGPU[],
double* normGPU,
OutPixelT* outImageGPU,
int blockN,
int sharedMemorySize
)
{
dim3 block(256);
dim3 grid(blockN);
hipLaunchKernelGGL(( ConvolutionKernel_LC_Img) , dim3(grid), dim3(block), sharedMemorySize , 0,
inImageGPU, inImageWidth, inImageHeight,
kernelGPU, kernelTotalN,
kernelW, kernelH,
sfValGPU,
normGPU,
outImageGPU
);
}
#define INSTANTIATE_ConvolutionKernel_LC_Img(OutPixelT,InPixelT) \
template void Call_ConvolutionKernel_LC_Img<OutPixelT,InPixelT>( \
InPixelT* inImageGPU, int inImageWidth, int inImageHeight, \
KerPixel* kernelGPU, int kernelTotalN, \
int kernelW, int kernelH, \
double* sfValGPU[], \
double* normGPU, \
OutPixelT* outImageGPU, \
int blockN, \
int sharedMemorySize \
);
/* =============================================================================
*
* LINEAR COMBINATION KERNEL - VARIANCE AND MASK
*
*/
namespace {
/**
Convolves the calculated filter with the part of the variance image loaded at the start of shared memory.
Convolves only one pixel, given by curPixelX and curPixelY, of the image in shared memory.
The input variance block should be placed at the start of shared memory.
@arg smemMsk - pointer to the input image mask data
@arg mskSum - output parameter, result of convolving the mask data
@arg allFilt - pointer to all basis filters (placed sequentially)
@arg filtN - number of filters
@arg sfVal - values of the spatial functions for each filter at the current pixel
@arg normVal - normalization coefficient
@arg simgPitchX - row pitch (width) of both the variance block and the mask block in shared memory
@return result of convolving the variance data
*/
__device__ dfloat ApplyFilterOnceLCVar(
MskPixel* smemMsk,
MskPixel& mskSum,
dfloat* allFilt, int filtN, int filtW, int filtH,
double sfVal[],
double normVal,
int curPixelX, int curPixelY, int simgPitchX
)
{
dfloat* smemImg = (dfloat*)smem;
dfloat totalSum = 0;
dfloat* pixLine = &smemImg[curPixelY*simgPitchX+curPixelX];
MskPixel* pixMskLine = &smemMsk[curPixelY*simgPitchX+curPixelX];
int pixLineAdd = simgPitchX - filtW;
int kernelSize = filtW * filtH;
mskSum = 0;
int filtRemainder = filtN % 3;
for (int y = 0; y < filtH; y++) {
dfloat sum = 0;
for (int x = 0; x < filtW; x++) {
dfloat* filtLine = allFilt + y * filtW + x;
dfloat filtVal = *filtLine * sfVal[0];
filtLine += kernelSize;
int filtI = 1;
if (filtRemainder == 2) {
filtVal += *filtLine * sfVal[1];
filtLine += kernelSize;
filtI = 2;
} else if (filtRemainder == 0) {
filtVal += *filtLine * sfVal[1];
filtLine += kernelSize;
filtVal += *filtLine * sfVal[2];
filtLine += kernelSize;
filtI = 3;
}
while(filtI < filtN) {
filtVal += *filtLine * sfVal[filtI];
filtLine += kernelSize;
filtI++;
filtVal += *filtLine * sfVal[filtI];
filtLine += kernelSize;
filtI++;
filtVal += *filtLine * sfVal[filtI];
filtLine += kernelSize;
filtI++;
}
filtVal *= normVal;
if (filtVal != 0) {
sum += *pixLine * (filtVal * filtVal);
mskSum |= *pixMskLine;
}
pixLine++;
pixMskLine++;
}
pixLine += pixLineAdd;
pixMskLine += pixLineAdd;
totalSum += sum;
}
return totalSum;
}
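/* Note: the variance plane is propagated with the squared filter value
   (sum += variance * filtVal * filtVal), while the mask plane is OR-accumulated
   (mskSum |= mask) over every tap whose combined filter value is non-zero, so the
   output mask is the union of the input mask bits under the kernel footprint. */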
} //local namespace ends
__global__ void ConvolutionKernel_LC_Var(
VarPixel* img, int imgW, int imgH,
MskPixel* inMsk,
dfloat* filt, int filtN,
int filtW, int filtH,
double** sfValImg,
double* norm,
VarPixel* outVar,
MskPixel* outMsk
)
{
//Asserts that : blockDim.x is divisible by blockSizeX
const int outW = imgW - filtW + 1;
const int outH = imgH - filtH + 1;
int simgPitchX = blockSizeX + filtW - 1;
int simgPitchY = blockSizeY + filtH - 1;
int simgSize = simgPitchX * simgPitchY;
MskPixel* smemMsk = (MskPixel*)((dfloat*)smem + simgSize);
/*dfloat* smemFiltBeg=(dfloat*)smem + simgSize;
for (int filtI=filtStart; filtI<filtStart+filtN; filtI++) {
dfloat* smemFilt=smemFiltBeg + (filtW*filtH)*(filtI-filtStart);
for(int i=threadIdx.x; i<filtW*filtH; i+=blockDim.x)
smemFilt[i]=filt[filtI][i];
}*/
int blockNX = CeilDivide(outW, blockSizeX);
int blockNY = CeilDivide(outH, blockSizeY);
int totalBlocks = blockNX * blockNY;
int totalPixelsInBlock = blockSizeX * blockSizeY;
for (int blkI = blockIdx.x; blkI < totalBlocks; blkI += gridDim.x)
{
int blkX = blkI % blockNX;
int blkY = blkI / blockNX;
int x = blkX * blockSizeX;
int y = blkY * blockSizeY;
__syncthreads();
LoadImageToSmem((dfloat*) smem, img, imgW, imgH, x, y, simgPitchX, simgPitchY);
LoadImageToSmem( smemMsk, inMsk, imgW, imgH, x, y, simgPitchX, simgPitchY);
__syncthreads();
int curPixelX = threadIdx.x % blockSizeX;
int curPixelY = threadIdx.x / blockSizeX;
for(int curPixel = threadIdx.x; curPixel < totalPixelsInBlock; curPixel += blockDim.x)
{
int outPixelX = x + curPixelX;
int outPixelY = y + curPixelY;
int outAddr = outPixelY * outW + outPixelX;
if (outPixelX >= outW || outPixelY >= outH) continue;
double sfVal[maxGpuSfCount];
for (int filtI = 0; filtI < filtN; filtI++) {
sfVal[filtI] = sfValImg[filtI][outAddr];
}
double normVal = 1;
if (norm != NULL) normVal = norm[outAddr];
MskPixel mskSum;
dfloat sum = ApplyFilterOnceLCVar(smemMsk, mskSum, filt, filtN, filtW, filtH,
sfVal, normVal, curPixelX, curPixelY, simgPitchX);
outVar[outAddr] = sum;
outMsk[outAddr] = mskSum;
curPixelY += blockDim.x / blockSizeX;
}
}
}
void Call_ConvolutionKernel_LC_Var(
VarPixel* inImageGPU, int inImageWidth, int inImageHeight,
MskPixel* inMskGPU,
KerPixel* kernelGPU, int kernelTotalN,
int kernelW, int kernelH,
double* sfValGPU[],
double* normGPU,
VarPixel* outImageGPU,
MskPixel* outMskGPU,
int blockN,
int sharedMemorySize
)
{
dim3 block(256);
dim3 grid(blockN);
hipLaunchKernelGGL(( ConvolutionKernel_LC_Var) , dim3(grid), dim3(block), sharedMemorySize , 0,
inImageGPU, inImageWidth, inImageHeight,
inMskGPU,
kernelGPU, kernelTotalN,
kernelW, kernelH,
sfValGPU,
normGPU,
outImageGPU,
outMskGPU
);
}
/* =============================================================================
*
* SPATIALLY INVARIANT KERNEL - MASK PLANE
*
*/
namespace {
//#define SumUpPixelProductMask(n) sum |=pixLine[n];
#define SumUpPixelProductMask(n) if (filtLine[n]!=0) sum |=pixLine[n] ;
#define SumUpPixelProductMaskX4(n) \
SumUpPixelProductMask(n) \
SumUpPixelProductMask(n+1) \
SumUpPixelProductMask(n+2) \
SumUpPixelProductMask(n+3)
#if 0 //simpler but slower version of MaskApplyFilterOnce (without unrolling)
/**
Convolves the filter in smemFilt with the part of the image mask loaded at the start of shared memory.
Convolves only one pixel, given by curPixelX and curPixelY, of the image mask in shared memory.
*/
__device__ MskPixel MaskApplyFilterOnce(
dfloat* smemFilt, int filtW, int filtH,
int curPixelX, int curPixelY, int simgPitchX
)
{
MskPixel* smemImg = (MskPixel*)smem;
MskPixel totalSum = 0;
MskPixel* pixLine = &smemImg[curPixelY*simgPitchX+curPixelX];
dfloat* filtLine = smemFilt;
int pixLineAdd = simgPitchX - filtW;
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
#pragma unroll 4
for (int x = 0; x < filtW; x++) {
if (*filtLine != 0) {
sum |= *pixLine;
}
pixLine++;
filtLine++;
}
pixLine += pixLineAdd;
totalSum |= sum;
}
return totalSum;
}
#else //unrolled version of MaskApplyFilterOnce
/**
Convolves the filter in smemFilt with the part of the image mask loaded at the start of shared memory.
Convolves only one pixel, given by curPixelX and curPixelY, of the image mask in shared memory.
*/
__device__ MskPixel MaskApplyFilterOnce(
dfloat* smemFilt, int filtW, int filtH,
int curPixelX, int curPixelY, int simgPitchX
)
{
MskPixel* smemImg = (MskPixel*)smem;
MskPixel totalSum = 0;
MskPixel* pixLineOrig = &smemImg[curPixelY*simgPitchX+curPixelX];
dfloat* filtLineOrig = smemFilt;
int remainingFiltW = filtW;
int pixLineAdd = simgPitchX;
int procWidth;
if (remainingFiltW >= 12) {
procWidth = 24;
while (remainingFiltW >= procWidth) {
MskPixel* pixLine = pixLineOrig;
dfloat* filtLine = filtLineOrig;
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMaskX4(4)
SumUpPixelProductMaskX4(8)
SumUpPixelProductMaskX4(12)
SumUpPixelProductMaskX4(16)
SumUpPixelProductMaskX4(20)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
remainingFiltW -= procWidth;
pixLineOrig += procWidth;
filtLineOrig += procWidth;
}
procWidth = 12;
if (remainingFiltW >= procWidth) {
MskPixel* pixLine = pixLineOrig;
dfloat* filtLine = filtLineOrig;
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMaskX4(4)
SumUpPixelProductMaskX4(8)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
remainingFiltW -= procWidth;
pixLineOrig += procWidth;
filtLineOrig += procWidth;
}
if (remainingFiltW == 0) return totalSum;
}
MskPixel* pixLine = pixLineOrig;
dfloat* filtLine = filtLineOrig;
if (remainingFiltW < 4) {
if (remainingFiltW == 1) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMask(0)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 2) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMask(0)
SumUpPixelProductMask(1)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 3) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMask(0)
SumUpPixelProductMask(1)
SumUpPixelProductMask(2)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
}
return totalSum;
}
if (remainingFiltW < 9) {
if (remainingFiltW == 4) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 5) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMask(4)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 6) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMask(4)
SumUpPixelProductMask(5)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 7) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMask(4)
SumUpPixelProductMask(5)
SumUpPixelProductMask(6)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 8) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMaskX4(4)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
}
return totalSum;
}
if (remainingFiltW == 9) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMaskX4(4)
SumUpPixelProductMask(8)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 10) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMaskX4(4)
SumUpPixelProductMask(8)
SumUpPixelProductMask(9)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 11) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMaskX4(4)
SumUpPixelProductMask(8)
SumUpPixelProductMask(9)
SumUpPixelProductMask(10)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
}
return totalSum;
}
#endif
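/* Note: for the mask plane the "convolution" is purely a bitwise OR of the input
   mask pixels lying under non-zero kernel taps (a dilation of the mask by the kernel
   footprint); the kernel values themselves only decide which taps participate. */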
} //local namespace ends
__global__ void SpatiallyInvariantMaskConvolutionKernel(
MskPixel* img, int imgW, int imgH,
dfloat* allFilt, int filtN,
int filtW, int filtH,
MskPixel** result
)
{
const int outW = imgW - filtW + 1;
const int outH = imgH - filtH + 1;
int simgPitchX = blockSizeX + filtW - 1;
int simgPitchY = blockSizeY + filtH - 1;
/*int simgSize=simgPitchX*simgPitchY;
dfloat* smemFiltBeg=(dfloat*)smem + simgSize;
for (int filtI=filtStart; filtI<filtStart+filtN; filtI++) {
dfloat* smemFilt=smemFiltBeg + (filtW*filtH)*(filtI-filtStart);
for(int i=threadIdx.x; i<filtW*filtH; i+=blockDim.x)
smemFilt[i]=filt[filtI][i];
}*/
int blockNX = CeilDivide(outW, blockSizeX);
int blockNY = CeilDivide(outH, blockSizeY);
int totalBlocks = blockNX * blockNY;
int totalPixelsInBlock = blockSizeX * blockSizeY;
for (int blkI = blockIdx.x; blkI < totalBlocks; blkI += gridDim.x)
{
int blkX = blkI % blockNX;
int blkY = blkI / blockNX;
int x = blkX * blockSizeX;
int y = blkY * blockSizeY;
__syncthreads();
LoadImageToSmem((MskPixel*) smem, img, imgW, imgH, x, y, simgPitchX, simgPitchY);
__syncthreads();
int curPixelX = threadIdx.x % blockSizeX;
int curPixelY = threadIdx.x / blockSizeX;
for(int curPixel = threadIdx.x; curPixel < totalPixelsInBlock; curPixel += blockDim.x)
{
int outPixelX = x + curPixelX;
int outPixelY = y + curPixelY;
if (outPixelX >= outW || outPixelY >= outH) continue;
for (int filtI = 0; filtI < filtN; filtI++) {
//dfloat* smemFilt=smemFiltBeg + (filtW*filtH)*(filtI-filtStart);
dfloat* filtPtr = &allFilt[filtI*filtW*filtH];
MskPixel sum = MaskApplyFilterOnce(filtPtr, filtW, filtH, curPixelX, curPixelY, simgPitchX);
MskPixel* curResultImg = result[filtI];
curResultImg[outPixelY*outW + outPixelX] = sum;
}
curPixelX += blockDim.x;
while (curPixelX >= blockSizeX) {
curPixelX -= blockSizeX;
curPixelY++;
}
}
}
}
void Call_SpatiallyInvariantMaskConvolutionKernel(
MskPixel* inImageGPU, int inImageWidth, int inImageHeight,
KerPixel* allKernelsGPU, int kernelTotalN,
int kernelW, int kernelH,
MskPixel* outImageGPU[],
int blockN,
int sharedMemorySize
)
{
dim3 block(256);
dim3 grid(blockN);
hipLaunchKernelGGL(( SpatiallyInvariantMaskConvolutionKernel) , dim3(grid), dim3(block), sharedMemorySize , 0,
inImageGPU, inImageWidth, inImageHeight,
allKernelsGPU, kernelTotalN,
kernelW, kernelH,
outImageGPU
);
}
#define INSTANTIATE(OutPixelT,InPixelT) \
INSTANTIATE_SpatiallyInvariantImageConvolutionKernel(OutPixelT,InPixelT) \
INSTANTIATE_ConvolutionKernel_LC_Img(OutPixelT,InPixelT)
/*
* Explicit instantiation
*/
/// \cond
INSTANTIATE(double, double)
INSTANTIATE(double, float)
INSTANTIATE(double, int)
INSTANTIATE(double, boost::uint16_t)
INSTANTIATE(float, float)
INSTANTIATE(float, int)
INSTANTIATE(float, boost::uint16_t)
INSTANTIATE(int, int)
INSTANTIATE(boost::uint16_t, boost::uint16_t)
/// \endcond
// ================== GPU kernel for testing ======================
template <typename T>
__global__ void Test(T* ret)
{
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
if (threadId == 0) ret[0] = 5;
if (threadId == 1) ret[1] = 8;
}
template <typename T>
void CallTestGpuKernel(T* ret)
{
dim3 block(192);
dim3 grid(60);
hipLaunchKernelGGL(( Test) , dim3(grid), dim3(block), 0, 0, ret);
}
template void CallTestGpuKernel<int>(int*);
}
}
}
}
} //namespace lsst::afw::math::detail::gpu ends
|
fa1cac5e814cf80e206166bb912693c4c3268bed.cu
|
// -*- LSST-C++ -*-
/*
* LSST Data Management System
* Copyright 2008, 2009, 2010 LSST Corporation.
*
* This product includes software developed by the
* LSST Project (http://www.lsst.org/).
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the LSST License Statement and
* the GNU General Public License along with this program. If not,
* see <http://www.lsstcorp.org/LegalNotices/>.
*/
/**
* @file
*
* @brief GPU convolution code
*
* @author Kresimir Cosic
*
* @ingroup afw
*/
#define NVCC_COMPILING
#include <assert.h>
#include "lsst/afw/image/LsstImageTypes.h"
#include "lsst/afw/math/detail/convCUDA.h"
namespace lsst {
namespace afw {
namespace math {
namespace detail {
namespace gpu {
typedef unsigned char uint8;
extern __shared__ uint8 smem[];
typedef KerPixel dfloat;
namespace {
__host__ __device__
int CeilDivide(int num, int divisor) {
return (num + divisor - 1) / divisor;
}
/**
Loads a part of the image into shared memory.
Can handle the edges of the image. Data outside the edge is not copied to shared memory.
@arg smemImg - pointer to destination in shared memory
@arg img - pointer to source in global memory
@arg imgW - width of img
@arg imgH - height of img
@arg x - x coordinate in img, top left corner of the rectangle to be copied
@arg y - y coordinate in img, top left corner of the rectangle to be copied
@arg simgPitchX - width of box to be copied
@arg simgPitchY - height of box to be copied
*/
template <typename InPixelT, typename ArithPixelT>
__device__ void LoadImageToSmem(ArithPixelT* smemImg, InPixelT* img, int imgW, int imgH,
int x, int y, int simgPitchX, int simgPitchY)
{
if (x + simgPitchX <= imgW && y + simgPitchY <= imgH) {
for (int i = 0; i < simgPitchY; i++) {
for (int j = threadIdx.x; j < simgPitchX; j += blockDim.x) {
smemImg[i*simgPitchX+j] = img[(i+y)*imgW+j+x];
}
}
} else {
for (int i = 0; i < simgPitchY; i++) {
for (int j = threadIdx.x; j < simgPitchX; j += blockDim.x) {
if ((i + y)*imgW + j + x < imgW * imgH) {
smemImg[i*simgPitchX+j] = img[(i+y)*imgW+j+x];
}
}
}
}
}
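/* Note: the tile is stored row-major with pitch simgPitchX, so the shared-memory
   element smemImg[i*simgPitchX + j] holds the global pixel img[(y+i)*imgW + (x+j)].
   Near the right/bottom edge only the in-range pixels are copied and the rest of
   the tile is left untouched. */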
} //local namespace ends
/* =============================================================================
*
* SPATIAL FUNCTIONS: CHEBYSHEV AND POLYNOMIAL
*
*/
/// Calculates the value of the Chebyshev function at the location given by yCoeffs and y.
/// yCoeffs must contain the coefficients computed for the current x (see Chebyshev_T_of_X)
__device__ double ChebyshevLineY (
int order,
double y,
double* yCoeffs
)
{
if (order < 2)
return yCoeffs[0] + order * yCoeffs[1] * y ;
double CshM2 = yCoeffs[order];
double CshM1 = 2 * y * yCoeffs[order] + yCoeffs[order-1];
for (int i = order - 2; i > 0; i--) {
double CshNext = 2 * y * CshM1 + yCoeffs[i] - CshM2;
CshM2 = CshM1;
CshM1 = CshNext;
}
return y * CshM1 + yCoeffs[0] - CshM2;
}
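/* Note: this is Clenshaw's recurrence for evaluating sum_{i=0..order} yCoeffs[i]*T_i(y).
   As a quick check, for order == 2 it returns
       y*(2*y*c2 + c1) + c0 - c2 = c0*T0(y) + c1*T1(y) + c2*(2*y*y - 1). */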
/// Calculates the value of the polynomial function at the location given by yCoeffs and y.
/// yCoeffs must contain the coefficients computed for the current x (see Polynomial_T_of_X)
__device__ double PolynomialLineY (
int order,
double y,
double* yCoeffs
)
{
double retVal = yCoeffs[order];
for (int yCoeffInd = order - 1; yCoeffInd >= 0; --yCoeffInd) {
retVal = (retVal * y) + yCoeffs[yCoeffInd];
}
return retVal;
}
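/* Note: plain Horner evaluation of the 1-d polynomial in y,
   ((...(c_order*y + c_{order-1})*y + ...)*y + c_0). */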
/// Calculates _yCoeffs for the x position given by xPrime.
/// _xCheby must point to memory usable as temporary storage
__device__ void Chebyshev_T_of_X (
const int order,
double xPrime,
double* _params,
double* _yCoeffs, //output
double* _xCheby //temporary
)
{
if (threadIdx.x >= blockSizeX) return;
int paramN = (order + 1) * (order + 2) / 2;
/* Solve as follows:
- f(x,y) = Cy0 T0(y') + Cy1 T1(y') + Cy2 T2(y') + Cy3 T3(y') + ...
where:
Cy0 = P0 T0(x') + P1 T1(x') + P3 T2(x') + P6 T3(x') + ...
Cy1 = P2 T0(x') + P4 T1(x') + P7 T2(x') + ...
Cy2 = P5 T0(x') + P8 T1(x') + ...
Cy3 = P9 T0(x') + ...
...
First compute Tn(x') for each n
Then use that to compute Cy0, Cy1, ...Cyn
*/
// Compute _xCheby[i] = Ti(x') using the standard recurrence relationship;
_xCheby[0] = 1;   // T0(x') = 1
if (order > 0) {
_xCheby[1] = xPrime;
}
for (int xInd = 2; xInd <= order; ++xInd) {
_xCheby[xInd] = (2 * xPrime * _xCheby[xInd-1]) - _xCheby[xInd-2];
}
// Initialize _yCoeffs to right-hand terms of equation shown in documentation block
int paramInd = paramN - 1;
for (int yCoeffInd = order, xChebyInd = 0; yCoeffInd >= 0;
--yCoeffInd, ++xChebyInd, --paramInd) {
_yCoeffs[yCoeffInd] = _params[paramInd] * _xCheby[xChebyInd];
}
// Add the remaining terms to _yCoeffs (starting from _order-1 because _yCoeffs[_order] is done)
for (int startYCoeffInd = order - 1, yCoeffInd = startYCoeffInd, xChebyInd = 0;
paramInd >= 0; --paramInd) {
_yCoeffs[yCoeffInd] += _params[paramInd] * _xCheby[xChebyInd];
if (yCoeffInd == 0) {
--startYCoeffInd;
yCoeffInd = startYCoeffInd;
xChebyInd = 0;
} else {
--yCoeffInd;
++xChebyInd;
}
}
}
/// Calculates _yCoeffs for the given x position.
__device__ void Polynomial_T_of_X (
const int order,
double x,
double* _params,
double* _yCoeffs //output
)
{
if (threadIdx.x >= blockSizeX) return;
int paramN = (order + 1) * (order + 2) / 2;
/* Solve as follows:
- f(x,y) = Cy0 + Cy1 y + Cy2 y^2 + Cy3 y^3 + ...
where:
Cy0 = P0 + P1 x + P3 x^2 + P6 x^3 + ...
Cy1 = P2 + P4 x + P7 x2 + ...
Cy2 = P5 + P8 x + ...
Cy3 = P9 + ...
...
First compute Cy0, Cy1...Cyn by solving 1-d polynomials in x in the usual way.
Then compute f(x,y) by solving the 1-d polynomial in y in the usual way.
*/
const int maxYCoeffInd = order;
int paramInd = paramN - 1;
// initialize the y coefficients
for (int yCoeffInd = maxYCoeffInd; yCoeffInd >= 0; --yCoeffInd, --paramInd) {
_yCoeffs[yCoeffInd] = _params[paramInd];
}
// finish computing the y coefficients
for (int startYCoeffInd = maxYCoeffInd - 1, yCoeffInd = startYCoeffInd;
paramInd >= 0; --paramInd) {
_yCoeffs[yCoeffInd] = (_yCoeffs[yCoeffInd] * x) + _params[paramInd];
if (yCoeffInd == 0) {
--startYCoeffInd;
yCoeffInd = startYCoeffInd;
} else {
--yCoeffInd;
}
}
}
__global__ void ChebyshevImageValues(
double* out, int outW, int outH,
int order,
double* params,
double* rowPos,
double* colPos,
double minX, double minY, double maxX, double maxY
)
{
const double scaleX = 2.0 / (maxX - minX);
const double scaleY = 2.0 / (maxY - minY);
const double offsetX = -(minX + maxX) * 0.5;
const double offsetY = -(minY + maxY) * 0.5;
const int coeffN = order + 1;
double* smemDbl = (double*)smem;
const int coeffPadding = coeffN + 1 - (coeffN % 2);
double* yCoeffsAll = smemDbl ;
double* xChebyAll = yCoeffsAll + (coeffPadding * blockSizeX);
double* smemParams = xChebyAll + (coeffPadding * blockSizeX);
int yLineGroup = threadIdx.x % blockSizeX;
double* yCoeffs = yCoeffsAll + (coeffPadding * yLineGroup);
double* xCheby = xChebyAll + (coeffPadding * yLineGroup);
//load params to shared memory
int paramN = (order + 1) * (order + 2) / 2;
for(int i = threadIdx.x; i < paramN; i += blockDim.x)
smemParams[i] = params[i];
int totalBlocks = CeilDivide(outW, blockSizeX);
int totalPixelsInBlock = blockSizeX * outH;
for (int blkI = blockIdx.x; blkI < totalBlocks; blkI += gridDim.x)
{
int blkX = blkI * blockSizeX;
int blkY = 0;
int curPixelX = threadIdx.x % blockSizeX;
int curPixelY = threadIdx.x / blockSizeX;
int outPixelX = blkX + curPixelX;
const int x = colPos[outPixelX];
const double xPrime = (x + offsetX) * scaleX;
__syncthreads();
Chebyshev_T_of_X(order, xPrime, smemParams, yCoeffs, xCheby);
__syncthreads();
for(int curPixel = threadIdx.x; curPixel < totalPixelsInBlock; curPixel += blockDim.x)
{
int outPixelY = blkY + curPixelY;
if (outPixelX >= outW || outPixelY >= outH) continue;
const int y = rowPos[outPixelY];
const double yPrime = (y + offsetY) * scaleY;
out[outPixelY*outW + outPixelX] = ChebyshevLineY(order, yPrime, yCoeffs);
curPixelY += blockDim.x / blockSizeX;
}
}
}
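/* Note: each block processes one blockSizeX-wide column strip of the output. Threads
   that share the same (threadIdx.x % blockSizeX) work on the same output column and
   share its yCoeffs/xCheby scratch space; Chebyshev_T_of_X (executed only by the first
   blockSizeX threads) fills those coefficients once per strip before all threads sweep
   down the rows. */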
__global__ void PolynomialImageValues(
double* out, int outW, int outH,
int order,
double* params,
double* rowPos,
double* colPos
)
{
const int coeffN = order + 1;
double* smemDbl = (double*)smem;
const int coeffPadding = coeffN + 1 - (coeffN % 2);
double* yCoeffsAll = smemDbl ;
double* smemParams = yCoeffsAll + (coeffPadding * blockSizeX);
int yLineGroup = threadIdx.x % blockSizeX;
double* yCoeffs = yCoeffsAll + (coeffPadding * yLineGroup);
//load params to shared memory
int paramN = (order + 1) * (order + 2) / 2;
for(int i = threadIdx.x; i < paramN; i += blockDim.x)
smemParams[i] = params[i];
int totalBlocks = CeilDivide(outW, blockSizeX);
int totalPixelsInBlock = blockSizeX * outH;
for (int blkI = blockIdx.x; blkI < totalBlocks; blkI += gridDim.x)
{
int blkX = blkI * blockSizeX;
int blkY = 0;
int curPixelX = threadIdx.x % blockSizeX;
int curPixelY = threadIdx.x / blockSizeX;
int outPixelX = blkX + curPixelX;
const int x = colPos[outPixelX];
__syncthreads();
Polynomial_T_of_X(order, x, smemParams, yCoeffs);
__syncthreads();
for(int curPixel = threadIdx.x; curPixel < totalPixelsInBlock; curPixel += blockDim.x)
{
int outPixelY = blkY + curPixelY;
if (outPixelX >= outW || outPixelY >= outH) continue;
const int y = rowPos[outPixelY];
out[outPixelY*outW + outPixelX] = PolynomialLineY(order, y, yCoeffs);
curPixelY += blockDim.x / blockSizeX;
}
}
}
/// calculates values of Chebyshev function for every pixel in the image
void Call_ChebyshevImageValues(
double* out, int outW, int outH,
int order,
double* params,
double* rowPos,
double* colPos,
double minX, double minY, double maxX, double maxY,
int sharedMemorySize
)
{
dim3 block(256);
dim3 grid(CeilDivide(outW, blockSizeX));
ChebyshevImageValues <<< grid, block, sharedMemorySize >>>(
out, outW, outH, order, params,
rowPos, colPos,
minX, minY, maxX, maxY
);
}
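/* Sizing sketch (mirrors the shared-memory layout used inside the kernel above;
   callers may pass a larger value):
       int coeffN        = order + 1;
       int coeffPadding  = coeffN + 1 - (coeffN % 2);
       int paramN        = (order + 1) * (order + 2) / 2;
       int sharedMemorySize = (2 * coeffPadding * blockSizeX + paramN) * sizeof(double);
*/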
/// calculates values of polynomial function for every pixel in the image
void Call_PolynomialImageValues(
double* out, int outW, int outH,
int order,
double* params,
double* rowPos,
double* colPos,
int sharedMemorySize
)
{
dim3 block(256);
dim3 grid(CeilDivide(outW, blockSizeX));
PolynomialImageValues <<< grid, block, sharedMemorySize >>>(
out, outW, outH, order, params,
rowPos, colPos
);
}
__global__ void NormalizationImageValues(
double* out, int outW, int outH,
double** sFn, int n,
double* kernelSum, bool* isDivideByZeroGPU
)
{
double* smemDbl = (double*)smem;
double* smemKernelSum = smemDbl ;
double** smemSfnPtr = (double**) (smemKernelSum + n);
//load kernel sums into shared memory
for(int i = threadIdx.x; i < n; i += blockDim.x) {
smemKernelSum[i] = kernelSum[i];
}
//load pointers to sFn values into shared memory
for(int i = threadIdx.x; i < n; i += blockDim.x) {
smemSfnPtr[i] = sFn[i];
}
__syncthreads();
int totalPixels = outW * outH;
for(int curPixel = threadIdx.x; curPixel < totalPixels; curPixel += blockDim.x)
{
//int outPixelX=curPixel%outW;
//int outPixelY=curPixel/outW;
double sum = 0;
for (int i = 0; i < n; i++) {
sum += smemSfnPtr[i][curPixel] * smemKernelSum[i];
}
if (sum == 0) *isDivideByZeroGPU = true;
else out[curPixel] = 1.0 / sum;
}
}
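/* Note: each output pixel is computed as
       out[p] = 1.0 / sum_i( sFn[i][p] * kernelSum[i] )
   and *isDivideByZeroGPU is set (with the pixel left unwritten) whenever that sum is
   exactly zero. */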
/// calculates normalization values for every pixel in the image
void Call_NormalizationImageValues(
double* out, int outW, int outH,
double** sFn, int n,
double* kernelSum,
bool* isDivideByZeroGPU,
int blockN,
int sharedMemorySize
)
{
dim3 block(256);
dim3 grid(blockN);
NormalizationImageValues <<< grid, block, sharedMemorySize >>>(
out, outW, outH,
sFn, n,
kernelSum,
isDivideByZeroGPU
);
}
/* =============================================================================
*
* MULTIPLE SPATIALLY INVARIANT KERNELS
* (single input image, multiple kernels, multiple output images)
*
* USED FOR:
* - spatially invariant kernel (image and variance planes)
* - linear combination kernel (image plane)
*
*/
//#define SumUpPixelProduct(n) sum+=pixLine[n] * filtLine[n];
#define SumUpPixelProduct(n) if (filtLine[n]!=0) sum+=pixLine[n] * filtLine[n];
#define SumUpPixelProductX4(n) \
SumUpPixelProduct(n) \
SumUpPixelProduct(n+1) \
SumUpPixelProduct(n+2) \
SumUpPixelProduct(n+3)
#if 0 //simpler but slower version of ApplyFilterOnce (without unrolling)
/**
Convolves the filter in smemFilt with the part of the image loaded at the start of shared memory.
Convolves only one pixel, given by curPixelX and curPixelY, of the image in shared memory.
*/
__device__ dfloat ApplyFilterOnce(
dfloat* smemFilt, int filtW, int filtH,
int curPixelX, int curPixelY, int simgPitchX
)
{
dfloat* smemImg = (dfloat*)smem;
dfloat totalSum = 0;
dfloat* pixLine = &smemImg[curPixelY*simgPitchX+curPixelX];
dfloat* filtLine = smemFilt;
int pixLineAdd = simgPitchX - filtW;
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
#pragma unroll 4
for (int x = 0; x < filtW; x++) {
if (*filtLine != 0) {
sum += *pixLine * *filtLine;
}
pixLine++;
filtLine++;
}
pixLine += pixLineAdd;
totalSum += sum;
}
return totalSum;
}
#else //unrolled version of ApplyFilterOnce
/**
Convolves the filter in smemFilt with the part of the image loaded at the start of shared memory.
Convolves only one pixel, given by curPixelX and curPixelY, of the image in shared memory.
*/
__device__ dfloat ApplyFilterOnce(
dfloat* smemFilt, int filtW, int filtH,
int curPixelX, int curPixelY, int simgPitchX
)
{
dfloat* smemImg = (dfloat*)smem;
dfloat totalSum = 0;
dfloat* pixLineOrig = &smemImg[curPixelY*simgPitchX+curPixelX];
dfloat* filtLineOrig = smemFilt;
int remainingFiltW = filtW;
int pixLineAdd = simgPitchX;
int procWidth;
if (remainingFiltW >= 12) {
procWidth = 24;
while (remainingFiltW >= procWidth) {
dfloat* pixLine = pixLineOrig;
dfloat* filtLine = filtLineOrig;
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProductX4(4)
SumUpPixelProductX4(8)
SumUpPixelProductX4(12)
SumUpPixelProductX4(16)
SumUpPixelProductX4(20)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
remainingFiltW -= procWidth;
pixLineOrig += procWidth;
filtLineOrig += procWidth;
}
procWidth = 12;
if (remainingFiltW >= procWidth) {
dfloat* pixLine = pixLineOrig;
dfloat* filtLine = filtLineOrig;
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProductX4(4)
SumUpPixelProductX4(8)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
remainingFiltW -= procWidth;
pixLineOrig += procWidth;
filtLineOrig += procWidth;
}
if (remainingFiltW == 0) return totalSum;
}
dfloat* pixLine = pixLineOrig;
dfloat* filtLine = filtLineOrig;
if (remainingFiltW < 4) {
if (remainingFiltW == 1)
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProduct(0)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
else if (remainingFiltW == 2)
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProduct(0)
SumUpPixelProduct(1)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
else if (remainingFiltW == 3)
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProduct(0)
SumUpPixelProduct(1)
SumUpPixelProduct(2)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
return totalSum;
}
if (remainingFiltW < 9) {
if (remainingFiltW == 4) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
} else if (remainingFiltW == 5) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProduct(4)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
} else if (remainingFiltW == 6) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProduct(4)
SumUpPixelProduct(5)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
} else if (remainingFiltW == 7) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProduct(4)
SumUpPixelProduct(5)
SumUpPixelProduct(6)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
} else if (remainingFiltW == 8) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProductX4(4)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
}
return totalSum;
}
if (remainingFiltW == 9) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProductX4(4)
SumUpPixelProduct(8)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
} else if (remainingFiltW == 10) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProductX4(4)
SumUpPixelProduct(8)
SumUpPixelProduct(9)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
} else if (remainingFiltW == 11) {
for (int filtY = 0; filtY < filtH; filtY++) {
dfloat sum = 0;
SumUpPixelProductX4(0)
SumUpPixelProductX4(4)
SumUpPixelProduct(8)
SumUpPixelProduct(9)
SumUpPixelProduct(10)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum += sum;
}
}
return totalSum;
}
#endif
template <typename OutPixelT, typename InPixelT>
__global__ void SpatiallyInvariantImgConvolutionKernel(
InPixelT* img, int imgW, int imgH,
dfloat* allFilt, int filtN,
int filtW, int filtH,
OutPixelT** result
)
{
const int outW = imgW - filtW + 1;
const int outH = imgH - filtH + 1;
int simgPitchX = blockSizeX + filtW - 1;
int simgPitchY = blockSizeY + filtH - 1;
/*int simgSize=simgPitchX*simgPitchY;
dfloat* smemFiltBeg=(dfloat*)smem + simgSize;
for (int filtI=filtStart; filtI<filtStart+filtN; filtI++) {
dfloat* smemFilt=smemFiltBeg + (filtW*filtH)*(filtI-filtStart);
for(int i=threadIdx.x; i<filtW*filtH; i+=blockDim.x)
smemFilt[i]=filt[filtI][i];
}*/
int blockNX = CeilDivide(outW, blockSizeX);
int blockNY = CeilDivide(outH, blockSizeY);
int totalBlocks = blockNX * blockNY;
int totalPixelsInBlock = blockSizeX * blockSizeY;
for (int blkI = blockIdx.x; blkI < totalBlocks; blkI += gridDim.x)
{
int blkX = blkI % blockNX;
int blkY = blkI / blockNX;
int x = blkX * blockSizeX;
int y = blkY * blockSizeY;
__syncthreads();
LoadImageToSmem((dfloat*) smem, img, imgW, imgH, x, y, simgPitchX, simgPitchY);
__syncthreads();
int curPixelX = threadIdx.x % blockSizeX;
int curPixelY = threadIdx.x / blockSizeX;
for(int curPixel = threadIdx.x; curPixel < totalPixelsInBlock; curPixel += blockDim.x)
{
int outPixelX = x + curPixelX;
int outPixelY = y + curPixelY;
if (outPixelX >= outW || outPixelY >= outH) continue;
for (int filtI = 0; filtI < filtN; filtI++) {
dfloat* filtPtr = &allFilt[filtI*filtW*filtH];
//dfloat* smemFilt=smemFiltBeg + (filtW*filtH)*(filtI-filtStart);
//dfloat sum = ApplyFilterOnce(smemFilt, filtW, filtH, curPixelX, curPixelY, simgPitchX);
dfloat sum = ApplyFilterOnce(filtPtr, filtW, filtH, curPixelX, curPixelY, simgPitchX);
OutPixelT* curResultImg = result[filtI];
curResultImg[outPixelY*outW + outPixelX] = OutPixelT(sum);
}
curPixelX += blockDim.x;
while (curPixelX >= blockSizeX) {
curPixelX -= blockSizeX;
curPixelY++;
}
}
}
}
template <typename OutPixelT, typename InPixelT>
void Call_SpatiallyInvariantImageConvolutionKernel(
InPixelT* inImageGPU, int inImageWidth, int inImageHeight,
KerPixel* allKernelsGPU, int kernelTotalN,
int kernelW, int kernelH,
OutPixelT* outImageGPU[],
int blockN,
int sharedMemorySize
)
{
dim3 block(256);
dim3 grid(blockN);
SpatiallyInvariantImgConvolutionKernel<OutPixelT, InPixelT> <<< grid, block, sharedMemorySize >>>(
inImageGPU, inImageWidth, inImageHeight,
allKernelsGPU, kernelTotalN,
kernelW, kernelH,
outImageGPU
);
}
#define INSTANTIATE_SpatiallyInvariantImageConvolutionKernel(OutPixelT,InPixelT) \
template void Call_SpatiallyInvariantImageConvolutionKernel<OutPixelT,InPixelT>( \
InPixelT* inImageGPU, int inImageWidth, int inImageHeight, \
KerPixel* allKernelsGPU, int kernelTotalN, \
int kernelW, int kernelH, \
OutPixelT* outImageGPU[], \
int blockN, \
int sharedMemorySize \
);
/* =============================================================================
*
* LINEAR COMBINATION KERNEL - IMAGE
*
*/
namespace {
/**
Convolves the calculated filter with the part of the image loaded at the start of shared memory.
Convolves only one pixel, given by curPixelX and curPixelY, of the image in shared memory.
The input image block should be placed at the start of shared memory.
@arg allFilt - pointer to all basis filters (placed sequentially)
@arg filtN - number of filters
@arg sfVal - values of the spatial functions for each filter at the current pixel
@arg normVal - normalization coefficient
@arg simgPitchX - row pitch (width) of the image block in shared memory
@return result of convolving the image data
*/
__device__ dfloat ApplyFilterOnceLCImg(
dfloat* allFilt, int filtN, int filtW, int filtH,
double sfVal[],
double normVal,
int curPixelX, int curPixelY, int simgPitchX
)
{
dfloat* smemImg = (dfloat*)smem;
dfloat totalSum = 0;
dfloat* pixLine = &smemImg[curPixelY*simgPitchX+curPixelX];
int pixLineAdd = simgPitchX - filtW;
int kernelSize = filtW * filtH;
int filtRemainder = filtN % 3;
for (int y = 0; y < filtH; y++) {
dfloat sum = 0;
for (int x = 0; x < filtW; x++) {
dfloat* filtLine = allFilt + y * filtW + x;
dfloat filtVal;
filtVal = *filtLine * sfVal[0];
filtLine += kernelSize;
int filtI = 1;
if (filtRemainder == 2) {
filtVal += *filtLine * sfVal[1];
filtLine += kernelSize;
filtI = 2;
} else if (filtRemainder == 0) {
filtVal += *filtLine * sfVal[1];
filtLine += kernelSize;
filtVal += *filtLine * sfVal[2];
filtLine += kernelSize;
filtI = 3;
}
while(filtI < filtN) {
filtVal += *filtLine * sfVal[filtI];
filtLine += kernelSize;
filtI++;
filtVal += *filtLine * sfVal[filtI];
filtLine += kernelSize;
filtI++;
filtVal += *filtLine * sfVal[filtI];
filtLine += kernelSize;
filtI++;
}
filtVal *= normVal;
if (filtVal != 0) {
sum += *pixLine * filtVal;
}
pixLine++;
}
pixLine += pixLineAdd;
totalSum += sum;
}
return totalSum;
}
} //local namespace ends
template <typename OutPixelT, typename InPixelT>
__global__ void ConvolutionKernel_LC_Img(
InPixelT* img, int imgW, int imgH,
dfloat* filt, int filtN,
int filtW, int filtH,
double** sfValImg,
double* norm,
OutPixelT* out
)
{
//Asserts that : blockDim.x is divisible by blockSizeX
const int outW = imgW - filtW + 1;
const int outH = imgH - filtH + 1;
int simgPitchX = blockSizeX + filtW - 1;
int simgPitchY = blockSizeY + filtH - 1;
/*int simgSize=simgPitchX*simgPitchY;
dfloat* smemFiltBeg=(dfloat*)smem + simgSize;
for (int filtI=filtStart; filtI<filtStart+filtN; filtI++) {
dfloat* smemFilt=smemFiltBeg + (filtW*filtH)*(filtI-filtStart);
for(int i=threadIdx.x; i<filtW*filtH; i+=blockDim.x)
smemFilt[i]=filt[filtI][i];
}*/
int blockNX = CeilDivide(outW, blockSizeX);
int blockNY = CeilDivide(outH, blockSizeY);
int totalBlocks = blockNX * blockNY;
int totalPixelsInBlock = blockSizeX * blockSizeY;
for (int blkI = blockIdx.x; blkI < totalBlocks; blkI += gridDim.x)
{
int blkX = blkI % blockNX;
int blkY = blkI / blockNX;
int x = blkX * blockSizeX;
int y = blkY * blockSizeY;
__syncthreads();
LoadImageToSmem((dfloat*) smem, img, imgW, imgH, x, y, simgPitchX, simgPitchY);
__syncthreads();
int curPixelX = threadIdx.x % blockSizeX;
int curPixelY = threadIdx.x / blockSizeX;
for(int curPixel = threadIdx.x; curPixel < totalPixelsInBlock; curPixel += blockDim.x)
{
int outPixelX = x + curPixelX;
int outPixelY = y + curPixelY;
int outAddr = outPixelY * outW + outPixelX;
if (outPixelX >= outW || outPixelY >= outH) continue;
double sfVal[maxGpuSfCount];
for (int filtI = 0; filtI < filtN; filtI++)
sfVal[filtI] = sfValImg[filtI][outAddr];
double normVal = 1;
if (norm != NULL)
normVal = norm[outAddr];
double sum = ApplyFilterOnceLCImg(filt, filtN, filtW, filtH,
sfVal, normVal, curPixelX, curPixelY, simgPitchX);
out[outAddr] = OutPixelT(sum);
curPixelY += blockDim.x / blockSizeX;
}
}
}
template <typename OutPixelT, typename InPixelT>
void Call_ConvolutionKernel_LC_Img(
InPixelT* inImageGPU, int inImageWidth, int inImageHeight,
KerPixel* kernelGPU, int kernelTotalN,
int kernelW, int kernelH,
double* sfValGPU[],
double* normGPU,
OutPixelT* outImageGPU,
int blockN,
int sharedMemorySize
)
{
dim3 block(256);
dim3 grid(blockN);
ConvolutionKernel_LC_Img <<< grid, block, sharedMemorySize >>>(
inImageGPU, inImageWidth, inImageHeight,
kernelGPU, kernelTotalN,
kernelW, kernelH,
sfValGPU,
normGPU,
outImageGPU
);
}
#define INSTANTIATE_ConvolutionKernel_LC_Img(OutPixelT,InPixelT) \
template void Call_ConvolutionKernel_LC_Img<OutPixelT,InPixelT>( \
InPixelT* inImageGPU, int inImageWidth, int inImageHeight, \
KerPixel* kernelGPU, int kernelTotalN, \
int kernelW, int kernelH, \
double* sfValGPU[], \
double* normGPU, \
OutPixelT* outImageGPU, \
int blockN, \
int sharedMemorySize \
);
/* =============================================================================
*
* LINEAR COMBINATION KERNEL - VARIANCE AND MASK
*
*/
namespace {
/**
Convolves the calculated filter with the part of the variance image loaded at the start of shared memory.
Convolves only one pixel, given by curPixelX and curPixelY, of the image in shared memory.
The input variance block should be placed at the start of shared memory.
@arg smemMsk - pointer to the input image mask data
@arg mskSum - output parameter, result of convolving the mask data
@arg allFilt - pointer to all basis filters (placed sequentially)
@arg filtN - number of filters
@arg sfVal - values of the spatial functions for each filter at the current pixel
@arg normVal - normalization coefficient
@arg simgPitchX - row pitch (width) of both the variance block and the mask block in shared memory
@return result of convolving the variance data
*/
__device__ dfloat ApplyFilterOnceLCVar(
MskPixel* smemMsk,
MskPixel& mskSum,
dfloat* allFilt, int filtN, int filtW, int filtH,
double sfVal[],
double normVal,
int curPixelX, int curPixelY, int simgPitchX
)
{
dfloat* smemImg = (dfloat*)smem;
dfloat totalSum = 0;
dfloat* pixLine = &smemImg[curPixelY*simgPitchX+curPixelX];
MskPixel* pixMskLine = &smemMsk[curPixelY*simgPitchX+curPixelX];
int pixLineAdd = simgPitchX - filtW;
int kernelSize = filtW * filtH;
mskSum = 0;
int filtRemainder = filtN % 3;
for (int y = 0; y < filtH; y++) {
dfloat sum = 0;
for (int x = 0; x < filtW; x++) {
dfloat* filtLine = allFilt + y * filtW + x;
dfloat filtVal = *filtLine * sfVal[0];
filtLine += kernelSize;
int filtI = 1;
if (filtRemainder == 2) {
filtVal += *filtLine * sfVal[1];
filtLine += kernelSize;
filtI = 2;
} else if (filtRemainder == 0) {
filtVal += *filtLine * sfVal[1];
filtLine += kernelSize;
filtVal += *filtLine * sfVal[2];
filtLine += kernelSize;
filtI = 3;
}
while(filtI < filtN) {
filtVal += *filtLine * sfVal[filtI];
filtLine += kernelSize;
filtI++;
filtVal += *filtLine * sfVal[filtI];
filtLine += kernelSize;
filtI++;
filtVal += *filtLine * sfVal[filtI];
filtLine += kernelSize;
filtI++;
}
filtVal *= normVal;
if (filtVal != 0) {
sum += *pixLine * (filtVal * filtVal);
mskSum |= *pixMskLine;
}
pixLine++;
pixMskLine++;
}
pixLine += pixLineAdd;
pixMskLine += pixLineAdd;
totalSum += sum;
}
return totalSum;
}
} //local namespace ends
__global__ void ConvolutionKernel_LC_Var(
VarPixel* img, int imgW, int imgH,
MskPixel* inMsk,
dfloat* filt, int filtN,
int filtW, int filtH,
double** sfValImg,
double* norm,
VarPixel* outVar,
MskPixel* outMsk
)
{
//Asserts that : blockDim.x is divisible by blockSizeX
const int outW = imgW - filtW + 1;
const int outH = imgH - filtH + 1;
int simgPitchX = blockSizeX + filtW - 1;
int simgPitchY = blockSizeY + filtH - 1;
int simgSize = simgPitchX * simgPitchY;
MskPixel* smemMsk = (MskPixel*)((dfloat*)smem + simgSize);
/*dfloat* smemFiltBeg=(dfloat*)smem + simgSize;
for (int filtI=filtStart; filtI<filtStart+filtN; filtI++) {
dfloat* smemFilt=smemFiltBeg + (filtW*filtH)*(filtI-filtStart);
for(int i=threadIdx.x; i<filtW*filtH; i+=blockDim.x)
smemFilt[i]=filt[filtI][i];
}*/
int blockNX = CeilDivide(outW, blockSizeX);
int blockNY = CeilDivide(outH, blockSizeY);
int totalBlocks = blockNX * blockNY;
int totalPixelsInBlock = blockSizeX * blockSizeY;
for (int blkI = blockIdx.x; blkI < totalBlocks; blkI += gridDim.x)
{
int blkX = blkI % blockNX;
int blkY = blkI / blockNX;
int x = blkX * blockSizeX;
int y = blkY * blockSizeY;
__syncthreads();
LoadImageToSmem((dfloat*) smem, img, imgW, imgH, x, y, simgPitchX, simgPitchY);
LoadImageToSmem( smemMsk, inMsk, imgW, imgH, x, y, simgPitchX, simgPitchY);
__syncthreads();
int curPixelX = threadIdx.x % blockSizeX;
int curPixelY = threadIdx.x / blockSizeX;
for(int curPixel = threadIdx.x; curPixel < totalPixelsInBlock; curPixel += blockDim.x)
{
int outPixelX = x + curPixelX;
int outPixelY = y + curPixelY;
int outAddr = outPixelY * outW + outPixelX;
if (outPixelX >= outW || outPixelY >= outH) continue;
double sfVal[maxGpuSfCount];
for (int filtI = 0; filtI < filtN; filtI++) {
sfVal[filtI] = sfValImg[filtI][outAddr];
}
double normVal = 1;
if (norm != NULL) normVal = norm[outAddr];
MskPixel mskSum;
dfloat sum = ApplyFilterOnceLCVar(smemMsk, mskSum, filt, filtN, filtW, filtH,
sfVal, normVal, curPixelX, curPixelY, simgPitchX);
outVar[outAddr] = sum;
outMsk[outAddr] = mskSum;
curPixelY += blockDim.x / blockSizeX;
}
}
}
void Call_ConvolutionKernel_LC_Var(
VarPixel* inImageGPU, int inImageWidth, int inImageHeight,
MskPixel* inMskGPU,
KerPixel* kernelGPU, int kernelTotalN,
int kernelW, int kernelH,
double* sfValGPU[],
double* normGPU,
VarPixel* outImageGPU,
MskPixel* outMskGPU,
int blockN,
int sharedMemorySize
)
{
dim3 block(256);
dim3 grid(blockN);
ConvolutionKernel_LC_Var <<< grid, block, sharedMemorySize >>>(
inImageGPU, inImageWidth, inImageHeight,
inMskGPU,
kernelGPU, kernelTotalN,
kernelW, kernelH,
sfValGPU,
normGPU,
outImageGPU,
outMskGPU
);
}
/* =============================================================================
*
* SPATIALLY INVARIANT KERNEL - MASK PLANE
*
*/
namespace {
//#define SumUpPixelProductMask(n) sum |=pixLine[n];
#define SumUpPixelProductMask(n) if (filtLine[n]!=0) sum |=pixLine[n] ;
#define SumUpPixelProductMaskX4(n) \
SumUpPixelProductMask(n) \
SumUpPixelProductMask(n+1) \
SumUpPixelProductMask(n+2) \
SumUpPixelProductMask(n+3)
#if 0 //simpler but slower version of MaskApplyFilterOnce (without unrolling)
/**
Convolves the filter in smemFilt with the part of the image mask loaded at the start of shared memory.
Convolves only one pixel, given by curPixelX and curPixelY, of the image mask in shared memory.
*/
__device__ MskPixel MaskApplyFilterOnce(
dfloat* smemFilt, int filtW, int filtH,
int curPixelX, int curPixelY, int simgPitchX
)
{
MskPixel* smemImg = (MskPixel*)smem;
MskPixel totalSum = 0;
MskPixel* pixLine = &smemImg[curPixelY*simgPitchX+curPixelX];
dfloat* filtLine = smemFilt;
int pixLineAdd = simgPitchX - filtW;
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
#pragma unroll 4
for (int x = 0; x < filtW; x++) {
if (*filtLine != 0) {
sum |= *pixLine;
}
pixLine++;
filtLine++;
}
pixLine += pixLineAdd;
totalSum |= sum;
}
return totalSum;
}
#else //unrolled version of MaskApplyFilterOnce
/**
Convolves the filter in smemFilt with the part of the image mask loaded at the start of shared memory.
Convolves only one pixel, given by curPixelX and curPixelY, of the image mask in shared memory.
*/
__device__ MskPixel MaskApplyFilterOnce(
dfloat* smemFilt, int filtW, int filtH,
int curPixelX, int curPixelY, int simgPitchX
)
{
MskPixel* smemImg = (MskPixel*)smem;
MskPixel totalSum = 0;
MskPixel* pixLineOrig = &smemImg[curPixelY*simgPitchX+curPixelX];
dfloat* filtLineOrig = smemFilt;
int remainingFiltW = filtW;
int pixLineAdd = simgPitchX;
int procWidth;
if (remainingFiltW >= 12) {
procWidth = 24;
while (remainingFiltW >= procWidth) {
MskPixel* pixLine = pixLineOrig;
dfloat* filtLine = filtLineOrig;
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMaskX4(4)
SumUpPixelProductMaskX4(8)
SumUpPixelProductMaskX4(12)
SumUpPixelProductMaskX4(16)
SumUpPixelProductMaskX4(20)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
remainingFiltW -= procWidth;
pixLineOrig += procWidth;
filtLineOrig += procWidth;
}
procWidth = 12;
if (remainingFiltW >= procWidth) {
MskPixel* pixLine = pixLineOrig;
dfloat* filtLine = filtLineOrig;
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMaskX4(4)
SumUpPixelProductMaskX4(8)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
remainingFiltW -= procWidth;
pixLineOrig += procWidth;
filtLineOrig += procWidth;
}
if (remainingFiltW == 0) return totalSum;
}
MskPixel* pixLine = pixLineOrig;
dfloat* filtLine = filtLineOrig;
if (remainingFiltW < 4) {
if (remainingFiltW == 1) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMask(0)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 2) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMask(0)
SumUpPixelProductMask(1)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 3) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMask(0)
SumUpPixelProductMask(1)
SumUpPixelProductMask(2)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
}
return totalSum;
}
if (remainingFiltW < 9) {
if (remainingFiltW == 4) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 5) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMask(4)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 6) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMask(4)
SumUpPixelProductMask(5)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 7) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMask(4)
SumUpPixelProductMask(5)
SumUpPixelProductMask(6)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 8) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMaskX4(4)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
}
return totalSum;
}
if (remainingFiltW == 9) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMaskX4(4)
SumUpPixelProductMask(8)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 10) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMaskX4(4)
SumUpPixelProductMask(8)
SumUpPixelProductMask(9)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
} else if (remainingFiltW == 11) {
for (int filtY = 0; filtY < filtH; filtY++) {
MskPixel sum = 0;
SumUpPixelProductMaskX4(0)
SumUpPixelProductMaskX4(4)
SumUpPixelProductMask(8)
SumUpPixelProductMask(9)
SumUpPixelProductMask(10)
filtLine += filtW;
pixLine += pixLineAdd;
totalSum |= sum;
}
}
return totalSum;
}
#endif
} //local namespace ends
__global__ void SpatiallyInvariantMaskConvolutionKernel(
MskPixel* img, int imgW, int imgH,
dfloat* allFilt, int filtN,
int filtW, int filtH,
MskPixel** result
)
{
const int outW = imgW - filtW + 1;
const int outH = imgH - filtH + 1;
int simgPitchX = blockSizeX + filtW - 1;
int simgPitchY = blockSizeY + filtH - 1;
/*int simgSize=simgPitchX*simgPitchY;
dfloat* smemFiltBeg=(dfloat*)smem + simgSize;
for (int filtI=filtStart; filtI<filtStart+filtN; filtI++) {
dfloat* smemFilt=smemFiltBeg + (filtW*filtH)*(filtI-filtStart);
for(int i=threadIdx.x; i<filtW*filtH; i+=blockDim.x)
smemFilt[i]=filt[filtI][i];
}*/
int blockNX = CeilDivide(outW, blockSizeX);
int blockNY = CeilDivide(outH, blockSizeY);
int totalBlocks = blockNX * blockNY;
int totalPixelsInBlock = blockSizeX * blockSizeY;
for (int blkI = blockIdx.x; blkI < totalBlocks; blkI += gridDim.x)
{
int blkX = blkI % blockNX;
int blkY = blkI / blockNX;
int x = blkX * blockSizeX;
int y = blkY * blockSizeY;
__syncthreads();
LoadImageToSmem((MskPixel*) smem, img, imgW, imgH, x, y, simgPitchX, simgPitchY);
__syncthreads();
int curPixelX = threadIdx.x % blockSizeX;
int curPixelY = threadIdx.x / blockSizeX;
for(int curPixel = threadIdx.x; curPixel < totalPixelsInBlock; curPixel += blockDim.x)
{
int outPixelX = x + curPixelX;
int outPixelY = y + curPixelY;
if (outPixelX >= outW || outPixelY >= outH) continue;
for (int filtI = 0; filtI < filtN; filtI++) {
//dfloat* smemFilt=smemFiltBeg + (filtW*filtH)*(filtI-filtStart);
dfloat* filtPtr = &allFilt[filtI*filtW*filtH];
MskPixel sum = MaskApplyFilterOnce(filtPtr, filtW, filtH, curPixelX, curPixelY, simgPitchX);
MskPixel* curResultImg = result[filtI];
curResultImg[outPixelY*outW + outPixelX] = sum;
}
curPixelX += blockDim.x;
while (curPixelX >= blockSizeX) {
curPixelX -= blockSizeX;
curPixelY++;
}
}
}
}
void Call_SpatiallyInvariantMaskConvolutionKernel(
MskPixel* inImageGPU, int inImageWidth, int inImageHeight,
KerPixel* allKernelsGPU, int kernelTotalN,
int kernelW, int kernelH,
MskPixel* outImageGPU[],
int blockN,
int sharedMemorySize
)
{
dim3 block(256);
dim3 grid(blockN);
SpatiallyInvariantMaskConvolutionKernel <<< grid, block, sharedMemorySize >>>(
inImageGPU, inImageWidth, inImageHeight,
allKernelsGPU, kernelTotalN,
kernelW, kernelH,
outImageGPU
);
}
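// A minimal host-side sketch, not part of the original API: assuming blockSizeX and blockSizeY
// are the integer tile constants used by the kernels above, this estimates the dynamic shared
// memory needed for the sub-image tile that SpatiallyInvariantMaskConvolutionKernel loads at
// the start of shared memory (simgPitchX * simgPitchY MskPixel values).
inline int EstimateMaskConvolutionSharedMemorySize(int filtW, int filtH)
{
    const int simgPitchX = blockSizeX + filtW - 1;
    const int simgPitchY = blockSizeY + filtH - 1;
    return simgPitchX * simgPitchY * (int)sizeof(MskPixel);
}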
#define INSTANTIATE(OutPixelT,InPixelT) \
INSTANTIATE_SpatiallyInvariantImageConvolutionKernel(OutPixelT,InPixelT) \
INSTANTIATE_ConvolutionKernel_LC_Img(OutPixelT,InPixelT)
/*
* Explicit instantiation
*/
/// \cond
INSTANTIATE(double, double)
INSTANTIATE(double, float)
INSTANTIATE(double, int)
INSTANTIATE(double, boost::uint16_t)
INSTANTIATE(float, float)
INSTANTIATE(float, int)
INSTANTIATE(float, boost::uint16_t)
INSTANTIATE(int, int)
INSTANTIATE(boost::uint16_t, boost::uint16_t)
/// \endcond
// ================== GPU kernel for testing ======================
template <typename T>
__global__ void Test(T* ret)
{
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
if (threadId == 0) ret[0] = 5;
if (threadId == 1) ret[1] = 8;
}
template <typename T>
void CallTestGpuKernel(T* ret)
{
dim3 block(192);
dim3 grid(60);
Test <<< grid, block>>>(ret);
}
template void CallTestGpuKernel<int>(int*);
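// A minimal usage sketch (hypothetical helper, not part of the original file): Test<int>
// writes 5 and 8 into the first two elements of the output buffer, which the host can read
// back and check as follows.
inline bool RunTestGpuKernel()
{
    int* retGpu = 0;
    cudaMalloc((void**)&retGpu, 2 * sizeof(int));
    CallTestGpuKernel<int>(retGpu);
    int retCpu[2] = {0, 0};
    cudaMemcpy(retCpu, retGpu, 2 * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(retGpu);
    return retCpu[0] == 5 && retCpu[1] == 8;
}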
}
}
}
}
} //namespace lsst::afw::math::detail::gpu ends
|
5abd5bbd9dded5fe5ebede4ab3358c4e8f7c9e0a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cucheb.h>
/* driver */
int main(){
// input file
string mtxfile("../matrices/Stranke94.mtx");
// cuchebstats
cuchebstats ccstats;
// cuchebmatrix
cuchebmatrix ccm;
cuchebmatrix_init(mtxfile, &ccm);
// cucheblanczos
cucheblanczos ccl;
cucheblanczos_init(2, 5, &ccm, &ccl);
// print CCL
cucheblanczos_print(&ccl);
// set starting vector
cucheblanczos_startvecs(&ccl);
// do arnoldi run
cucheblanczos_arnoldi(5,&ccm,&ccl,&ccstats);
// print arnoldi vectors
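// dvecs lives on the device as an n x (nvecs + bsize) column-major array (leading dimension
// ccl.n); each entry is copied back to the host one double at a time just for printing.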
double val;
int nvecs;
nvecs = (ccl.bsize)*(ccl.nblocks);
for(int jj=0; jj < nvecs+ccl.bsize; jj++){
for(int ii=0; ii < ccl.n; ii++){
hipMemcpy(&val,&(ccl.dvecs)[jj*ccl.n + ii],sizeof(double),hipMemcpyDeviceToHost);
printf(" dvecs[%d] = %+e\n", jj*ccl.n+ii, val);
}
printf("\n");
}
printf("\n");
// compute ritz values
cucheblanczos_ritz(&ccm,&ccl);
// print schurvecs and bands
for(int ii=0; ii < nvecs; ii++){
for(int jj=0; jj < nvecs+ccl.bsize; jj++){
printf(" schurvecs[%d] = %+e\n", ii*(nvecs+ccl.bsize)+jj, ccl.schurvecs[ii*(nvecs+ccl.bsize)+jj]);
}
printf("\n");
for(int jj=0; jj < ccl.bsize+1; jj++){
printf(" bands[%d] = %+e\n", ii*(ccl.bsize+1)+jj,
ccl.bands[ii*(ccl.bsize+1)+jj]);
}
printf("\n");
}
printf("\n");
// print evals
for(int ii=0; ii < nvecs; ii++){
printf(" evals[%d] = %+e\n", ii, ccl.evals[ii]);
}
printf("\n");
// print ritz vectors
for(int jj=0; jj < nvecs; jj++){
for(int ii=0; ii < ccl.n; ii++){
hipMemcpy(&val,&(ccl.dvecs)[jj*ccl.n + ii],sizeof(double),hipMemcpyDeviceToHost);
printf(" dvecs[%d] = %+e\n", jj*ccl.n+ii, val);
}
printf("\n");
}
printf("\n");
// compute rayleigh quotients and residuals
cucheblanczos_rayleigh(&ccm,&ccl);
// print ritz values and residuals
for(int jj=0; jj < nvecs; jj++){
printf(" evals[%d] = %+e, res[%d] = %+e\n", jj, ccl.evals[jj],
jj, ccl.res[jj]);
}
printf("\n");
// destroy CCM
cuchebmatrix_destroy(&ccm);
// destroy CCL
cucheblanczos_destroy(&ccl);
// return
return 0;
}
|
5abd5bbd9dded5fe5ebede4ab3358c4e8f7c9e0a.cu
|
#include <cucheb.h>
/* driver */
int main(){
// input file
string mtxfile("../matrices/Stranke94.mtx");
// cuchebstats
cuchebstats ccstats;
// cuchebmatrix
cuchebmatrix ccm;
cuchebmatrix_init(mtxfile, &ccm);
// cucheblanczos
cucheblanczos ccl;
cucheblanczos_init(2, 5, &ccm, &ccl);
// print CCL
cucheblanczos_print(&ccl);
// set starting vector
cucheblanczos_startvecs(&ccl);
// do arnoldi run
cucheblanczos_arnoldi(5,&ccm,&ccl,&ccstats);
// print arnoldi vectors
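// dvecs lives on the device as an n x (nvecs + bsize) column-major array (leading dimension
// ccl.n); each entry is copied back to the host one double at a time just for printing.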
double val;
int nvecs;
nvecs = (ccl.bsize)*(ccl.nblocks);
for(int jj=0; jj < nvecs+ccl.bsize; jj++){
for(int ii=0; ii < ccl.n; ii++){
cudaMemcpy(&val,&(ccl.dvecs)[jj*ccl.n + ii],sizeof(double),cudaMemcpyDeviceToHost);
printf(" dvecs[%d] = %+e\n", jj*ccl.n+ii, val);
}
printf("\n");
}
printf("\n");
// compute ritz values
cucheblanczos_ritz(&ccm,&ccl);
// print schurvecs and bands
for(int ii=0; ii < nvecs; ii++){
for(int jj=0; jj < nvecs+ccl.bsize; jj++){
printf(" schurvecs[%d] = %+e\n", ii*(nvecs+ccl.bsize)+jj, ccl.schurvecs[ii*(nvecs+ccl.bsize)+jj]);
}
printf("\n");
for(int jj=0; jj < ccl.bsize+1; jj++){
printf(" bands[%d] = %+e\n", ii*(ccl.bsize+1)+jj,
ccl.bands[ii*(ccl.bsize+1)+jj]);
}
printf("\n");
}
printf("\n");
// print evals
for(int ii=0; ii < nvecs; ii++){
printf(" evals[%d] = %+e\n", ii, ccl.evals[ii]);
}
printf("\n");
// print ritz vectors
for(int jj=0; jj < nvecs; jj++){
for(int ii=0; ii < ccl.n; ii++){
cudaMemcpy(&val,&(ccl.dvecs)[jj*ccl.n + ii],sizeof(double),cudaMemcpyDeviceToHost);
printf(" dvecs[%d] = %+e\n", jj*ccl.n+ii, val);
}
printf("\n");
}
printf("\n");
// compute rayleigh quotients and residuals
cucheblanczos_rayleigh(&ccm,&ccl);
// print ritz values and residuals
for(int jj=0; jj < nvecs; jj++){
printf(" evals[%d] = %+e, res[%d] = %+e\n", jj, ccl.evals[jj],
jj, ccl.res[jj]);
}
printf("\n");
// destroy CCM
cuchebmatrix_destroy(&ccm);
// destroy CCL
cucheblanczos_destroy(&ccl);
// return
return 0;
}
|
e2aebee95b59e29a00b41e30001740ecad193959.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) Maroun Tork, Lina Maudlej and Mark Silberstein
* All rights reserved.
* If used, please cite: PAPER NAME, AUTHORS, CONFERENCE WHERE PUBLISHED
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "bf_host.cu.hpp"
void hostContext::teardown_connection(ib_resources_t* ib_resources) {
ibv_destroy_qp(ib_resources->qp);
ibv_destroy_cq(ib_resources->recv_cq);
ibv_destroy_cq(ib_resources->send_cq);
ibv_dereg_mr(ib_resources->lmr_recv);
ibv_dereg_mr(ib_resources->lmr_send);
// lrecv_buf/lsend_buf are device buffers allocated with hipMalloc in the setup functions
hipFree(ib_resources->lrecv_buf);
hipFree(ib_resources->lsend_buf);
ibv_dealloc_pd(ib_resources->pd);
ibv_close_device(ib_resources->context);
free(ib_resources);
}
ib_resources_t* hostContext::setup_notify_connection(const string& interface, int sfd) {
ib_resources_t *ib_resources = (struct ib_resources_t *)malloc(sizeof(struct ib_resources_t));
struct ibv_device **device_list = ibv_get_device_list(NULL);
if (!device_list) {
std::cerr << "ibv_get_device_list failed" << std::endl;
exit(1);
}
string device_name = ib_device_from_netdev(interface.c_str());
struct ibv_context *context = ibv_open_device_by_name(device_name);
struct ibv_pd *pd = ibv_alloc_pd(context);
if (!pd) {
std::cerr << "ibv_alloc_pd() failed" << std::endl;
exit(1);
}
struct ibv_mr *mr_recv;
char* recv_buf;
CUDA_CHECK(hipMalloc(&recv_buf, _workers_num * sizeof(unsigned int)));
unsigned int recv_arr_size = _workers_num;
unsigned int recv_init[recv_arr_size];
for(int i = 0 ; i < recv_arr_size ; i++) {
recv_init[i] = HOST_MAX_RECV_WQES - _workers_num + i;
}
CUDA_CHECK(hipMemcpy(recv_buf, recv_init, _workers_num * sizeof(unsigned int), hipMemcpyHostToDevice));
mr_recv = ibv_reg_mr(pd, recv_buf, _workers_num * sizeof(unsigned int), IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
if (!mr_recv) {
std::cerr << "ibv_reg_mr() failed for data_for_host" << std::endl;
exit(1);
}
struct ibv_mr *mr_send;
char *send_buf;
CUDA_CHECK(hipMalloc(&send_buf,2 * _workers_num * sizeof(unsigned int)));
unsigned int send_arr_size = 2 * _workers_num;
unsigned int send_init[send_arr_size];
for(int i = 0 ; i < _workers_num ; i++) {
send_init[i] = HOST_MAX_SEND_WQES - _workers_num + i;
}
for(int i = 0 ; i < _workers_num ; i++) {
send_init[_workers_num + i] = HOST_MAX_SEND_WQES - 2 * _workers_num + i; //will be inc. when calling grecv
}
/* for(int i = 0 ; i < send_arr_size ; i++) {
if( i < send_arr_size/2 ) { // PI part
send_init[i] = HOST_MAX_SEND_WQES - 1;//0;
} else { // CI part
send_init[i] = HOST_MAX_SEND_WQES - 2; // will be inc. when calling grecv
}
}*/
CUDA_CHECK(hipMemcpy(send_buf, send_init, 2 * _workers_num * sizeof(unsigned int), hipMemcpyHostToDevice));
mr_send = ibv_reg_mr(pd, send_buf, 2 * _workers_num * sizeof(unsigned int), IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ);
if (!mr_send) {
std::cerr << "ibv_reg_mr() failed for data_from_host" << std::endl;
exit(1);
}
struct ibv_cq *recv_cq = ibv_create_cq(context, HOST_RECV_CQ_SIZE, NULL, NULL, 0);
if (!recv_cq) {
std::cerr << "ibv_create_cq() failed" << std::endl;
exit(1);
}
struct ibv_cq *send_cq = ibv_create_cq(context, HOST_SEND_CQ_SIZE, NULL, NULL, 0);
if (!send_cq) {
std::cerr << "ibv_create_cq() failed" << std::endl;
exit(1);
}
struct ibv_qp_init_attr qp_init_attr;
memset(&qp_init_attr, 0, sizeof(struct ibv_qp_init_attr));
qp_init_attr.send_cq = send_cq;
qp_init_attr.recv_cq = recv_cq;
qp_init_attr.qp_type = IBV_QPT_RC;
qp_init_attr.cap.max_send_wr = 0;
qp_init_attr.cap.max_recv_wr = HOST_MAX_RECV_WQES;
qp_init_attr.cap.max_send_sge = 0;
qp_init_attr.cap.max_recv_sge = 1;
// qp_init_attr.cap.max_inline_data = 512;
struct ibv_qp *qp = ibv_create_qp(pd, &qp_init_attr);
if (!qp) {
std::cerr << "ibv_create_qp() failed errno= " << errno << std::endl;
exit(1);
}
int ret;
struct ibv_port_attr port_attr;
ret = ibv_query_port(context, PORT_NUM, &port_attr);
if (ret) {
std::cerr << "ibv_query_port() failed ret= " << ret << std::endl;
exit(1);
}
struct ib_info_t my_info;
my_info.lid = port_attr.lid;
my_info.qpn = qp->qp_num;
my_info.mkey_data_buffer = mr_recv->rkey;
my_info.addr_data_buffer = (uintptr_t)mr_recv->addr;
my_info.mkey_response_buffer = mr_send->rkey;
my_info.addr_response_buffer = (uintptr_t)mr_send->addr;
int gid_index = get_gid_index(context);
if (ibv_query_gid(context, 1, gid_index, &(my_info.gid) )) {
std::cerr << "ibv_query_gid failed for gid " << gid_index << std::endl;
exit(1);
}
ret = send(sfd, &my_info, sizeof(struct ib_info_t), 0);
if (ret < 0) {
std::cerr << "send" << std::endl;
exit(1);
}
struct ib_info_t client_info;
ret = recv(sfd, &client_info, sizeof(struct ib_info_t), 0);
if (ret < 0) {
std::cerr << "recv" << std::endl;
exit(1);
}
ib_resources->rmr_recv_key = client_info.mkey_data_buffer;
ib_resources->rmr_recv_addr = client_info.addr_data_buffer;
ib_resources->rmr_send_key = client_info.mkey_response_buffer;
ib_resources->rmr_send_addr = client_info.addr_response_buffer;
struct ibv_qp_attr qp_attr;
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_INIT;
qp_attr.pkey_index = 0;
qp_attr.port_num = PORT_NUM;
qp_attr.qp_access_flags = IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
if (ret) {
std::cerr << "ibv_modify_qp() to INIT failed" << std::endl;
exit(1);
}
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_RTR;
qp_attr.path_mtu = IBV_MTU_4096;
qp_attr.dest_qp_num = client_info.qpn;
qp_attr.rq_psn = 0 ;
qp_attr.max_dest_rd_atomic = 1;
qp_attr.min_rnr_timer = 12;
qp_attr.ah_attr.is_global = 1;
qp_attr.ah_attr.grh.dgid = client_info.gid;
qp_attr.ah_attr.grh.sgid_index = get_gid_index(context);
qp_attr.ah_attr.grh.flow_label = 0;
qp_attr.ah_attr.grh.hop_limit = 1;
qp_attr.ah_attr.grh.traffic_class = 0;
qp_attr.ah_attr.dlid = client_info.lid;
qp_attr.ah_attr.sl = 0;
qp_attr.ah_attr.src_path_bits = 0;
qp_attr.ah_attr.port_num = PORT_NUM;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN | IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER);
if (ret) {
std::cerr << "ibv_modify_qp() to RTR failed ret= " << ret<< std::endl;
exit(1);
}
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_RTS;
qp_attr.sq_psn = 0;
qp_attr.timeout = 14;
qp_attr.retry_cnt = 7;
qp_attr.rnr_retry = 7;
qp_attr.max_rd_atomic = 1;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | IBV_QP_SQ_PSN | IBV_QP_MAX_QP_RD_ATOMIC);
if (ret) {
std::cerr << "ibv_modify_qp() to RTS failed" << std::endl;
exit(1);
}
ib_resources->context = context;
ib_resources->pd = pd;
ib_resources->qp = qp;
ib_resources->recv_cq = recv_cq;
ib_resources->send_cq = send_cq;
ib_resources->lrecv_buf = recv_buf;
ib_resources->lmr_recv = mr_recv;
ib_resources->lsend_buf = send_buf;
ib_resources->lmr_send = mr_send;
return ib_resources;
}
ib_resources_t* hostContext::setup_recv_data_connection(const string& interface, int sfd) {
ib_resources_t *ib_resources = (ib_resources_t *)malloc(sizeof(struct ib_resources_t));
ibv_device **device_list = ibv_get_device_list(NULL);
if (!device_list) {
std::cerr << "ERROR: ibv_get_device_list failed" << std::endl;
exit(1);
}
string device_name = ib_device_from_netdev(interface.c_str());
struct ibv_context *context = ibv_open_device_by_name(device_name);
struct ibv_pd *pd = ibv_alloc_pd(context);
if (!pd) {
std::cerr << "ibv_alloc_pd() failed" << std::endl;
exit(1);
}
struct ibv_mr *mr_recv;
char *recv_buf;
CUDA_CHECK(hipMalloc(&recv_buf,HOST_TOTAL_DATA_FROM_CLIENT_SIZE));
CUDA_CHECK(hipMemset(recv_buf, 0, HOST_TOTAL_DATA_FROM_CLIENT_SIZE));
// printf("ib_resources Data: recv_buf=%p size=%d\n",recv_buf,HOST_TOTAL_DATA_FROM_CLIENT_SIZE);
mr_recv = ibv_reg_mr(pd, recv_buf, HOST_TOTAL_DATA_FROM_CLIENT_SIZE, IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
if (!mr_recv) {
std::cerr << "ibv_reg_mr() failed for data_for_host" << std::endl;
exit(1);
}
struct ibv_cq *recv_cq = ibv_create_cq(context, HOST_RECV_CQ_SIZE, NULL, NULL, 0);
if (!recv_cq) {
printf("ERROR: ibv_create_cq() failed\n");
exit(1);
}
struct ibv_qp_init_attr qp_init_attr;
memset(&qp_init_attr, 0, sizeof(struct ibv_qp_init_attr));
qp_init_attr.send_cq = recv_cq;
qp_init_attr.recv_cq = recv_cq;
qp_init_attr.qp_type = IBV_QPT_RC;
qp_init_attr.cap.max_send_wr = 0;
qp_init_attr.cap.max_recv_wr = HOST_MAX_RECV_WQES;
qp_init_attr.cap.max_send_sge = 0;
qp_init_attr.cap.max_recv_sge = 1;
struct ibv_qp *qp = ibv_create_qp(pd, &qp_init_attr);
if (!qp) {
std::cerr << "ibv_create_qp() failed errno= " << errno << std::endl;
exit(1);
}
struct ibv_port_attr port_attr;
int ret = ibv_query_port(context, PORT_NUM, &port_attr);
if (ret) {
std::cerr << "ibv_query_port() failed ret=" << ret << std::endl;
exit(1);
}
struct ib_info_t my_info;
my_info.lid = port_attr.lid;
my_info.qpn = qp->qp_num;
my_info.mkey_data_buffer = mr_recv->rkey;
my_info.addr_data_buffer = (uintptr_t)mr_recv->addr;
int gid_index = get_gid_index(context);
if (ibv_query_gid(context, 1, gid_index, &(my_info.gid) )) {
std::cerr << "ibv_query_gid failed for gid " << gid_index << std::endl;
exit(1);
}
ret = send(sfd, &my_info, sizeof(struct ib_info_t), 0);
if (ret < 0) {
std::cerr << "send" << std::endl;
exit(1);
}
struct ib_info_t client_info;
ret = recv(sfd, &client_info, sizeof(struct ib_info_t), 0);
if (ret < 0) {
std::cerr << "recv" << std::endl;
exit(1);
}
struct ibv_qp_attr qp_attr;
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_INIT;
qp_attr.pkey_index = 0;
qp_attr.port_num = PORT_NUM;
qp_attr.qp_access_flags = IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
if (ret) {
std::cerr << "ibv_modify_qp() to INIT failed" << std::endl;
exit(1);
}
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_RTR;
qp_attr.path_mtu = IBV_MTU_4096;
qp_attr.dest_qp_num = client_info.qpn;
qp_attr.rq_psn = 0 ;
qp_attr.max_dest_rd_atomic = 1;
qp_attr.min_rnr_timer = 12;
qp_attr.ah_attr.is_global = 1;
qp_attr.ah_attr.grh.dgid = client_info.gid;
qp_attr.ah_attr.grh.sgid_index = get_gid_index(context);
qp_attr.ah_attr.grh.flow_label = 0;
qp_attr.ah_attr.grh.hop_limit = 1;
qp_attr.ah_attr.grh.traffic_class = 0;
qp_attr.ah_attr.dlid = client_info.lid;
qp_attr.ah_attr.sl = 0;
qp_attr.ah_attr.src_path_bits = 0;
qp_attr.ah_attr.port_num = PORT_NUM;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN | IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER);
if (ret) {
std::cerr << "ibv_modify_qp() to RTR failed ret= " << ret << std::endl;
exit(1);
}
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_RTS;
qp_attr.sq_psn = 0;
qp_attr.timeout = 14;
qp_attr.retry_cnt = 7;
qp_attr.rnr_retry = 7;
qp_attr.max_rd_atomic = 1;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | IBV_QP_SQ_PSN | IBV_QP_MAX_QP_RD_ATOMIC);
if (ret) {
std::cerr << "ibv_modify_qp() to RTS failed" << std::endl;
exit(1);
}
ib_resources->context = context;
ib_resources->pd = pd;
ib_resources->qp = qp;
ib_resources->recv_cq = recv_cq;
ib_resources->lrecv_buf = recv_buf;
ib_resources->lmr_recv = mr_recv;
return ib_resources;
}
ib_resources_t* hostContext::setup_send_data_connection(const string& interface, int sfd) {
ib_resources_t *ib_resources = (ib_resources_t *)malloc(sizeof(struct ib_resources_t));
ibv_device **device_list = ibv_get_device_list(NULL);
if (!device_list) {
std::cerr << "ERROR: ibv_get_device_list failed" << std::endl;
exit(1);
}
string device_name = ib_device_from_netdev(interface.c_str());
struct ibv_context *context = ibv_open_device_by_name(device_name);
struct ibv_pd *pd = ibv_alloc_pd(context);
if (!pd) {
std::cerr << "ibv_alloc_pd() failed" << std::endl;
exit(1);
}
struct ibv_mr *mr_send;
char *send_buf;
CUDA_CHECK(hipMalloc(&send_buf, HOST_TOTAL_DATA_TO_CLIENT_SIZE));
mr_send = ibv_reg_mr(pd, send_buf, HOST_TOTAL_DATA_TO_CLIENT_SIZE, IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ);
if (!mr_send) {
std::cerr << "ibv_reg_mr() failed for data_from_host" << std::endl;
exit(1);
}
struct ibv_cq *recv_cq = ibv_create_cq(context, HOST_RECV_CQ_SIZE, NULL, NULL, 0);
if (!recv_cq) {
printf("ERROR: ibv_create_cq() failed\n");
exit(1);
}
struct ibv_qp_init_attr qp_init_attr;
memset(&qp_init_attr, 0, sizeof(struct ibv_qp_init_attr));
qp_init_attr.send_cq = recv_cq;
qp_init_attr.recv_cq = recv_cq;
qp_init_attr.qp_type = IBV_QPT_RC;
qp_init_attr.cap.max_send_wr = 0;
qp_init_attr.cap.max_recv_wr = HOST_MAX_RECV_WQES;
qp_init_attr.cap.max_send_sge = 0;
qp_init_attr.cap.max_recv_sge = 1;
// qp_init_attr.cap.max_inline_data = 0;
struct ibv_qp *qp = ibv_create_qp(pd, &qp_init_attr);
if (!qp) {
std::cerr << "ibv_create_qp() failed errno= " << errno << std::endl;
exit(1);
}
int ret;
struct ibv_port_attr port_attr;
ret = ibv_query_port(context, PORT_NUM, &port_attr);
if (ret) {
std::cerr << "ibv_query_port() failed ret=" << ret << std::endl;
exit(1);
}
struct ib_info_t my_info;
my_info.lid = port_attr.lid;
my_info.qpn = qp->qp_num;
my_info.mkey_response_buffer = mr_send->rkey;
my_info.addr_response_buffer = (uintptr_t)mr_send->addr;
int gid_index = get_gid_index(context);
if (ibv_query_gid(context, 1, gid_index, &(my_info.gid) )) {
std::cerr << "ibv_query_gid failed for gid " << gid_index << std::endl;
exit(1);
}
ret = send(sfd, &my_info, sizeof(struct ib_info_t), 0);
if (ret < 0) {
std::cerr << "send" << std::endl;
exit(1);
}
struct ib_info_t client_info;
ret = recv(sfd, &client_info, sizeof(struct ib_info_t), 0);
if (ret < 0) {
std::cerr << "recv" << std::endl;
exit(1);
}
struct ibv_qp_attr qp_attr;
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_INIT;
qp_attr.pkey_index = 0;
qp_attr.port_num = PORT_NUM;
qp_attr.qp_access_flags = IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
if (ret) {
std::cerr << "ibv_modify_qp() to INIT failed" << std::endl;
exit(1);
}
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_RTR;
qp_attr.path_mtu = IBV_MTU_4096;
qp_attr.dest_qp_num = client_info.qpn;
qp_attr.rq_psn = 0 ;
qp_attr.max_dest_rd_atomic = 1;
qp_attr.min_rnr_timer = 12;
qp_attr.ah_attr.is_global = 1;
qp_attr.ah_attr.grh.dgid = client_info.gid;
qp_attr.ah_attr.grh.sgid_index = get_gid_index(context);
qp_attr.ah_attr.grh.flow_label = 0;
qp_attr.ah_attr.grh.hop_limit = 1;
qp_attr.ah_attr.grh.traffic_class = 0;
qp_attr.ah_attr.dlid = client_info.lid;
qp_attr.ah_attr.sl = 0;
qp_attr.ah_attr.src_path_bits = 0;
qp_attr.ah_attr.port_num = PORT_NUM;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN | IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER);
if (ret) {
std::cerr << "ibv_modify_qp() to RTR failed ret= " << ret << std::endl;
exit(1);
}
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_RTS;
qp_attr.sq_psn = 0;
qp_attr.timeout = 14;
qp_attr.retry_cnt = 7;
qp_attr.rnr_retry = 7;
qp_attr.max_rd_atomic = 1;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | IBV_QP_SQ_PSN | IBV_QP_MAX_QP_RD_ATOMIC);
if (ret) {
std::cerr << "ibv_modify_qp() to RTS failed" << std::endl;
exit(1);
}
ib_resources->context = context;
ib_resources->pd = pd;
ib_resources->qp = qp;
ib_resources->recv_cq = recv_cq;
ib_resources->send_cq = recv_cq;
ib_resources->lsend_buf = send_buf;
ib_resources->lmr_send = mr_send;
return ib_resources;
}
hostContext::hostContext(const string& interface, unsigned int workers_num, unsigned int tcp_port) : _workers_num(workers_num) {
int lfd, sfd;
int server_tcp_port = tcp_port;
lfd = socket(AF_INET, SOCK_STREAM, 0);
if (lfd < 0) {
std::cerr << "socket" << std::endl;
exit(1);
}
struct sockaddr_in server_addr;
memset(&server_addr, 0, sizeof(struct sockaddr_in));
server_addr.sin_family = AF_INET;
server_addr.sin_addr.s_addr = INADDR_ANY;
server_addr.sin_port = htons(server_tcp_port);
if (bind(lfd, (struct sockaddr *)&server_addr, sizeof(struct sockaddr_in)) < 0) {
std::cerr << "bind lfd" << std::endl;
exit(1);
}
listen(lfd, 1);
std::cout << "Host is waiting on port " << server_tcp_port << " to establish RX Queue. BlueField can connect." << std::endl;
sfd = accept(lfd, NULL, NULL);
if (sfd < 0) {
std::cerr << "accept sfd1" << std::endl;
exit(1);
}
std::cout << "BlueField is connected" << std::endl;
std::cout << "create RX Queue " << std::endl;
recv_data_ib_resources = setup_recv_data_connection(interface,sfd);
close(sfd);
std::cout << "Host is waiting on port " << server_tcp_port << " to establish TX Queue. BlueField can connect." << std::endl;
sfd = accept(lfd, NULL, NULL);
if(sfd < 0) {
std::cerr << "accept sfd" << std::endl;
exit(1);
}
std::cout << "create TX Queue " << std::endl;
send_data_ib_resources = setup_send_data_connection(interface,sfd);
std::cout << "create Side Channel Notification " << std::endl;
notify_ib_resources = setup_notify_connection(interface,sfd);
close(sfd);
close(lfd);
_d_req_base_addresses = NULL;
_d_resp_base_addresses = NULL;
}
hostContext::~hostContext() {
std::cout << "kill hostcontext" << std::endl;
teardown_connection(notify_ib_resources);
teardown_connection(recv_data_ib_resources);
teardown_connection(send_data_ib_resources);
free(recv_data_ib_resources);
free(send_data_ib_resources);
if(_d_req_base_addresses != NULL){
CUDA_CHECK(hipFree(_d_req_base_addresses));
}
if(_d_resp_base_addresses != NULL){
CUDA_CHECK(hipFree(_d_resp_base_addresses));
}
}
void* hostContext::getRequestBaseAddress() {
return recv_data_ib_resources->lrecv_buf;
}
void* hostContext::getResponseBaseAddress() {
return send_data_ib_resources->lsend_buf;
}
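// The notify send buffer holds 2 * _workers_num unsigned ints: the first _workers_num entries
// are the per-worker response producer indices (PI), the next _workers_num are the request
// consumer indices (CI).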
unsigned int* hostContext::getRequestCIBaseAddress() {
return (unsigned int*) (notify_ib_resources->lsend_buf) + _workers_num;
}
unsigned int* hostContext::getResponsePIBaseAddress() {
return (unsigned int*) notify_ib_resources->lsend_buf;
}
unsigned int* hostContext::getResponseCIBaseAddress() {
return (unsigned int*) (notify_ib_resources->lrecv_buf);
}
void** hostContext::getDeviceReqBaseAddresses() {
void *req_base_addresses = getRequestBaseAddress();
CUDA_CHECK(hipMalloc(&_d_req_base_addresses, sizeof(void*)));
CUDA_CHECK(hipMemcpy(_d_req_base_addresses,&req_base_addresses, sizeof(void*), hipMemcpyHostToDevice));
return _d_req_base_addresses;
}
void** hostContext::getDeviceRespBaseAddresses() {
void *resp_base_addresses = getResponseBaseAddress();
CUDA_CHECK(hipMalloc(&_d_resp_base_addresses, sizeof(void*)));
CUDA_CHECK(hipMemcpy(_d_resp_base_addresses, &resp_base_addresses, sizeof(void*), hipMemcpyHostToDevice));
return _d_resp_base_addresses;
}
void hostContext::waitDevice(){
CUDA_CHECK(hipDeviceSynchronize());
}
|
e2aebee95b59e29a00b41e30001740ecad193959.cu
|
/*
* Copyright (c) Maroun Tork, Lina Maudlej and Mark Silberstein
* All rights reserved.
* If used, please cite: PAPER NAME, AUTHORS, CONFERENCE WHERE PUBLISHED
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "bf_host.cu.hpp"
void hostContext::teardown_connection(ib_resources_t* ib_resources) {
ibv_destroy_qp(ib_resources->qp);
ibv_destroy_cq(ib_resources->recv_cq);
ibv_destroy_cq(ib_resources->send_cq);
ibv_dereg_mr(ib_resources->lmr_recv);
ibv_dereg_mr(ib_resources->lmr_send);
// lrecv_buf/lsend_buf are device buffers allocated with cudaMalloc in the setup functions
cudaFree(ib_resources->lrecv_buf);
cudaFree(ib_resources->lsend_buf);
ibv_dealloc_pd(ib_resources->pd);
ibv_close_device(ib_resources->context);
free(ib_resources);
}
ib_resources_t* hostContext::setup_notify_connection(const string& interface, int sfd) {
ib_resources_t *ib_resources = (struct ib_resources_t *)malloc(sizeof(struct ib_resources_t));
struct ibv_device **device_list = ibv_get_device_list(NULL);
if (!device_list) {
std::cerr << "ibv_get_device_list failed" << std::endl;
exit(1);
}
string device_name = ib_device_from_netdev(interface.c_str());
struct ibv_context *context = ibv_open_device_by_name(device_name);
struct ibv_pd *pd = ibv_alloc_pd(context);
if (!pd) {
std::cerr << "ibv_alloc_pd() failed" << std::endl;
exit(1);
}
struct ibv_mr *mr_recv;
char* recv_buf;
CUDA_CHECK(cudaMalloc(&recv_buf, _workers_num * sizeof(unsigned int)));
unsigned int recv_arr_size = _workers_num;
unsigned int recv_init[recv_arr_size];
for(int i = 0 ; i < recv_arr_size ; i++) {
recv_init[i] = HOST_MAX_RECV_WQES - _workers_num + i;
}
CUDA_CHECK(cudaMemcpy(recv_buf, recv_init, _workers_num * sizeof(unsigned int), cudaMemcpyHostToDevice));
mr_recv = ibv_reg_mr(pd, recv_buf, _workers_num * sizeof(unsigned int), IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
if (!mr_recv) {
std::cerr << "ibv_reg_mr() failed for data_for_host" << std::endl;
exit(1);
}
struct ibv_mr *mr_send;
char *send_buf;
CUDA_CHECK(cudaMalloc(&send_buf,2 * _workers_num * sizeof(unsigned int)));
unsigned int send_arr_size = 2 * _workers_num;
unsigned int send_init[send_arr_size];
for(int i = 0 ; i < _workers_num ; i++) {
send_init[i] = HOST_MAX_SEND_WQES - _workers_num + i;
}
for(int i = 0 ; i < _workers_num ; i++) {
send_init[_workers_num + i] = HOST_MAX_SEND_WQES - 2 * _workers_num + i; //will be inc. when calling grecv
}
/* for(int i = 0 ; i < send_arr_size ; i++) {
if( i < send_arr_size/2 ) { // PI part
send_init[i] = HOST_MAX_SEND_WQES - 1;//0;
} else { // CI part
send_init[i] = HOST_MAX_SEND_WQES - 2; // will be inc. when calling grecv
}
}*/
CUDA_CHECK(cudaMemcpy(send_buf, send_init, 2 * _workers_num * sizeof(unsigned int), cudaMemcpyHostToDevice));
mr_send = ibv_reg_mr(pd, send_buf, 2 * _workers_num * sizeof(unsigned int), IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ);
if (!mr_send) {
std::cerr << "ibv_reg_mr() failed for data_from_host" << std::endl;
exit(1);
}
struct ibv_cq *recv_cq = ibv_create_cq(context, HOST_RECV_CQ_SIZE, NULL, NULL, 0);
if (!recv_cq) {
std::cerr << "ibv_create_cq() failed" << std::endl;
exit(1);
}
struct ibv_cq *send_cq = ibv_create_cq(context, HOST_SEND_CQ_SIZE, NULL, NULL, 0);
if (!send_cq) {
std::cerr << "ibv_create_cq() failed" << std::endl;
exit(1);
}
struct ibv_qp_init_attr qp_init_attr;
memset(&qp_init_attr, 0, sizeof(struct ibv_qp_init_attr));
qp_init_attr.send_cq = send_cq;
qp_init_attr.recv_cq = recv_cq;
qp_init_attr.qp_type = IBV_QPT_RC;
qp_init_attr.cap.max_send_wr = 0;
qp_init_attr.cap.max_recv_wr = HOST_MAX_RECV_WQES;
qp_init_attr.cap.max_send_sge = 0;
qp_init_attr.cap.max_recv_sge = 1;
// qp_init_attr.cap.max_inline_data = 512;
struct ibv_qp *qp = ibv_create_qp(pd, &qp_init_attr);
if (!qp) {
std::cerr << "ibv_create_qp() failed errno= " << errno << std::endl;
exit(1);
}
int ret;
struct ibv_port_attr port_attr;
ret = ibv_query_port(context, PORT_NUM, &port_attr);
if (ret) {
std::cerr << "ibv_query_port() failed ret= " << ret << std::endl;
exit(1);
}
struct ib_info_t my_info;
my_info.lid = port_attr.lid;
my_info.qpn = qp->qp_num;
my_info.mkey_data_buffer = mr_recv->rkey;
my_info.addr_data_buffer = (uintptr_t)mr_recv->addr;
my_info.mkey_response_buffer = mr_send->rkey;
my_info.addr_response_buffer = (uintptr_t)mr_send->addr;
int gid_index = get_gid_index(context);
if (ibv_query_gid(context, 1, gid_index, &(my_info.gid) )) {
std::cerr << "ibv_query_gid failed for gid " << gid_index << std::endl;
exit(1);
}
ret = send(sfd, &my_info, sizeof(struct ib_info_t), 0);
if (ret < 0) {
std::cerr << "send" << std::endl;
exit(1);
}
struct ib_info_t client_info;
ret = recv(sfd, &client_info, sizeof(struct ib_info_t), 0);
if (ret < 0) {
std::cerr << "recv" << std::endl;
exit(1);
}
ib_resources->rmr_recv_key = client_info.mkey_data_buffer;
ib_resources->rmr_recv_addr = client_info.addr_data_buffer;
ib_resources->rmr_send_key = client_info.mkey_response_buffer;
ib_resources->rmr_send_addr = client_info.addr_response_buffer;
struct ibv_qp_attr qp_attr;
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_INIT;
qp_attr.pkey_index = 0;
qp_attr.port_num = PORT_NUM;
qp_attr.qp_access_flags = IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
if (ret) {
std::cerr << "ibv_modify_qp() to INIT failed" << std::endl;
exit(1);
}
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_RTR;
qp_attr.path_mtu = IBV_MTU_4096;
qp_attr.dest_qp_num = client_info.qpn;
qp_attr.rq_psn = 0 ;
qp_attr.max_dest_rd_atomic = 1;
qp_attr.min_rnr_timer = 12;
qp_attr.ah_attr.is_global = 1;
qp_attr.ah_attr.grh.dgid = client_info.gid;
qp_attr.ah_attr.grh.sgid_index = get_gid_index(context);
qp_attr.ah_attr.grh.flow_label = 0;
qp_attr.ah_attr.grh.hop_limit = 1;
qp_attr.ah_attr.grh.traffic_class = 0;
qp_attr.ah_attr.dlid = client_info.lid;
qp_attr.ah_attr.sl = 0;
qp_attr.ah_attr.src_path_bits = 0;
qp_attr.ah_attr.port_num = PORT_NUM;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN | IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER);
if (ret) {
std::cerr << "ibv_modify_qp() to RTR failed ret= " << ret<< std::endl;
exit(1);
}
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_RTS;
qp_attr.sq_psn = 0;
qp_attr.timeout = 14;
qp_attr.retry_cnt = 7;
qp_attr.rnr_retry = 7;
qp_attr.max_rd_atomic = 1;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | IBV_QP_SQ_PSN | IBV_QP_MAX_QP_RD_ATOMIC);
if (ret) {
std::cerr << "ibv_modify_qp() to RTS failed" << std::endl;
exit(1);
}
ib_resources->context = context;
ib_resources->pd = pd;
ib_resources->qp = qp;
ib_resources->recv_cq = recv_cq;
ib_resources->send_cq = send_cq;
ib_resources->lrecv_buf = recv_buf;
ib_resources->lmr_recv = mr_recv;
ib_resources->lsend_buf = send_buf;
ib_resources->lmr_send = mr_send;
return ib_resources;
}
ib_resources_t* hostContext::setup_recv_data_connection(const string& interface, int sfd) {
ib_resources_t *ib_resources = (ib_resources_t *)malloc(sizeof(struct ib_resources_t));
ibv_device **device_list = ibv_get_device_list(NULL);
if (!device_list) {
std::cerr << "ERROR: ibv_get_device_list failed" << std::endl;
exit(1);
}
string device_name = ib_device_from_netdev(interface.c_str());
struct ibv_context *context = ibv_open_device_by_name(device_name);
struct ibv_pd *pd = ibv_alloc_pd(context);
if (!pd) {
std::cerr << "ibv_alloc_pd() failed" << std::endl;
exit(1);
}
struct ibv_mr *mr_recv;
char *recv_buf;
CUDA_CHECK(cudaMalloc(&recv_buf,HOST_TOTAL_DATA_FROM_CLIENT_SIZE));
CUDA_CHECK(cudaMemset(recv_buf, 0, HOST_TOTAL_DATA_FROM_CLIENT_SIZE));
// printf("ib_resources Data: recv_buf=%p size=%d\n",recv_buf,HOST_TOTAL_DATA_FROM_CLIENT_SIZE);
mr_recv = ibv_reg_mr(pd, recv_buf, HOST_TOTAL_DATA_FROM_CLIENT_SIZE, IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
if (!mr_recv) {
std::cerr << "ibv_reg_mr() failed for data_for_host" << std::endl;
exit(1);
}
struct ibv_cq *recv_cq = ibv_create_cq(context, HOST_RECV_CQ_SIZE, NULL, NULL, 0);
if (!recv_cq) {
printf("ERROR: ibv_create_cq() failed\n");
exit(1);
}
struct ibv_qp_init_attr qp_init_attr;
memset(&qp_init_attr, 0, sizeof(struct ibv_qp_init_attr));
qp_init_attr.send_cq = recv_cq;
qp_init_attr.recv_cq = recv_cq;
qp_init_attr.qp_type = IBV_QPT_RC;
qp_init_attr.cap.max_send_wr = 0;
qp_init_attr.cap.max_recv_wr = HOST_MAX_RECV_WQES;
qp_init_attr.cap.max_send_sge = 0;
qp_init_attr.cap.max_recv_sge = 1;
struct ibv_qp *qp = ibv_create_qp(pd, &qp_init_attr);
if (!qp) {
std::cerr << "ibv_create_qp() failed errno= " << errno << std::endl;
exit(1);
}
struct ibv_port_attr port_attr;
int ret = ibv_query_port(context, PORT_NUM, &port_attr);
if (ret) {
std::cerr << "ibv_query_port() failed ret=" << ret << std::endl;
exit(1);
}
struct ib_info_t my_info;
my_info.lid = port_attr.lid;
my_info.qpn = qp->qp_num;
my_info.mkey_data_buffer = mr_recv->rkey;
my_info.addr_data_buffer = (uintptr_t)mr_recv->addr;
int gid_index = get_gid_index(context);
if (ibv_query_gid(context, 1, gid_index, &(my_info.gid) )) {
std::cerr << "ibv_query_gid failed for gid " << gid_index << std::endl;
exit(1);
}
ret = send(sfd, &my_info, sizeof(struct ib_info_t), 0);
if (ret < 0) {
std::cerr << "send" << std::endl;
exit(1);
}
struct ib_info_t client_info;
ret = recv(sfd, &client_info, sizeof(struct ib_info_t), 0);
if (ret < 0) {
std::cerr << "recv" << std::endl;
exit(1);
}
struct ibv_qp_attr qp_attr;
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_INIT;
qp_attr.pkey_index = 0;
qp_attr.port_num = PORT_NUM;
qp_attr.qp_access_flags = IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
if (ret) {
std::cerr << "ibv_modify_qp() to INIT failed" << std::endl;
exit(1);
}
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_RTR;
qp_attr.path_mtu = IBV_MTU_4096;
qp_attr.dest_qp_num = client_info.qpn;
qp_attr.rq_psn = 0 ;
qp_attr.max_dest_rd_atomic = 1;
qp_attr.min_rnr_timer = 12;
qp_attr.ah_attr.is_global = 1;
qp_attr.ah_attr.grh.dgid = client_info.gid;
qp_attr.ah_attr.grh.sgid_index = get_gid_index(context);
qp_attr.ah_attr.grh.flow_label = 0;
qp_attr.ah_attr.grh.hop_limit = 1;
qp_attr.ah_attr.grh.traffic_class = 0;
qp_attr.ah_attr.dlid = client_info.lid;
qp_attr.ah_attr.sl = 0;
qp_attr.ah_attr.src_path_bits = 0;
qp_attr.ah_attr.port_num = PORT_NUM;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN | IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER);
if (ret) {
std::cerr << "ibv_modify_qp() to RTR failed ret= " << ret << std::endl;
exit(1);
}
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_RTS;
qp_attr.sq_psn = 0;
qp_attr.timeout = 14;
qp_attr.retry_cnt = 7;
qp_attr.rnr_retry = 7;
qp_attr.max_rd_atomic = 1;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | IBV_QP_SQ_PSN | IBV_QP_MAX_QP_RD_ATOMIC);
if (ret) {
std::cerr << "ibv_modify_qp() to RTS failed" << std::endl;
exit(1);
}
ib_resources->context = context;
ib_resources->pd = pd;
ib_resources->qp = qp;
ib_resources->recv_cq = recv_cq;
ib_resources->lrecv_buf = recv_buf;
ib_resources->lmr_recv = mr_recv;
return ib_resources;
}
ib_resources_t* hostContext::setup_send_data_connection(const string& interface, int sfd) {
ib_resources_t *ib_resources = (ib_resources_t *)malloc(sizeof(struct ib_resources_t));
ibv_device **device_list = ibv_get_device_list(NULL);
if (!device_list) {
std::cerr << "ERROR: ibv_get_device_list failed" << std::endl;
exit(1);
}
string device_name = ib_device_from_netdev(interface.c_str());
struct ibv_context *context = ibv_open_device_by_name(device_name);
struct ibv_pd *pd = ibv_alloc_pd(context);
if (!pd) {
std::cerr << "ibv_alloc_pd() failed" << std::endl;
exit(1);
}
struct ibv_mr *mr_send;
char *send_buf;
CUDA_CHECK(cudaMalloc(&send_buf, HOST_TOTAL_DATA_TO_CLIENT_SIZE));
mr_send = ibv_reg_mr(pd, send_buf, HOST_TOTAL_DATA_TO_CLIENT_SIZE, IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ);
if (!mr_send) {
std::cerr << "ibv_reg_mr() failed for data_from_host" << std::endl;
exit(1);
}
struct ibv_cq *recv_cq = ibv_create_cq(context, HOST_RECV_CQ_SIZE, NULL, NULL, 0);
if (!recv_cq) {
printf("ERROR: ibv_create_cq() failed\n");
exit(1);
}
struct ibv_qp_init_attr qp_init_attr;
memset(&qp_init_attr, 0, sizeof(struct ibv_qp_init_attr));
qp_init_attr.send_cq = recv_cq;
qp_init_attr.recv_cq = recv_cq;
qp_init_attr.qp_type = IBV_QPT_RC;
qp_init_attr.cap.max_send_wr = 0;
qp_init_attr.cap.max_recv_wr = HOST_MAX_RECV_WQES;
qp_init_attr.cap.max_send_sge = 0;
qp_init_attr.cap.max_recv_sge = 1;
// qp_init_attr.cap.max_inline_data = 0;
struct ibv_qp *qp = ibv_create_qp(pd, &qp_init_attr);
if (!qp) {
std::cerr << "ibv_create_qp() failed errno= " << errno << std::endl;
exit(1);
}
int ret;
struct ibv_port_attr port_attr;
ret = ibv_query_port(context, PORT_NUM, &port_attr);
if (ret) {
std::cerr << "ibv_query_port() failed ret=" << ret << std::endl;
exit(1);
}
struct ib_info_t my_info;
my_info.lid = port_attr.lid;
my_info.qpn = qp->qp_num;
my_info.mkey_response_buffer = mr_send->rkey;
my_info.addr_response_buffer = (uintptr_t)mr_send->addr;
int gid_index = get_gid_index(context);
if (ibv_query_gid(context, 1, gid_index, &(my_info.gid) )) {
std::cerr << "ibv_query_gid failed for gid " << gid_index << std::endl;
exit(1);
}
ret = send(sfd, &my_info, sizeof(struct ib_info_t), 0);
if (ret < 0) {
std::cerr << "send" << std::endl;
exit(1);
}
struct ib_info_t client_info;
ret = recv(sfd, &client_info, sizeof(struct ib_info_t), 0);
if (ret < 0) {
std::cerr << "recv" << std::endl;
exit(1);
}
struct ibv_qp_attr qp_attr;
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_INIT;
qp_attr.pkey_index = 0;
qp_attr.port_num = PORT_NUM;
qp_attr.qp_access_flags = IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
if (ret) {
std::cerr << "ibv_modify_qp() to INIT failed" << std::endl;
exit(1);
}
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_RTR;
qp_attr.path_mtu = IBV_MTU_4096;
qp_attr.dest_qp_num = client_info.qpn;
qp_attr.rq_psn = 0 ;
qp_attr.max_dest_rd_atomic = 1;
qp_attr.min_rnr_timer = 12;
qp_attr.ah_attr.is_global = 1;
qp_attr.ah_attr.grh.dgid = client_info.gid;
qp_attr.ah_attr.grh.sgid_index = get_gid_index(context);
qp_attr.ah_attr.grh.flow_label = 0;
qp_attr.ah_attr.grh.hop_limit = 1;
qp_attr.ah_attr.grh.traffic_class = 0;
qp_attr.ah_attr.dlid = client_info.lid;
qp_attr.ah_attr.sl = 0;
qp_attr.ah_attr.src_path_bits = 0;
qp_attr.ah_attr.port_num = PORT_NUM;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN | IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER);
if (ret) {
std::cerr << "ibv_modify_qp() to RTR failed ret= " << ret << std::endl;
exit(1);
}
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
qp_attr.qp_state = IBV_QPS_RTS;
qp_attr.sq_psn = 0;
qp_attr.timeout = 14;
qp_attr.retry_cnt = 7;
qp_attr.rnr_retry = 7;
qp_attr.max_rd_atomic = 1;
ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | IBV_QP_SQ_PSN | IBV_QP_MAX_QP_RD_ATOMIC);
if (ret) {
std::cerr << "ibv_modify_qp() to RTS failed" << std::endl;
exit(1);
}
ib_resources->context = context;
ib_resources->pd = pd;
ib_resources->qp = qp;
ib_resources->recv_cq = recv_cq;
ib_resources->send_cq = recv_cq;
ib_resources->lsend_buf = send_buf;
ib_resources->lmr_send = mr_send;
return ib_resources;
}
hostContext::hostContext(const string& interface, unsigned int workers_num, unsigned int tcp_port) : _workers_num(workers_num) {
int lfd, sfd;
int server_tcp_port = tcp_port;
lfd = socket(AF_INET, SOCK_STREAM, 0);
if (lfd < 0) {
std::cerr << "socket" << std::endl;
exit(1);
}
struct sockaddr_in server_addr;
memset(&server_addr, 0, sizeof(struct sockaddr_in));
server_addr.sin_family = AF_INET;
server_addr.sin_addr.s_addr = INADDR_ANY;
server_addr.sin_port = htons(server_tcp_port);
if (bind(lfd, (struct sockaddr *)&server_addr, sizeof(struct sockaddr_in)) < 0) {
std::cerr << "bind lfd" << std::endl;
exit(1);
}
listen(lfd, 1);
std::cout << "Host is waiting on port " << server_tcp_port << " to establish RX Queue. BlueField can connect." << std::endl;
sfd = accept(lfd, NULL, NULL);
if (sfd < 0) {
std::cerr << "accept sfd1" << std::endl;
exit(1);
}
std::cout << "BlueField is connected" << std::endl;
std::cout << "create RX Queue " << std::endl;
recv_data_ib_resources = setup_recv_data_connection(interface,sfd);
close(sfd);
std::cout << "Host is waiting on port " << server_tcp_port << " to establish TX Queue. BlueField can connect." << std::endl;
sfd = accept(lfd, NULL, NULL);
if(sfd < 0) {
std::cerr << "accept sfd" << std::endl;
exit(1);
}
std::cout << "create TX Queue " << std::endl;
send_data_ib_resources = setup_send_data_connection(interface,sfd);
std::cout << "create Side Channel Notification " << std::endl;
notify_ib_resources = setup_notify_connection(interface,sfd);
close(sfd);
close(lfd);
_d_req_base_addresses = NULL;
_d_resp_base_addresses = NULL;
}
hostContext::~hostContext() {
std::cout << "kill hostcontext" << std::endl;
teardown_connection(notify_ib_resources);
teardown_connection(recv_data_ib_resources);
teardown_connection(send_data_ib_resources);
free(recv_data_ib_resources);
free(send_data_ib_resources);
if(_d_req_base_addresses != NULL){
CUDA_CHECK(cudaFree(_d_req_base_addresses));
}
if(_d_resp_base_addresses != NULL){
CUDA_CHECK(cudaFree(_d_resp_base_addresses));
}
}
void* hostContext::getRequestBaseAddress() {
return recv_data_ib_resources->lrecv_buf;
}
void* hostContext::getResponseBaseAddress() {
return send_data_ib_resources->lsend_buf;
}
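// The notify send buffer holds 2 * _workers_num unsigned ints: the first _workers_num entries
// are the per-worker response producer indices (PI), the next _workers_num are the request
// consumer indices (CI).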
unsigned int* hostContext::getRequestCIBaseAddress() {
return (unsigned int*) (notify_ib_resources->lsend_buf) + _workers_num;
}
unsigned int* hostContext::getResponsePIBaseAddress() {
return (unsigned int*) notify_ib_resources->lsend_buf;
}
unsigned int* hostContext::getResponseCIBaseAddress() {
return (unsigned int*) (notify_ib_resources->lrecv_buf);
}
void** hostContext::getDeviceReqBaseAddresses() {
void *req_base_addresses = getRequestBaseAddress();
CUDA_CHECK(cudaMalloc(&_d_req_base_addresses, sizeof(void*)));
CUDA_CHECK(cudaMemcpy(_d_req_base_addresses,&req_base_addresses, sizeof(void*), cudaMemcpyHostToDevice));
return _d_req_base_addresses;
}
void** hostContext::getDeviceRespBaseAddresses() {
void *resp_base_addresses = getResponseBaseAddress();
CUDA_CHECK(cudaMalloc(&_d_resp_base_addresses, sizeof(void*)));
CUDA_CHECK(cudaMemcpy(_d_resp_base_addresses, &resp_base_addresses, sizeof(void*), cudaMemcpyHostToDevice));
return _d_resp_base_addresses;
}
void hostContext::waitDevice(){
CUDA_CHECK(cudaDeviceSynchronize());
}
|
dbb114482100745eba1852801fde872ec64dfcfb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel.h"
#include "plugin.h"
#include "hip/hip_fp16.h"
#include "gatherNMSOutputs.h"
#include <array>
// __half minus with fallback to float for old sm
inline __device__ __half minus_fb(const __half & a, const __half & b) {
#if __CUDA_ARCH__ >= 530
return a - b;
#else
return __float2half(__half2float(a) - __half2float(b));
#endif
}
// overload for float
inline __device__ float minus_fb(const float & a, const float & b) {
return a - b;
}
template <typename T_BBOX>
__device__ T_BBOX saturate(T_BBOX v)
{
return max(min(v, T_BBOX(1)), T_BBOX(0));
}
template <>
__device__ __half saturate(__half v)
{
#if __CUDA_ARCH__ >= 800
return __hmax(__hmin(v, __half(1)), __half(0));
#elif __CUDA_ARCH__ >= 530
return __hge(v, __half(1)) ? __half(1) : (__hle(v, __half(0)) ? __half(0) : v);
#else
return max(min(v, float(1)), float(0));
#endif
}
template <typename T_BBOX, typename T_SCORE, unsigned nthds_per_cta>
__launch_bounds__(nthds_per_cta)
__global__ void gatherNMSOutputs_kernel(
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const int* indices,
const T_SCORE* scores,
const T_BBOX* bboxData,
int* numDetections,
T_BBOX* nmsedBoxes,
T_BBOX* nmsedScores,
T_BBOX* nmsedClasses,
bool clipBoxes,
const T_SCORE scoreShift
)
{
if (keepTopK > topK)
return;
for (int i = blockIdx.x * nthds_per_cta + threadIdx.x;
i < numImages * keepTopK;
i += gridDim.x * nthds_per_cta)
{
const int imgId = i / keepTopK;
const int detId = i % keepTopK;
const int offset = imgId * numClasses * topK;
const int index = indices[offset + detId];
const T_SCORE score = scores[offset + detId];
if (index == -1)
{
nmsedClasses[i] = -1;
nmsedScores[i] = 0;
nmsedBoxes[i * 4] = 0;
nmsedBoxes[i * 4 + 1] = 0;
nmsedBoxes[i * 4 + 2] = 0;
nmsedBoxes[i * 4 + 3] = 0;
}
else
{
const int bboxOffset = imgId * (shareLocation ? numPredsPerClass : (numClasses * numPredsPerClass));
const int bboxId = ((shareLocation ? (index % numPredsPerClass)
: index % (numClasses * numPredsPerClass)) + bboxOffset) * 4;
nmsedClasses[i] = (index % (numClasses * numPredsPerClass)) / numPredsPerClass; // label
nmsedScores[i] = score; // confidence score
nmsedScores[i] = minus_fb(nmsedScores[i], scoreShift);
const T_BBOX xMin = bboxData[bboxId];
const T_BBOX yMin = bboxData[bboxId + 1];
const T_BBOX xMax = bboxData[bboxId + 2];
const T_BBOX yMax = bboxData[bboxId + 3];
// clipped bbox xmin
nmsedBoxes[i * 4] = clipBoxes ? saturate(xMin) : xMin;
// clipped bbox ymin
nmsedBoxes[i * 4 + 1] = clipBoxes ? saturate(yMin) : yMin;
// clipped bbox xmax
nmsedBoxes[i * 4 + 2] = clipBoxes ? saturate(xMax) : xMax;
// clipped bbox ymax
nmsedBoxes[i * 4 + 3] = clipBoxes ? saturate(yMax) : yMax;
atomicAdd(&numDetections[i / keepTopK], 1);
}
}
}
template <typename T_BBOX, typename T_SCORE>
pluginStatus_t gatherNMSOutputs_gpu(
hipStream_t stream,
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const void* indices,
const void* scores,
const void* bboxData,
void* numDetections,
void* nmsedBoxes,
void* nmsedScores,
void* nmsedClasses,
bool clipBoxes,
const float scoreShift
)
{
hipMemsetAsync(numDetections, 0, numImages * sizeof(int), stream);
const int BS = 32;
const int GS = 32;
hipLaunchKernelGGL(( gatherNMSOutputs_kernel<T_BBOX, T_SCORE, BS>), dim3(GS), dim3(BS), 0, stream, shareLocation, numImages, numPredsPerClass,
numClasses, topK, keepTopK,
(int*) indices, (T_SCORE*) scores, (T_BBOX*) bboxData,
(int*) numDetections,
(T_BBOX*) nmsedBoxes,
(T_BBOX*) nmsedScores,
(T_BBOX*) nmsedClasses,
clipBoxes,
T_SCORE(scoreShift)
);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// gatherNMSOutputs LAUNCH CONFIG {{{
typedef pluginStatus_t (*nmsOutFunc)(hipStream_t,
const bool,
const int,
const int,
const int,
const int,
const int,
const void*,
const void*,
const void*,
void*,
void*,
void*,
void*,
bool,
const float);
struct nmsOutLaunchConfig
{
DataType t_bbox;
DataType t_score;
nmsOutFunc function;
nmsOutLaunchConfig(DataType t_bbox, DataType t_score)
: t_bbox(t_bbox)
, t_score(t_score)
{
}
nmsOutLaunchConfig(DataType t_bbox, DataType t_score, nmsOutFunc function)
: t_bbox(t_bbox)
, t_score(t_score)
, function(function)
{
}
bool operator==(const nmsOutLaunchConfig& other)
{
return t_bbox == other.t_bbox && t_score == other.t_score;
}
};
using nvinfer1::DataType;
static std::array<nmsOutLaunchConfig, 2> nmsOutLCOptions = {
nmsOutLaunchConfig(DataType::kFLOAT, DataType::kFLOAT, gatherNMSOutputs_gpu<float, float>),
nmsOutLaunchConfig(DataType::kHALF, DataType::kHALF, gatherNMSOutputs_gpu<__half, __half>)
};
pluginStatus_t gatherNMSOutputs(
hipStream_t stream,
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const DataType DT_BBOX,
const DataType DT_SCORE,
const void* indices,
const void* scores,
const void* bboxData,
void* numDetections,
void* nmsedBoxes,
void* nmsedScores,
void* nmsedClasses,
bool clipBoxes,
const float scoreShift
)
{
nmsOutLaunchConfig lc = nmsOutLaunchConfig(DT_BBOX, DT_SCORE);
for (unsigned i = 0; i < nmsOutLCOptions.size(); ++i)
{
if (lc == nmsOutLCOptions[i])
{
DEBUG_PRINTF("gatherNMSOutputs kernel %d\n", i);
return nmsOutLCOptions[i].function(stream,
shareLocation,
numImages,
numPredsPerClass,
numClasses,
topK,
keepTopK,
indices,
scores,
bboxData,
numDetections,
nmsedBoxes,
nmsedScores,
nmsedClasses,
clipBoxes,
scoreShift
);
}
}
return STATUS_BAD_PARAM;
}
|
dbb114482100745eba1852801fde872ec64dfcfb.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel.h"
#include "plugin.h"
#include "cuda_fp16.h"
#include "gatherNMSOutputs.h"
#include <array>
// __half minus with fallback to float for old sm
inline __device__ __half minus_fb(const __half & a, const __half & b) {
#if __CUDA_ARCH__ >= 530
return a - b;
#else
return __float2half(__half2float(a) - __half2float(b));
#endif
}
// overload for float
inline __device__ float minus_fb(const float & a, const float & b) {
return a - b;
}
template <typename T_BBOX>
__device__ T_BBOX saturate(T_BBOX v)
{
return max(min(v, T_BBOX(1)), T_BBOX(0));
}
template <>
__device__ __half saturate(__half v)
{
#if __CUDA_ARCH__ >= 800
return __hmax(__hmin(v, __half(1)), __half(0));
#elif __CUDA_ARCH__ >= 530
return __hge(v, __half(1)) ? __half(1) : (__hle(v, __half(0)) ? __half(0) : v);
#else
return max(min(v, float(1)), float(0));
#endif
}
template <typename T_BBOX, typename T_SCORE, unsigned nthds_per_cta>
__launch_bounds__(nthds_per_cta)
__global__ void gatherNMSOutputs_kernel(
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const int* indices,
const T_SCORE* scores,
const T_BBOX* bboxData,
int* numDetections,
T_BBOX* nmsedBoxes,
T_BBOX* nmsedScores,
T_BBOX* nmsedClasses,
bool clipBoxes,
const T_SCORE scoreShift
)
{
if (keepTopK > topK)
return;
for (int i = blockIdx.x * nthds_per_cta + threadIdx.x;
i < numImages * keepTopK;
i += gridDim.x * nthds_per_cta)
{
const int imgId = i / keepTopK;
const int detId = i % keepTopK;
const int offset = imgId * numClasses * topK;
const int index = indices[offset + detId];
const T_SCORE score = scores[offset + detId];
if (index == -1)
{
nmsedClasses[i] = -1;
nmsedScores[i] = 0;
nmsedBoxes[i * 4] = 0;
nmsedBoxes[i * 4 + 1] = 0;
nmsedBoxes[i * 4 + 2] = 0;
nmsedBoxes[i * 4 + 3] = 0;
}
else
{
const int bboxOffset = imgId * (shareLocation ? numPredsPerClass : (numClasses * numPredsPerClass));
const int bboxId = ((shareLocation ? (index % numPredsPerClass)
: index % (numClasses * numPredsPerClass)) + bboxOffset) * 4;
nmsedClasses[i] = (index % (numClasses * numPredsPerClass)) / numPredsPerClass; // label
nmsedScores[i] = score; // confidence score
nmsedScores[i] = minus_fb(nmsedScores[i], scoreShift);
const T_BBOX xMin = bboxData[bboxId];
const T_BBOX yMin = bboxData[bboxId + 1];
const T_BBOX xMax = bboxData[bboxId + 2];
const T_BBOX yMax = bboxData[bboxId + 3];
// clipped bbox xmin
nmsedBoxes[i * 4] = clipBoxes ? saturate(xMin) : xMin;
// clipped bbox ymin
nmsedBoxes[i * 4 + 1] = clipBoxes ? saturate(yMin) : yMin;
// clipped bbox xmax
nmsedBoxes[i * 4 + 2] = clipBoxes ? saturate(xMax) : xMax;
// clipped bbox ymax
nmsedBoxes[i * 4 + 3] = clipBoxes ? saturate(yMax) : yMax;
atomicAdd(&numDetections[i / keepTopK], 1);
}
}
}
template <typename T_BBOX, typename T_SCORE>
pluginStatus_t gatherNMSOutputs_gpu(
cudaStream_t stream,
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const void* indices,
const void* scores,
const void* bboxData,
void* numDetections,
void* nmsedBoxes,
void* nmsedScores,
void* nmsedClasses,
bool clipBoxes,
const float scoreShift
)
{
cudaMemsetAsync(numDetections, 0, numImages * sizeof(int), stream);
const int BS = 32;
const int GS = 32;
gatherNMSOutputs_kernel<T_BBOX, T_SCORE, BS><<<GS, BS, 0, stream>>>(shareLocation, numImages, numPredsPerClass,
numClasses, topK, keepTopK,
(int*) indices, (T_SCORE*) scores, (T_BBOX*) bboxData,
(int*) numDetections,
(T_BBOX*) nmsedBoxes,
(T_BBOX*) nmsedScores,
(T_BBOX*) nmsedClasses,
clipBoxes,
T_SCORE(scoreShift)
);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// gatherNMSOutputs LAUNCH CONFIG {{{
typedef pluginStatus_t (*nmsOutFunc)(cudaStream_t,
const bool,
const int,
const int,
const int,
const int,
const int,
const void*,
const void*,
const void*,
void*,
void*,
void*,
void*,
bool,
const float);
struct nmsOutLaunchConfig
{
DataType t_bbox;
DataType t_score;
nmsOutFunc function;
nmsOutLaunchConfig(DataType t_bbox, DataType t_score)
: t_bbox(t_bbox)
, t_score(t_score)
{
}
nmsOutLaunchConfig(DataType t_bbox, DataType t_score, nmsOutFunc function)
: t_bbox(t_bbox)
, t_score(t_score)
, function(function)
{
}
bool operator==(const nmsOutLaunchConfig& other)
{
return t_bbox == other.t_bbox && t_score == other.t_score;
}
};
using nvinfer1::DataType;
static std::array<nmsOutLaunchConfig, 2> nmsOutLCOptions = {
nmsOutLaunchConfig(DataType::kFLOAT, DataType::kFLOAT, gatherNMSOutputs_gpu<float, float>),
nmsOutLaunchConfig(DataType::kHALF, DataType::kHALF, gatherNMSOutputs_gpu<__half, __half>)
};
pluginStatus_t gatherNMSOutputs(
cudaStream_t stream,
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const DataType DT_BBOX,
const DataType DT_SCORE,
const void* indices,
const void* scores,
const void* bboxData,
void* numDetections,
void* nmsedBoxes,
void* nmsedScores,
void* nmsedClasses,
bool clipBoxes,
const float scoreShift
)
{
nmsOutLaunchConfig lc = nmsOutLaunchConfig(DT_BBOX, DT_SCORE);
for (unsigned i = 0; i < nmsOutLCOptions.size(); ++i)
{
if (lc == nmsOutLCOptions[i])
{
DEBUG_PRINTF("gatherNMSOutputs kernel %d\n", i);
return nmsOutLCOptions[i].function(stream,
shareLocation,
numImages,
numPredsPerClass,
numClasses,
topK,
keepTopK,
indices,
scores,
bboxData,
numDetections,
nmsedBoxes,
nmsedScores,
nmsedClasses,
clipBoxes,
scoreShift
);
}
}
return STATUS_BAD_PARAM;
}
|
19921c28d807369b718733d9ebb5a4c1ae8c7110.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stk/cuda/cuda.h>
#include <stk/cuda/stream.h>
#include <stk/cuda/volume.h>
namespace cuda = stk::cuda;
__global__ void ssd_kernel(
cuda::VolumePtr<float> fixed,
cuda::VolumePtr<float> moving,
cuda::VolumePtr<float4> df,
dim3 dims,
float3 fixed_origin,
float3 fixed_spacing,
float3 moving_origin,
float3 inv_moving_spacing,
float3 delta,
cuda::VolumePtr<float2> cost_acc
)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int z = blockIdx.z*blockDim.z + threadIdx.z;
if (x >= dims.x ||
y >= dims.y ||
z >= dims.z)
{
return;
}
float3 d0 { df(x,y,z).x, df(x,y,z).y, df(x,y,z).z };
float3 d1 = d0 + delta;
float3 world_p = fixed_origin + float3{float(x),float(y),float(z)} * fixed_spacing;
float3 moving_p0 = (world_p + d0 - moving_origin) * inv_moving_spacing;
float3 moving_p1 = (world_p + d1 - moving_origin) * inv_moving_spacing;
float f0 = fixed(x,y,z) - cuda::linear_at_border<float>(
moving, dims, moving_p0.x, moving_p0.y, moving_p0.z);
float f1 = fixed(x,y,z) - cuda::linear_at_border<float>(
moving, dims, moving_p1.x, moving_p1.y, moving_p1.z);
cost_acc(x,y,z).x = f0*f0;
cost_acc(x,y,z).y = f1*f1;
}
inline __device__ bool is_inside(const dim3& dims, const int3& p)
{
return (p.x >= 0 && p.x < int(dims.x) && p.y >= 0 && p.y < int(dims.y) && p.z >= 0 && p.z < int(dims.z));
}
__global__ void ncc_kernel(
cuda::VolumePtr<float> fixed,
cuda::VolumePtr<float> moving,
cuda::VolumePtr<float4> df,
dim3 fixed_dims, // Full fixed volume dims
dim3 moving_dims, // Full moving volume dims
int3 block_offset,
int3 block_dims,
float3 fixed_origin,
float3 fixed_spacing,
float3 moving_origin,
float3 inv_moving_spacing,
float3 delta,
cuda::VolumePtr<float2> cost_acc
)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int z = blockIdx.z*blockDim.z + threadIdx.z;
if (x >= block_dims.x ||
y >= block_dims.y ||
z >= block_dims.z)
{
return;
}
int gx = x + block_offset.x;
int gy = y + block_offset.y;
int gz = z + block_offset.z;
if (gx >= fixed_dims.x ||
gy >= fixed_dims.y ||
gz >= fixed_dims.z)
{
return;
}
float3 d0 { df(gx,gy,gz).x, df(gx, gy, gz).y, df(gx, gy, gz).z };
float3 d1 = d0 + delta;
float3 world_p = fixed_origin + float3{float(gx),float(gy),float(gz)} * fixed_spacing;
float3 moving_p0 = (world_p + d0 - moving_origin) * inv_moving_spacing;
float3 moving_p1 = (world_p + d1 - moving_origin) * inv_moving_spacing;
float sff = 0.0f;
float sf = 0.0f;
float smm0 = 0.0f;
float sfm0 = 0.0f;
float sm0 = 0.0f;
float smm1 = 0.0f;
float sfm1 = 0.0f;
float sm1 = 0.0f;
unsigned int n = 0;
int radius = 2;
for (int dz = -radius; dz <= radius; ++dz) {
for (int dy = -radius; dy <= radius; ++dy) {
for (int dx = -radius; dx <= radius; ++dx) {
// TODO: Does not account for anisotropic volumes
int r2 = dx*dx + dy*dy + dz*dz;
if (r2 > radius * radius)
continue;
int3 fp{gx + dx, gy + dy, gz + dz};
if (!is_inside(fixed_dims, fp))
continue;
float3 mp0{moving_p0.x + dx, moving_p0.y + dy, moving_p0.z + dz};
float3 mp1{moving_p1.x + dx, moving_p1.y + dy, moving_p1.z + dz};
float fixed_v = fixed(fp.x, fp.y, fp.z);
float moving_v0 = cuda::linear_at_border<float>(moving, moving_dims, mp0.x, mp0.y, mp0.z);
float moving_v1 = cuda::linear_at_border<float>(moving, moving_dims, mp1.x, mp1.y, mp1.z);
sff += fixed_v * fixed_v;
smm0 += moving_v0 * moving_v0;
smm1 += moving_v1 * moving_v1;
sfm0 += fixed_v*moving_v0;
sfm1 += fixed_v*moving_v1;
sm0 += moving_v0;
sm1 += moving_v1;
sf += fixed_v;
++n;
}
}
}
if (n == 0)
return;
// Subtract mean
sff -= (sf * sf / n);
smm0 -= (sm0 * sm0 / n);
sfm0 -= (sf * sm0 / n);
smm1 -= (sm1 * sm1 / n);
sfm1 -= (sf * sm1 / n);
float denom0 = sqrt(sff*smm0);
float denom1 = sqrt(sff*smm1);
float2 out = {0, 0};
if(denom0 > 1e-14) {
out.x = 0.5f*(1.0f-float(sfm0 / denom0));
}
if(denom1 > 1e-14) {
out.y = 0.5f*(1.0f-float(sfm1 / denom1));
}
cost_acc(gx,gy,gz) = out;
}
void gpu_compute_unary_cost(
const stk::GpuVolume& fixed,
const stk::GpuVolume& moving,
const stk::GpuVolume& df,
const int3& block_offset,
const int3& block_dims,
const float3& delta,
stk::GpuVolume& unary_cost, // float2
cuda::Stream& stream
)
{
float3 inv_moving_spacing = {
1.0f / moving.spacing().x,
1.0f / moving.spacing().y,
1.0f / moving.spacing().z
};
dim3 block_size {16,16,1};
dim3 grid_size {
(block_dims.x + block_size.x - 1) / block_size.x,
(block_dims.y + block_size.y - 1) / block_size.y,
(block_dims.z + block_size.z - 1) / block_size.z
};
hipLaunchKernelGGL(( ncc_kernel), dim3(grid_size), dim3(block_size), 0, stream,
fixed,
moving,
df,
fixed.size(),
moving.size(),
block_offset,
block_dims,
fixed.origin(),
fixed.spacing(),
moving.origin(),
inv_moving_spacing,
delta,
unary_cost
);
}
__global__ void regularizer_kernel(
cuda::VolumePtr<float4> df,
dim3 dims,
int3 block_offset,
int3 block_dims,
float3 delta,
float weight,
cuda::VolumePtr<float4> cost_x,
cuda::VolumePtr<float4> cost_y,
cuda::VolumePtr<float4> cost_z
)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int z = blockIdx.z*blockDim.z + threadIdx.z;
if (x >= block_dims.x ||
y >= block_dims.y ||
z >= block_dims.z)
{
return;
}
int gx = x + block_offset.x;
int gy = y + block_offset.y;
int gz = z + block_offset.z;
if (gx >= dims.x ||
gy >= dims.y ||
gz >= dims.z)
{
return;
}
// Assume spacing is 1,1,1
// Cost ordered as f_same, f01, f10, f_same
float3 d = {df(gx,gy,gz).x, df(gx,gy,gz).y, df(gx,gy,gz).z};
{
float4 o_x = {0, 0, 0, 0};
float4 o_y = {0, 0, 0, 0};
float4 o_z = {0, 0, 0, 0};
float3 dx = {df(gx+1,gy,gz).x, df(gx+1,gy,gz).y, df(gx+1,gy,gz).z};
float3 dy = {df(gx,gy+1,gz).x, df(gx,gy+1,gz).y, df(gx,gy+1,gz).z};
float3 dz = {df(gx,gy,gz+1).x, df(gx,gy,gz+1).y, df(gx,gy,gz+1).z};
if (x + 1 < dims.x) {
float3 diff_00 = d - dx;
float dist2_00 = diff_00.x*diff_00.x + diff_00.y*diff_00.y + diff_00.z*diff_00.z;
float3 diff_01 = d - (dx+delta);
float dist2_01 = diff_01.x*diff_01.x + diff_01.y*diff_01.y + diff_01.z*diff_01.z;
float3 diff_10 = (d+delta) - dx;
float dist2_10 = diff_10.x*diff_10.x + diff_10.y*diff_10.y + diff_10.z*diff_10.z;
o_x.x = dist2_00;
o_x.y = dist2_01;
o_x.z = dist2_10;
}
if (y + 1 < dims.y) {
float3 diff_00 = d - dy;
float dist2_00 = diff_00.x*diff_00.x + diff_00.y*diff_00.y + diff_00.z*diff_00.z;
float3 diff_01 = d - (dy+delta);
float dist2_01 = diff_01.x*diff_01.x + diff_01.y*diff_01.y + diff_01.z*diff_01.z;
float3 diff_10 = (d+delta) - dy;
float dist2_10 = diff_10.x*diff_10.x + diff_10.y*diff_10.y + diff_10.z*diff_10.z;
o_y.x = dist2_00;
o_y.y = dist2_01;
o_y.z = dist2_10;
}
if (z + 1 < dims.z) {
float3 diff_00 = d - dz;
float dist2_00 = diff_00.x*diff_00.x + diff_00.y*diff_00.y + diff_00.z*diff_00.z;
float3 diff_01 = d - (dz+delta);
float dist2_01 = diff_01.x*diff_01.x + diff_01.y*diff_01.y + diff_01.z*diff_01.z;
float3 diff_10 = (d+delta) - dz;
float dist2_10 = diff_10.x*diff_10.x + diff_10.y*diff_10.y + diff_10.z*diff_10.z;
o_z.x = dist2_00;
o_z.y = dist2_01;
o_z.z = dist2_10;
}
cost_x(gx,gy,gz) = weight*o_x;
cost_y(gx,gy,gz) = weight*o_y;
cost_z(gx,gy,gz) = weight*o_z;
}
// TODO:
// Compute cost at block border
if (x == 0 && gx != 0) {
float3 dx = {df(gx-1,gy,gz).x, df(gx-1,gy,gz).y, df(gx-1,gy,gz).z};
float3 diff_00 = d - dx;
float dist2_00 = diff_00.x*diff_00.x + diff_00.y*diff_00.y + diff_00.z*diff_00.z;
float3 diff_01 = (d+delta) - dx;
float dist2_01 = diff_01.x*diff_01.x + diff_01.y*diff_01.y + diff_01.z*diff_01.z;
cost_x(gx-1,gy,gz).x = weight*dist2_00;
cost_x(gx-1,gy,gz).y = weight*dist2_01;
cost_x(gx-1,gy,gz).z = weight*dist2_00; // border nodes can't move
}
if (y == 0 && gy != 0) {
float3 dy = {df(gx,gy-1,gz).x, df(gx,gy-1,gz).y, df(gx,gy-1,gz).z};
float3 diff_00 = d - dy;
float dist2_00 = diff_00.x*diff_00.x + diff_00.y*diff_00.y + diff_00.z*diff_00.z;
float3 diff_01 = (d+delta) - dy;
float dist2_01 = diff_01.x*diff_01.x + diff_01.y*diff_01.y + diff_01.z*diff_01.z;
cost_y(gx,gy-1,gz).x = weight*dist2_00;
cost_y(gx,gy-1,gz).y = weight*dist2_01;
cost_y(gx,gy-1,gz).z = weight*dist2_00; // border nodes can't move
}
if (z == 0 && gz != 0) {
float3 dz = {df(gx,gy,gz-1).x, df(gx,gy,gz-1).y, df(gx,gy,gz-1).z};
float3 diff_00 = d - dz;
float dist2_00 = diff_00.x*diff_00.x + diff_00.y*diff_00.y + diff_00.z*diff_00.z;
float3 diff_01 = (d+delta) - dz;
float dist2_01 = diff_01.x*diff_01.x + diff_01.y*diff_01.y + diff_01.z*diff_01.z;
cost_z(gx,gy,gz-1).x = weight*dist2_00;
cost_z(gx,gy,gz-1).y = weight*dist2_01;
cost_z(gx,gy,gz-1).z = weight*dist2_00; // border nodes can't move
}
}
void gpu_compute_binary_cost(
const stk::GpuVolume& df,
const int3& block_offset,
const int3& block_dims,
const float3& delta,
float weight,
stk::GpuVolume& cost_x, // float4
stk::GpuVolume& cost_y, // float4
stk::GpuVolume& cost_z, // float4
cuda::Stream& stream
)
{
dim3 dims = df.size();
dim3 block_size {16,16,1};
dim3 grid_size {
(dims.x + block_size.x - 1) / block_size.x,
(dims.y + block_size.y - 1) / block_size.y,
(dims.z + block_size.z - 1) / block_size.z
};
hipLaunchKernelGGL(( regularizer_kernel), dim3(grid_size), dim3(block_size), 0, stream,
df,
dims,
block_offset,
block_dims,
delta,
weight,
cost_x,
cost_y,
cost_z
);
CUDA_CHECK_ERRORS(hipPeekAtLastError());
}
__global__ void apply_displacement_delta(
cuda::VolumePtr<float4> df,
cuda::VolumePtr<uint8_t> labels,
dim3 dims,
float4 delta
)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int z = blockIdx.z*blockDim.z + threadIdx.z;
if (x >= dims.x ||
y >= dims.y ||
z >= dims.z)
{
return;
}
df(x,y,z) = df(x,y,z) + delta * labels(x,y,z);
}
void gpu_apply_displacement_delta(
stk::GpuVolume& df,
const stk::GpuVolume& labels,
const float3& delta,
cuda::Stream& stream
)
{
dim3 dims = df.size();
dim3 block_size {16,16,1};
dim3 grid_size {
(dims.x + block_size.x - 1) / block_size.x,
(dims.y + block_size.y - 1) / block_size.y,
(dims.z + block_size.z - 1) / block_size.z
};
hipLaunchKernelGGL(( apply_displacement_delta), dim3(grid_size), dim3(block_size), 0, stream,
df,
labels,
dims,
float4{delta.x, delta.y, delta.z, 0.0f}
);
CUDA_CHECK_ERRORS(hipPeekAtLastError());
}
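// Editor's sketch (not part of the original file): every launch wrapper above
// rounds the grid size up per axis with (n + b - 1) / b. The helper below only
// names that pattern for readability; it is illustrative and unused here.
inline dim3 round_up_grid(const dim3& n, const dim3& b)
{
return dim3{(n.x + b.x - 1) / b.x,
(n.y + b.y - 1) / b.y,
(n.z + b.z - 1) / b.z};
}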
|
19921c28d807369b718733d9ebb5a4c1ae8c7110.cu
|
#include <stk/cuda/cuda.h>
#include <stk/cuda/stream.h>
#include <stk/cuda/volume.h>
namespace cuda = stk::cuda;
__global__ void ssd_kernel(
cuda::VolumePtr<float> fixed,
cuda::VolumePtr<float> moving,
cuda::VolumePtr<float4> df,
dim3 dims,
float3 fixed_origin,
float3 fixed_spacing,
float3 moving_origin,
float3 inv_moving_spacing,
float3 delta,
cuda::VolumePtr<float2> cost_acc
)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int z = blockIdx.z*blockDim.z + threadIdx.z;
if (x >= dims.x ||
y >= dims.y ||
z >= dims.z)
{
return;
}
float3 d0 { df(x,y,z).x, df(x,y,z).y, df(x,y,z).z };
float3 d1 = d0 + delta;
float3 world_p = fixed_origin + float3{float(x),float(y),float(z)} * fixed_spacing;
float3 moving_p0 = (world_p + d0 - moving_origin) * inv_moving_spacing;
float3 moving_p1 = (world_p + d1 - moving_origin) * inv_moving_spacing;
float f0 = fixed(x,y,z) - cuda::linear_at_border<float>(
moving, dims, moving_p0.x, moving_p0.y, moving_p0.z);
float f1 = fixed(x,y,z) - cuda::linear_at_border<float>(
moving, dims, moving_p1.x, moving_p1.y, moving_p1.z);
cost_acc(x,y,z).x = f0*f0;
cost_acc(x,y,z).y = f1*f1;
}
inline __device__ bool is_inside(const dim3& dims, const int3& p)
{
return (p.x >= 0 && p.x < int(dims.x) && p.y >= 0 && p.y < int(dims.y) && p.z >= 0 && p.z < int(dims.z));
}
__global__ void ncc_kernel(
cuda::VolumePtr<float> fixed,
cuda::VolumePtr<float> moving,
cuda::VolumePtr<float4> df,
dim3 fixed_dims, // Full fixed volume dims
dim3 moving_dims, // Full moving volume dims
int3 block_offset,
int3 block_dims,
float3 fixed_origin,
float3 fixed_spacing,
float3 moving_origin,
float3 inv_moving_spacing,
float3 delta,
cuda::VolumePtr<float2> cost_acc
)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int z = blockIdx.z*blockDim.z + threadIdx.z;
if (x >= block_dims.x ||
y >= block_dims.y ||
z >= block_dims.z)
{
return;
}
int gx = x + block_offset.x;
int gy = y + block_offset.y;
int gz = z + block_offset.z;
if (gx >= fixed_dims.x ||
gy >= fixed_dims.y ||
gz >= fixed_dims.z)
{
return;
}
float3 d0 { df(gx,gy,gz).x, df(gx, gy, gz).y, df(gx, gy, gz).z };
float3 d1 = d0 + delta;
float3 world_p = fixed_origin + float3{float(gx),float(gy),float(gz)} * fixed_spacing;
float3 moving_p0 = (world_p + d0 - moving_origin) * inv_moving_spacing;
float3 moving_p1 = (world_p + d1 - moving_origin) * inv_moving_spacing;
float sff = 0.0f;
float sf = 0.0f;
float smm0 = 0.0f;
float sfm0 = 0.0f;
float sm0 = 0.0f;
float smm1 = 0.0f;
float sfm1 = 0.0f;
float sm1 = 0.0f;
unsigned int n = 0;
int radius = 2;
for (int dz = -radius; dz <= radius; ++dz) {
for (int dy = -radius; dy <= radius; ++dy) {
for (int dx = -radius; dx <= radius; ++dx) {
// TODO: Does not account for anisotropic volumes
int r2 = dx*dx + dy*dy + dz*dz;
if (r2 > radius * radius)
continue;
int3 fp{gx + dx, gy + dy, gz + dz};
if (!is_inside(fixed_dims, fp))
continue;
float3 mp0{moving_p0.x + dx, moving_p0.y + dy, moving_p0.z + dz};
float3 mp1{moving_p1.x + dx, moving_p1.y + dy, moving_p1.z + dz};
float fixed_v = fixed(fp.x, fp.y, fp.z);
float moving_v0 = cuda::linear_at_border<float>(moving, moving_dims, mp0.x, mp0.y, mp0.z);
float moving_v1 = cuda::linear_at_border<float>(moving, moving_dims, mp1.x, mp1.y, mp1.z);
sff += fixed_v * fixed_v;
smm0 += moving_v0 * moving_v0;
smm1 += moving_v1 * moving_v1;
sfm0 += fixed_v*moving_v0;
sfm1 += fixed_v*moving_v1;
sm0 += moving_v0;
sm1 += moving_v1;
sf += fixed_v;
++n;
}
}
}
if (n == 0)
return;
// Subtract mean
sff -= (sf * sf / n);
smm0 -= (sm0 * sm0 / n);
sfm0 -= (sf * sm0 / n);
smm1 -= (sm1 * sm1 / n);
sfm1 -= (sf * sm1 / n);
float denom0 = sqrt(sff*smm0);
float denom1 = sqrt(sff*smm1);
float2 out = {0, 0};
if(denom0 > 1e-14) {
out.x = 0.5f*(1.0f-float(sfm0 / denom0));
}
if(denom1 > 1e-14) {
out.y = 0.5f*(1.0f-float(sfm1 / denom1));
}
cost_acc(gx,gy,gz) = out;
}
void gpu_compute_unary_cost(
const stk::GpuVolume& fixed,
const stk::GpuVolume& moving,
const stk::GpuVolume& df,
const int3& block_offset,
const int3& block_dims,
const float3& delta,
stk::GpuVolume& unary_cost, // float2
cuda::Stream& stream
)
{
float3 inv_moving_spacing = {
1.0f / moving.spacing().x,
1.0f / moving.spacing().y,
1.0f / moving.spacing().z
};
dim3 block_size {16,16,1};
dim3 grid_size {
(block_dims.x + block_size.x - 1) / block_size.x,
(block_dims.y + block_size.y - 1) / block_size.y,
(block_dims.z + block_size.z - 1) / block_size.z
};
ncc_kernel<<<grid_size, block_size, 0, stream>>>(
fixed,
moving,
df,
fixed.size(),
moving.size(),
block_offset,
block_dims,
fixed.origin(),
fixed.spacing(),
moving.origin(),
inv_moving_spacing,
delta,
unary_cost
);
}
__global__ void regularizer_kernel(
cuda::VolumePtr<float4> df,
dim3 dims,
int3 block_offset,
int3 block_dims,
float3 delta,
float weight,
cuda::VolumePtr<float4> cost_x,
cuda::VolumePtr<float4> cost_y,
cuda::VolumePtr<float4> cost_z
)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int z = blockIdx.z*blockDim.z + threadIdx.z;
if (x >= block_dims.x ||
y >= block_dims.y ||
z >= block_dims.z)
{
return;
}
int gx = x + block_offset.x;
int gy = y + block_offset.y;
int gz = z + block_offset.z;
if (gx >= dims.x ||
gy >= dims.y ||
gz >= dims.z)
{
return;
}
// Assume spacing is 1,1,1
// Cost ordered as f_same, f01, f10, f_same
float3 d = {df(gx,gy,gz).x, df(gx,gy,gz).y, df(gx,gy,gz).z};
{
float4 o_x = {0, 0, 0, 0};
float4 o_y = {0, 0, 0, 0};
float4 o_z = {0, 0, 0, 0};
float3 dx = {df(gx+1,gy,gz).x, df(gx+1,gy,gz).y, df(gx+1,gy,gz).z};
float3 dy = {df(gx,gy+1,gz).x, df(gx,gy+1,gz).y, df(gx,gy+1,gz).z};
float3 dz = {df(gx,gy,gz+1).x, df(gx,gy,gz+1).y, df(gx,gy,gz+1).z};
if (x + 1 < dims.x) {
float3 diff_00 = d - dx;
float dist2_00 = diff_00.x*diff_00.x + diff_00.y*diff_00.y + diff_00.z*diff_00.z;
float3 diff_01 = d - (dx+delta);
float dist2_01 = diff_01.x*diff_01.x + diff_01.y*diff_01.y + diff_01.z*diff_01.z;
float3 diff_10 = (d+delta) - dx;
float dist2_10 = diff_10.x*diff_10.x + diff_10.y*diff_10.y + diff_10.z*diff_10.z;
o_x.x = dist2_00;
o_x.y = dist2_01;
o_x.z = dist2_10;
}
if (y + 1 < dims.y) {
float3 diff_00 = d - dy;
float dist2_00 = diff_00.x*diff_00.x + diff_00.y*diff_00.y + diff_00.z*diff_00.z;
float3 diff_01 = d - (dy+delta);
float dist2_01 = diff_01.x*diff_01.x + diff_01.y*diff_01.y + diff_01.z*diff_01.z;
float3 diff_10 = (d+delta) - dy;
float dist2_10 = diff_10.x*diff_10.x + diff_10.y*diff_10.y + diff_10.z*diff_10.z;
o_y.x = dist2_00;
o_y.y = dist2_01;
o_y.z = dist2_10;
}
if (z + 1 < dims.z) {
float3 diff_00 = d - dz;
float dist2_00 = diff_00.x*diff_00.x + diff_00.y*diff_00.y + diff_00.z*diff_00.z;
float3 diff_01 = d - (dz+delta);
float dist2_01 = diff_01.x*diff_01.x + diff_01.y*diff_01.y + diff_01.z*diff_01.z;
float3 diff_10 = (d+delta) - dz;
float dist2_10 = diff_10.x*diff_10.x + diff_10.y*diff_10.y + diff_10.z*diff_10.z;
o_z.x = dist2_00;
o_z.y = dist2_01;
o_z.z = dist2_10;
}
cost_x(gx,gy,gz) = weight*o_x;
cost_y(gx,gy,gz) = weight*o_y;
cost_z(gx,gy,gz) = weight*o_z;
}
// TODO:
// Compute cost at block border
if (x == 0 && gx != 0) {
float3 dx = {df(gx-1,gy,gz).x, df(gx-1,gy,gz).y, df(gx-1,gy,gz).z};
float3 diff_00 = d - dx;
float dist2_00 = diff_00.x*diff_00.x + diff_00.y*diff_00.y + diff_00.z*diff_00.z;
float3 diff_01 = (d+delta) - dx;
float dist2_01 = diff_01.x*diff_01.x + diff_01.y*diff_01.y + diff_01.z*diff_01.z;
cost_x(gx-1,gy,gz).x = weight*dist2_00;
cost_x(gx-1,gy,gz).y = weight*dist2_01;
cost_x(gx-1,gy,gz).z = weight*dist2_00; // border nodes can't move
}
if (y == 0 && gy != 0) {
float3 dy = {df(gx,gy-1,gz).x, df(gx,gy-1,gz).y, df(gx,gy-1,gz).z};
float3 diff_00 = d - dy;
float dist2_00 = diff_00.x*diff_00.x + diff_00.y*diff_00.y + diff_00.z*diff_00.z;
float3 diff_01 = (d+delta) - dy;
float dist2_01 = diff_01.x*diff_01.x + diff_01.y*diff_01.y + diff_01.z*diff_01.z;
cost_y(gx,gy-1,gz).x = weight*dist2_00;
cost_y(gx,gy-1,gz).y = weight*dist2_01;
cost_y(gx,gy-1,gz).z = weight*dist2_00; // border nodes can't move
}
if (z == 0 && gz != 0) {
float3 dz = {df(gx,gy,gz-1).x, df(gx,gy,gz-1).y, df(gx,gy,gz-1).z};
float3 diff_00 = d - dz;
float dist2_00 = diff_00.x*diff_00.x + diff_00.y*diff_00.y + diff_00.z*diff_00.z;
float3 diff_01 = (d+delta) - dz;
float dist2_01 = diff_01.x*diff_01.x + diff_01.y*diff_01.y + diff_01.z*diff_01.z;
cost_z(gx,gy,gz-1).x = weight*dist2_00;
cost_z(gx,gy,gz-1).y = weight*dist2_01;
cost_z(gx,gy,gz-1).z = weight*dist2_00; // border nodes can't move
}
}
void gpu_compute_binary_cost(
const stk::GpuVolume& df,
const int3& block_offset,
const int3& block_dims,
const float3& delta,
float weight,
stk::GpuVolume& cost_x, // float4
stk::GpuVolume& cost_y, // float4
stk::GpuVolume& cost_z, // float4
cuda::Stream& stream
)
{
dim3 dims = df.size();
dim3 block_size {16,16,1};
dim3 grid_size {
(dims.x + block_size.x - 1) / block_size.x,
(dims.y + block_size.y - 1) / block_size.y,
(dims.z + block_size.z - 1) / block_size.z
};
regularizer_kernel<<<grid_size, block_size, 0, stream>>>(
df,
dims,
block_offset,
block_dims,
delta,
weight,
cost_x,
cost_y,
cost_z
);
CUDA_CHECK_ERRORS(cudaPeekAtLastError());
}
__global__ void apply_displacement_delta(
cuda::VolumePtr<float4> df,
cuda::VolumePtr<uint8_t> labels,
dim3 dims,
float4 delta
)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int z = blockIdx.z*blockDim.z + threadIdx.z;
if (x >= dims.x ||
y >= dims.y ||
z >= dims.z)
{
return;
}
df(x,y,z) = df(x,y,z) + delta * labels(x,y,z);
}
void gpu_apply_displacement_delta(
stk::GpuVolume& df,
const stk::GpuVolume& labels,
const float3& delta,
cuda::Stream& stream
)
{
dim3 dims = df.size();
dim3 block_size {16,16,1};
dim3 grid_size {
(dims.x + block_size.x - 1) / block_size.x,
(dims.y + block_size.y - 1) / block_size.y,
(dims.z + block_size.z - 1) / block_size.z
};
apply_displacement_delta<<<grid_size, block_size, 0, stream>>>(
df,
labels,
dims,
float4{delta.x, delta.y, delta.z, 0.0f}
);
CUDA_CHECK_ERRORS(cudaPeekAtLastError());
}
|
46423123c28f2f284bf8b31543adb8fb209c65ad.hip
|
// !!! This is a file automatically generated by hipify!!!
// Approximation of Pi using a simple, and not optimized, CUDA program
// Copyleft Alessandro Re
// From https://gist.github.com/akiross/17e722c5bea92bd2c310324eac643df6
//
// GCC 6.x not supported by CUDA 8, I used compat version
//
// nvcc -std=c++11 -ccbin=gcc5 pigreco.cu -c
// g++5 pigreco.o -lcudart -L/usr/local/cuda/lib64 -o pigreco
//
// This code is basically equivalent to the following Python code:
//
// def pigreco(NUM):
// from random import random as rand
// def sqrad():
// x, y = rand(), rand()
// return x*x + y*y
// return 4 * sum(1 - int(test()) for _ in range(NUM)) / NUM
//
// Python version takes, on this machine, 3.5 seconds to compute 10M tests
// CUDA version takes, on this machine, 1.6 seconds to compute 20.48G tests
//
#include <stdio.h>
#include <iostream>
#include <limits>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
using std::cout;
using std::endl;
typedef unsigned long long Count;
typedef std::numeric_limits<double> DblLim;
const Count WARP_SIZE = 32; // Warp size
const Count NBLOCKS = 1792; // Number of total cuda cores on my GPU
// 5120 for v100; 1792 for Quadro P4000
const Count ITERATIONS = 1000000; // Number of points to generate (each thread)
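// Editor's sketch (not part of the original program): instead of hard-coding
// NBLOCKS per GPU, the block count could be derived from the device at run
// time. blocksPerSM below is an assumed tuning knob, not a measured value.
inline Count suggested_nblocks(int blocksPerSM) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
return static_cast<Count>(prop.multiProcessorCount) * blocksPerSM;
}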
// This kernel estimates pi: each thread counts how many random points fall inside the unit quarter circle
__global__ void picount(Count *totals) {
// Define some shared memory: all threads in this block
__shared__ Count counter[WARP_SIZE];
// Unique ID of the thread
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Initialize RNG
hiprandState_t rng;
hiprand_init(clock64(), tid, 0, &rng);
// Initialize the counter
counter[threadIdx.x] = 0;
// Computation loop
for (int i = 0; i < ITERATIONS; i++) {
float x = hiprand_uniform(&rng); // Random x position in [0,1]
float y = hiprand_uniform(&rng); // Random y position in [0,1]
counter[threadIdx.x] += 1 - int(x * x + y * y); // Hit test - I think this is clever- CA
}
// The first thread in *every block* should sum the results
if (threadIdx.x == 0) {
// Reset count for this block
totals[blockIdx.x] = 0;
// Accumulate results
for (int i = 0; i < WARP_SIZE; i++) {
totals[blockIdx.x] += counter[i];
}
}
}
int main(int argc, char **argv) {
struct timespec start , stop ; // variables for timing
double accum ; // elapsed time variable
double pi25DT=3.141592653589793238462643;
double estimate;
int numDev;
hipGetDeviceCount(&numDev);
if (numDev < 1) {
cout << "CUDA device missing! Do you need to use optirun?\n";
return 1;
}
cout << "Starting simulation with " << NBLOCKS << " blocks, " << WARP_SIZE << " threads, and " << ITERATIONS << " iterations\n";
// Allocate host and device memory to store the counters
Count *hOut, *dOut;
hOut = new Count[NBLOCKS]; // Host memory
hipMalloc(&dOut, sizeof(Count) * NBLOCKS); // Device memory
clock_gettime ( CLOCK_REALTIME ,&start ); // timer start
// Launch kernel
hipLaunchKernelGGL(( picount), dim3(NBLOCKS), dim3(WARP_SIZE), 0, 0, dOut);
hipDeviceSynchronize(); // Wait for the kernel to finish before stopping the timer
clock_gettime ( CLOCK_REALTIME ,&stop ); // timer stop
// Copy back memory used on device and free
hipMemcpy(hOut, dOut, sizeof(Count) * NBLOCKS, hipMemcpyDeviceToHost);
hipFree(dOut);
// Compute total hits
Count total = 0;
for (int i = 0; i < NBLOCKS; i++) {
total += hOut[i];
}
Count tests = NBLOCKS * ITERATIONS * WARP_SIZE;
cout << "Approximated PI using " << tests << " random tests\n";
// Set maximum precision for decimal printing
cout.precision(DblLim::digits10); // Original code failed with max_digits10
estimate = 4.0 * (double)total/(double)tests;
cout << "PI ~= " << estimate << endl;
printf("Pi error is %.16f \n", pi25DT-estimate);
accum =( stop.tv_sec - start.tv_sec )+ // elapsed time in seconds
( stop.tv_nsec - start.tv_nsec )*1.e-9 ;
printf ("Monte pi took : %lf sec .\n",accum ); // print el. time
return 0;
}
|
46423123c28f2f284bf8b31543adb8fb209c65ad.cu
|
// Approximation of Pi using a simple, and not optimized, CUDA program
// Copyleft Alessandro Re
// From https://gist.github.com/akiross/17e722c5bea92bd2c310324eac643df6
//
// GCC 6.x not supported by CUDA 8, I used compat version
//
// nvcc -std=c++11 -ccbin=gcc5 pigreco.cu -c
// g++5 pigreco.o -lcudart -L/usr/local/cuda/lib64 -o pigreco
//
// This code is basically equivalent to the following Python code:
//
// def pigreco(NUM):
// from random import random as rand
// def sqrad():
// x, y = rand(), rand()
// return x*x + y*y
// return 4 * sum(1 - int(test()) for _ in range(NUM)) / NUM
//
// Python version takes, on this machine, 3.5 seconds to compute 10M tests
// CUDA version takes, on this machine, 1.6 seconds to compute 20.48G tests
//
#include <stdio.h>
#include <iostream>
#include <limits>
#include <cuda.h>
#include <curand_kernel.h>
using std::cout;
using std::endl;
typedef unsigned long long Count;
typedef std::numeric_limits<double> DblLim;
const Count WARP_SIZE = 32; // Warp size
const Count NBLOCKS = 1792; // Number of total cuda cores on my GPU
// 5120 for v100; 1792 for Quadro P4000
const Count ITERATIONS = 1000000; // Number of points to generate (each thread)
// This kernel estimates pi: each thread counts how many random points fall inside the unit quarter circle
__global__ void picount(Count *totals) {
// Define some shared memory: all threads in this block
__shared__ Count counter[WARP_SIZE];
// Unique ID of the thread
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Initialize RNG
curandState_t rng;
curand_init(clock64(), tid, 0, &rng);
// Initialize the counter
counter[threadIdx.x] = 0;
// Computation loop
for (int i = 0; i < ITERATIONS; i++) {
float x = curand_uniform(&rng); // Random x position in [0,1]
float y = curand_uniform(&rng); // Random y position in [0,1]
counter[threadIdx.x] += 1 - int(x * x + y * y); // Hit test - I think this is clever- CA
}
// The first thread in *every block* should sum the results
if (threadIdx.x == 0) {
// Reset count for this block
totals[blockIdx.x] = 0;
// Accumulate results
for (int i = 0; i < WARP_SIZE; i++) {
totals[blockIdx.x] += counter[i];
}
}
}
int main(int argc, char **argv) {
struct timespec start , stop ; // variables for timing
double accum ; // elapsed time variable
double pi25DT=3.141592653589793238462643;
double estimate;
int numDev;
cudaGetDeviceCount(&numDev);
if (numDev < 1) {
cout << "CUDA device missing! Do you need to use optirun?\n";
return 1;
}
cout << "Starting simulation with " << NBLOCKS << " blocks, " << WARP_SIZE << " threads, and " << ITERATIONS << " iterations\n";
// Allocate host and device memory to store the counters
Count *hOut, *dOut;
hOut = new Count[NBLOCKS]; // Host memory
cudaMalloc(&dOut, sizeof(Count) * NBLOCKS); // Device memory
clock_gettime ( CLOCK_REALTIME ,&start ); // timer start
// Launch kernel
picount<<<NBLOCKS, WARP_SIZE>>>(dOut);
cudaDeviceSynchronize(); // Wait for the kernel to finish before stopping the timer
clock_gettime ( CLOCK_REALTIME ,&stop ); // timer stop
// Copy back memory used on device and free
cudaMemcpy(hOut, dOut, sizeof(Count) * NBLOCKS, cudaMemcpyDeviceToHost);
cudaFree(dOut);
// Compute total hits
Count total = 0;
for (int i = 0; i < NBLOCKS; i++) {
total += hOut[i];
}
Count tests = NBLOCKS * ITERATIONS * WARP_SIZE;
cout << "Approximated PI using " << tests << " random tests\n";
// Set maximum precision for decimal printing
cout.precision(DblLim::digits10); // Original code failed with max_digits10
estimate = 4.0 * (double)total/(double)tests;
cout << "PI ~= " << estimate << endl;
printf("Pi error is %.16f \n", pi25DT-estimate);
accum =( stop.tv_sec - start.tv_sec )+ // elapsed time in seconds
( stop.tv_nsec - start.tv_nsec )*1.e-9 ;
printf ("Monte pi took : %lf sec .\n",accum ); // print el. time
return 0;
}
|
f61b4dccae56bdcc8f3f02ff0240c798b074878b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--blockDim=2048 --gridDim=64
#if __CUDA_ARCH__ < 300
#error Unexpected __CUDA_ARCH__
#endif
__global__ void foo() {
}
|
f61b4dccae56bdcc8f3f02ff0240c798b074878b.cu
|
//pass
//--blockDim=2048 --gridDim=64
#if __CUDA_ARCH__ < 300
#error Unexpected __CUDA_ARCH__
#endif
__global__ void foo() {
}
|
6e9efee053b4fbafce937f3715f49569fd68be34.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
/*
Function: pillar pooling
Args:
b : batch size
d : depth of the feature map
h : height of pooled feature map
w : width of pooled feature map
n : number of input points
c : number of channels
n_intervals : number of unique points
x : input features, FloatTensor[n, c]
geom_feats : input coordinates, IntTensor[n, 4]
interval_lengths : how many points in each pooled point, IntTensor[n_intervals]
interval_starts : starting position for pooled point, IntTensor[n_intervals]
out : output features, FloatTensor[b, d, h, w, c]
*/
__global__ void bev_pool_kernel(int b, int d, int h, int w, int n, int c, int n_intervals,
const float *__restrict__ x,
const int *__restrict__ geom_feats,
const int *__restrict__ interval_starts,
const int *__restrict__ interval_lengths,
float* __restrict__ out) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int index = idx / c;
int cur_c = idx % c;
if (index >= n_intervals) return;
int interval_start = interval_starts[index];
int interval_length = interval_lengths[index];
const int* cur_geom_feats = geom_feats + interval_start * 4;
const float* cur_x = x + interval_start * c + cur_c;
float* cur_out = out + cur_geom_feats[3] * d * h * w * c +
cur_geom_feats[2] * h * w * c + cur_geom_feats[0] * w * c +
cur_geom_feats[1] * c + cur_c;
float psum = 0;
for(int i = 0; i < interval_length; i++){
psum += cur_x[i * c];
}
*cur_out = psum;
}
/*
Function: pillar pooling backward
Args:
b : batch size
d : depth of the feature map
h : height of pooled feature map
w : width of pooled feature map
n : number of input points
c : number of channels
n_intervals : number of unique points
out_grad : gradient of the BEV fmap from top, FloatTensor[b, d, h, w, c]
geom_feats : input coordinates, IntTensor[n, 4]
interval_lengths : how many points in each pooled point, IntTensor[n_intervals]
interval_starts : starting position for pooled point, IntTensor[n_intervals]
x_grad : gradient of the image fmap, FloatTensor
*/
__global__ void bev_pool_grad_kernel(int b, int d, int h, int w, int n, int c, int n_intervals,
const float *__restrict__ out_grad,
const int *__restrict__ geom_feats,
const int *__restrict__ interval_starts,
const int *__restrict__ interval_lengths,
float* __restrict__ x_grad) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int index = idx / c;
int cur_c = idx % c;
if (index >= n_intervals) return;
int interval_start = interval_starts[index];
int interval_length = interval_lengths[index];
const int* cur_geom_feats = geom_feats + interval_start * 4;
float* cur_x_grad = x_grad + interval_start * c + cur_c;
const float* cur_out_grad = out_grad + cur_geom_feats[3] * d * h * w * c +
cur_geom_feats[2] * h * w * c + cur_geom_feats[0] * w * c +
cur_geom_feats[1] * c + cur_c;
for(int i = 0; i < interval_length; i++){
cur_x_grad[i * c] = *cur_out_grad;
}
}
void bev_pool(int b, int d, int h, int w, int n, int c, int n_intervals, const float* x,
const int* geom_feats, const int* interval_starts, const int* interval_lengths, float* out) {
hipLaunchKernelGGL(( bev_pool_kernel), dim3((int)ceil(((double)n_intervals * c / 256))), dim3(256), 0, 0,
b, d, h, w, n, c, n_intervals, x, geom_feats, interval_starts, interval_lengths, out
);
}
void bev_pool_grad(int b, int d, int h, int w, int n, int c, int n_intervals, const float* out_grad,
const int* geom_feats, const int* interval_starts, const int* interval_lengths, float* x_grad) {
hipLaunchKernelGGL(( bev_pool_grad_kernel), dim3((int)ceil(((double)n_intervals * c / 256))), dim3(256), 0, 0,
b, d, h, w, n, c, n_intervals, out_grad, geom_feats, interval_starts, interval_lengths, x_grad
);
}
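// Editor's sketch (not part of the original file): a minimal host-side call
// under assumed sizes, showing that bev_pool launches one thread per
// (interval, channel) pair. All device pointers and sizes here are
// hypothetical and must already hold valid, sorted data.
inline void bev_pool_example(const float* d_x, const int* d_geom_feats,
const int* d_interval_starts, const int* d_interval_lengths, float* d_out) {
const int b = 1, d = 1, h = 128, w = 128; // assumed BEV grid
const int n = 100000, c = 80; // assumed point count and channels
const int n_intervals = 40000; // assumed number of unique BEV cells
bev_pool(b, d, h, w, n, c, n_intervals,
d_x, d_geom_feats, d_interval_starts, d_interval_lengths, d_out);
}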
|
6e9efee053b4fbafce937f3715f49569fd68be34.cu
|
#include <stdio.h>
#include <stdlib.h>
/*
Function: pillar pooling
Args:
b : batch size
d : depth of the feature map
h : height of pooled feature map
w : width of pooled feature map
n : number of input points
c : number of channels
n_intervals : number of unique points
x : input features, FloatTensor[n, c]
geom_feats : input coordinates, IntTensor[n, 4]
interval_lengths : how many points in each pooled point, IntTensor[n_intervals]
interval_starts : starting position for pooled point, IntTensor[n_intervals]
out : output features, FloatTensor[b, d, h, w, c]
*/
__global__ void bev_pool_kernel(int b, int d, int h, int w, int n, int c, int n_intervals,
const float *__restrict__ x,
const int *__restrict__ geom_feats,
const int *__restrict__ interval_starts,
const int *__restrict__ interval_lengths,
float* __restrict__ out) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int index = idx / c;
int cur_c = idx % c;
if (index >= n_intervals) return;
int interval_start = interval_starts[index];
int interval_length = interval_lengths[index];
const int* cur_geom_feats = geom_feats + interval_start * 4;
const float* cur_x = x + interval_start * c + cur_c;
float* cur_out = out + cur_geom_feats[3] * d * h * w * c +
cur_geom_feats[2] * h * w * c + cur_geom_feats[0] * w * c +
cur_geom_feats[1] * c + cur_c;
float psum = 0;
for(int i = 0; i < interval_length; i++){
psum += cur_x[i * c];
}
*cur_out = psum;
}
/*
Function: pillar pooling backward
Args:
b : batch size
d : depth of the feature map
h : height of pooled feature map
w : width of pooled feature map
n : number of input points
c : number of channels
n_intervals : number of unique points
out_grad : gradient of the BEV fmap from top, FloatTensor[b, d, h, w, c]
geom_feats : input coordinates, IntTensor[n, 4]
interval_lengths : how many points in each pooled point, IntTensor[n_intervals]
interval_starts : starting position for pooled point, IntTensor[n_intervals]
x_grad : gradient of the image fmap, FloatTensor
*/
__global__ void bev_pool_grad_kernel(int b, int d, int h, int w, int n, int c, int n_intervals,
const float *__restrict__ out_grad,
const int *__restrict__ geom_feats,
const int *__restrict__ interval_starts,
const int *__restrict__ interval_lengths,
float* __restrict__ x_grad) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int index = idx / c;
int cur_c = idx % c;
if (index >= n_intervals) return;
int interval_start = interval_starts[index];
int interval_length = interval_lengths[index];
const int* cur_geom_feats = geom_feats + interval_start * 4;
float* cur_x_grad = x_grad + interval_start * c + cur_c;
const float* cur_out_grad = out_grad + cur_geom_feats[3] * d * h * w * c +
cur_geom_feats[2] * h * w * c + cur_geom_feats[0] * w * c +
cur_geom_feats[1] * c + cur_c;
for(int i = 0; i < interval_length; i++){
cur_x_grad[i * c] = *cur_out_grad;
}
}
void bev_pool(int b, int d, int h, int w, int n, int c, int n_intervals, const float* x,
const int* geom_feats, const int* interval_starts, const int* interval_lengths, float* out) {
bev_pool_kernel<<<(int)ceil(((double)n_intervals * c / 256)), 256>>>(
b, d, h, w, n, c, n_intervals, x, geom_feats, interval_starts, interval_lengths, out
);
}
void bev_pool_grad(int b, int d, int h, int w, int n, int c, int n_intervals, const float* out_grad,
const int* geom_feats, const int* interval_starts, const int* interval_lengths, float* x_grad) {
bev_pool_grad_kernel<<<(int)ceil(((double)n_intervals * c / 256)), 256>>>(
b, d, h, w, n, c, n_intervals, out_grad, geom_feats, interval_starts, interval_lengths, x_grad
);
}
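// Editor's sketch (not part of the original file): one way to derive
// interval_starts / interval_lengths on the host from geom_feats rows that
// have been sorted so equal (x, y, z, batch) keys are contiguous. Purely
// illustrative; the real pipeline computes these elsewhere.
#include <vector>
inline void build_intervals(const int* geom_feats, int n,
std::vector<int>& starts, std::vector<int>& lengths) {
starts.clear();
lengths.clear();
for (int i = 0; i < n; ++i) {
const int* cur = geom_feats + 4 * i;
bool new_run = (i == 0);
if (!new_run) {
const int* prev = cur - 4;
new_run = cur[0] != prev[0] || cur[1] != prev[1] ||
cur[2] != prev[2] || cur[3] != prev[3];
}
if (new_run) {
starts.push_back(i); // first point of a new pooled cell
lengths.push_back(1);
} else {
lengths.back() += 1; // another point falling into the same cell
}
}
}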
|
aea8227b62afd77a0a83b788e7425451bf6e9ece.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include <iostream>
#ifdef __cplusplus
extern "C" {
#endif
#include <float.h>
#include <stdio.h>
#include "highway_lstm_kernel.h"
#define BLOCK 256
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(hipblasStatus_t stat, const char *file, int line) {
if (stat != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
// Device functions
__forceinline__ __device__ float sigmoidf(float in) {
return 1.f / (1.f + expf(-in));
}
__forceinline__ __device__ float dsigmoidf(float in) {
float s = sigmoidf(in);
return s * (1.f - s);
}
__forceinline__ __device__ float tanh2f(float in) {
float t = tanhf(in);
return t*t;
}
__global__ void elementWise_bp(int hiddenSize, int miniBatch, int numCovered,
// Inputs
float *out_grad,
float *c_out_grad_ext,
float *h_out_grad,
float *c_out_grad,
float *c_in,
float *c_out,
float *h_out,
float *gates_out,
float *dropout_in,
// Outputs
float *c_in_grad,
float *i_gates_grad,
float *h_gates_grad,
int training) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numCovered * hiddenSize) return;
int batch = index / hiddenSize;
int h_gateIndex = (index % hiddenSize) + 5 * batch * hiddenSize;
int i_gateIndex = (index % hiddenSize) + 6 * batch * hiddenSize;
float d_h = out_grad[index] + h_out_grad[index];
d_h = d_h * dropout_in[index];
float in_gate = gates_out[i_gateIndex];
float forget_gate = gates_out[i_gateIndex + 1 * hiddenSize];
float act_gate = gates_out[i_gateIndex + 2 * hiddenSize];
float out_gate = gates_out[i_gateIndex + 3 * hiddenSize];
float r_gate = gates_out[i_gateIndex + 4 * hiddenSize];
float lin_gate = gates_out[i_gateIndex + 5 * hiddenSize];
float d_out = d_h * r_gate;
float d_c = d_out * out_gate * (1.f - tanh2f(c_out[index])) + c_out_grad[index] + c_out_grad_ext[index];
float h_prime = out_gate * tanhf(c_out[index]);
float d_in_gate = d_c * act_gate * in_gate * (1.f - in_gate);
float d_forget_gate = d_c * c_in[index] * forget_gate * (1.f - forget_gate);
float d_act_gate = d_c * in_gate * (1.f - act_gate * act_gate);
float d_out_gate = d_out * tanhf(c_out[index]) * out_gate * (1.f - out_gate);
float d_r_gate = d_h * (h_prime - lin_gate) * r_gate * (1.f - r_gate);
float d_lin_gate = d_h * (1 - r_gate);
i_gates_grad[i_gateIndex] = d_in_gate;
i_gates_grad[i_gateIndex + 1 * hiddenSize] = d_forget_gate;
i_gates_grad[i_gateIndex + 2 * hiddenSize] = d_act_gate;
i_gates_grad[i_gateIndex + 3 * hiddenSize] = d_out_gate;
i_gates_grad[i_gateIndex + 4 * hiddenSize] = d_r_gate;
i_gates_grad[i_gateIndex + 5 * hiddenSize] = d_lin_gate;
h_gates_grad[h_gateIndex] = d_in_gate;
h_gates_grad[h_gateIndex + 1 * hiddenSize] = d_forget_gate;
h_gates_grad[h_gateIndex + 2 * hiddenSize] = d_act_gate;
h_gates_grad[h_gateIndex + 3 * hiddenSize] = d_out_gate;
h_gates_grad[h_gateIndex + 4 * hiddenSize] = d_r_gate;
c_in_grad[index] = forget_gate * d_c;
}
// Fused forward kernel
__global__ void elementWise_fp(int hiddenSize, int miniBatch, int numCovered,
float *tmp_h,
float *tmp_i,
float *bias,
float *linearGates,
float *h_out,
float *dropout_in,
float *c_in,
float *c_out,
int training) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numCovered * hiddenSize) return;
int batch = index / hiddenSize;
int h_gateIndex = (index % hiddenSize) + 5 * batch * hiddenSize;
int i_gateIndex = (index % hiddenSize) + 6 * batch * hiddenSize;
float g[6];
for (int i = 0; i < 5; i++) {
g[i] = tmp_i[i * hiddenSize + i_gateIndex] + tmp_h[i * hiddenSize + h_gateIndex];
g[i] += bias[i * hiddenSize + index % hiddenSize];
}
// extra for highway
g[5] = tmp_i[5 * hiddenSize + i_gateIndex];
float in_gate = sigmoidf(g[0]);
float forget_gate = sigmoidf(g[1]);
float act_gate = tanhf(g[2]);
float out_gate = sigmoidf(g[3]);
float r_gate = sigmoidf(g[4]);
float lin_gate = g[5];
if (training == 1) {
linearGates[i_gateIndex] = in_gate;
linearGates[i_gateIndex + 1 * hiddenSize] = forget_gate;
linearGates[i_gateIndex + 2 * hiddenSize] = act_gate;
linearGates[i_gateIndex + 3 * hiddenSize] = out_gate;
linearGates[i_gateIndex + 4 * hiddenSize] = r_gate;
linearGates[i_gateIndex + 5 * hiddenSize] = lin_gate;
}
float val = (forget_gate * c_in[index]) + (in_gate * act_gate);
c_out[index] = val;
val = out_gate * tanhf(val);
val = val * r_gate + (1. - r_gate) * lin_gate;
val = val * dropout_in[index];
h_out[index] = val;
}
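// Editor's sketch (not part of the original file): both fused kernels above
// address gates as (index % hiddenSize) + G * batch * hiddenSize, with G = 6
// gates on the input side and G = 5 on the hidden side. The helper below only
// restates that layout for readability; it is illustrative and unused.
inline int gate_offset(int flatIndex, int hiddenSize, int gatesPerCell) {
int batch = flatIndex / hiddenSize;
return (flatIndex % hiddenSize) + gatesPerCell * batch * hiddenSize;
}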
void highway_lstm_backward_ongpu(int inputSize, int hiddenSize, int miniBatch,
int numLayers, int seqLength, float *out_grad, int *lengths,
float *h_data_grad, float * c_data_grad, float *x, float *h_data,
float *c_data, float *T,
float *gates_out, float *dropout_in, float *h_gates_grad,
float *i_gates_grad, float *h_out_grad, float *c_out_grad, float *x_grad, float *T_grad, float *bias_grad,
int isTraining, int do_weight_grad, hipStream_t stream, hipblasHandle_t handle) {
const int numElements = hiddenSize * miniBatch;
hipStream_t stream_i;
hipStream_t stream_h;
hipStream_t stream_wi;
hipStream_t stream_wh;
hipStream_t stream_wb;
cudaErrCheck(hipStreamCreate(&stream_i));
cudaErrCheck(hipStreamCreate(&stream_h));
cudaErrCheck(hipStreamCreate(&stream_wi));
cudaErrCheck(hipStreamCreate(&stream_wh));
cudaErrCheck(hipStreamCreate(&stream_wb));
float one = 1.f;
float zero = 0.f;
float *ones_host = new float[miniBatch];
for (int i=0; i < miniBatch; i++) {
ones_host[i] = 1.f;
}
float *ones;
cudaErrCheck(hipMalloc((void**)&ones, miniBatch * sizeof(float)));
cudaErrCheck(hipMemcpy(ones, ones_host, miniBatch * sizeof(float), hipMemcpyHostToDevice));
for (int layer = numLayers-1; layer >= 0; layer--) {
int direction;
int startInd;
int currNumCovered;
if (layer % 2 == 0) {
// forward direction
direction = -1;
startInd = seqLength-1;
currNumCovered = 0;
} else {
// backward direction
direction = 1;
startInd = 0;
currNumCovered = miniBatch;
}
for (int t = startInd; t < seqLength && t >= 0; t = t + direction) {
int prevIndex;
int prevGradIndex;
if (direction == 1) {
while (lengths[currNumCovered-1] <= t) {
currNumCovered--;
}
prevGradIndex = t;
prevIndex = (t+2)%(seqLength+1);
} else {
while ((currNumCovered < miniBatch) && (lengths[currNumCovered] > t)) {
currNumCovered++;
}
prevGradIndex = (t+2)%(seqLength+1);
prevIndex = t;
}
float * gradPtr;
if (layer == numLayers-1) {
gradPtr = out_grad + t * numElements;
} else {
gradPtr = h_out_grad + t * numElements + layer * seqLength * numElements;
}
float * cGradPtr = c_out_grad + t * numElements + layer * seqLength * numElements;
cublasErrCheck(hipblasSetStream(handle, stream_i));
dim3 blockDim;
dim3 gridDim;
blockDim.x = BLOCK;
gridDim.x = ((currNumCovered * hiddenSize) + blockDim.x - 1) / blockDim.x;
hipLaunchKernelGGL(( elementWise_bp) , dim3(gridDim), dim3(blockDim) , 0, stream,
hiddenSize, miniBatch, currNumCovered,
gradPtr,
cGradPtr,
h_data_grad + prevGradIndex * numElements + layer * (seqLength + 1) * numElements,
c_data_grad + prevGradIndex * numElements + layer * (seqLength + 1) * numElements,
c_data + prevIndex * numElements + layer * (seqLength + 1) * numElements,
c_data + (t+1) * numElements + layer * (seqLength + 1) * numElements,
h_data + (t+1) * numElements + layer * (seqLength + 1) * numElements,
gates_out + t * 6 * numElements + layer * seqLength * 6 * numElements,
dropout_in + layer * numElements,
c_data_grad + (t+1) * numElements + layer * (seqLength + 1) * numElements,
i_gates_grad,
h_gates_grad,
isTraining);
cudaErrCheck(hipGetLastError());
// END
cudaErrCheck(hipDeviceSynchronize());
float *out_grad_ptr;
int weightStart;
int inSize;
if (layer == 0) {
inSize = inputSize;
out_grad_ptr = x_grad + t * inputSize * miniBatch;
weightStart = 0;
} else {
inSize = hiddenSize;
out_grad_ptr = h_out_grad + t * numElements + (layer-1) * seqLength * numElements;
weightStart = 6 * hiddenSize * inputSize + 5 * hiddenSize * hiddenSize + (layer - 1) * 11 * hiddenSize * hiddenSize;
}
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
inSize, currNumCovered, 6*hiddenSize,
&one,
&T[weightStart],
6 * hiddenSize,
i_gates_grad,
6 * hiddenSize,
&zero,
out_grad_ptr,
inSize));
cublasErrCheck(hipblasSetStream(handle, stream_h));
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
hiddenSize, currNumCovered, 5*hiddenSize,
&one,
&T[weightStart + 6*hiddenSize*inSize],
5 * hiddenSize,
h_gates_grad,
5 * hiddenSize,
&zero,
h_data_grad + (t+1) * numElements + layer * (seqLength+1) * numElements,
hiddenSize));
if (do_weight_grad == 1) {
float *inputPtr;
if (layer == 0) {
inputPtr = x + t * inputSize * miniBatch;
} else {
inputPtr = h_data + (t+1) * numElements + (layer - 1) * (seqLength+1) * numElements;
}
cublasErrCheck(hipblasSetStream(handle, stream_wi));
// Update i_weights
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_T,
6 * hiddenSize, inSize, currNumCovered,
&one,
i_gates_grad,
6 * hiddenSize,
inputPtr,
inSize,
&one,
&T_grad[weightStart],
6 * hiddenSize));
cublasErrCheck(hipblasSetStream(handle, stream_wh));
// Update h_weights
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_T,
5 * hiddenSize, hiddenSize, currNumCovered,
&one,
h_gates_grad,
5 * hiddenSize,
h_data + prevIndex * numElements + layer * (seqLength+1) * numElements,
hiddenSize,
&one,
&T_grad[weightStart + 6 *hiddenSize*inSize],
5 * hiddenSize));
cublasErrCheck(hipblasSetStream(handle, stream_wb));
// Update bias_weights
cublasErrCheck(hipblasSgemv(handle,
HIPBLAS_OP_N,
5 * hiddenSize, currNumCovered,
&one,
h_gates_grad,
5 * hiddenSize,
ones,
1,
&one,
&bias_grad[layer * 5 * hiddenSize],
1));
}
cudaErrCheck(hipDeviceSynchronize());
}
}
cublasErrCheck(hipblasSetStream(handle, stream));
cudaErrCheck(hipStreamDestroy(stream_i));
cudaErrCheck(hipStreamDestroy(stream_h));
cudaErrCheck(hipStreamDestroy(stream_wi));
cudaErrCheck(hipStreamDestroy(stream_wh));
cudaErrCheck(hipStreamDestroy(stream_wb));
cudaErrCheck(hipFree(ones));
delete[] ones_host; // allocated with new[], so delete[] rather than free()
cudaErrCheck(hipDeviceSynchronize());
}
void highway_lstm_forward_ongpu(int inputSize, int hiddenSize, int miniBatch,
int numLayers, int seqLength, float *x, int *lengths, float *h_data,
float *c_data, float *tmp_i, float *tmp_h, float *T, float *bias,
float *dropout, float *gates, int is_training, hipStream_t stream, hipblasHandle_t handle) {
const int numElements = hiddenSize * miniBatch;
float zero = 0.f;
float one = 1.f;
hipStream_t stream_i;
hipStream_t stream_h;
cudaErrCheck(hipStreamCreate(&stream_i));
cudaErrCheck(hipStreamCreate(&stream_h));
for (int layer = 0; layer < numLayers; layer++) {
int direction;
int startInd;
int currNumCovered;
if (layer % 2 == 0) {
// forward direction
direction = 1;
startInd = 0;
currNumCovered = miniBatch;
} else {
// backward direction
direction = -1;
startInd = seqLength-1;
currNumCovered = 0;
}
cublasErrCheck(hipblasSetStream(handle, stream));
for (int t = startInd; t < seqLength && t >= 0; t = t + direction) {
int prevIndex;
if (direction == 1) {
while (lengths[currNumCovered-1] <= t) {
currNumCovered--;
}
prevIndex = t;
} else {
while ((currNumCovered < miniBatch) && (lengths[currNumCovered] > t)) {
currNumCovered++;
}
prevIndex = (t+2)%(seqLength+1);
}
int inSize;
int weightStart;
float *inputPtr;
if (layer == 0) {
inSize = inputSize;
weightStart = 0;
inputPtr = x + t * inputSize * miniBatch;
prevIndex = t;
} else {
inSize = hiddenSize;
weightStart = 6 * hiddenSize * inputSize + 5 * hiddenSize * hiddenSize + (layer - 1) * 11 * hiddenSize * hiddenSize;
inputPtr = h_data + (t+1) * numElements + (layer - 1) * (seqLength+1) * numElements;
}
cublasErrCheck(hipblasSetStream(handle, stream_i));
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
6*hiddenSize, currNumCovered, inSize,
&one,
&T[weightStart],
6 * hiddenSize,
inputPtr,
inSize,
&zero,
tmp_i,
6 * hiddenSize));
cublasErrCheck(hipblasSetStream(handle, stream_h));
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
5*hiddenSize, currNumCovered, hiddenSize,
&one,
&T[6 * hiddenSize * inSize + weightStart],
5 * hiddenSize,
h_data + prevIndex * numElements + layer * (seqLength + 1) * numElements,
hiddenSize,
&zero,
tmp_h,
5 * hiddenSize));
cudaErrCheck(hipDeviceSynchronize());
dim3 blockDim;
dim3 gridDim;
blockDim.x = BLOCK;
gridDim.x = ((currNumCovered * hiddenSize) + blockDim.x - 1) / blockDim.x;
hipLaunchKernelGGL(( elementWise_fp) , dim3(gridDim), dim3(blockDim) , 0, stream,
hiddenSize, miniBatch, currNumCovered,
tmp_h,
tmp_i,
bias + 5 * layer * hiddenSize,
is_training ? gates + 6 * (t * numElements + layer * seqLength * numElements) : NULL,
h_data + (t + 1) * numElements + layer * (seqLength + 1) * numElements,
dropout + layer * numElements,
c_data + prevIndex * numElements + layer * (seqLength + 1) * numElements,
c_data + (t + 1) * numElements + layer * (seqLength + 1) * numElements,
is_training);
cudaErrCheck(hipGetLastError());
cudaErrCheck(hipDeviceSynchronize());
}
}
cublasErrCheck(hipblasSetStream(handle, stream));
cudaErrCheck(hipStreamDestroy(stream_i));
cudaErrCheck(hipStreamDestroy(stream_h));
cudaErrCheck(hipDeviceSynchronize());
}
#ifdef __cplusplus
}
#endif
|
aea8227b62afd77a0a83b788e7425451bf6e9ece.cu
|
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include <iostream>
#ifdef __cplusplus
extern "C" {
#endif
#include <float.h>
#include <stdio.h>
#include "highway_lstm_kernel.h"
#define BLOCK 256
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) {
if (stat != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
// Device functions
__forceinline__ __device__ float sigmoidf(float in) {
return 1.f / (1.f + expf(-in));
}
__forceinline__ __device__ float dsigmoidf(float in) {
float s = sigmoidf(in);
return s * (1.f - s);
}
__forceinline__ __device__ float tanh2f(float in) {
float t = tanhf(in);
return t*t;
}
__global__ void elementWise_bp(int hiddenSize, int miniBatch, int numCovered,
// Inputs
float *out_grad,
float *c_out_grad_ext,
float *h_out_grad,
float *c_out_grad,
float *c_in,
float *c_out,
float *h_out,
float *gates_out,
float *dropout_in,
// Outputs
float *c_in_grad,
float *i_gates_grad,
float *h_gates_grad,
int training) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numCovered * hiddenSize) return;
int batch = index / hiddenSize;
int h_gateIndex = (index % hiddenSize) + 5 * batch * hiddenSize;
int i_gateIndex = (index % hiddenSize) + 6 * batch * hiddenSize;
float d_h = out_grad[index] + h_out_grad[index];
d_h = d_h * dropout_in[index];
float in_gate = gates_out[i_gateIndex];
float forget_gate = gates_out[i_gateIndex + 1 * hiddenSize];
float act_gate = gates_out[i_gateIndex + 2 * hiddenSize];
float out_gate = gates_out[i_gateIndex + 3 * hiddenSize];
float r_gate = gates_out[i_gateIndex + 4 * hiddenSize];
float lin_gate = gates_out[i_gateIndex + 5 * hiddenSize];
float d_out = d_h * r_gate;
float d_c = d_out * out_gate * (1.f - tanh2f(c_out[index])) + c_out_grad[index] + c_out_grad_ext[index];
float h_prime = out_gate * tanhf(c_out[index]);
float d_in_gate = d_c * act_gate * in_gate * (1.f - in_gate);
float d_forget_gate = d_c * c_in[index] * forget_gate * (1.f - forget_gate);
float d_act_gate = d_c * in_gate * (1.f - act_gate * act_gate);
float d_out_gate = d_out * tanhf(c_out[index]) * out_gate * (1.f - out_gate);
float d_r_gate = d_h * (h_prime - lin_gate) * r_gate * (1.f - r_gate);
float d_lin_gate = d_h * (1 - r_gate);
i_gates_grad[i_gateIndex] = d_in_gate;
i_gates_grad[i_gateIndex + 1 * hiddenSize] = d_forget_gate;
i_gates_grad[i_gateIndex + 2 * hiddenSize] = d_act_gate;
i_gates_grad[i_gateIndex + 3 * hiddenSize] = d_out_gate;
i_gates_grad[i_gateIndex + 4 * hiddenSize] = d_r_gate;
i_gates_grad[i_gateIndex + 5 * hiddenSize] = d_lin_gate;
h_gates_grad[h_gateIndex] = d_in_gate;
h_gates_grad[h_gateIndex + 1 * hiddenSize] = d_forget_gate;
h_gates_grad[h_gateIndex + 2 * hiddenSize] = d_act_gate;
h_gates_grad[h_gateIndex + 3 * hiddenSize] = d_out_gate;
h_gates_grad[h_gateIndex + 4 * hiddenSize] = d_r_gate;
c_in_grad[index] = forget_gate * d_c;
}
// Fused forward kernel
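// Computes, per element: c_t = f*c_{t-1} + i*g; h' = o*tanh(c_t);
// h_t = (r*h' + (1-r)*lin) * dropout_mask. tmp_i/tmp_h hold the precomputed input
// and recurrent GEMM results; only the first five gates receive the recurrent
// term and the bias, the linear highway gate comes from the input projection alone.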
__global__ void elementWise_fp(int hiddenSize, int miniBatch, int numCovered,
float *tmp_h,
float *tmp_i,
float *bias,
float *linearGates,
float *h_out,
float *dropout_in,
float *c_in,
float *c_out,
int training) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numCovered * hiddenSize) return;
int batch = index / hiddenSize;
int h_gateIndex = (index % hiddenSize) + 5 * batch * hiddenSize;
int i_gateIndex = (index % hiddenSize) + 6 * batch * hiddenSize;
float g[6];
for (int i = 0; i < 5; i++) {
g[i] = tmp_i[i * hiddenSize + i_gateIndex] + tmp_h[i * hiddenSize + h_gateIndex];
g[i] += bias[i * hiddenSize + index % hiddenSize];
}
// extra for highway
g[5] = tmp_i[5 * hiddenSize + i_gateIndex];
float in_gate = sigmoidf(g[0]);
float forget_gate = sigmoidf(g[1]);
float act_gate = tanhf(g[2]);
float out_gate = sigmoidf(g[3]);
float r_gate = sigmoidf(g[4]);
float lin_gate = g[5];
if (training == 1) {
linearGates[i_gateIndex] = in_gate;
linearGates[i_gateIndex + 1 * hiddenSize] = forget_gate;
linearGates[i_gateIndex + 2 * hiddenSize] = act_gate;
linearGates[i_gateIndex + 3 * hiddenSize] = out_gate;
linearGates[i_gateIndex + 4 * hiddenSize] = r_gate;
linearGates[i_gateIndex + 5 * hiddenSize] = lin_gate;
}
float val = (forget_gate * c_in[index]) + (in_gate * act_gate);
c_out[index] = val;
val = out_gate * tanhf(val);
val = val * r_gate + (1. - r_gate) * lin_gate;
val = val * dropout_in[index];
h_out[index] = val;
}
void highway_lstm_backward_ongpu(int inputSize, int hiddenSize, int miniBatch,
int numLayers, int seqLength, float *out_grad, int *lengths,
float *h_data_grad, float * c_data_grad, float *x, float *h_data,
float *c_data, float *T,
float *gates_out, float *dropout_in, float *h_gates_grad,
float *i_gates_grad, float *h_out_grad, float *c_out_grad, float *x_grad, float *T_grad, float *bias_grad,
int isTraining, int do_weight_grad, cudaStream_t stream, cublasHandle_t handle) {
const int numElements = hiddenSize * miniBatch;
cudaStream_t stream_i;
cudaStream_t stream_h;
cudaStream_t stream_wi;
cudaStream_t stream_wh;
cudaStream_t stream_wb;
cudaErrCheck(cudaStreamCreate(&stream_i));
cudaErrCheck(cudaStreamCreate(&stream_h));
cudaErrCheck(cudaStreamCreate(&stream_wi));
cudaErrCheck(cudaStreamCreate(&stream_wh));
cudaErrCheck(cudaStreamCreate(&stream_wb));
float one = 1.f;
float zero = 0.f;
float *ones_host = new float[miniBatch];
for (int i=0; i < miniBatch; i++) {
ones_host[i] = 1.f;
}
float *ones;
cudaErrCheck(cudaMalloc((void**)&ones, miniBatch * sizeof(float)));
cudaErrCheck(cudaMemcpy(ones, ones_host, miniBatch * sizeof(float), cudaMemcpyHostToDevice));
for (int layer = numLayers-1; layer >= 0; layer--) {
int direction;
int startInd;
int currNumCovered;
if (layer % 2 == 0) {
// even layers run forward in time; backprop traverses them in reverse
direction = -1;
startInd = seqLength-1;
currNumCovered = 0;
} else {
// odd layers run backward in time; backprop traverses them forward
direction = 1;
startInd = 0;
currNumCovered = miniBatch;
}
for (int t = startInd; t < seqLength && t >= 0; t = t + direction) {
int prevIndex;
int prevGradIndex;
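// Sequences are assumed sorted by decreasing length, so at time t only the first
// currNumCovered batch rows are still active. h_data/c_data hold seqLength+1 slots
// per layer: slot 0 is the initial state and slot t+1 is the output of step t; the
// (t+2)%(seqLength+1) wrap below maps the step past the sequence edge back to slot 0.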
if (direction == 1) {
while (lengths[currNumCovered-1] <= t) {
currNumCovered--;
}
prevGradIndex = t;
prevIndex = (t+2)%(seqLength+1);
} else {
while ((currNumCovered < miniBatch) && (lengths[currNumCovered] > t)) {
currNumCovered++;
}
prevGradIndex = (t+2)%(seqLength+1);
prevIndex = t;
}
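// Incoming dL/dh for this layer and timestep: the top layer reads the external
// out_grad, lower layers read the h_out_grad slice written while backpropagating
// the layer above.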
float * gradPtr;
if (layer == numLayers-1) {
gradPtr = out_grad + t * numElements;
} else {
gradPtr = h_out_grad + t * numElements + layer * seqLength * numElements;
}
float * cGradPtr = c_out_grad + t * numElements + layer * seqLength * numElements;
cublasErrCheck(cublasSetStream(handle, stream_i));
dim3 blockDim;
dim3 gridDim;
blockDim.x = BLOCK;
gridDim.x = ((currNumCovered * hiddenSize) + blockDim.x - 1) / blockDim.x;
elementWise_bp <<< gridDim, blockDim , 0, stream>>>
(hiddenSize, miniBatch, currNumCovered,
gradPtr,
cGradPtr,
h_data_grad + prevGradIndex * numElements + layer * (seqLength + 1) * numElements,
c_data_grad + prevGradIndex * numElements + layer * (seqLength + 1) * numElements,
c_data + prevIndex * numElements + layer * (seqLength + 1) * numElements,
c_data + (t+1) * numElements + layer * (seqLength + 1) * numElements,
h_data + (t+1) * numElements + layer * (seqLength + 1) * numElements,
gates_out + t * 6 * numElements + layer * seqLength * 6 * numElements,
dropout_in + layer * numElements,
c_data_grad + (t+1) * numElements + layer * (seqLength + 1) * numElements,
i_gates_grad,
h_gates_grad,
isTraining);
cudaErrCheck(cudaGetLastError());
// END
cudaErrCheck(cudaDeviceSynchronize());
float *out_grad_ptr;
int weightStart;
int inSize;
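// Weight layout in T: per layer, the 6*hiddenSize x inSize input weights come first,
// followed by the 5*hiddenSize x hiddenSize recurrent weights. Layer 0 uses
// inSize = inputSize, every later layer uses hiddenSize, hence the
// 6*h*inputSize + 5*h*h offset plus 11*h*h per additional layer.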
if (layer == 0) {
inSize = inputSize;
out_grad_ptr = x_grad + t * inputSize * miniBatch;
weightStart = 0;
} else {
inSize = hiddenSize;
out_grad_ptr = h_out_grad + t * numElements + (layer-1) * seqLength * numElements;
weightStart = 6 * hiddenSize * inputSize + 5 * hiddenSize * hiddenSize + (layer - 1) * 11 * hiddenSize * hiddenSize;
}
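// Backpropagate through the input projection: d_input = W_i^T * d_i_gates
// (for layer 0 this is x_grad, otherwise the h_out_grad of the layer below).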
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_T, CUBLAS_OP_N,
inSize, currNumCovered, 6*hiddenSize,
&one,
&T[weightStart],
6 * hiddenSize,
i_gates_grad,
6 * hiddenSize,
&zero,
out_grad_ptr,
inSize));
cublasErrCheck(cublasSetStream(handle, stream_h));
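// Backpropagate through the recurrent projection: dL/dh_prev = W_h^T * d_h_gates.
// The result is stored in h_data_grad and read back via prevGradIndex on the
// next backprop iteration.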
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_T, CUBLAS_OP_N,
hiddenSize, currNumCovered, 5*hiddenSize,
&one,
&T[weightStart + 6*hiddenSize*inSize],
5 * hiddenSize,
h_gates_grad,
5 * hiddenSize,
&zero,
h_data_grad + (t+1) * numElements + layer * (seqLength+1) * numElements,
hiddenSize));
if (do_weight_grad == 1) {
float *inputPtr;
if (layer == 0) {
inputPtr = x + t * inputSize * miniBatch;
} else {
inputPtr = h_data + (t+1) * numElements + (layer - 1) * (seqLength+1) * numElements;
}
cublasErrCheck(cublasSetStream(handle, stream_wi));
// Update i_weights
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_T,
6 * hiddenSize, inSize, currNumCovered,
&one,
i_gates_grad,
6 * hiddenSize,
inputPtr,
inSize,
&one,
&T_grad[weightStart],
6 * hiddenSize));
cublasErrCheck(cublasSetStream(handle, stream_wh));
// Update h_weights
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_T,
5 * hiddenSize, hiddenSize, currNumCovered,
&one,
h_gates_grad,
5 * hiddenSize,
h_data + prevIndex * numElements + layer * (seqLength+1) * numElements,
hiddenSize,
&one,
&T_grad[weightStart + 6 *hiddenSize*inSize],
5 * hiddenSize));
cublasErrCheck(cublasSetStream(handle, stream_wb));
// Update bias_weights
cublasErrCheck(cublasSgemv(handle,
CUBLAS_OP_N,
5 * hiddenSize, currNumCovered,
&one,
h_gates_grad,
5 * hiddenSize,
ones,
1,
&one,
&bias_grad[layer * 5 * hiddenSize],
1));
}
cudaErrCheck(cudaDeviceSynchronize());
}
}
cublasErrCheck(cublasSetStream(handle, stream));
cudaErrCheck(cudaStreamDestroy(stream_i));
cudaErrCheck(cudaStreamDestroy(stream_h));
cudaErrCheck(cudaStreamDestroy(stream_wi));
cudaErrCheck(cudaStreamDestroy(stream_wh));
cudaErrCheck(cudaStreamDestroy(stream_wb));
delete [] ones_host;
cudaErrCheck(cudaFree(ones));
cudaErrCheck(cudaDeviceSynchronize());
}
void highway_lstm_forward_ongpu(int inputSize, int hiddenSize, int miniBatch,
int numLayers, int seqLength, float *x, int *lengths, float *h_data,
float *c_data, float *tmp_i, float *tmp_h, float *T, float *bias,
float *dropout, float *gates, int is_training, cudaStream_t stream, cublasHandle_t handle) {
const int numElements = hiddenSize * miniBatch;
float zero = 0.f;
float one = 1.f;
cudaStream_t stream_i;
cudaStream_t stream_h;
cudaErrCheck(cudaStreamCreate(&stream_i));
cudaErrCheck(cudaStreamCreate(&stream_h));
for (int layer = 0; layer < numLayers; layer++) {
int direction;
int startInd;
int currNumCovered;
if (layer % 2 == 0) {
// forward direction
direction = 1;
startInd = 0;
currNumCovered = miniBatch;
} else {
// backward direction
direction = -1;
startInd = seqLength-1;
currNumCovered = 0;
}
cublasErrCheck(cublasSetStream(handle, stream));
for (int t = startInd; t < seqLength && t >= 0; t = t + direction) {
int prevIndex;
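// Same active-batch bookkeeping as in the backward pass: only the first
// currNumCovered rows (sequences sorted by decreasing length) are active at time t.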
if (direction == 1) {
while (lengths[currNumCovered-1] <= t) {
currNumCovered--;
}
prevIndex = t;
} else {
while ((currNumCovered < miniBatch) && (lengths[currNumCovered] > t)) {
currNumCovered++;
}
prevIndex = (t+2)%(seqLength+1);
}
int inSize;
int weightStart;
float *inputPtr;
if (layer == 0) {
inSize = inputSize;
weightStart = 0;
inputPtr = x + t * inputSize * miniBatch;
prevIndex = t;
} else {
inSize = hiddenSize;
weightStart = 6 * hiddenSize * inputSize + 5 * hiddenSize * hiddenSize + (layer - 1) * 11 * hiddenSize * hiddenSize;
inputPtr = h_data + (t+1) * numElements + (layer - 1) * (seqLength+1) * numElements;
}
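// Input and recurrent projections for this step: tmp_i = W_i * x_t covers all six
// gates, tmp_h = W_h * h_prev covers only the five gates with a recurrent term.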
cublasErrCheck(cublasSetStream(handle, stream_i));
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_N,
6*hiddenSize, currNumCovered, inSize,
&one,
&T[weightStart],
6 * hiddenSize,
inputPtr,
inSize,
&zero,
tmp_i,
6 * hiddenSize));
cublasErrCheck(cublasSetStream(handle, stream_h));
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_N,
5*hiddenSize, currNumCovered, hiddenSize,
&one,
&T[6 * hiddenSize * inSize + weightStart],
5 * hiddenSize,
h_data + prevIndex * numElements + layer * (seqLength + 1) * numElements,
hiddenSize,
&zero,
tmp_h,
5 * hiddenSize));
cudaErrCheck(cudaDeviceSynchronize());
dim3 blockDim;
dim3 gridDim;
blockDim.x = BLOCK;
gridDim.x = ((currNumCovered * hiddenSize) + blockDim.x - 1) / blockDim.x;
elementWise_fp <<< gridDim, blockDim , 0, stream>>>
(hiddenSize, miniBatch, currNumCovered,
tmp_h,
tmp_i,
bias + 5 * layer * hiddenSize,
is_training ? gates + 6 * (t * numElements + layer * seqLength * numElements) : NULL,
h_data + (t + 1) * numElements + layer * (seqLength + 1) * numElements,
dropout + layer * numElements,
c_data + prevIndex * numElements + layer * (seqLength + 1) * numElements,
c_data + (t + 1) * numElements + layer * (seqLength + 1) * numElements,
is_training);
cudaErrCheck(cudaGetLastError());
cudaErrCheck(cudaDeviceSynchronize());
}
}
cublasErrCheck(cublasSetStream(handle, stream));
cudaErrCheck(cudaStreamDestroy(stream_i));
cudaErrCheck(cudaStreamDestroy(stream_h));
cudaErrCheck(cudaDeviceSynchronize());
}
#ifdef __cplusplus
}
#endif
|
6ded4cc5dd658213c7e4343a41f0d98276c113d0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/native/sparse/hip/SparseHIPTensorMath.cuh>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/native/sparse/hip/SparseHIPBlas.cuh>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <bitset>
#include <hipsparse.h>
#include <hip/hip_runtime_api.h>
#include <memory>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
Tensor _to_csr_int(const Tensor& rowIndices, int64_t dim, int64_t nnz) {
Tensor csr = at::empty({dim+1}, CUDA(kInt));
Tensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
}
void s_addmm_out_csr_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& crow_indices, Tensor& col_indices, Tensor& values, const Tensor& dense) {
TORCH_INTERNAL_ASSERT(nnz > 0);
// No half support, so we don't have to use CUDATypeConversion
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
values.scalar_type(), "addmm_sparse_cuda", [&] {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
Tensor r__;
if (cast_beta == scalar_t(0)) {
r_.zero_();
} else if (!is_same_tensor(t, r_)) {
r_.copy_(t);
}
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// Note: This storage arrangement is preferred due to most of the CUDA kernels handle only contiguous tensors
r__ = r_.transpose(0, 1).clone(at::MemoryFormat::Contiguous);
r__.transpose_(0, 1);
}
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) == dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data_ptr<scalar_t>(),
crow_indices.data_ptr<int32_t>(),
col_indices.data_ptr<int32_t>(),
dense_.data_ptr<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data_ptr<scalar_t>(),
r__.stride(1));
if (!is_same_tensor(r__, r_)) {
r_.copy_(r__);
}
}
);
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense) {
Tensor rowIndices = indices.select(0, 0);
Tensor colIndices = indices.select(0, 1);
Tensor crow_indices = _to_csr_int(rowIndices, m, nnz);
Tensor col_indices = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
col_indices.copy_(colIndices);
s_addmm_out_csr_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, crow_indices, col_indices, values, dense);
}
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, const Scalar& beta, const Scalar& alpha) {
TORCH_CHECK(t.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = sparse._indices();
Tensor values = sparse._values();
if (nnz == 0) {
at::mul_out(r_, t, at::scalar_tensor(beta, r_.options()));
return r_;
}
s_addmm_out_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, indices, values, dense);
return r_;
}
Tensor& addmm_out_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha,
Tensor& result
) {
c10::MaybeOwned<Tensor> b_self = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_out_sparse_dense_cuda(result, *b_self, mat1, mat2, beta, alpha);
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
Tensor r = at::empty({0}, t.options());
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor addmm_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha
) {
c10::MaybeOwned<Tensor> b_self = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_sparse_dense_cuda(*b_self, mat1, mat2, beta, alpha);
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(
const SparseTensor& sparse_,
const Tensor& dense,
SparseTensor& r_
/* , const Scalar& alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
Tensor spIndices = newSparse._indices();
Tensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = at::empty({0}, sparse.options());
hspmm_out_sparse_cuda(sparse, dense, r);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
template <typename T>
struct TensorCAddOp {
TensorCAddOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out += val * *in;
}
__device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
*out = *in1 + val * *in2;
}
T val;
};
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, const at::Scalar& value) {
TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
auto commonDtype = at::result_type(dense, sparse);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor r = r_;
if (r_.scalar_type() != commonDtype) {
r = at::empty_like(dense, r_.options().dtype(commonDtype));
}
Tensor dense_buffer = dense.to(commonDtype);
Tensor values = sparse._values().to(commonDtype);
if (is_same_tensor(r, dense_buffer)) {
TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
} else {
r.resize_as_(dense);
r.copy_(dense_buffer);
}
Tensor indices = sparse._indices();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (values.numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
if (sparse.dense_dim() == 0) {
TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernelScalar), dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
} else {
TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
} else {
Tensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values, value);
}
THCudaCheck(hipGetLastError());
r_.copy_(r);
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, const Scalar& value);
SparseTensor& add_out_sparse_cuda(const SparseTensor& t, const SparseTensor& src, const Scalar& value, SparseTensor& r_) {
if (!t.is_sparse()) {
return add_out_dense_sparse_cuda(r_, t, src, value);
}
// TODO: This test seems a bit goofy
TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t, src}));
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
Tensor t_indices_ = t._indices();
Tensor s_indices_ = src._indices();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_values_ = src._values().to(commonDtype);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != scalar_t(1)) {
s_values_ = s_values_.mul(value);
}
});
Tensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
if (r_.scalar_type() != commonDtype) {
SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype));
promoted.resize_as_(src);
alias_into_sparse(promoted, r_indices_, r_values_);
// performs the addition under the common dtype.
promoted = promoted.coalesce();
r_values_ = promoted._values().to(r_.scalar_type());
r_indices_ = promoted._indices();
} else {
r_.resize_as_(src);
}
alias_into_sparse(r_, r_indices_, r_values_);
// Prevent unbounded growth of nnz
// TODO: Improved heuristic on when to coalesce or remove need to coalesce
if (r_._nnz() > r_.numel()) {
auto c = r_.coalesce();
alias_into_sparse(r_, c._indices(), c._values());
}
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
template <typename T>
struct TensorMulOp {
__device__ __forceinline__ void operator()(T* out, T* in) {
*out *= *in;
}
__device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
*out = *in1 * *in2;
}
};
SparseTensor& mul_out_sparse_cuda(const SparseTensor& t_, const SparseTensor& src_, SparseTensor& r_) {
if (src_.dim() == 0) {
return mul_out_sparse_zerodim(r_, t_, src_);
} else if (t_.dim() == 0) {
return mul_out_sparse_zerodim(r_, src_, t_);
}
TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t_, src_}));
TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
r_.resize_as_(src_);
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = ::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparse_dim = src.sparse_dim();
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor t_indices_ = t._indices().contiguous();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_indices_ = src._indices().contiguous();
Tensor s_values_ = src._values().to(commonDtype);
Tensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
r_.resize_as_(src);
Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
Tensor resultNnz = at::empty({1}, CUDA(kLong));
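// Two kernels: the value kernel applies TensorMulOp at intersecting indices, while
// the index kernel writes the intersected indices and the final nnz count into
// resultNnz (copied back to the host below).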
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, commonDtype, "mul_out_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::valueSparseIntersectionKernel), dim3(grid), dim3(block), 0, stream,
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( apply::indexSparseIntersectionKernel<uint64_t, scalar_t>)
, dim3(1), dim3(1), 0, stream,
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
r_values_ = r_values_.to(r_.scalar_type());
get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
// sync! (surely there is a more idiomatic way to do this...)
Tensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
return r_._coalesced_(true);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
template <typename scalar_t>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM())
#endif
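// One thread per input nonzero: input_indices_pos holds the lower_bound position of
// each input index within the (sorted, coalesced) grad indices; if the indices match,
// the corresponding grad values row is copied, otherwise the output row is zeroed.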
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
Tensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
Tensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
grad_input_values = grad_values_expand;
}
else {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted
auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
// store lower_bound of input indices at grad indices
Tensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
hipLaunchKernelGGL(( _sparse_sum_backward_cuda_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream,
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
}
}
Tensor bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(self, mat2, false, result);
}
Tensor _bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2, bool deterministic) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(self, mat2, deterministic, result);
}
#if !(defined(__HIP_PLATFORM_HCC__) || (defined(_MSC_VER) && CUSPARSE_VERSION < 11000))
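// One thread per matrix: binary-search the sorted batch-dimension indices of the
// coalesced COO tensor for the last element belonging to target_mat_num, writing -1
// if that matrix has no nonzero elements.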
__global__ void search_end_matrix_indices_cuda_kernel(
int64_t* mat_el_end_indices,
int64_t num_matrices,
const TensorInfo<int64_t, int64_t> indices_1D_ti,
const int64_t num_elements
){
const int64_t target_mat_num = blockIdx.x * blockDim.x + threadIdx.x;
if (target_mat_num >= num_matrices) return;
const int64_t* indices_1D = indices_1D_ti.data;
const int64_t indices_1D_stride = indices_1D_ti.strides[0];
int64_t start_idx = 0;
int64_t end_idx = num_elements - 1;
int64_t mid_idx = (start_idx + end_idx) >> 1;
int64_t mid_val = indices_1D[mid_idx*indices_1D_stride];
bool found;
while (
start_idx <= end_idx
) {
bool trim_right = mid_val > target_mat_num;
int64_t mid_idx_minus_1 = mid_idx - 1;
int64_t mid_idx_plus_1 = mid_idx + 1;
end_idx = trim_right ? mid_idx_minus_1 : end_idx;
start_idx = trim_right ? start_idx : mid_idx_plus_1;
mid_idx = (start_idx + end_idx) >> 1;
mid_val = indices_1D[mid_idx*indices_1D_stride];
}
found = (mid_val == target_mat_num)
&& (
(mid_idx == (num_elements-1))
|| (indices_1D[(mid_idx+1)*indices_1D_stride] != target_mat_num)
);
mat_el_end_indices[target_mat_num] = found ? mid_idx : -1;
}
// Search through a 1D tensor of sorted sparse matrix
// indices to find the end index for each matrix
void search_end_matrix_indices(int64_t* mat_el_end_indices, int64_t num_matrices, const Tensor& indices_1D) {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto indices_1D_ti = getTensorInfo<int64_t, int64_t>(indices_1D);
int64_t grid_size = (num_matrices / 64)+1;
int64_t block_size = 64;
int64_t num_elements = indices_1D.size(0);
hipLaunchKernelGGL(( search_end_matrix_indices_cuda_kernel), dim3(grid_size), dim3(block_size), 0, stream,
mat_el_end_indices,
num_matrices,
indices_1D_ti,
num_elements
);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipDeviceSynchronize();
}
hipDataType getTensorCudaDataType(Tensor self) {
hipDataType cuda_data_type;
switch (self.scalar_type()) {
case ScalarType::Float:
cuda_data_type = HIP_R_32F;
break;
case ScalarType::Double:
cuda_data_type = HIP_R_64F;
break;
default:
TORCH_CHECK(false, "Tensor types must be either float32 or float64");
break;
}
return cuda_data_type;
}
#endif
Tensor& bmm_out_sparse_cuda(const SparseTensor& self, const Tensor& mat2, Tensor& result) {
return _bmm_out_sparse_cuda(self, mat2, false, result);
}
Tensor& _bmm_out_sparse_cuda(const SparseTensor& self, const Tensor& mat2, bool deterministic, Tensor& result) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "bmm sparse-dense is not supported on HIP");
#elif defined(_MSC_VER) && (CUSPARSE_VERSION < 11000)
TORCH_CHECK(false, "bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) // linux cuda >= 10.1 or windows cuda >= 11.0
TORCH_CHECK(!mat2.is_sparse(), "bmm_sparse: Tensor 'mat2' must be dense");
TORCH_CHECK(self.dense_dim() == 0, "bmm_sparse: Tensor 'self' must have 0 dense dims, but has ", self.dense_dim());
TORCH_CHECK(self.sparse_dim() == 3, "bmm_sparse: Tensor 'self' must have 3 sparse dims, but has ", self.sparse_dim());
TORCH_CHECK(mat2.dim() == 3, "bmm_sparse: Tensor 'mat2' must have 3 dims, but has ", mat2.dim());
TORCH_CHECK(self.size(0) == mat2.size(0), "bmm_sparse: 'self.size(0)' and 'mat2.size(0)' must match");
TORCH_CHECK(self.size(2) == mat2.size(1), "bmm_sparse: 'self.size(2)' and 'mat2.size(1)' must match");
int64_t num_matrices = self.size(0);
int64_t dim_i = self.size(1);
int64_t dim_j = self.size(2);
int64_t dim_k = mat2.size(2);
result.resize_({num_matrices, dim_k, dim_i});
if ((self._nnz() == 0) || (dim_j == 0) || (dim_k == 0)) {
result.zero_().transpose_(1, 2);
return result;
}
Tensor tmp_result;
bool need_copy_result;
// If the result tensor is contiguous, we can just write results directly to it.
// Otherwise, we'll need to write results to a temp buffer and then copy.
if (result.is_contiguous()) {
tmp_result = result;
need_copy_result = false;
} else {
tmp_result = at::empty({num_matrices, dim_k, dim_i}, result.options(), at::MemoryFormat::Contiguous);
need_copy_result = true;
}
// Dense matrices have to be contiguous for hipsparseSpMM to work
const Tensor mat2_contig = mat2.contiguous();
auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
// First need to coalesce to get all of the first dimension indices
// in order since we'll be sending each matrix into the MM operation
SparseTensor self_coalesced = self.coalesce();
int64_t nnz = self_coalesced._nnz();
Tensor indices = self_coalesced._indices();
Tensor values = self_coalesced._values();
Tensor indices_dim0 = indices[0];
// Need to convert dim1 and dim2 indices to 32-bit since hipsparseSpMM
// only supports 32-bit indices
Tensor indices_dim1 = indices[1].to(ScalarType::Int);
Tensor indices_dim2 = indices[2].to(ScalarType::Int);
std::unique_ptr<int64_t[]> mat_el_end_indices_host(new int64_t[num_matrices]);
{
auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
auto dataPtr = allocator.allocate(num_matrices*sizeof(int64_t));
int64_t* mat_el_end_indices_device = static_cast<int64_t*>(dataPtr.get());
search_end_matrix_indices(mat_el_end_indices_device, num_matrices, indices_dim0);
AT_CUDA_CHECK(hipMemcpy(
mat_el_end_indices_host.get(),
mat_el_end_indices_device,
num_matrices*sizeof(int64_t),
hipMemcpyDeviceToHost
));
}
// Need a pointer to an array to access within a lambda
int64_t* mat_el_end_indices = &mat_el_end_indices_host[0];
Scalar beta = 0;
Scalar alpha = 1;
int64_t mat_el_begin_idx = 0;
size_t workspace_buffer_size = 0;
void* workspace_buffer = nullptr;
auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
::c10::DataPtr dataPtr;
// See Note [Enabling Deterministic Operations]
deterministic = deterministic || globalContext().deterministicAlgorithms();
hipsparseSpMMAlg_t mm_alg = deterministic ? HIPSPARSE_COOMM_ALG2 : HIPSPARSE_COOMM_ALG1;
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
values.scalar_type(), "bmm_sparse_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
uint32_t* row_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim1.data_ptr());
uint32_t* col_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim2.data_ptr());
scalar_t* values_start_ptr = reinterpret_cast<scalar_t*>(values.data_ptr());
scalar_t* mat2_start_ptr = reinterpret_cast<scalar_t*>(mat2_contig.data_ptr());
scalar_t* result_start_ptr = reinterpret_cast<scalar_t*>(tmp_result.data_ptr());
for (
int64_t cur_mat_num = 0;
(cur_mat_num < num_matrices);
cur_mat_num++
) {
int64_t mat_el_end_idx = mat_el_end_indices[cur_mat_num];
if (mat_el_end_idx != -1) {
mat_el_end_idx++;
// Create tensors to view just the current set of matrices
int64_t sparse_nnz = mat_el_end_idx - mat_el_begin_idx;
hipDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
uint32_t* row_indices_ptr = &row_indices_start_ptr[mat_el_begin_idx];
uint32_t* col_indices_ptr = &col_indices_start_ptr[mat_el_begin_idx];
scalar_t* values_ptr = &values_start_ptr[mat_el_begin_idx];
hipsparseSpMatDescr_t sparse_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateCoo(
&sparse_descr,
dim_i,
dim_j,
sparse_nnz,
reinterpret_cast<void*>(row_indices_ptr),
reinterpret_cast<void*>(col_indices_ptr),
reinterpret_cast<void*>(values_ptr),
HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO,
cuda_data_type
));
scalar_t* mat2_ptr = &mat2_start_ptr[dim_k*dim_j*cur_mat_num];
hipsparseDnMatDescr_t dense_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&dense_descr,
dim_k,
dim_j,
dim_k,
reinterpret_cast<void*>(mat2_ptr),
cuda_data_type,
HIPSPARSE_ORDER_COL
));
scalar_t* result_ptr = &result_start_ptr[dim_i*dim_k*cur_mat_num];
hipsparseDnMatDescr_t result_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&result_descr,
dim_i,
dim_k,
dim_i,
reinterpret_cast<void*>(result_ptr),
cuda_data_type,
HIPSPARSE_ORDER_COL
));
size_t required_workspace_buffer_size = 0;
TORCH_CUDASPARSE_CHECK(hipsparseSpMM_bufferSize(
cusparse_handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
&required_workspace_buffer_size
));
if (required_workspace_buffer_size > workspace_buffer_size) {
workspace_buffer_size = required_workspace_buffer_size;
dataPtr = allocator.allocate(workspace_buffer_size);
workspace_buffer = dataPtr.get();
}
TORCH_CUDASPARSE_CHECK(hipsparseSpMM(
cusparse_handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
workspace_buffer
));
TORCH_CUDASPARSE_CHECK(hipsparseDestroySpMat(sparse_descr));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(dense_descr));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(result_descr));
mat_el_begin_idx = mat_el_end_idx;
} else {
tmp_result[cur_mat_num].zero_();
}
}
}
);
if (need_copy_result) {
result.copy_(tmp_result);
}
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(1,2);
#else
TORCH_CHECK(false, "bmm sparse-dense requires CUDA 10.1 or greater");
#endif
return result;
}
}} // namespace at::native
|
6ded4cc5dd658213c7e4343a41f0d98276c113d0.cu
|
#include <ATen/native/sparse/cuda/SparseCUDATensorMath.cuh>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/native/sparse/cuda/SparseCUDABlas.cuh>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <bitset>
#include <cusparse.h>
#include <cuda_runtime_api.h>
#include <memory>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
Tensor _to_csr_int(const Tensor& rowIndices, int64_t dim, int64_t nnz) {
Tensor csr = at::empty({dim+1}, CUDA(kInt));
Tensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
}
void s_addmm_out_csr_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& crow_indices, Tensor& col_indices, Tensor& values, const Tensor& dense) {
TORCH_INTERNAL_ASSERT(nnz > 0);
// No half support, so we don't have to use CUDATypeConversion
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
values.scalar_type(), "addmm_sparse_cuda", [&] {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
Tensor r__;
if (cast_beta == scalar_t(0)) {
r_.zero_();
} else if (!is_same_tensor(t, r_)) {
r_.copy_(t);
}
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// Note: This storage arrangement is preferred due to most of the CUDA kernels handle only contiguous tensors
r__ = r_.transpose(0, 1).clone(at::MemoryFormat::Contiguous);
r__.transpose_(0, 1);
}
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) == dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data_ptr<scalar_t>(),
crow_indices.data_ptr<int32_t>(),
col_indices.data_ptr<int32_t>(),
dense_.data_ptr<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data_ptr<scalar_t>(),
r__.stride(1));
if (!is_same_tensor(r__, r_)) {
r_.copy_(r__);
}
}
);
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense) {
Tensor rowIndices = indices.select(0, 0);
Tensor colIndices = indices.select(0, 1);
Tensor crow_indices = _to_csr_int(rowIndices, m, nnz);
Tensor col_indices = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
col_indices.copy_(colIndices);
s_addmm_out_csr_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, crow_indices, col_indices, values, dense);
}
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, const Scalar& beta, const Scalar& alpha) {
TORCH_CHECK(t.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = sparse._indices();
Tensor values = sparse._values();
if (nnz == 0) {
at::mul_out(r_, t, at::scalar_tensor(beta, r_.options()));
return r_;
}
s_addmm_out_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, indices, values, dense);
return r_;
}
Tensor& addmm_out_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha,
Tensor& result
) {
c10::MaybeOwned<Tensor> b_self = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_out_sparse_dense_cuda(result, *b_self, mat1, mat2, beta, alpha);
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
Tensor r = at::empty({0}, t.options());
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor addmm_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha
) {
c10::MaybeOwned<Tensor> b_self = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_sparse_dense_cuda(*b_self, mat1, mat2, beta, alpha);
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(
const SparseTensor& sparse_,
const Tensor& dense,
SparseTensor& r_
/* , const Scalar& alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
Tensor spIndices = newSparse._indices();
Tensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = at::empty({0}, sparse.options());
hspmm_out_sparse_cuda(sparse, dense, r);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
template <typename T>
struct TensorCAddOp {
TensorCAddOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out += val * *in;
}
__device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
*out = *in1 + val * *in2;
}
T val;
};
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, const at::Scalar& value) {
TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
auto commonDtype = at::result_type(dense, sparse);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor r = r_;
if (r_.scalar_type() != commonDtype) {
r = at::empty_like(dense, r_.options().dtype(commonDtype));
}
Tensor dense_buffer = dense.to(commonDtype);
Tensor values = sparse._values().to(commonDtype);
if (is_same_tensor(r, dense_buffer)) {
TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
} else {
r.resize_as_(dense);
r.copy_(dense_buffer);
}
Tensor indices = sparse._indices();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (values.numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
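    // Coalesced input: scatter each nonzero straight into the dense output, using the scalar kernel
    // when there are no dense dims and the per-slice kernel otherwise.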
if (sparse.dense_dim() == 0) {
TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernelScalar<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
} else {
TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
} else {
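    // General (possibly uncoalesced) path: flatten each sparse index to a single row id, view the output
    // as a (sparse-rows x dense-cols) matrix, and let index_add_ accumulate value-scaled entries, duplicates included.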
Tensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values, value);
}
THCudaCheck(cudaGetLastError());
r_.copy_(r);
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, const Scalar& value);
SparseTensor& add_out_sparse_cuda(const SparseTensor& t, const SparseTensor& src, const Scalar& value, SparseTensor& r_) {
if (!t.is_sparse()) {
return add_out_dense_sparse_cuda(r_, t, src, value);
}
// TODO: This test seems a bit goofy
TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t, src}));
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
Tensor t_indices_ = t._indices();
Tensor s_indices_ = src._indices();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_values_ = src._values().to(commonDtype);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != scalar_t(1)) {
s_values_ = s_values_.mul(value);
}
});
Tensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
if (r_.scalar_type() != commonDtype) {
SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype));
promoted.resize_as_(src);
alias_into_sparse(promoted, r_indices_, r_values_);
// performs the addition under the common dtype.
promoted = promoted.coalesce();
r_values_ = promoted._values().to(r_.scalar_type());
r_indices_ = promoted._indices();
} else {
r_.resize_as_(src);
}
alias_into_sparse(r_, r_indices_, r_values_);
// Prevent unbounded growth of nnz
// TODO: Improved heuristic on when to coalesce or remove need to coalesce
if (r_._nnz() > r_.numel()) {
auto c = r_.coalesce();
alias_into_sparse(r_, c._indices(), c._values());
}
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
template <typename T>
struct TensorMulOp {
__device__ __forceinline__ void operator()(T* out, T* in) {
*out *= *in;
}
__device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
*out = *in1 * *in2;
}
};
SparseTensor& mul_out_sparse_cuda(const SparseTensor& t_, const SparseTensor& src_, SparseTensor& r_) {
if (src_.dim() == 0) {
return mul_out_sparse_zerodim(r_, t_, src_);
} else if (t_.dim() == 0) {
return mul_out_sparse_zerodim(r_, src_, t_);
}
TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t_, src_}));
TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
r_.resize_as_(src_);
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = std::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparse_dim = src.sparse_dim();
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor t_indices_ = t._indices().contiguous();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_indices_ = src._indices().contiguous();
Tensor s_values_ = src._values().to(commonDtype);
Tensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
r_.resize_as_(src);
Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
Tensor resultNnz = at::empty({1}, CUDA(kLong));
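  // Two kernels: a parallel one multiplies values at matching indices, then a single-thread kernel
  // builds the intersected index set and records the resulting nnz.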
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, commonDtype, "mul_out_sparse_cuda", [&] {
apply::valueSparseIntersectionKernel<<<grid, block, 0, stream>>>(
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
C10_CUDA_KERNEL_LAUNCH_CHECK();
apply::indexSparseIntersectionKernel<uint64_t, scalar_t>
<<<1, 1, 0, stream>>>(
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
r_values_ = r_values_.to(r_.scalar_type());
get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
// sync! (surely there is a more idiomatic way to do this...)
Tensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
return r_._coalesced_(true);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
template <typename scalar_t>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM())
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti) {
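  // One thread per input nonzero. j is the precomputed lower_bound position of this input index within
  // grad's flattened indices; if the indices match, copy that grad values row, otherwise zero-fill the output row.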
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
Tensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
Tensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
grad_input_values = grad_values_expand;
}
else {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
      auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices over all sparse dims of grad; the output indices are coalesced and sorted

auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
// store lower_bound of input indices at grad indices
Tensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
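      // input_indices_pos[i] now holds the first position in grad's sorted indices that is >= the i-th input index;
      // the kernel checks that position for an exact match.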
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
_sparse_sum_backward_cuda_kernel<scalar_t><<<grid, block, 0, stream>>>(
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
}
}
Tensor bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(self, mat2, false, result);
}
Tensor _bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2, bool deterministic) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(self, mat2, deterministic, result);
}
#if !(defined(__HIP_PLATFORM_HCC__) || (defined(_MSC_VER) && CUSPARSE_VERSION < 11000))
__global__ void search_end_matrix_indices_cuda_kernel(
int64_t* mat_el_end_indices,
int64_t num_matrices,
const TensorInfo<int64_t, int64_t> indices_1D_ti,
const int64_t num_elements
){
const int64_t target_mat_num = blockIdx.x * blockDim.x + threadIdx.x;
if (target_mat_num >= num_matrices) return;
const int64_t* indices_1D = indices_1D_ti.data;
const int64_t indices_1D_stride = indices_1D_ti.strides[0];
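  // Binary search the sorted dim-0 indices for the last element belonging to this matrix;
  // -1 is recorded when the matrix has no nonzeros.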
int64_t start_idx = 0;
int64_t end_idx = num_elements - 1;
int64_t mid_idx = (start_idx + end_idx) >> 1;
int64_t mid_val = indices_1D[mid_idx*indices_1D_stride];
bool found;
while (
start_idx <= end_idx
) {
bool trim_right = mid_val > target_mat_num;
int64_t mid_idx_minus_1 = mid_idx - 1;
int64_t mid_idx_plus_1 = mid_idx + 1;
end_idx = trim_right ? mid_idx_minus_1 : end_idx;
start_idx = trim_right ? start_idx : mid_idx_plus_1;
mid_idx = (start_idx + end_idx) >> 1;
mid_val = indices_1D[mid_idx*indices_1D_stride];
}
found = (mid_val == target_mat_num)
&& (
(mid_idx == (num_elements-1))
|| (indices_1D[(mid_idx+1)*indices_1D_stride] != target_mat_num)
);
mat_el_end_indices[target_mat_num] = found ? mid_idx : -1;
}
// Search through a 1D tensor of sorted sparse matrix
// indices to find the end index for each matrix
void search_end_matrix_indices(int64_t* mat_el_end_indices, int64_t num_matrices, const Tensor& indices_1D) {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
auto indices_1D_ti = getTensorInfo<int64_t, int64_t>(indices_1D);
int64_t grid_size = (num_matrices / 64)+1;
int64_t block_size = 64;
int64_t num_elements = indices_1D.size(0);
search_end_matrix_indices_cuda_kernel<<<grid_size, block_size, 0, stream>>>(
mat_el_end_indices,
num_matrices,
indices_1D_ti,
num_elements
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
cudaDeviceSynchronize();
}
cudaDataType getTensorCudaDataType(Tensor self) {
cudaDataType cuda_data_type;
switch (self.scalar_type()) {
case ScalarType::Float:
cuda_data_type = CUDA_R_32F;
break;
case ScalarType::Double:
cuda_data_type = CUDA_R_64F;
break;
default:
TORCH_CHECK(false, "Tensor types must be either float32 or float64");
break;
}
return cuda_data_type;
}
#endif
Tensor& bmm_out_sparse_cuda(const SparseTensor& self, const Tensor& mat2, Tensor& result) {
return _bmm_out_sparse_cuda(self, mat2, false, result);
}
Tensor& _bmm_out_sparse_cuda(const SparseTensor& self, const Tensor& mat2, bool deterministic, Tensor& result) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "bmm sparse-dense is not supported on HIP");
#elif defined(_MSC_VER) && (CUSPARSE_VERSION < 11000)
TORCH_CHECK(false, "bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) // linux cuda >= 10.1 or windows cuda >= 11.0
TORCH_CHECK(!mat2.is_sparse(), "bmm_sparse: Tensor 'mat2' must be dense");
TORCH_CHECK(self.dense_dim() == 0, "bmm_sparse: Tensor 'self' must have 0 dense dims, but has ", self.dense_dim());
TORCH_CHECK(self.sparse_dim() == 3, "bmm_sparse: Tensor 'self' must have 3 sparse dims, but has ", self.sparse_dim());
TORCH_CHECK(mat2.dim() == 3, "bmm_sparse: Tensor 'mat2' must have 3 dims, but has ", mat2.dim());
TORCH_CHECK(self.size(0) == mat2.size(0), "bmm_sparse: 'self.size(0)' and 'mat2.size(0)' must match");
TORCH_CHECK(self.size(2) == mat2.size(1), "bmm_sparse: 'self.size(2)' and 'mat2.size(1)' must match");
int64_t num_matrices = self.size(0);
int64_t dim_i = self.size(1);
int64_t dim_j = self.size(2);
int64_t dim_k = mat2.size(2);
result.resize_({num_matrices, dim_k, dim_i});
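  // The buffer is laid out as (b, k, i) because cusparseSpMM writes column-major output;
  // the transpose at the end restores the logical (b, i, k) shape.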
if ((self._nnz() == 0) || (dim_j == 0) || (dim_k == 0)) {
result.zero_().transpose_(1, 2);
return result;
}
Tensor tmp_result;
bool need_copy_result;
// If the result tensor is contiguous, we can just write results directly to it.
// Otherwise, we'll need to write results to a temp buffer and then copy.
if (result.is_contiguous()) {
tmp_result = result;
need_copy_result = false;
} else {
tmp_result = at::empty({num_matrices, dim_k, dim_i}, result.options(), at::MemoryFormat::Contiguous);
need_copy_result = true;
}
// Dense matrices have to be contiguous for cusparseSpMM to work
const Tensor mat2_contig = mat2.contiguous();
auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
// First need to coalesce to get all of the first dimension indices
// in order since we'll be sending each matrix into the MM operation
SparseTensor self_coalesced = self.coalesce();
int64_t nnz = self_coalesced._nnz();
Tensor indices = self_coalesced._indices();
Tensor values = self_coalesced._values();
Tensor indices_dim0 = indices[0];
// Need to convert dim1 and dim2 indices to 32-bit since cusparseSpMM
// only supports 32-bit indices
Tensor indices_dim1 = indices[1].to(ScalarType::Int);
Tensor indices_dim2 = indices[2].to(ScalarType::Int);
std::unique_ptr<int64_t[]> mat_el_end_indices_host(new int64_t[num_matrices]);
{
auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
auto dataPtr = allocator.allocate(num_matrices*sizeof(int64_t));
int64_t* mat_el_end_indices_device = static_cast<int64_t*>(dataPtr.get());
search_end_matrix_indices(mat_el_end_indices_device, num_matrices, indices_dim0);
AT_CUDA_CHECK(cudaMemcpy(
mat_el_end_indices_host.get(),
mat_el_end_indices_device,
num_matrices*sizeof(int64_t),
cudaMemcpyDeviceToHost
));
}
// Need a pointer to an array to access within a lambda
int64_t* mat_el_end_indices = &mat_el_end_indices_host[0];
Scalar beta = 0;
Scalar alpha = 1;
int64_t mat_el_begin_idx = 0;
size_t workspace_buffer_size = 0;
void* workspace_buffer = nullptr;
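  // The cusparseSpMM workspace is grown lazily and reused across the matrices in the batch.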
auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
::c10::DataPtr dataPtr;
// See Note [Enabling Deterministic Operations]
deterministic = deterministic || globalContext().deterministicAlgorithms();
cusparseSpMMAlg_t mm_alg = deterministic ? CUSPARSE_COOMM_ALG2 : CUSPARSE_COOMM_ALG1;
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
values.scalar_type(), "bmm_sparse_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
uint32_t* row_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim1.data_ptr());
uint32_t* col_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim2.data_ptr());
scalar_t* values_start_ptr = reinterpret_cast<scalar_t*>(values.data_ptr());
scalar_t* mat2_start_ptr = reinterpret_cast<scalar_t*>(mat2_contig.data_ptr());
scalar_t* result_start_ptr = reinterpret_cast<scalar_t*>(tmp_result.data_ptr());
for (
int64_t cur_mat_num = 0;
(cur_mat_num < num_matrices);
cur_mat_num++
) {
int64_t mat_el_end_idx = mat_el_end_indices[cur_mat_num];
if (mat_el_end_idx != -1) {
mat_el_end_idx++;
// Create tensors to view just the current set of matrices
int64_t sparse_nnz = mat_el_end_idx - mat_el_begin_idx;
cudaDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
uint32_t* row_indices_ptr = &row_indices_start_ptr[mat_el_begin_idx];
uint32_t* col_indices_ptr = &col_indices_start_ptr[mat_el_begin_idx];
scalar_t* values_ptr = &values_start_ptr[mat_el_begin_idx];
cusparseSpMatDescr_t sparse_descr;
TORCH_CUDASPARSE_CHECK(cusparseCreateCoo(
&sparse_descr,
dim_i,
dim_j,
sparse_nnz,
reinterpret_cast<void*>(row_indices_ptr),
reinterpret_cast<void*>(col_indices_ptr),
reinterpret_cast<void*>(values_ptr),
CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO,
cuda_data_type
));
scalar_t* mat2_ptr = &mat2_start_ptr[dim_k*dim_j*cur_mat_num];
cusparseDnMatDescr_t dense_descr;
TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
&dense_descr,
dim_k,
dim_j,
dim_k,
reinterpret_cast<void*>(mat2_ptr),
cuda_data_type,
CUSPARSE_ORDER_COL
));
scalar_t* result_ptr = &result_start_ptr[dim_i*dim_k*cur_mat_num];
cusparseDnMatDescr_t result_descr;
TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
&result_descr,
dim_i,
dim_k,
dim_i,
reinterpret_cast<void*>(result_ptr),
cuda_data_type,
CUSPARSE_ORDER_COL
));
size_t required_workspace_buffer_size = 0;
TORCH_CUDASPARSE_CHECK(cusparseSpMM_bufferSize(
cusparse_handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
&required_workspace_buffer_size
));
if (required_workspace_buffer_size > workspace_buffer_size) {
workspace_buffer_size = required_workspace_buffer_size;
dataPtr = allocator.allocate(workspace_buffer_size);
workspace_buffer = dataPtr.get();
}
TORCH_CUDASPARSE_CHECK(cusparseSpMM(
cusparse_handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
workspace_buffer
));
TORCH_CUDASPARSE_CHECK(cusparseDestroySpMat(sparse_descr));
TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(dense_descr));
TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(result_descr));
mat_el_begin_idx = mat_el_end_idx;
} else {
tmp_result[cur_mat_num].zero_();
}
}
}
);
if (need_copy_result) {
result.copy_(tmp_result);
}
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(1,2);
#else
TORCH_CHECK(false, "bmm sparse-dense requires CUDA 10.1 or greater");
#endif
return result;
}
}} // namespace at::native
|
df09f258da7da5de871987e19cc43e37308bbb6f.hip
|
// !!! This is a file automatically generated by hipify!!!
/*$Id: main.cu 755 2009-11-18 13:22:54Z wenbinor $*/
/**
*This is the source code for Mars, a MapReduce framework on graphics
*processors.
*Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia)
*Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com).
*If you have any question on the code, please contact us at
* [email protected] or [email protected]
*
*The license is a free non-exclusive, non-transferable license to reproduce,
*use, modify and display the source code version of the Software, with or
*without modifications solely for non-commercial research, educational or
*evaluation purposes. The license does not entitle Licensee to technical support,
*telephone assistance, enhancements or updates to the Software. All rights, title
*to and ownership interest in Mars, including all intellectual property rights
*therein shall remain in HKUST.
*/
/******************************************************************
*WordCount (WC): It counts the number of occurrences for each word in a file. Each Map
* task processes a portion of the input file and emits intermediate data pairs, each of which consists
* of a word as the key and a value of 1 for the occurrence. Group is required, and no reduce is
* needed, because the Mars runtime provides the size of each group, after the Group stage.
******************************************************************/
#include "MarsInc.h"
#include "global.h"
#include <ctype.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#define __OUTPUT__
void validate(char* h_filebuf, Spec_t* spec, int num)
{
char* key = (char*)spec->outputKeys;
char* val = (char*)spec->outputVals;
int4* offsetSizes = (int4*)spec->outputOffsetSizes;
int2* range = (int2*)spec->outputKeyListRange;
printf("# of words:%d\n", spec->outputDiffKeyCount);
if (num > spec->outputDiffKeyCount) num = spec->outputDiffKeyCount;
for (int i = 0; i < num; i++)
{
int keyOffset = offsetSizes[range[i].x].x;
int valOffset = offsetSizes[range[i].x].z;
char* word = key + keyOffset;
int wordsize = *(int*)(val + valOffset);
printf("%s - size: %d - count: %d\n", word, wordsize, range[i].y - range[i].x);
}
}
//-----------------------------------------------------------------------
//usage: WordCount datafile
//param: datafile
//-----------------------------------------------------------------------
int main( int argc, char** argv)
{
if (argc != 2)
{
printf("usage: %s datafile\n", argv[0]);
exit(-1);
}
Spec_t *spec = GetDefaultSpec();
spec->workflow = MAP_GROUP;
#ifdef __OUTPUT__
spec->outputToHost = 1;
#endif
TimeVal_t allTimer;
startTimer(&allTimer);
TimeVal_t preTimer;
startTimer(&preTimer);
FILE* fp = fopen(argv[1], "r");
fseek(fp, 0, SEEK_END);
size_t fileSize = ftell(fp) + 1;
rewind(fp);
char* h_filebuf = (char*)malloc(fileSize + 1);
char* d_filebuf = NULL;
size_t tmp = fread(h_filebuf, fileSize, 1, fp);
checkCudaErrors(hipMalloc((void**)&d_filebuf, fileSize));
fclose(fp);
WC_KEY_T key;
key.file = d_filebuf;
for (int i = 0; i < fileSize; i++)
h_filebuf[i] = toupper(h_filebuf[i]);
WC_VAL_T val;
int offset = 0;
char* p = h_filebuf;
char* start = h_filebuf;
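	// Split the uppercased file into ~2048-byte chunks, extend each chunk to the next non-letter so words
	// are not cut in half, NUL-terminate it, and emit one map input record (offset, size) per chunk.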
while (1)
{
int blockSize = 2048;
if (offset + blockSize > fileSize) blockSize = fileSize - offset;
p += blockSize;
for (; *p >= 'A' && *p <= 'Z'; p++);
if (*p != '\0')
{
*p = '\0';
++p;
blockSize = (int)(p - start);
val.line_offset = offset;
val.line_size = blockSize;
AddMapInputRecord(spec, &key, &val, sizeof(WC_KEY_T), sizeof(WC_VAL_T));
offset += blockSize;
start = p;
}
else
{
*p = '\0';
blockSize = (int)(fileSize - offset);
val.line_offset = offset;
val.line_size = blockSize;
AddMapInputRecord(spec, &key, &val, sizeof(WC_KEY_T), sizeof(WC_VAL_T));
break;
}
}
checkCudaErrors(hipMemcpy(d_filebuf, h_filebuf, fileSize, hipMemcpyHostToDevice));
endTimer("preprocess", &preTimer);
//----------------------------------------------
//map/reduce
//----------------------------------------------
MapReduce(spec);
endTimer("all", &allTimer);
//----------------------------------------------
//further processing
//----------------------------------------------
#ifdef __OUTPUT__
checkCudaErrors(hipMemcpy(h_filebuf, d_filebuf, fileSize, hipMemcpyDeviceToHost));
validate(h_filebuf, spec, 10);
#endif
//----------------------------------------------
//finish
//----------------------------------------------
FinishMapReduce(spec);
hipFree(d_filebuf);
if(h_filebuf)
free(h_filebuf);
return 0;
}
|
df09f258da7da5de871987e19cc43e37308bbb6f.cu
|
/*$Id: main.cu 755 2009-11-18 13:22:54Z wenbinor $*/
/**
*This is the source code for Mars, a MapReduce framework on graphics
*processors.
*Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia)
*Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com).
*If you have any question on the code, please contact us at
* [email protected] or [email protected]
*
*The license is a free non-exclusive, non-transferable license to reproduce,
*use, modify and display the source code version of the Software, with or
*without modifications solely for non-commercial research, educational or
*evaluation purposes. The license does not entitle Licensee to technical support,
*telephone assistance, enhancements or updates to the Software. All rights, title
*to and ownership interest in Mars, including all intellectual property rights
*therein shall remain in HKUST.
*/
/******************************************************************
*WordCount (WC): It counts the number of occurrences for each word in a file. Each Map
* task processes a portion of the input file and emits intermediate data pairs, each of which consists
* of a word as the key and a value of 1 for the occurrence. Group is required, and no reduce is
* needed, because the Mars runtime provides the size of each group, after the Group stage.
******************************************************************/
#include "MarsInc.h"
#include "global.h"
#include <ctype.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#define __OUTPUT__
void validate(char* h_filebuf, Spec_t* spec, int num)
{
char* key = (char*)spec->outputKeys;
char* val = (char*)spec->outputVals;
int4* offsetSizes = (int4*)spec->outputOffsetSizes;
int2* range = (int2*)spec->outputKeyListRange;
printf("# of words:%d\n", spec->outputDiffKeyCount);
if (num > spec->outputDiffKeyCount) num = spec->outputDiffKeyCount;
for (int i = 0; i < num; i++)
{
int keyOffset = offsetSizes[range[i].x].x;
int valOffset = offsetSizes[range[i].x].z;
char* word = key + keyOffset;
int wordsize = *(int*)(val + valOffset);
printf("%s - size: %d - count: %d\n", word, wordsize, range[i].y - range[i].x);
}
}
//-----------------------------------------------------------------------
//usage: WordCount datafile
//param: datafile
//-----------------------------------------------------------------------
int main( int argc, char** argv)
{
if (argc != 2)
{
printf("usage: %s datafile\n", argv[0]);
exit(-1);
}
Spec_t *spec = GetDefaultSpec();
spec->workflow = MAP_GROUP;
#ifdef __OUTPUT__
spec->outputToHost = 1;
#endif
TimeVal_t allTimer;
startTimer(&allTimer);
TimeVal_t preTimer;
startTimer(&preTimer);
FILE* fp = fopen(argv[1], "r");
fseek(fp, 0, SEEK_END);
size_t fileSize = ftell(fp) + 1;
rewind(fp);
char* h_filebuf = (char*)malloc(fileSize + 1);
char* d_filebuf = NULL;
size_t tmp = fread(h_filebuf, fileSize, 1, fp);
checkCudaErrors(cudaMalloc((void**)&d_filebuf, fileSize));
fclose(fp);
WC_KEY_T key;
key.file = d_filebuf;
for (int i = 0; i < fileSize; i++)
h_filebuf[i] = toupper(h_filebuf[i]);
WC_VAL_T val;
int offset = 0;
char* p = h_filebuf;
char* start = h_filebuf;
while (1)
{
int blockSize = 2048;
if (offset + blockSize > fileSize) blockSize = fileSize - offset;
p += blockSize;
for (; *p >= 'A' && *p <= 'Z'; p++);
if (*p != '\0')
{
*p = '\0';
++p;
blockSize = (int)(p - start);
val.line_offset = offset;
val.line_size = blockSize;
AddMapInputRecord(spec, &key, &val, sizeof(WC_KEY_T), sizeof(WC_VAL_T));
offset += blockSize;
start = p;
}
else
{
*p = '\0';
blockSize = (int)(fileSize - offset);
val.line_offset = offset;
val.line_size = blockSize;
AddMapInputRecord(spec, &key, &val, sizeof(WC_KEY_T), sizeof(WC_VAL_T));
break;
}
}
checkCudaErrors(cudaMemcpy(d_filebuf, h_filebuf, fileSize, cudaMemcpyHostToDevice));
endTimer("preprocess", &preTimer);
//----------------------------------------------
//map/reduce
//----------------------------------------------
MapReduce(spec);
endTimer("all", &allTimer);
//----------------------------------------------
//further processing
//----------------------------------------------
#ifdef __OUTPUT__
checkCudaErrors(cudaMemcpy(h_filebuf, d_filebuf, fileSize, cudaMemcpyDeviceToHost));
validate(h_filebuf, spec, 10);
#endif
//----------------------------------------------
//finish
//----------------------------------------------
FinishMapReduce(spec);
cudaFree(d_filebuf);
if(h_filebuf)
free(h_filebuf);
return 0;
}
|
a02196dce72ded0228745840dfe570c3348f5a68.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@precisions normal d
*/
#include "common_magma.h"
#include "commonblas_d.h"
__global__ void
dgemm_kernel_ab_0(double *C, const double *A, const double *B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y *16;
const int idt = ty * 16 + tx;
C += ibx +idt +__mul24(iby,ldc);
ibx = ibx+idt - m ;
if( (iby+16)>=n) {
lda = n-iby;
}
else {
lda = 16;
}
if( ibx >= 0 )
lda = 0 ;
else lda = lda ;
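	// lda is reused below as the number of columns of C this thread must zero: clamped to the remaining
	// columns at the right edge and forced to 0 when the row index is past m. The kernel only zeroes C
	// (the alpha == beta == 0 case); the switch unrolls the column loop.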
switch(lda){
case 16:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
C[ 9*ldc] =0;
C[10*ldc] =0;
C[11*ldc] =0;
C[12*ldc] =0;
C[13*ldc] =0;
C[14*ldc] =0;
C[15*ldc] =0;
break;
case 0:
break;
case 15:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
C[ 9*ldc] =0;
C[10*ldc] =0;
C[11*ldc] =0;
C[12*ldc] =0;
C[13*ldc] =0;
C[14*ldc] =0;
break;
case 14:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
C[ 9*ldc] =0;
C[10*ldc] =0;
C[11*ldc] =0;
C[12*ldc] =0;
C[13*ldc] =0;
break;
case 13:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
C[ 9*ldc] =0;
C[10*ldc] =0;
C[11*ldc] =0;
C[12*ldc] =0;
break;
case 12:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
C[ 9*ldc] =0;
C[10*ldc] =0;
C[11*ldc] =0;
break;
case 11:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
C[ 9*ldc] =0;
C[10*ldc] =0;
break;
case 10:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
C[ 9*ldc] =0;
break;
case 9:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
break;
case 8:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
break;
case 7:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
break;
case 6:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
break;
case 5:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
break;
case 4:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
break;
case 3:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
break;
case 2:
C[ 0 ] =0;
C[ 1*ldc] =0;
break;
case 1:
C[ 0 ] =0;
break;
}
}
extern "C" void
magmablas_dgemm_kernel_ab_0(double *C, const double *A, const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta)
{
dim3 threads( 16, 4 );
dim3 grid(m/64+(m%64!=0),n/16+(n%16!=0));
hipLaunchKernelGGL(( dgemm_kernel_ab_0), dim3(grid), dim3(threads), 0, magma_stream , C, A, B,
m, n, k,
lda, ldb, ldc,
alpha, beta);
}
|
a02196dce72ded0228745840dfe570c3348f5a68.cu
|
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@precisions normal d
*/
#include "common_magma.h"
#include "commonblas_d.h"
__global__ void
dgemm_kernel_ab_0(double *C, const double *A, const double *B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y *16;
const int idt = ty * 16 + tx;
C += ibx +idt +__mul24(iby,ldc);
ibx = ibx+idt - m ;
if( (iby+16)>=n) {
lda = n-iby;
}
else {
lda = 16;
}
if( ibx >= 0 )
lda = 0 ;
else lda = lda ;
switch(lda){
case 16:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
C[ 9*ldc] =0;
C[10*ldc] =0;
C[11*ldc] =0;
C[12*ldc] =0;
C[13*ldc] =0;
C[14*ldc] =0;
C[15*ldc] =0;
break;
case 0:
break;
case 15:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
C[ 9*ldc] =0;
C[10*ldc] =0;
C[11*ldc] =0;
C[12*ldc] =0;
C[13*ldc] =0;
C[14*ldc] =0;
break;
case 14:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
C[ 9*ldc] =0;
C[10*ldc] =0;
C[11*ldc] =0;
C[12*ldc] =0;
C[13*ldc] =0;
break;
case 13:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
C[ 9*ldc] =0;
C[10*ldc] =0;
C[11*ldc] =0;
C[12*ldc] =0;
break;
case 12:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
C[ 9*ldc] =0;
C[10*ldc] =0;
C[11*ldc] =0;
break;
case 11:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
C[ 9*ldc] =0;
C[10*ldc] =0;
break;
case 10:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
C[ 9*ldc] =0;
break;
case 9:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
C[ 8*ldc] =0;
break;
case 8:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
C[ 7*ldc] =0;
break;
case 7:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
C[ 6*ldc] =0;
break;
case 6:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
C[ 5*ldc] =0;
break;
case 5:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
C[ 4*ldc] =0;
break;
case 4:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
C[ 3*ldc] =0;
break;
case 3:
C[ 0 ] =0;
C[ 1*ldc] =0;
C[ 2*ldc] =0;
break;
case 2:
C[ 0 ] =0;
C[ 1*ldc] =0;
break;
case 1:
C[ 0 ] =0;
break;
}
}
extern "C" void
magmablas_dgemm_kernel_ab_0(double *C, const double *A, const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta)
{
dim3 threads( 16, 4 );
dim3 grid(m/64+(m%64!=0),n/16+(n%16!=0));
dgemm_kernel_ab_0<<< grid, threads, 0, magma_stream >>>(C, A, B,
m, n, k,
lda, ldb, ldc,
alpha, beta);
}
|
f16b232e32eb63be2f89f664369e42346c60e38c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
__global__ void add32(float* A, float* B, int size)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int idx = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (idx >= size) {
return;
}
A[idx] = A[idx] + B[idx];
}
#ifdef __cplusplus
}
#endif
|
f16b232e32eb63be2f89f664369e42346c60e38c.cu
|
#ifdef __cplusplus
extern "C" {
#endif
__global__ void add32(float* A, float* B, int size)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int idx = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (idx >= size) {
return;
}
A[idx] = A[idx] + B[idx];
}
#ifdef __cplusplus
}
#endif
|
5146417a031fd52579f5270dabb75c02c876f5ed.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/zlarfg-v2.cu, normal z -> d, Thu Oct 8 23:05:33 2020
*/
#include "magma_internal.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define REAL
__global__
void magma_dlarfg_gpu_kernel( int n, double* dx0, double* dx,
double *dtau, double *dxnorm, double* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ double scale;
double xnorm;
double dxi;
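    // Each thread owns dx[j]. When the norm is nonzero, thread 0 of each block computes beta, tau and the
    // shared scale factor; after the barrier all threads rescale their dx entry. Otherwise tau = 0 and H is the identity.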
#ifdef REAL
if ( n <= 1 )
#else
if ( n <= 0 )
#endif
{
*dtau = MAGMA_D_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
double alpha = *dx0;
#ifdef REAL
if ( xnorm != 0 ) {
if (i == 0) {
double beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
double alphar = MAGMA_D_REAL(alpha);
double alphai = MAGMA_D_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
double beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_D_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_D_MAKE(beta, 0.);
alpha = MAGMA_D_MAKE( MAGMA_D_REAL(alpha) - beta, MAGMA_D_IMAG(alpha));
scale = MAGMA_D_DIV( MAGMA_D_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_D_MUL(dxi, scale);
}
else {
*dtau = MAGMA_D_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
   with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's dlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_dlarfg_gpu(
magma_int_t n,
magmaDouble_ptr dx0,
magmaDouble_ptr dx,
magmaDouble_ptr dtau,
magmaDouble_ptr dxnorm,
magmaDouble_ptr dAkk,
magma_queue_t queue )
{
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_dnrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_dnrm2_cols(n-1, 1, dx0+1, n, dxnorm, queue);
hipLaunchKernelGGL(( magma_dlarfg_gpu_kernel)
, dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
n, dx0, dx, dtau, dxnorm, dAkk);
}
|
5146417a031fd52579f5270dabb75c02c876f5ed.cu
|
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/zlarfg-v2.cu, normal z -> d, Thu Oct 8 23:05:33 2020
*/
#include "magma_internal.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define REAL
__global__
void magma_dlarfg_gpu_kernel( int n, double* dx0, double* dx,
double *dtau, double *dxnorm, double* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ double scale;
double xnorm;
double dxi;
#ifdef REAL
if ( n <= 1 )
#else
if ( n <= 0 )
#endif
{
*dtau = MAGMA_D_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
double alpha = *dx0;
#ifdef REAL
if ( xnorm != 0 ) {
if (i == 0) {
double beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
double alphar = MAGMA_D_REAL(alpha);
double alphai = MAGMA_D_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
double beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_D_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_D_MAKE(beta, 0.);
alpha = MAGMA_D_MAKE( MAGMA_D_REAL(alpha) - beta, MAGMA_D_IMAG(alpha));
scale = MAGMA_D_DIV( MAGMA_D_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_D_MUL(dxi, scale);
}
else {
*dtau = MAGMA_D_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's dlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_dlarfg_gpu(
magma_int_t n,
magmaDouble_ptr dx0,
magmaDouble_ptr dx,
magmaDouble_ptr dtau,
magmaDouble_ptr dxnorm,
magmaDouble_ptr dAkk,
magma_queue_t queue )
{
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_dnrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_dnrm2_cols(n-1, 1, dx0+1, n, dxnorm, queue);
magma_dlarfg_gpu_kernel
<<< blocks, threads, 0, queue->cuda_stream() >>>
(n, dx0, dx, dtau, dxnorm, dAkk);
}
|
c83945c4af1c6a165deca67367a567e312d14c99.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/ClassNLLCriterion.cu"
#else
void THNN_(ClassNLLCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
bool sizeAverage,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index,
bool reduce) {
if (THCIndexTensor_(_nDimension)(state, target) > 1) {
THError("multi-target not supported");
}
int n_dims = THCTensor_(_nDimension)(state, input);
int n_classes = THCTensor_(size)(state, input, n_dims - 1);
ignore_index -= TH_INDEX_BASE;
if (weights) {
THCUNN_assertSameGPU(
state, 5, input, target, weights, output, total_weight
);
} else {
THCUNN_assertSameGPU(
state, 4, input, target, output, total_weight
);
}
THArgCheck(n_dims <= 2 && n_dims > 0, 2, "vector or matrix expected");
int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0);
int64_t num_targets = THCudaLongTensor_size(state, target, 0);
THArgCheck(batch_size == num_targets,
2, "mismatch between the batch size of input (%ld) and that of target (%ld)",
batch_size, num_targets);
if (weights && THCTensor_(nElement)(state, weights) != n_classes) {
THCDescBuff s1 = THCTensor_(sizeDesc)(state, weights);
THError("weight tensor should be defined either for all %d classes or no classes"
" but got weight tensor of shape: %s", n_classes, s1.str);
}
if (!reduce && n_dims == 2) {
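    // Non-reduced 2D case: emit one loss value per batch element with an elementwise kernel, then return early.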
THCTensor_(resize1d)(state, output, batch_size);
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
hipLaunchKernelGGL(( ClassNLLCriterion_updateOutput_no_reduce_kernel<real>)
, dim3(GET_BLOCKS(batch_size)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
batch_size,
toDeviceTensor<real, 2>(state, input),
toDeviceTensor<THCIndex_t, 1>(state, target),
toDeviceTensor<real, 1>(state, output),
weights ? THCTensor_(data)(state, weights) : NULL,
n_classes,
ignore_index);
THCudaCheck(hipGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
if (!reduce && n_dims <= 1) {
sizeAverage = false;
}
THCTensor_(resize1d)(state, output, 1);
THCTensor_(resize1d)(state, total_weight, 1);
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
real *input_data = THCTensor_(data)(state, input);
real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
real *output_data = THCTensor_(data)(state, output);
real *total_weight_data = THCTensor_(data)(state, total_weight);
if (THCTensor_(_nDimension)(state, input) == 1) {
hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateOutput_kernel1<real>)
, dim3(1), dim3(1), 0, THCState_getCurrentStream(state),
output_data,
total_weight_data,
input_data,
target_data,
weights_data,
sizeAverage,
n_classes,
ignore_index
);
} else if (THCTensor_(_nDimension)(state, input) == 2) {
hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateOutput_kernel<real, accreal>)
, dim3(1), dim3(NTHREADS), 0, THCState_getCurrentStream(state),
output_data,
total_weight_data,
input_data,
target_data,
weights_data,
sizeAverage,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
n_classes,
ignore_index
);
}
THCudaCheck(hipGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
}
void THNN_(ClassNLLCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
bool sizeAverage,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index,
bool reduce) {
if (THCIndexTensor_(_nDimension)(state, target) > 1) {
THError("multi-target not supported");
}
int n_dims = THCTensor_(_nDimension)(state, input);
int n_classes = THCTensor_(size)(state, input, n_dims - 1);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4, "gradInput must be contiguous");
if (weights) {
THCUNN_assertSameGPU(
state, 5, weights, input, target, gradInput, total_weight
);
}
else {
THCUNN_assertSameGPU(
state, 4, input, target, gradInput, total_weight
);
}
THArgCheck(n_dims <= 2 && n_dims > 0, 2, "vector or matrix expected");
int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0);
int64_t num_targets = THCudaLongTensor_size(state, target, 0);
THArgCheck(batch_size == num_targets,
2, "mismatch between the batch size of input (%ld) and that of target (%ld)",
batch_size, num_targets);
if (weights && THCTensor_(nElement)(state, weights) != n_classes) {
THError("weight tensor should be defined either for all or no classes");
}
if (!reduce && n_dims == 2) {
THCUNN_check_dim_size(state, gradOutput, 1, 0, batch_size);
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
hipLaunchKernelGGL(( ClassNLLCriterion_updateGradInput_no_reduce_kernel<real>)
, dim3(GET_BLOCKS(batch_size)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
batch_size,
toDeviceTensor<THCIndex_t, 1>(state, target),
toDeviceTensor<real, 1>(state, gradOutput),
toDeviceTensor<real, 2>(state, gradInput),
weights ? THCTensor_(data)(state, weights) : NULL,
n_classes,
ignore_index);
THCudaCheck(hipGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
if (!reduce && n_dims <= 1) {
sizeAverage = false;
}
ignore_index -= TH_INDEX_BASE;
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
real *gradOutput_data = THCTensor_(data)(state, gradOutput);
real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
real *gradInput_data = THCTensor_(data)(state, gradInput);
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
real *total_weight_data = THCTensor_(data)(state, total_weight);
if (THCTensor_(_nDimension)(state, input) == 1) {
hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateGradInput_kernel1<real>)
, dim3(1), dim3(1), 0, THCState_getCurrentStream(state),
gradInput_data,
gradOutput_data,
weights_data,
target_data,
total_weight_data,
sizeAverage,
n_classes,
ignore_index
);
} else {
hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateGradInput_kernel<real>)
, dim3(1), dim3(NTHREADS), 0, THCState_getCurrentStream(state),
gradInput_data,
gradOutput_data,
target_data,
weights_data,
total_weight_data,
sizeAverage,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
n_classes,
ignore_index
);
}
THCudaCheck(hipGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
THCIndexTensor_(free)(state, target);
}
#endif
|
c83945c4af1c6a165deca67367a567e312d14c99.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/ClassNLLCriterion.cu"
#else
void THNN_(ClassNLLCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
bool sizeAverage,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index,
bool reduce) {
if (THCIndexTensor_(_nDimension)(state, target) > 1) {
THError("multi-target not supported");
}
int n_dims = THCTensor_(_nDimension)(state, input);
int n_classes = THCTensor_(size)(state, input, n_dims - 1);
ignore_index -= TH_INDEX_BASE;
if (weights) {
THCUNN_assertSameGPU(
state, 5, input, target, weights, output, total_weight
);
} else {
THCUNN_assertSameGPU(
state, 4, input, target, output, total_weight
);
}
THArgCheck(n_dims <= 2 && n_dims > 0, 2, "vector or matrix expected");
int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0);
int64_t num_targets = THCudaLongTensor_size(state, target, 0);
THArgCheck(batch_size == num_targets,
2, "mismatch between the batch size of input (%ld) and that of target (%ld)",
batch_size, num_targets);
if (weights && THCTensor_(nElement)(state, weights) != n_classes) {
THCDescBuff s1 = THCTensor_(sizeDesc)(state, weights);
THError("weight tensor should be defined either for all %d classes or no classes"
" but got weight tensor of shape: %s", n_classes, s1.str);
}
if (!reduce && n_dims == 2) {
THCTensor_(resize1d)(state, output, batch_size);
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
ClassNLLCriterion_updateOutput_no_reduce_kernel<real>
<<<GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
batch_size,
toDeviceTensor<real, 2>(state, input),
toDeviceTensor<THCIndex_t, 1>(state, target),
toDeviceTensor<real, 1>(state, output),
weights ? THCTensor_(data)(state, weights) : NULL,
n_classes,
ignore_index);
THCudaCheck(cudaGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
if (!reduce && n_dims <= 1) {
sizeAverage = false;
}
THCTensor_(resize1d)(state, output, 1);
THCTensor_(resize1d)(state, total_weight, 1);
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
real *input_data = THCTensor_(data)(state, input);
real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
real *output_data = THCTensor_(data)(state, output);
real *total_weight_data = THCTensor_(data)(state, total_weight);
if (THCTensor_(_nDimension)(state, input) == 1) {
cunn_ClassNLLCriterion_updateOutput_kernel1<real>
<<<1, 1, 0, THCState_getCurrentStream(state)>>>(
output_data,
total_weight_data,
input_data,
target_data,
weights_data,
sizeAverage,
n_classes,
ignore_index
);
} else if (THCTensor_(_nDimension)(state, input) == 2) {
cunn_ClassNLLCriterion_updateOutput_kernel<real, accreal>
<<<1, NTHREADS, 0, THCState_getCurrentStream(state)>>>(
output_data,
total_weight_data,
input_data,
target_data,
weights_data,
sizeAverage,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
n_classes,
ignore_index
);
}
THCudaCheck(cudaGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
}
void THNN_(ClassNLLCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
bool sizeAverage,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index,
bool reduce) {
if (THCIndexTensor_(_nDimension)(state, target) > 1) {
THError("multi-target not supported");
}
int n_dims = THCTensor_(_nDimension)(state, input);
int n_classes = THCTensor_(size)(state, input, n_dims - 1);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4, "gradInput must be contiguous");
if (weights) {
THCUNN_assertSameGPU(
state, 5, weights, input, target, gradInput, total_weight
);
}
else {
THCUNN_assertSameGPU(
state, 4, input, target, gradInput, total_weight
);
}
THArgCheck(n_dims <= 2 && n_dims > 0, 2, "vector or matrix expected");
int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0);
int64_t num_targets = THCudaLongTensor_size(state, target, 0);
THArgCheck(batch_size == num_targets,
2, "mismatch between the batch size of input (%ld) and that of target (%ld)",
batch_size, num_targets);
if (weights && THCTensor_(nElement)(state, weights) != n_classes) {
THError("weight tensor should be defined either for all or no classes");
}
if (!reduce && n_dims == 2) {
THCUNN_check_dim_size(state, gradOutput, 1, 0, batch_size);
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
ClassNLLCriterion_updateGradInput_no_reduce_kernel<real>
<<<GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
batch_size,
toDeviceTensor<THCIndex_t, 1>(state, target),
toDeviceTensor<real, 1>(state, gradOutput),
toDeviceTensor<real, 2>(state, gradInput),
weights ? THCTensor_(data)(state, weights) : NULL,
n_classes,
ignore_index);
THCudaCheck(cudaGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
if (!reduce && n_dims <= 1) {
sizeAverage = false;
}
ignore_index -= TH_INDEX_BASE;
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
real *gradOutput_data = THCTensor_(data)(state, gradOutput);
real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
real *gradInput_data = THCTensor_(data)(state, gradInput);
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
real *total_weight_data = THCTensor_(data)(state, total_weight);
if (THCTensor_(_nDimension)(state, input) == 1) {
cunn_ClassNLLCriterion_updateGradInput_kernel1<real>
<<<1, 1, 0, THCState_getCurrentStream(state)>>>(
gradInput_data,
gradOutput_data,
weights_data,
target_data,
total_weight_data,
sizeAverage,
n_classes,
ignore_index
);
} else {
cunn_ClassNLLCriterion_updateGradInput_kernel<real>
<<<1, NTHREADS, 0, THCState_getCurrentStream(state)>>>(
gradInput_data,
gradOutput_data,
target_data,
weights_data,
total_weight_data,
sizeAverage,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
n_classes,
ignore_index
);
}
THCudaCheck(cudaGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
THCIndexTensor_(free)(state, target);
}
#endif
|
2fbd4758482135cb3365399686ce30b153c92df8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#define SIZE 9
__global__ void min(int *input)
{
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
int first = tid*step_size*2;
int second = first+step_size;
if(input[second]<input[first])
input[first]=input[second];
}
step_size*= 2;
number_of_threads/=2;
}
}
__global__ void max(int *input)
{
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
int first = tid*step_size*2;
int second = first+step_size;
if(input[second]>input[first])
input[first]=input[second];
}
step_size*= 2;
number_of_threads/=2;
}
}
__global__ void summation(int *input)
{
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
const int first = tid*step_size*2;
const int second = first+step_size;
input[first] += input[second];
}
step_size*=2;
number_of_threads/=2;
}
}
__global__ void average(int *input)
{
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
const int first = tid*step_size*2;
const int second = first+step_size;
input[first] += input[second];
}
step_size*=2;
number_of_threads/=2;
}
input[0] = input[0]/SIZE;
}
__global__ void standardDeviation(int *input,int mean)
{
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
int std = 0;
while(number_of_threads>0){
if(tid<number_of_threads){
const int first = tid*step_size*2;
const int second = first+step_size;
std = ((input[first]-mean)*(input[first]-mean))+((input[second]-mean)*(input[second]-mean));
}
step_size*=2;
number_of_threads/=2;
}
input[0] = std;
}
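// NOTE: these single-block tree reductions rely on implicit warp-synchronous
// execution (there is no __syncthreads() between halving steps) and assume a
// power-of-two element count; with SIZE == 9 the last element is skipped by
// min/max, and the SIZE-thread launches below can index past the end of the
// input array.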
int main()
{
int input[SIZE],i;
for( i = 0 ; i < SIZE ; i++)
{
input[i] = rand()% 100;
}
for( i = 0 ; i < SIZE ; i++)
{
printf("%d ",input[i]);
}
printf("\n");
int byte_size = SIZE*sizeof(int);
//Allocate mem for min
int *arr_min, result_min;
hipMalloc(&arr_min,byte_size);
hipMemcpy(arr_min,input,byte_size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( min), dim3(1),dim3(SIZE/2), 0, 0, arr_min);
hipMemcpy(&result_min,arr_min,sizeof(int),hipMemcpyDeviceToHost);
printf("Minimun: %d\n",result_min);
//Allocate mem for max
int *arr_max, result_max;
hipMalloc(&arr_max,byte_size);
hipMemcpy(arr_max,input,byte_size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( max), dim3(1),dim3(SIZE/2), 0, 0, arr_max);
hipMemcpy(&result_max,arr_max,sizeof(int),hipMemcpyDeviceToHost);
printf("Maximum: %d\n",result_max);
//Allocate mem for sum
int *arr_sum, sum;
hipMalloc(&arr_sum,byte_size);
hipMemcpy(arr_sum,input,byte_size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( summation), dim3(1),dim3(SIZE), 0, 0, arr_sum);
hipMemcpy(&sum,arr_sum,sizeof(int),hipMemcpyDeviceToHost);
printf("Sum: %d\n",sum);
//Allocate mem for avg
int *arr_avg, avg;
hipMalloc(&arr_avg,byte_size);
hipMemcpy(arr_avg,input,byte_size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( average), dim3(1),dim3(SIZE), 0, 0, arr_avg);
hipMemcpy(&avg,arr_avg,sizeof(int),hipMemcpyDeviceToHost);
printf("Average: %d\n",avg);
printf("CPUAVG: %d\n",(sum/SIZE));
//Allocate mem for std
int *arr_std, std;
const int mean = avg;
hipMalloc(&arr_std,byte_size);
hipMemcpy(arr_std,input,byte_size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( standardDeviation), dim3(1),dim3(SIZE), 0, 0, arr_std,mean);
hipMemcpy(&std,arr_std,sizeof(int),hipMemcpyDeviceToHost);
std = sqrt(std/10);
printf("Standard Deviation: %d\n",std);
return 0;
}
|
2fbd4758482135cb3365399686ce30b153c92df8.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <time.h>
#define SIZE 9
__global__ void min(int *input)
{
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
int first = tid*step_size*2;
int second = first+step_size;
if(input[second]<input[first])
input[first]=input[second];
}
step_size*= 2;
number_of_threads/=2;
}
}
__global__ void max(int *input)
{
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
int first = tid*step_size*2;
int second = first+step_size;
if(input[second]>input[first])
input[first]=input[second];
}
step_size*= 2;
number_of_threads/=2;
}
}
__global__ void summation(int *input)
{
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
const int first = tid*step_size*2;
const int second = first+step_size;
input[first] += input[second];
}
step_size*=2;
number_of_threads/=2;
}
}
__global__ void average(int *input)
{
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
const int first = tid*step_size*2;
const int second = first+step_size;
input[first] += input[second];
}
step_size*=2;
number_of_threads/=2;
}
input[0] = input[0]/SIZE;
}
__global__ void standardDeviation(int *input,int mean)
{
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
int std = 0;
while(number_of_threads>0){
if(tid<number_of_threads){
const int first = tid*step_size*2;
const int second = first+step_size;
std = ((input[first]-mean)*(input[first]-mean))+((input[second]-mean)*(input[second]-mean));
}
step_size*=2;
number_of_threads/=2;
}
input[0] = std;
}
int main()
{
int input[SIZE],i;
for( i = 0 ; i < SIZE ; i++)
{
input[i] = rand()% 100;
}
for( i = 0 ; i < SIZE ; i++)
{
printf("%d ",input[i]);
}
printf("\n");
int byte_size = SIZE*sizeof(int);
//Allocate mem for min
int *arr_min, result_min;
cudaMalloc(&arr_min,byte_size);
cudaMemcpy(arr_min,input,byte_size,cudaMemcpyHostToDevice);
min<<<1,SIZE/2>>>(arr_min);
cudaMemcpy(&result_min,arr_min,sizeof(int),cudaMemcpyDeviceToHost);
printf("Minimun: %d\n",result_min);
//Allocate mem for max
int *arr_max, result_max;
cudaMalloc(&arr_max,byte_size);
cudaMemcpy(arr_max,input,byte_size,cudaMemcpyHostToDevice);
max<<<1,SIZE/2>>>(arr_max);
cudaMemcpy(&result_max,arr_max,sizeof(int),cudaMemcpyDeviceToHost);
printf("Maximum: %d\n",result_max);
//Allocate mem for sum
int *arr_sum, sum;
cudaMalloc(&arr_sum,byte_size);
cudaMemcpy(arr_sum,input,byte_size,cudaMemcpyHostToDevice);
summation<<<1,SIZE>>>(arr_sum);
cudaMemcpy(&sum,arr_sum,sizeof(int),cudaMemcpyDeviceToHost);
printf("Sum: %d\n",sum);
//Allocate mem for avg
int *arr_avg, avg;
cudaMalloc(&arr_avg,byte_size);
cudaMemcpy(arr_avg,input,byte_size,cudaMemcpyHostToDevice);
average<<<1,SIZE>>>(arr_avg);
cudaMemcpy(&avg,arr_avg,sizeof(int),cudaMemcpyDeviceToHost);
printf("Average: %d\n",avg);
printf("CPUAVG: %d\n",(sum/SIZE));
//Allocate mem for std
int *arr_std, std;
const int mean = avg;
cudaMalloc(&arr_std,byte_size);
cudaMemcpy(arr_std,input,byte_size,cudaMemcpyHostToDevice);
standardDeviation<<<1,SIZE>>>(arr_std,mean);
cudaMemcpy(&std,arr_std,sizeof(int),cudaMemcpyDeviceToHost);
std = sqrt(std/10);
printf("Standard Deviation: %d\n",std);
return 0;
}
|
5eb91318db40c255696b3b462b2a03dfcc7c76c2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cuda_deque.h"
#include "implementations.h"
#include "io.h"
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "resources.h"
#include <timer.h>
#include "errors.h"
#include "cuda_schedule.h"
#include <iomanip>
#include <ctime>
#include <thrust/host_vector.h>
#include <thrust/generate.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#define ROUND_UP(N, S) (N%S == 0) ? N/S : N/S+1
#define BETWEEN(value, min, max) (value <= max && value >= min)
#define DEV_ID (0)
#define MIN_WIN_SIZE (3)
__global__ void lemire_one_thread(double *matrix, double *minval, double *maxval, int arrlen, int window_size);
__global__ void par_alg_inc_blocks(double *matrix, double *minval, double *maxval, int arrlen, int window_size);
__global__ void par_alg_thrust(thrust::device_ptr<double> matrix, double *minval, double *maxval, int arrlen, int window_size);
__device__ void print_matrixx(double *matrix, int length);
__global__ void cuda_print_arr(double *arr, size_t len);
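// Functor applied through thrust::for_each over a zip iterator: for every
// window start it runs thrust::min_element / max_element over the next
// window_size samples and writes the results into the min/max output slots.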
struct arbitrary_functor
{
//using namespace thrust;
int window_size;
template <typename Tuple>
__host__ __device__
void operator()(const Tuple &t)
{
using namespace thrust;
double* d_first = &get<0>(t);
double *min = min_element(device, d_first, d_first + window_size);
get<1>(t) = min[0];
double *max = max_element(device, d_first, d_first + window_size);
get<2>(t) = max[0];
}
};
__device__ void print_matrixx(double *matrix, int length)
{
assert(matrix != NULL);
__syncthreads();
if(blockIdx.x == 0 && threadIdx.x == 0){
/* image row */
for (int i = 0; i < length; i++){
printf("%.0f ", (matrix[i] == -0.0)? 0.0 : matrix[i]);
}
printf("\n");
}
}
double cuda_parallel_approach(cuda_matrix *matrix){
hipDeviceProp_t prop;
checkCudaErrors(hipGetDeviceProperties(&prop, DEV_ID));
int blocks = matrix->core_count,
threads = matrix->thread_count,
max_threads = prop.maxThreadsPerBlock,
max_sm = prop.multiProcessorCount;
assert(max_threads >= threads);
assert(max_sm >= blocks);
checkCudaErrors(hipDeviceSynchronize());
StartTimer();
hipLaunchKernelGGL(( par_alg_inc_blocks), dim3(blocks), dim3(threads), 0, 0, matrix->d_matrix, matrix->d_minval, matrix->d_maxval, matrix->arrlen, matrix->window_size);
double time_spent = GetTimer()/1000;
return time_spent;
}
#define DEC_FACTOR (10)
double thrust_approach(cuda_matrix *matrix) {
using namespace thrust; //we don't have to write thrust:: anymore
hipDeviceProp_t prop;
checkCudaErrors(hipGetDeviceProperties(&prop, DEV_ID));
assert(matrix->window_size >= MIN_WIN_SIZE);
checkCudaErrors(hipDeviceSynchronize());
StartTimer();
{
arbitrary_functor arb;
arb.window_size = matrix->window_size;
device_ptr<double> d_first = device_pointer_cast(matrix->d_matrix);
device_ptr<double> d_last = device_pointer_cast(matrix->d_matrix) + matrix->arrlen - matrix->window_size + 1;
device_ptr<double> min_first = device_pointer_cast(matrix->d_minval);
device_ptr<double> min_last = device_pointer_cast(matrix->d_minval) + matrix->arrlen - matrix->window_size + 1;
device_ptr<double> max_first = device_pointer_cast(matrix->d_maxval);
device_ptr<double> max_last = device_pointer_cast(matrix->d_maxval) + matrix->arrlen - matrix->window_size + 1;
for_each(make_zip_iterator(make_tuple(d_first, min_first, max_first)),
make_zip_iterator(make_tuple(d_last, min_last, max_last)),
arb
);
checkCudaErrors(hipDeviceSynchronize());
};
double time_spent = GetTimer()/1000;
return time_spent;
}
double streams_approach(cuda_matrix *matrix) {
hipError_t error;
//hipDeviceProp_t prop;
//checkCudaErrors(hipGetDeviceProperties(&prop, DEV_ID));
StartTimer();
size_t offset = CHUNK_SIZE - matrix->window_size + 1;
size_t stream_cnt = ROUND_UP(matrix->arrlen, offset);
hipStream_t streams[stream_cnt];
for(int i = 0; i < stream_cnt; ++i) {
error = hipStreamCreate(&(streams[i]));
checkCudaErrors(error);
}
//print_matrix(matrix->h_matrix, matrix->arrlen);
for (int i = 0; i < stream_cnt; ++i)
{
size_t tmp = offset*i,
data_size = (matrix->arrlen - tmp > CHUNK_SIZE) ? CHUNK_SIZE : matrix->arrlen - tmp;
//printf("%d %d\n", tmp, data_size);
error = hipMemcpyAsync(matrix->d_matrix+tmp, matrix->h_matrix+tmp, data_size*sizeof(double), hipMemcpyHostToDevice, streams[i]);
checkCudaErrors(error);
hipLaunchKernelGGL(( par_alg_inc_blocks), dim3(matrix->core_count), dim3(matrix->thread_count), 0, streams[i], matrix->d_matrix+tmp, matrix->d_minval+tmp, matrix->d_maxval+tmp, data_size, matrix->window_size);
}
/*for (int i = 0; i < stream_cnt; ++i)
{
size_t tmp = offset*i,
data_size = (i < stream_cnt-1) ? CHUNK_SIZE : CHUNK_SIZE + matrix->arrlen % CHUNK_SIZE;
}*/
for (int i = 0; i < stream_cnt; ++i)
hipStreamSynchronize(streams[i]);
//cuda_print_arr<<<1,1>>>(matrix->d_matrix, matrix->arrlen);
double time = GetTimer()/1000;
return time;
}
double sequential_approach(cuda_matrix *matrix){
hipDeviceProp_t prop;
checkCudaErrors(hipGetDeviceProperties(&prop, DEV_ID));
int blocks,
threads = matrix->thread_count;
//max_threads = prop.maxThreadsPerBlock,
//max_sm = prop.multiProcessorCount;
blocks = matrix->core_count;
//assert(max_threads >= threads);
//assert(max_sm >= blocks);
//print_matrix(matrix->h_matrix, matrix->arrlen);
blocks = 1;
threads = 1;
checkCudaErrors(hipDeviceSynchronize());
StartTimer();
{
hipLaunchKernelGGL(( lemire_one_thread), dim3(blocks), dim3(threads), 0, 0, matrix->d_matrix, matrix->d_minval, matrix->d_maxval, matrix->arrlen, matrix->window_size);
checkCudaErrors(hipDeviceSynchronize());
hipError_t error = hipMemcpy(matrix->h_maxval, matrix->d_maxval, matrix->arrlen*sizeof(double), hipMemcpyDeviceToHost);
checkCudaErrors(error);
error = hipMemcpy(matrix->h_minval, matrix->d_minval, matrix->arrlen*sizeof(double), hipMemcpyDeviceToHost);
checkCudaErrors(error);
};
double time = GetTimer()/1000;
//print_matrix(matrix->h_minval, matrix->arrlen);
//print_matrix(matrix->h_maxval, matrix->arrlen);
printf("Lemire alg time: %f\n", time);
return time;
}
#define MIN_WIN_SIZE (3)
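// Single-thread implementation of Lemire's streaming min/max filter: the
// deques U and L keep indices of the current window's maximum and minimum
// candidates, so every element is pushed and popped at most once and the
// whole pass is O(arrlen) regardless of window_size.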
__global__ void lemire_one_thread(double *matrix, double *minval, double *maxval, int arrlen, int window_size){
assert(window_size >= MIN_WIN_SIZE);
cuda_deque U, L;
double *a = matrix;
if (threadIdx.x == 0 && blockIdx.x == 0) //only one thread
{
for(u_int i = 1; i < arrlen; ++i){
if(i >= window_size){
maxval[i-window_size] = a[U.size() > 0 ? U.front():i-1];
minval[i-window_size] = a[L.size() > 0 ? L.front():i-1];
}
if(a[i] > a[i-1]){
L.push_back(i-1);
if(i == window_size + L.front())
L.pop_front();
while(U.size() > 0){
if(a[i] <= a[U.back()]){
if(i == window_size + U.front())
U.pop_front();
break;
}
U.pop_back();
}
}else{
U.push_back(i-1);
if(i == window_size + U.front())
U.pop_front();
while(L.size() > 0){
if(a[i] >= a[L.back()]){
if(i == window_size + L.front())
L.pop_front();
break;
}
L.pop_back();
}
}
}
maxval[arrlen - window_size] = a[U.size() > 0 ? U.front() : arrlen-1];
minval[arrlen - window_size] = a[L.size() > 0 ? L.front() : arrlen-1];
}
}
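// Brute-force parallel variant: each thread takes one window start position
// (grid-stride loop over addr_offs) and scans window_size samples for its
// min/max, trading O(arrlen * window_size) work for full parallelism.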
__global__ void par_alg_inc_blocks(double *matrix, double *minval, double *maxval, int arrlen, int window_size){
int tid = threadIdx.x,
bid = blockIdx.x;
assert(window_size >= MIN_WIN_SIZE);
int addr_offs = tid + bid*blockDim.x;
while(addr_offs+window_size < arrlen + 1) {
double min, max;
assert(addr_offs < arrlen);
min = max = matrix[addr_offs];
for (int i = addr_offs + 1; i < addr_offs + window_size; ++i)
{
assert(i < arrlen);
min = (matrix[i] < min)? matrix[i] : min;
max = (matrix[i] > max)? matrix[i] : max;
}
//assert(minval[addr_offs] == 0.0); //shows if there is overlapping
//assert(maxval[addr_offs] == 0.0); //shows if there is overlapping
minval[addr_offs] = min;
maxval[addr_offs] = max;
addr_offs += blockDim.x*gridDim.x;
}
//print_matrixx(minval, 15);
}
__global__ void cuda_print_arr(double *arr, size_t len){
print_matrixx(arr, len);
}
|
5eb91318db40c255696b3b462b2a03dfcc7c76c2.cu
|
#include "cuda_deque.h"
#include "implementations.h"
#include "io.h"
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <time.h>
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "resources.h"
#include <timer.h>
#include "errors.h"
#include "cuda_schedule.h"
#include <iomanip>
#include <ctime>
#include <thrust/host_vector.h>
#include <thrust/generate.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#define ROUND_UP(N, S) (N%S == 0) ? N/S : N/S+1
#define BETWEEN(value, min, max) (value <= max && value >= min)
#define DEV_ID (0)
#define MIN_WIN_SIZE (3)
__global__ void lemire_one_thread(double *matrix, double *minval, double *maxval, int arrlen, int window_size);
__global__ void par_alg_inc_blocks(double *matrix, double *minval, double *maxval, int arrlen, int window_size);
__global__ void par_alg_thrust(thrust::device_ptr<double> matrix, double *minval, double *maxval, int arrlen, int window_size);
__device__ void print_matrixx(double *matrix, int length);
__global__ void cuda_print_arr(double *arr, size_t len);
struct arbitrary_functor
{
//using namespace thrust;
int window_size;
template <typename Tuple>
__host__ __device__
void operator()(const Tuple &t)
{
using namespace thrust;
double* d_first = &get<0>(t);
double *min = min_element(device, d_first, d_first + window_size);
get<1>(t) = min[0];
double *max = max_element(device, d_first, d_first + window_size);
get<2>(t) = max[0];
}
};
__device__ void print_matrixx(double *matrix, int length)
{
assert(matrix != NULL);
__syncthreads();
if(blockIdx.x == 0 && threadIdx.x == 0){
/* image row */
for (int i = 0; i < length; i++){
printf("%.0f ", (matrix[i] == -0.0)? 0.0 : matrix[i]);
}
printf("\n");
}
}
double cuda_parallel_approach(cuda_matrix *matrix){
cudaDeviceProp prop;
checkCudaErrors(cudaGetDeviceProperties(&prop, DEV_ID));
int blocks = matrix->core_count,
threads = matrix->thread_count,
max_threads = prop.maxThreadsPerBlock,
max_sm = prop.multiProcessorCount;
assert(max_threads >= threads);
assert(max_sm >= blocks);
checkCudaErrors(cudaDeviceSynchronize());
StartTimer();
par_alg_inc_blocks<<<blocks, threads>>>(matrix->d_matrix, matrix->d_minval, matrix->d_maxval, matrix->arrlen, matrix->window_size);
double time_spent = GetTimer()/1000;
return time_spent;
}
#define DEC_FACTOR (10)
double thrust_approach(cuda_matrix *matrix) {
using namespace thrust; //we don't have to write thrust:: anymore
cudaDeviceProp prop;
checkCudaErrors(cudaGetDeviceProperties(&prop, DEV_ID));
assert(matrix->window_size >= MIN_WIN_SIZE);
checkCudaErrors(cudaDeviceSynchronize());
StartTimer();
{
arbitrary_functor arb;
arb.window_size = matrix->window_size;
device_ptr<double> d_first = device_pointer_cast(matrix->d_matrix);
device_ptr<double> d_last = device_pointer_cast(matrix->d_matrix) + matrix->arrlen - matrix->window_size + 1;
device_ptr<double> min_first = device_pointer_cast(matrix->d_minval);
device_ptr<double> min_last = device_pointer_cast(matrix->d_minval) + matrix->arrlen - matrix->window_size + 1;
device_ptr<double> max_first = device_pointer_cast(matrix->d_maxval);
device_ptr<double> max_last = device_pointer_cast(matrix->d_maxval) + matrix->arrlen - matrix->window_size + 1;
for_each(make_zip_iterator(make_tuple(d_first, min_first, max_first)),
make_zip_iterator(make_tuple(d_last, min_last, max_last)),
arb
);
checkCudaErrors(cudaDeviceSynchronize());
};
double time_spent = GetTimer()/1000;
return time_spent;
}
double streams_approach(cuda_matrix *matrix) {
cudaError error;
//cudaDeviceProp prop;
//checkCudaErrors(cudaGetDeviceProperties(&prop, DEV_ID));
StartTimer();
size_t offset = CHUNK_SIZE - matrix->window_size + 1;
size_t stream_cnt = ROUND_UP(matrix->arrlen, offset);
cudaStream_t streams[stream_cnt];
for(int i = 0; i < stream_cnt; ++i) {
error = cudaStreamCreate(&(streams[i]));
checkCudaErrors(error);
}
//print_matrix(matrix->h_matrix, matrix->arrlen);
for (int i = 0; i < stream_cnt; ++i)
{
size_t tmp = offset*i,
data_size = (matrix->arrlen - tmp > CHUNK_SIZE) ? CHUNK_SIZE : matrix->arrlen - tmp;
//printf("%d %d\n", tmp, data_size);
error = cudaMemcpyAsync(matrix->d_matrix+tmp, matrix->h_matrix+tmp, data_size*sizeof(double), cudaMemcpyHostToDevice, streams[i]);
checkCudaErrors(error);
par_alg_inc_blocks<<<matrix->core_count, matrix->thread_count, 0, streams[i]>>>(matrix->d_matrix+tmp, matrix->d_minval+tmp, matrix->d_maxval+tmp, data_size, matrix->window_size);
}
/*for (int i = 0; i < stream_cnt; ++i)
{
size_t tmp = offset*i,
data_size = (i < stream_cnt-1) ? CHUNK_SIZE : CHUNK_SIZE + matrix->arrlen % CHUNK_SIZE;
}*/
for (int i = 0; i < stream_cnt; ++i)
cudaStreamSynchronize(streams[i]);
//cuda_print_arr<<<1,1>>>(matrix->d_matrix, matrix->arrlen);
double time = GetTimer()/1000;
return time;
}
double sequential_approach(cuda_matrix *matrix){
cudaDeviceProp prop;
checkCudaErrors(cudaGetDeviceProperties(&prop, DEV_ID));
int blocks,
threads = matrix->thread_count;
//max_threads = prop.maxThreadsPerBlock,
//max_sm = prop.multiProcessorCount;
blocks = matrix->core_count;
//assert(max_threads >= threads);
//assert(max_sm >= blocks);
//print_matrix(matrix->h_matrix, matrix->arrlen);
blocks = 1;
threads = 1;
checkCudaErrors(cudaDeviceSynchronize());
StartTimer();
{
lemire_one_thread<<<blocks, threads>>>(matrix->d_matrix, matrix->d_minval, matrix->d_maxval, matrix->arrlen, matrix->window_size);
checkCudaErrors(cudaDeviceSynchronize());
cudaError error = cudaMemcpy(matrix->h_maxval, matrix->d_maxval, matrix->arrlen*sizeof(double), cudaMemcpyDeviceToHost);
checkCudaErrors(error);
error = cudaMemcpy(matrix->h_minval, matrix->d_minval, matrix->arrlen*sizeof(double), cudaMemcpyDeviceToHost);
checkCudaErrors(error);
};
double time = GetTimer()/1000;
//print_matrix(matrix->h_minval, matrix->arrlen);
//print_matrix(matrix->h_maxval, matrix->arrlen);
printf("Lemire alg time: %f\n", time);
return time;
}
#define MIN_WIN_SIZE (3)
__global__ void lemire_one_thread(double *matrix, double *minval, double *maxval, int arrlen, int window_size){
assert(window_size >= MIN_WIN_SIZE);
cuda_deque U, L;
double *a = matrix;
if (threadIdx.x == 0 && blockIdx.x == 0) //only one thread
{
for(u_int i = 1; i < arrlen; ++i){
if(i >= window_size){
maxval[i-window_size] = a[U.size() > 0 ? U.front():i-1];
minval[i-window_size] = a[L.size() > 0 ? L.front():i-1];
}
if(a[i] > a[i-1]){
L.push_back(i-1);
if(i == window_size + L.front())
L.pop_front();
while(U.size() > 0){
if(a[i] <= a[U.back()]){
if(i == window_size + U.front())
U.pop_front();
break;
}
U.pop_back();
}
}else{
U.push_back(i-1);
if(i == window_size + U.front())
U.pop_front();
while(L.size() > 0){
if(a[i] >= a[L.back()]){
if(i == window_size + L.front())
L.pop_front();
break;
}
L.pop_back();
}
}
}
maxval[arrlen - window_size] = a[U.size() > 0 ? U.front() : arrlen-1];
minval[arrlen - window_size] = a[L.size() > 0 ? L.front() : arrlen-1];
}
}
__global__ void par_alg_inc_blocks(double *matrix, double *minval, double *maxval, int arrlen, int window_size){
int tid = threadIdx.x,
bid = blockIdx.x;
assert(window_size >= MIN_WIN_SIZE);
int addr_offs = tid + bid*blockDim.x;
while(addr_offs+window_size < arrlen + 1) {
double min, max;
assert(addr_offs < arrlen);
min = max = matrix[addr_offs];
for (int i = addr_offs + 1; i < addr_offs + window_size; ++i)
{
assert(i < arrlen);
min = (matrix[i] < min)? matrix[i] : min;
max = (matrix[i] > max)? matrix[i] : max;
}
//assert(minval[addr_offs] == 0.0); //shows if there is overlapping
//assert(maxval[addr_offs] == 0.0); //shows if there is overlapping
minval[addr_offs] = min;
maxval[addr_offs] = max;
addr_offs += blockDim.x*gridDim.x;
}
//print_matrixx(minval, 15);
}
__global__ void cuda_print_arr(double *arr, size_t len){
print_matrixx(arr, len);
}
|
39b349cd4c0f45def9a94cdb1bba878308b3e1db.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "htgrappa.h"
#include "cuNDArray_elemwise.h"
#include "cuNDFFT.h"
#include "GadgetronTimer.h"
#include "GPUTimer.h"
#include "cuNDArray_elemwise.h"
#include "CUBLASContextProvider.h"
#include "hoNDArray_fileio.h"
#include <rocblas.h>
//#include <cula_lapack_device.h>
#include <iostream>
namespace Gadgetron {
static int2 vec_to_int2(std::vector<unsigned int> vec)
{
int2 ret; ret.x = 0; ret.y = 0;
if (vec.size() < 2) {
GDEBUG_STREAM("vec_to_uint2 dimensions of vector too small" << std::endl);
return ret;
}
ret.x = vec[0]; ret.y = vec[1];
return ret;
}
template <class T> static int write_cuNDArray_to_disk(cuNDArray<T>* a, const char* filename)
{
boost::shared_ptr< hoNDArray<T> > host = a->to_host();
write_nd_array<complext<float> >(host.get(), filename);
return 0;
}
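// Builds the GRAPPA calibration system for one undersampling set: each thread
// handles one k-space location inside the region of support and writes one
// row of the (column-major, klocations-row) system matrix from the source-coil
// neighbourhood, plus the corresponding acquired target-coil samples into b.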
template <class T> __global__ void form_grappa_system_matrix_kernel_2d(const T* __restrict__ ref_data,
int2 dims,
int source_coils,
int target_coils,
int2 ros,
int2 ros_offset,
int2 kernel_size,
int acceleration_factor,
int set_number,
T* __restrict__ out_matrix,
T* __restrict__ b)
{
long idx_in = blockIdx.x*blockDim.x+threadIdx.x;
int klocations = ros.x*ros.y;
int image_elements = dims.x*dims.y;
//int coefficients = kernel_size.x*kernel_size.y*coils;
if (idx_in < klocations) {
//unsigned int y = idx_in/ros.x;
//unsigned int x = idx_in - y*ros.x;
unsigned int x = idx_in/ros.y;
unsigned int y = idx_in - x*ros.y;
unsigned int idx_ref = 0;
unsigned int coeff_counter = 0;
int kernel_size_x = kernel_size.x;
int kernel_size_y = kernel_size.y;
for (int c = 0; c < source_coils; c++) {
for (int ky = -((kernel_size_y*acceleration_factor)>>1)+set_number+1;
ky < ((kernel_size_y*acceleration_factor+1)>>1); ky+=acceleration_factor) {
for (int kx = -(kernel_size_x>>1); kx < ((kernel_size_x+1)>>1); kx++) {
idx_ref = c*image_elements + x+kx+ros_offset.x + (y+ky+ros_offset.y)*dims.x;
//out_matrix[idx_in*coefficients+coeff_counter++] = ref_data[idx_ref];
out_matrix[idx_in+(coeff_counter++)*klocations] = ref_data[idx_ref];
}
}
}
//Loop over target coils here
for (unsigned int c = 0; c < target_coils; c++) {
//b[idx_in*coils + c] = ref_data[c*image_elements + y*dims.x+x];
b[idx_in + c*klocations] = ref_data[c*image_elements + (y+ros_offset.y)*dims.x+(x+ros_offset.x)];
}
}
}
//TODO: This should take source and target coils into consideration
template <class T> __global__ void copy_grappa_coefficients_to_kernel_2d(const T* __restrict__ coeffs,
T* __restrict__ kernel,
int source_coils,
int target_coils,
int2 kernel_size,
int acceleration_factor,
int set)
{
unsigned long idx_in = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int coefficients_in_set = source_coils*kernel_size.x*kernel_size.y*target_coils;
if (idx_in < coefficients_in_set) {
int idx_in_tmp = idx_in;
int kx = idx_in%kernel_size.x;
idx_in = (idx_in-kx)/kernel_size.x;
int ky = idx_in%kernel_size.y;
idx_in = (idx_in-ky)/kernel_size.y;
int coil = idx_in%source_coils;
idx_in = (idx_in-coil)/source_coils;
int coilg = idx_in;
kernel[coilg*source_coils*(kernel_size.y*acceleration_factor)*kernel_size.x +
coil*(kernel_size.y*acceleration_factor)*kernel_size.x +
(ky*acceleration_factor + set + 1)*kernel_size.x + kx] = coeffs[idx_in_tmp];
if ((coil == coilg) && (kx == 0) && (ky == 0) && (set == 0)) {
kernel[coilg*source_coils*(kernel_size.y*acceleration_factor)*kernel_size.x +
coil*(kernel_size.y*acceleration_factor)*kernel_size.x +
((kernel_size.y>>1)*acceleration_factor)*kernel_size.x + (kernel_size.x>>1) ]._real = 1;
}
}
}
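// Places the (flipped) GRAPPA kernel at the centre of a full-size k-space
// array so that the subsequent inverse FFT turns the k-space convolution into
// a per-pixel multiplication in image space.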
template <class T> __global__ void copy_grappa_kernel_to_kspace_2d(const T* __restrict__ kernel,
T* __restrict__ out,
int2 dims,
int2 kernel_size,
int coils)
{
unsigned long idx_in = blockIdx.x*blockDim.x+threadIdx.x;
if (idx_in < kernel_size.x*kernel_size.y*coils) {
int idx_in_tmp = idx_in;
int kx = idx_in%kernel_size.x;
idx_in = (idx_in-kx)/kernel_size.x;
int ky = idx_in%kernel_size.y;
idx_in = (idx_in-ky)/kernel_size.y;
int coil = idx_in;
int outx = -(kx- (kernel_size.x>>1)) + (dims.x>>1); //Flipping the kernel for conv
int outy = -(ky- (kernel_size.y>>1)) + (dims.y>>1);
out[coil*dims.x*dims.y + outy*dims.x + outx] = kernel[idx_in_tmp];
}
}
__global__ void scale_and_add_unmixing_coeffs(const complext<float> * __restrict__ unmixing,
const complext<float> * __restrict__ csm,
complext<float> * __restrict__ out,
int elements,
int coils,
float scale_factor)
{
unsigned long idx_in = blockIdx.x*blockDim.x+threadIdx.x;
complext<float> tmp;
if (idx_in < elements) {
for (int c = 0; c < coils; c++) {
tmp = unmixing[c*elements + idx_in]*conj(csm[idx_in]);
out[c*elements + idx_in] += scale_factor*tmp;
}
}
}
__global__ void scale_and_copy_unmixing_coeffs(const complext<float> * __restrict__ unmixing,
complext<float> * __restrict__ out,
int elements,
int coils,
float scale_factor)
{
unsigned long idx_in = blockIdx.x*blockDim.x+threadIdx.x;
if (idx_in < elements) {
for (int c = 0; c < coils; c++) {
out[c*elements + idx_in] = scale_factor*unmixing[c*elements + idx_in];
}
}
}
__global__ void conj_csm_coeffs(const complext<float> * __restrict__ csm,
complext<float> * __restrict__ out,
int source_elements,
int target_elements)
{
//TODO: Here we need to have both src_elements and target_elements and we use conj(csm) for all target_elements and 0.0 when element > target_elements
unsigned long idx_in = blockIdx.x*blockDim.x+threadIdx.x;
if (idx_in < source_elements) {
if (idx_in >= target_elements) {
out[idx_in] = complext<float> (0.0,0.0);
} else {
out[idx_in] = conj(csm[idx_in]);
}
}
}
__global__ void single_channel_coeffs(complext<float> * out,
int channel_no,
int elements_per_channel)
{
unsigned long idx_in = blockIdx.x*blockDim.x+threadIdx.x;
if (idx_in < elements_per_channel) {
out[idx_in + channel_no*elements_per_channel] = complext<float>(1.0,0.0);
}
}
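// Computes GRAPPA unmixing coefficients: for every undersampling set it forms
// the calibration system A,b, builds AHA = A^H A and AHrhs = A^H b with
// cuBLAS, adds diagonal (Tikhonov-style) loading, solves on the CPU via
// ht_grappa_solve_spd_system and scatters the result into a convolution
// kernel; the padded kernel is then inverse-FFT'd and combined with conj(csm)
// into image-space unmixing maps (plus optional uncombined channels).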
template <class T> int htgrappa_calculate_grappa_unmixing(cuNDArray<T>* ref_data,
cuNDArray<T>* b1,
unsigned int acceleration_factor,
std::vector<unsigned int>* kernel_size,
cuNDArray<T>* out_mixing_coeff,
std::vector< std::pair<unsigned int, unsigned int> >* sampled_region,
std::list< unsigned int >* uncombined_channels)
{
if (ref_data->get_number_of_dimensions() != b1->get_number_of_dimensions()) {
std::cerr << "htgrappa_calculate_grappa_unmixing: Dimensions mismatch" << std::endl;
return -1;
}
for (unsigned int i = 0; i < (ref_data->get_number_of_dimensions()-1); i++) {
if (ref_data->get_size(i) != b1->get_size(i)) {
std::cerr << "htgrappa_calculate_grappa_unmixing: Dimensions mismatch" << std::endl;
return -1;
}
}
unsigned int RO = ref_data->get_size(0);
unsigned int E1 = ref_data->get_size(1);
unsigned int source_coils = ref_data->get_size(ref_data->get_number_of_dimensions()-1);
unsigned int target_coils = b1->get_size(b1->get_number_of_dimensions()-1);
unsigned int elements_per_coil = b1->get_number_of_elements()/target_coils;
if (target_coils > source_coils) {
std::cerr << "target_coils > source_coils" << std::endl;
return -1;
}
if (acceleration_factor == 1) {
dim3 blockDim(512,1,1);
dim3 gridDim((unsigned int) ::ceil((1.0f*elements_per_coil*source_coils)/blockDim.x), 1, 1 );
hipLaunchKernelGGL(( conj_csm_coeffs), dim3(gridDim), dim3(blockDim) , 0, 0, b1->get_data_ptr(),
out_mixing_coeff->get_data_ptr(),
out_mixing_coeff->get_number_of_elements(),
b1->get_number_of_elements());
std::list<unsigned int>::iterator it;
gridDim = dim3((unsigned int) ::ceil((1.0f*(elements_per_coil))/blockDim.x), 1, 1 );
int uncombined_channel_no = 0;
if (uncombined_channels) {
for ( it = uncombined_channels->begin(); it != uncombined_channels->end(); it++ ) {
uncombined_channel_no++;
//TODO: Adjust pointers to reflect that number of target/source may not be equal
hipLaunchKernelGGL(( single_channel_coeffs), dim3(gridDim), dim3(blockDim) , 0, 0, out_mixing_coeff->get_data_ptr() + uncombined_channel_no*source_coils*elements_per_coil,
*it,
(elements_per_coil));
}
}
return 0;
}
if (kernel_size->size() != (ref_data->get_number_of_dimensions()-1)) {
std::cerr << "htgrappa_calculate_grappa_unmixing: Kernel size does not match the data dimensions" << std::endl;
return -1;
}
if (ref_data->get_number_of_dimensions() > 3) {
std::cerr << "htgrappa_calculate_grappa_unmixing: Not yet implemented for 3D" << std::endl;
return -1;
}
//Calculate region of support + offsets
std::vector<size_t> rosTmp = *ref_data->get_dimensions();
std::vector<unsigned int> ros(rosTmp.size());
for ( unsigned int ii=0; ii<rosTmp.size(); ii++ ){
ros[ii] = rosTmp[ii];
}
ros.pop_back(); //Remove the number of coils
std::vector<unsigned int> ros_offset(ref_data->get_number_of_dimensions(),0);
unsigned long int kspace_locations = 1;
if (sampled_region) {
for (unsigned int i = 0; i < ros.size(); i++) {
if (i > 0) {
ros[i] = (*sampled_region)[i].second-(*sampled_region)[i].first-((*kernel_size)[i]*acceleration_factor);
} else {
ros[i] = (*sampled_region)[i].second-(*sampled_region)[i].first-(*kernel_size)[i];
}
ros_offset[i] = (*sampled_region)[i].first+(((*sampled_region)[i].second-(*sampled_region)[i].first-ros[i])>>1);
kspace_locations *= ros[i];
}
} else {
for (unsigned int i = 0; i < ros.size(); i++) {
if (i > 0) {
ros[i] -= ((*kernel_size)[i]*acceleration_factor);
} else {
ros[i] -= (*kernel_size)[i];
}
ros_offset[i] = (ref_data->get_size(i)-ros[i])>>1;
kspace_locations *= ros[i];
}
}
/*
for (unsigned int i = 0; i < ros.size(); i++) {
GDEBUG_STREAM("ROS[" << i << "] = " << ros[i] << " + " << ros_offset[i] << std::endl);
}
*/
std::vector<size_t> sys_matrix_size;
sys_matrix_size.push_back(kspace_locations);
sys_matrix_size.push_back(source_coils*(*kernel_size)[0]*(*kernel_size)[1]);
std::vector<size_t> b_size;
b_size.push_back(kspace_locations);
b_size.push_back(target_coils);
cuNDArray<T> system_matrix = cuNDArray<T>(&sys_matrix_size);
clear(&system_matrix);
cuNDArray<T> b = cuNDArray<T>(&b_size);
boost::shared_ptr< std::vector<size_t> > dimTmp = ref_data->get_dimensions();
std::vector<unsigned int> dimInt(2, 0);
dimInt[0] = (*dimTmp)[0];
dimInt[1] = (*dimTmp)[1];
int2 dims = vec_to_int2(dimInt);
int2 dros = vec_to_int2(ros);
int2 dros_offset = vec_to_int2(ros_offset);
int2 dkernel_size = vec_to_int2(*kernel_size);
//TODO: Use source coils here
int n = source_coils*(*kernel_size)[0]*(*kernel_size)[1];
int m = kspace_locations;
std::vector<size_t> AHA_dims(2,n);
cuNDArray<T> AHA = cuNDArray<T>(&AHA_dims);
cuNDArray<T> AHA_set0 = cuNDArray<T>(&AHA_dims);
hoNDArray<T> AHA_host(n, n);
float2* pAHA = (float2*) AHA_host.get_data_ptr();
//TODO: Use target coils here
std::vector<size_t> AHrhs_dims;
AHrhs_dims.push_back(n);
AHrhs_dims.push_back(target_coils);
cuNDArray<T> AHrhs = cuNDArray<T>(&AHrhs_dims);
hipblasHandle_t handle = *CUBLASContextProvider::instance()->getCublasHandle();
std::vector<size_t> gkernel_dims;
gkernel_dims.push_back((*kernel_size)[0]);
gkernel_dims.push_back((*kernel_size)[1]*acceleration_factor);
gkernel_dims.push_back(source_coils);
gkernel_dims.push_back(target_coils);
cuNDArray<T> gkernel = cuNDArray<T>(&gkernel_dims);
clear(&gkernel);
//GadgetronTimer timer;
for (unsigned int set = 0; set < acceleration_factor-1; set++)
{
//GDEBUG_STREAM("Calculating coefficients for set " << set << std::endl);
//GDEBUG_STREAM("dros.x = " << dros.x << ", dros.y = " << dros.y << std::endl);
std::ostringstream ostr;
ostr << "Set_" << set << "_";
std::string appendix = ostr.str();
dim3 blockDim(512,1,1);
dim3 gridDim((unsigned int) ::ceil((1.0f*kspace_locations)/blockDim.x), 1, 1 );
hipLaunchKernelGGL(( form_grappa_system_matrix_kernel_2d), dim3(gridDim), dim3(blockDim) , 0, 0, ref_data->get_data_ptr(), dims,
source_coils, target_coils, dros, dros_offset,
dkernel_size, acceleration_factor, set,
system_matrix.get_data_ptr(),
b.get_data_ptr());
hipError_t err = hipGetLastError();
if( err != hipSuccess ){
std::cerr << "htgrappa_calculate_grappa_unmixing: Unable to form system matrix: " <<
hipGetErrorString(err) << std::endl;
return -1;
}
// {
// std::string filename = debugFolder+appendix+"A.cplx";
//write_cuNDArray_to_disk(&system_matrix, filename.c_str());
// }
// {
// std::string filename = debugFolder+appendix+"b.cplx";
//write_cuNDArray_to_disk(&b, filename.c_str());
// }
complext<float> alpha = complext<float>(1);
complext<float> beta = complext<float>(0);
hipblasStatus_t stat;
if ( set == 0 )
{
{
//GPUTimer t2("Cgemm call");
stat = hipblasCgemm(handle, HIPBLAS_OP_C, HIPBLAS_OP_N,
n,n,m,(float2*) &alpha,
(float2*) system_matrix.get_data_ptr(), m,
(float2*) system_matrix.get_data_ptr(), m,
(float2*) &beta, (float2*) AHA.get_data_ptr(), n);
if (stat != HIPBLAS_STATUS_SUCCESS) {
std::cerr << "htgrappa_calculate_grappa_unmixing: Failed to form AHA product using cublas gemm" << std::endl;
std::cerr << "---- cublas error code " << stat << std::endl;
return -1;
}
}
{
//timer.start("copy AHA to host");
if (hipMemcpy(pAHA, AHA.get_data_ptr(), AHA_host.get_number_of_bytes(), hipMemcpyDeviceToHost) != hipSuccess)
{
std::cerr << "htgrappa_calculate_grappa_unmixing: Failed to copy AHA to host" << std::endl;
std::cerr << "---- cublas error code " << stat << std::endl;
return -1;
}
//timer.stop();
//timer.start("apply the regularization");
// apply the regularization
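// Diagonal loading: every diagonal entry of AHA is replaced by its magnitude
// plus lamda * trace(|AHA|) / n, which keeps the normal-equation matrix well
// conditioned for the CPU solve further below.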
double lamda = 0.0005;
double trA = std::sqrt(pAHA[0].x*pAHA[0].x + pAHA[0].y*pAHA[0].y);
size_t c;
for ( c=1; c<n; c++ )
{
float x = pAHA[c+c*n].x;
float y = pAHA[c+c*n].y;
trA += std::sqrt(x*x+y*y);
}
double value = trA*lamda/n;
for ( c=0; c<n; c++ )
{
float x = pAHA[c+c*n].x;
float y = pAHA[c+c*n].y;
pAHA[c+c*n].x = std::sqrt(x*x+y*y) + value;
pAHA[c+c*n].y = 0;
}
//timer.stop();
//timer.start("copy the AHA to device");
if (hipMemcpy(AHA.get_data_ptr(), pAHA, AHA_host.get_number_of_bytes(), hipMemcpyHostToDevice) != hipSuccess)
{
std::cerr << "htgrappa_calculate_grappa_unmixing: Failed to copy regularized AHA to device" << std::endl;
std::cerr << "---- cublas error code " << stat << std::endl;
return -1;
}
//timer.stop();
}
AHA_set0 = AHA;
}
else
{
AHA = AHA_set0;
}
// {
// std::string filename = debugFolder+appendix+"AHA.cplx";
//write_cuNDArray_to_disk(&AHA, filename.c_str());
// }
{
//GPUTimer timer("GRAPPA cublas gemm");
//TODO: Sort out arguments for source and target coils here.
stat = hipblasCgemm(handle, HIPBLAS_OP_C, HIPBLAS_OP_N,
n,target_coils,m,(float2*) &alpha,
(float2*) system_matrix.get_data_ptr(), m,
(float2*) b.get_data_ptr(), m,
(float2*) &beta, (float2*)AHrhs.get_data_ptr(), n);
}
// {
// std::string filename = debugFolder+appendix+"AHrhs.cplx";
//write_cuNDArray_to_disk(&AHrhs, filename.c_str());
// }
if (stat != HIPBLAS_STATUS_SUCCESS) {
std::cerr << "htgrappa_calculate_grappa_unmixing: Failed to form AHrhs product using cublas gemm" << std::endl;
std::cerr << "---- cublas error code " << stat << std::endl;
return -1;
}
{
//It actually turns out to be faster to do this inversion on the CPU. Problem is probably too small for GPU to make sense
//GPUTimer cpu_invert_time("CPU Inversion time");
ht_grappa_solve_spd_system(AHA, AHrhs);
}
gridDim = dim3((unsigned int) ::ceil((1.0f*n*source_coils)/blockDim.x), 1, 1 );
//TODO: This should be target coils used as argument here.
hipLaunchKernelGGL(( copy_grappa_coefficients_to_kernel_2d), dim3(gridDim), dim3(blockDim) , 0, 0, AHrhs.get_data_ptr(),
gkernel.get_data_ptr(),
source_coils,
target_coils,
dkernel_size,
acceleration_factor,
set);
// {
// std::string filename = debugFolder+appendix+"kernel.cplx";
//write_cuNDArray_to_disk(&gkernel, filename.c_str());
// }
err = hipGetLastError();
if( err != hipSuccess ){
std::cerr << "htgrappa_calculate_grappa_unmixing: Failed to copy calculated coefficients to kernel: " <<
hipGetErrorString(err) << std::endl;
return -1;
}
}
//{
// std::string filename = debugFolder+"kernel_all.cplx";
// write_cuNDArray_to_disk(&gkernel, filename.c_str());
//}
//TODO: This should be source coils
cuNDArray<T> tmp_mixing = cuNDArray<T>(ref_data->get_dimensions());
int kernel_elements = gkernel.get_number_of_elements()/target_coils;
int total_elements = tmp_mixing.get_number_of_elements()/source_coils;
dkernel_size.y *= acceleration_factor;
std::vector<size_t> ft_dims(2,0);ft_dims[1] = 1;
clear(out_mixing_coeff);
unsigned int current_uncombined_index = 0;
//TODO: Loop over target coils.
for (unsigned int c = 0; c < target_coils; c++)
{
clear(&tmp_mixing);
dim3 blockDim(512,1,1);
dim3 gridDim((unsigned int) ::ceil((1.0f*kernel_elements)/blockDim.x), 1, 1 );
//TODO: Take source and target into consideration
hipLaunchKernelGGL(( copy_grappa_kernel_to_kspace_2d), dim3(gridDim), dim3(blockDim) , 0, 0, (gkernel.get_data_ptr()+(c*kernel_elements)),
tmp_mixing.get_data_ptr(),
dims,
dkernel_size,
source_coils);
hipError_t err = hipGetLastError();
if( err != hipSuccess ){
std::cerr << "htgrappa_calculate_grappa_unmixing: Unable to pad GRAPPA kernel: " <<
hipGetErrorString(err) << std::endl;
return -1;
}
cuNDFFT<typename realType<T>::Type>::instance()->ifft(&tmp_mixing, &ft_dims);
float scale_factor = (float)std::sqrt((double)(RO*E1));
gridDim = dim3((unsigned int) ::ceil(1.0f*total_elements/blockDim.x), 1, 1 );
hipLaunchKernelGGL(( scale_and_add_unmixing_coeffs), dim3(gridDim), dim3(blockDim) , 0, 0, tmp_mixing.get_data_ptr(),
(b1->get_data_ptr()+ c*total_elements),
out_mixing_coeff->get_data_ptr(),
total_elements,
source_coils,
scale_factor);
err = hipGetLastError();
if( err != hipSuccess ){
std::cerr << "htgrappa_calculate_grappa_unmixing: scale and add mixing coeffs: " <<
hipGetErrorString(err) << std::endl;
return -1;
}
if (uncombined_channels) {
std::list<unsigned int>::iterator it = std::find((*uncombined_channels).begin(),(*uncombined_channels).end(),c);
if (it != (*uncombined_channels).end()) {
current_uncombined_index++;
hipLaunchKernelGGL(( scale_and_copy_unmixing_coeffs), dim3(gridDim), dim3(blockDim) , 0, 0, tmp_mixing.get_data_ptr(),
(out_mixing_coeff->get_data_ptr()+current_uncombined_index*total_elements*source_coils),
total_elements,
source_coils,
scale_factor);
}
}
}
//GDEBUG_STREAM("**********hipblasDestroy()**************" << std::endl);
//hipblasDestroy(handle);
return 0;
}
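// Solves the regularized least-squares problem A * coeff = b (A is M x N,
// b is M x K): forms A^H A and A^H b with cuBLAS, optionally applies the same
// diagonal loading as above scaled by lamda, then solves on the CPU.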
template <class T> int inverse_clib_matrix(cuNDArray<T>* A,
cuNDArray<T>* b,
cuNDArray<T>* coeff,
double lamda)
{
// A: M*N
// b: M*K
size_t M = A->get_size(0);
size_t N = A->get_size(1);
size_t K = b->get_size(1);
std::vector<size_t> AHA_dims(2,N);
cuNDArray<T> AHA = cuNDArray<T>(&AHA_dims);
std::vector<size_t> AHrhs_dims;
AHrhs_dims.push_back(N);
AHrhs_dims.push_back(K);
coeff->create(&AHrhs_dims);
hipblasHandle_t handle = *CUBLASContextProvider::instance()->getCublasHandle();
complext<float> alpha = complext<float>(1);
complext<float> beta = complext<float>(0);
//{
// std::string filename = debugFolder+"A.cplx";
// write_cuNDArray_to_disk(A, filename.c_str());
//}
//{
// std::string filename = debugFolder+"b.cplx";
// write_cuNDArray_to_disk(b, filename.c_str());
//}
{
//GPUTimer t2("compute AHA ...");
hipblasStatus_t stat = hipblasCgemm(handle, HIPBLAS_OP_C, HIPBLAS_OP_N,
N,N,M,(float2*) &alpha,
(float2*) A->get_data_ptr(), M,
(float2*) A->get_data_ptr(), M,
(float2*) &beta, (float2*) AHA.get_data_ptr(), N);
if (stat != HIPBLAS_STATUS_SUCCESS)
{
std::cerr << "inverse_clib_matrix: Failed to form AHA product using cublas gemm" << std::endl;
std::cerr << "---- cublas error code " << stat << std::endl;
return -1;
}
}
//{
// std::string filename = debugFolder+"AHA.cplx";
// write_cuNDArray_to_disk(&AHA, filename.c_str());
//}
{
//GPUTimer t2("compute AHrhs ...");
hipblasStatus_t stat = hipblasCgemm(handle, HIPBLAS_OP_C, HIPBLAS_OP_N,
N,K,M,(float2*) &alpha,
(float2*) A->get_data_ptr(), M,
(float2*) b->get_data_ptr(), M,
(float2*) &beta, (float2*)coeff->get_data_ptr(), N);
if (stat != HIPBLAS_STATUS_SUCCESS)
{
std::cerr << "inverse_clib_matrix: Failed to form AHrhs product using cublas gemm" << std::endl;
std::cerr << "---- cublas error code " << stat << std::endl;
return -1;
}
}
//{
// std::string filename = debugFolder+"AHrhs.cplx";
// write_cuNDArray_to_disk(coeff, filename.c_str());
//}
// apply the regularization
if ( lamda > 0 )
{
hoNDArray<T> AHA_host(N, N);
float2* pAHA = (float2*) AHA_host.get_data_ptr();
//GadgetronTimer timer;
//timer.start("copy AHA to host");
if (hipMemcpy(pAHA, AHA.get_data_ptr(), AHA_host.get_number_of_bytes(), hipMemcpyDeviceToHost) != hipSuccess)
{
std::cerr << "inverse_clib_matrix: Failed to copy AHA to host" << std::endl;
return -1;
}
//timer.stop();
//timer.start("apply the regularization");
// apply the regularization
double trA = std::sqrt(pAHA[0].x*pAHA[0].x + pAHA[0].y*pAHA[0].y);
size_t c;
for ( c=1; c<N; c++ )
{
float x = pAHA[c+c*N].x;
float y = pAHA[c+c*N].y;
trA += std::sqrt(x*x+y*y);
}
double value = trA*lamda/N;
for ( c=0; c<N; c++ )
{
float x = pAHA[c+c*N].x;
float y = pAHA[c+c*N].y;
pAHA[c+c*N].x = std::sqrt(x*x+y*y) + value;
pAHA[c+c*N].y = 0;
}
//timer.stop();
//timer.start("copy the AHA to device");
if (hipMemcpy(AHA.get_data_ptr(), pAHA, AHA_host.get_number_of_bytes(), hipMemcpyHostToDevice) != hipSuccess)
{
std::cerr << "inverse_clib_matrix: Failed to copy regularized AHA to device" << std::endl;
return -1;
}
//timer.stop();
}
/*
culaStatus s;
s = culaDeviceCgels( 'N', N, N, K,
(culaDeviceFloatComplex*)AHA.get_data_ptr(), N,
(culaDeviceFloatComplex*)coeff->get_data_ptr(), N);
*/
{
//It actually turns out to be faster to do this inversion on the CPU. Problem is probably too small for GPU to make sense
//GPUTimer cpu_invert_time("CPU Inversion time");
ht_grappa_solve_spd_system(AHA, *coeff);
}
//{
// std::string filename = debugFolder+"coeff.cplx";
// write_cuNDArray_to_disk(coeff, filename.c_str());
//}
/*
if (s != culaNoError)
{
GDEBUG_STREAM("inverse_clib_matrix: linear solve failed" << std::endl);
return -1;
}
*/
return 0;
}
//Template instantiation
template EXPORTGPUPMRI int htgrappa_calculate_grappa_unmixing(cuNDArray<complext<float> >* ref_data,
cuNDArray<complext<float> >* b1,
unsigned int acceleration_factor,
std::vector<unsigned int> *kernel_size,
cuNDArray<complext<float> >* out_mixing_coeff,
std::vector< std::pair<unsigned int, unsigned int> >* sampled_region,
std::list< unsigned int >* uncombined_channels);
template EXPORTGPUPMRI int inverse_clib_matrix(cuNDArray<complext<float> >* A,
cuNDArray<complext<float> >* b,
cuNDArray<complext<float> >* coeff,
double lamda);
}
|
39b349cd4c0f45def9a94cdb1bba878308b3e1db.cu
|
#include "htgrappa.h"
#include "cuNDArray_elemwise.h"
#include "cuNDFFT.h"
#include "GadgetronTimer.h"
#include "GPUTimer.h"
#include "cuNDArray_elemwise.h"
#include "CUBLASContextProvider.h"
#include "hoNDArray_fileio.h"
#include <cublas_v2.h>
//#include <cula_lapack_device.h>
#include <iostream>
namespace Gadgetron {
static int2 vec_to_int2(std::vector<unsigned int> vec)
{
int2 ret; ret.x = 0; ret.y = 0;
if (vec.size() < 2) {
GDEBUG_STREAM("vec_to_uint2 dimensions of vector too small" << std::endl);
return ret;
}
ret.x = vec[0]; ret.y = vec[1];
return ret;
}
template <class T> static int write_cuNDArray_to_disk(cuNDArray<T>* a, const char* filename)
{
boost::shared_ptr< hoNDArray<T> > host = a->to_host();
write_nd_array<complext<float> >(host.get(), filename);
return 0;
}
template <class T> __global__ void form_grappa_system_matrix_kernel_2d(const T* __restrict__ ref_data,
int2 dims,
int source_coils,
int target_coils,
int2 ros,
int2 ros_offset,
int2 kernel_size,
int acceleration_factor,
int set_number,
T* __restrict__ out_matrix,
T* __restrict__ b)
{
long idx_in = blockIdx.x*blockDim.x+threadIdx.x;
int klocations = ros.x*ros.y;
int image_elements = dims.x*dims.y;
//int coefficients = kernel_size.x*kernel_size.y*coils;
if (idx_in < klocations) {
//unsigned int y = idx_in/ros.x;
//unsigned int x = idx_in - y*ros.x;
unsigned int x = idx_in/ros.y;
unsigned int y = idx_in - x*ros.y;
unsigned int idx_ref = 0;
unsigned int coeff_counter = 0;
int kernel_size_x = kernel_size.x;
int kernel_size_y = kernel_size.y;
for (int c = 0; c < source_coils; c++) {
for (int ky = -((kernel_size_y*acceleration_factor)>>1)+set_number+1;
ky < ((kernel_size_y*acceleration_factor+1)>>1); ky+=acceleration_factor) {
for (int kx = -(kernel_size_x>>1); kx < ((kernel_size_x+1)>>1); kx++) {
idx_ref = c*image_elements + x+kx+ros_offset.x + (y+ky+ros_offset.y)*dims.x;
//out_matrix[idx_in*coefficients+coeff_counter++] = ref_data[idx_ref];
out_matrix[idx_in+(coeff_counter++)*klocations] = ref_data[idx_ref];
}
}
}
//Loop over target coils here
for (unsigned int c = 0; c < target_coils; c++) {
//b[idx_in*coils + c] = ref_data[c*image_elements + y*dims.x+x];
b[idx_in + c*klocations] = ref_data[c*image_elements + (y+ros_offset.y)*dims.x+(x+ros_offset.x)];
}
}
}
//TODO: This should take source and target coils into consideration
template <class T> __global__ void copy_grappa_coefficients_to_kernel_2d(const T* __restrict__ coeffs,
T* __restrict__ kernel,
int source_coils,
int target_coils,
int2 kernel_size,
int acceleration_factor,
int set)
{
unsigned long idx_in = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int coefficients_in_set = source_coils*kernel_size.x*kernel_size.y*target_coils;
if (idx_in < coefficients_in_set) {
int idx_in_tmp = idx_in;
int kx = idx_in%kernel_size.x;
idx_in = (idx_in-kx)/kernel_size.x;
int ky = idx_in%kernel_size.y;
idx_in = (idx_in-ky)/kernel_size.y;
int coil = idx_in%source_coils;
idx_in = (idx_in-coil)/source_coils;
int coilg = idx_in;
kernel[coilg*source_coils*(kernel_size.y*acceleration_factor)*kernel_size.x +
coil*(kernel_size.y*acceleration_factor)*kernel_size.x +
(ky*acceleration_factor + set + 1)*kernel_size.x + kx] = coeffs[idx_in_tmp];
if ((coil == coilg) && (kx == 0) && (ky == 0) && (set == 0)) {
kernel[coilg*source_coils*(kernel_size.y*acceleration_factor)*kernel_size.x +
coil*(kernel_size.y*acceleration_factor)*kernel_size.x +
((kernel_size.y>>1)*acceleration_factor)*kernel_size.x + (kernel_size.x>>1) ]._real = 1;
}
}
}
template <class T> __global__ void copy_grappa_kernel_to_kspace_2d(const T* __restrict__ kernel,
T* __restrict__ out,
int2 dims,
int2 kernel_size,
int coils)
{
unsigned long idx_in = blockIdx.x*blockDim.x+threadIdx.x;
if (idx_in < kernel_size.x*kernel_size.y*coils) {
int idx_in_tmp = idx_in;
int kx = idx_in%kernel_size.x;
idx_in = (idx_in-kx)/kernel_size.x;
int ky = idx_in%kernel_size.y;
idx_in = (idx_in-ky)/kernel_size.y;
int coil = idx_in;
int outx = -(kx- (kernel_size.x>>1)) + (dims.x>>1); //Flipping the kernel for conv
int outy = -(ky- (kernel_size.y>>1)) + (dims.y>>1);
out[coil*dims.x*dims.y + outy*dims.x + outx] = kernel[idx_in_tmp];
}
}
__global__ void scale_and_add_unmixing_coeffs(const complext<float> * __restrict__ unmixing,
const complext<float> * __restrict__ csm,
complext<float> * __restrict__ out,
int elements,
int coils,
float scale_factor)
{
unsigned long idx_in = blockIdx.x*blockDim.x+threadIdx.x;
complext<float> tmp;
if (idx_in < elements) {
for (int c = 0; c < coils; c++) {
tmp = unmixing[c*elements + idx_in]*conj(csm[idx_in]);
out[c*elements + idx_in] += scale_factor*tmp;
}
}
}
__global__ void scale_and_copy_unmixing_coeffs(const complext<float> * __restrict__ unmixing,
complext<float> * __restrict__ out,
int elements,
int coils,
float scale_factor)
{
unsigned long idx_in = blockIdx.x*blockDim.x+threadIdx.x;
if (idx_in < elements) {
for (int c = 0; c < coils; c++) {
out[c*elements + idx_in] = scale_factor*unmixing[c*elements + idx_in];
}
}
}
__global__ void conj_csm_coeffs(const complext<float> * __restrict__ csm,
complext<float> * __restrict__ out,
int source_elements,
int target_elements)
{
//TODO: Here we need to have both src_elements and target_elements and we use conj(csm) for all target_elements and 0.0 when element > target_elements
unsigned long idx_in = blockIdx.x*blockDim.x+threadIdx.x;
if (idx_in < source_elements) {
if (idx_in >= target_elements) {
out[idx_in] = complext<float> (0.0,0.0);
} else {
out[idx_in] = conj(csm[idx_in]);
}
}
}
__global__ void single_channel_coeffs(complext<float> * out,
int channel_no,
int elements_per_channel)
{
unsigned long idx_in = blockIdx.x*blockDim.x+threadIdx.x;
if (idx_in < elements_per_channel) {
out[idx_in + channel_no*elements_per_channel] = complext<float>(1.0,0.0);
}
}
template <class T> int htgrappa_calculate_grappa_unmixing(cuNDArray<T>* ref_data,
cuNDArray<T>* b1,
unsigned int acceleration_factor,
std::vector<unsigned int>* kernel_size,
cuNDArray<T>* out_mixing_coeff,
std::vector< std::pair<unsigned int, unsigned int> >* sampled_region,
std::list< unsigned int >* uncombined_channels)
{
if (ref_data->get_number_of_dimensions() != b1->get_number_of_dimensions()) {
std::cerr << "htgrappa_calculate_grappa_unmixing: Dimensions mismatch" << std::endl;
return -1;
}
for (unsigned int i = 0; i < (ref_data->get_number_of_dimensions()-1); i++) {
if (ref_data->get_size(i) != b1->get_size(i)) {
std::cerr << "htgrappa_calculate_grappa_unmixing: Dimensions mismatch" << std::endl;
return -1;
}
}
unsigned int RO = ref_data->get_size(0);
unsigned int E1 = ref_data->get_size(1);
unsigned int source_coils = ref_data->get_size(ref_data->get_number_of_dimensions()-1);
unsigned int target_coils = b1->get_size(b1->get_number_of_dimensions()-1);
unsigned int elements_per_coil = b1->get_number_of_elements()/target_coils;
if (target_coils > source_coils) {
std::cerr << "target_coils > source_coils" << std::endl;
return -1;
}
if (acceleration_factor == 1) {
dim3 blockDim(512,1,1);
dim3 gridDim((unsigned int) std::ceil((1.0f*elements_per_coil*source_coils)/blockDim.x), 1, 1 );
conj_csm_coeffs<<< gridDim, blockDim >>>( b1->get_data_ptr(),
out_mixing_coeff->get_data_ptr(),
out_mixing_coeff->get_number_of_elements(),
b1->get_number_of_elements());
std::list<unsigned int>::iterator it;
gridDim = dim3((unsigned int) std::ceil((1.0f*(elements_per_coil))/blockDim.x), 1, 1 );
int uncombined_channel_no = 0;
if (uncombined_channels) {
for ( it = uncombined_channels->begin(); it != uncombined_channels->end(); it++ ) {
uncombined_channel_no++;
//TODO: Adjust pointers to reflect that the number of target/source coils may not be equal
single_channel_coeffs<<< gridDim, blockDim >>>( out_mixing_coeff->get_data_ptr() + uncombined_channel_no*source_coils*elements_per_coil,
*it,
(elements_per_coil));
}
}
return 0;
}
if (kernel_size->size() != (ref_data->get_number_of_dimensions()-1)) {
std::cerr << "htgrappa_calculate_grappa_unmixing: Kernel size does not match the data dimensions" << std::endl;
return -1;
}
if (ref_data->get_number_of_dimensions() > 3) {
std::cerr << "htgrappa_calculate_grappa_unmixing: Not yet implemented for 3D" << std::endl;
return -1;
}
//Calculate region of support + offsets
std::vector<size_t> rosTmp = *ref_data->get_dimensions();
std::vector<unsigned int> ros(rosTmp.size());
for ( unsigned int ii=0; ii<rosTmp.size(); ii++ ){
ros[ii] = rosTmp[ii];
}
ros.pop_back(); //Remove the number of coils
std::vector<unsigned int> ros_offset(ref_data->get_number_of_dimensions(),0);
unsigned long int kspace_locations = 1;
if (sampled_region) {
for (unsigned int i = 0; i < ros.size(); i++) {
if (i > 0) {
ros[i] = (*sampled_region)[i].second-(*sampled_region)[i].first-((*kernel_size)[i]*acceleration_factor);
} else {
ros[i] = (*sampled_region)[i].second-(*sampled_region)[i].first-(*kernel_size)[i];
}
ros_offset[i] = (*sampled_region)[i].first+(((*sampled_region)[i].second-(*sampled_region)[i].first-ros[i])>>1);
kspace_locations *= ros[i];
}
} else {
for (unsigned int i = 0; i < ros.size(); i++) {
if (i > 0) {
ros[i] -= ((*kernel_size)[i]*acceleration_factor);
} else {
ros[i] -= (*kernel_size)[i];
}
ros_offset[i] = (ref_data->get_size(i)-ros[i])>>1;
kspace_locations *= ros[i];
}
}
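// Example with assumed numbers (illustration only): for 256 x 256 reference data,
// kernel_size = {4, 5} and acceleration_factor = 2, the region of support shrinks
// to 256 - 4 = 252 samples along kx and 256 - 5*2 = 246 lines along ky, and
// ros_offset centres that region inside the reference data.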
/*
for (unsigned int i = 0; i < ros.size(); i++) {
GDEBUG_STREAM("ROS[" << i << "] = " << ros[i] << " + " << ros_offset[i] << std::endl);
}
*/
std::vector<size_t> sys_matrix_size;
sys_matrix_size.push_back(kspace_locations);
sys_matrix_size.push_back(source_coils*(*kernel_size)[0]*(*kernel_size)[1]);
std::vector<size_t> b_size;
b_size.push_back(kspace_locations);
b_size.push_back(target_coils);
cuNDArray<T> system_matrix = cuNDArray<T>(&sys_matrix_size);
clear(&system_matrix);
cuNDArray<T> b = cuNDArray<T>(&b_size);
boost::shared_ptr< std::vector<size_t> > dimTmp = ref_data->get_dimensions();
std::vector<unsigned int> dimInt(2, 0);
dimInt[0] = (*dimTmp)[0];
dimInt[1] = (*dimTmp)[1];
int2 dims = vec_to_int2(dimInt);
int2 dros = vec_to_int2(ros);
int2 dros_offset = vec_to_int2(ros_offset);
int2 dkernel_size = vec_to_int2(*kernel_size);
//TODO: Use source coils here
int n = source_coils*(*kernel_size)[0]*(*kernel_size)[1];
int m = kspace_locations;
std::vector<size_t> AHA_dims(2,n);
cuNDArray<T> AHA = cuNDArray<T>(&AHA_dims);
cuNDArray<T> AHA_set0 = cuNDArray<T>(&AHA_dims);
hoNDArray<T> AHA_host(n, n);
float2* pAHA = (float2*) AHA_host.get_data_ptr();
//TODO: Use target coils here
std::vector<size_t> AHrhs_dims;
AHrhs_dims.push_back(n);
AHrhs_dims.push_back(target_coils);
cuNDArray<T> AHrhs = cuNDArray<T>(&AHrhs_dims);
cublasHandle_t handle = *CUBLASContextProvider::instance()->getCublasHandle();
std::vector<size_t> gkernel_dims;
gkernel_dims.push_back((*kernel_size)[0]);
gkernel_dims.push_back((*kernel_size)[1]*acceleration_factor);
gkernel_dims.push_back(source_coils);
gkernel_dims.push_back(target_coils);
cuNDArray<T> gkernel = cuNDArray<T>(&gkernel_dims);
clear(&gkernel);
//GadgetronTimer timer;
for (unsigned int set = 0; set < acceleration_factor-1; set++)
{
//GDEBUG_STREAM("Calculating coefficients for set " << set << std::endl);
//GDEBUG_STREAM("dros.x = " << dros.x << ", dros.y = " << dros.y << std::endl);
std::ostringstream ostr;
ostr << "Set_" << set << "_";
std::string appendix = ostr.str();
dim3 blockDim(512,1,1);
dim3 gridDim((unsigned int) std::ceil((1.0f*kspace_locations)/blockDim.x), 1, 1 );
form_grappa_system_matrix_kernel_2d<<< gridDim, blockDim >>>( ref_data->get_data_ptr(), dims,
source_coils, target_coils, dros, dros_offset,
dkernel_size, acceleration_factor, set,
system_matrix.get_data_ptr(),
b.get_data_ptr());
cudaError_t err = cudaGetLastError();
if( err != cudaSuccess ){
std::cerr << "htgrappa_calculate_grappa_unmixing: Unable to form system matrix: " <<
cudaGetErrorString(err) << std::endl;
return -1;
}
// {
// std::string filename = debugFolder+appendix+"A.cplx";
//write_cuNDArray_to_disk(&system_matrix, filename.c_str());
// }
// {
// std::string filename = debugFolder+appendix+"b.cplx";
//write_cuNDArray_to_disk(&b, filename.c_str());
// }
complext<float> alpha = complext<float>(1);
complext<float> beta = complext<float>(0);
cublasStatus_t stat;
if ( set == 0 )
{
{
//GPUTimer t2("Cgemm call");
stat = cublasCgemm(handle, CUBLAS_OP_C, CUBLAS_OP_N,
n,n,m,(float2*) &alpha,
(float2*) system_matrix.get_data_ptr(), m,
(float2*) system_matrix.get_data_ptr(), m,
(float2*) &beta, (float2*) AHA.get_data_ptr(), n);
if (stat != CUBLAS_STATUS_SUCCESS) {
std::cerr << "htgrappa_calculate_grappa_unmixing: Failed to form AHA product using cublas gemm" << std::endl;
std::cerr << "---- cublas error code " << stat << std::endl;
return -1;
}
}
{
//timer.start("copy AHA to host");
if (cudaMemcpy(pAHA, AHA.get_data_ptr(), AHA_host.get_number_of_bytes(), cudaMemcpyDeviceToHost) != cudaSuccess)
{
std::cerr << "htgrappa_calculate_grappa_unmixing: Failed to copy AHA to host" << std::endl;
std::cerr << "---- cublas error code " << stat << std::endl;
return -1;
}
//timer.stop();
//timer.start("apply the regularization");
// apply the regularization
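// (Tikhonov-style, sketched for clarity: each diagonal entry of A^H A is replaced by
// its magnitude plus lamda * trace(|diag(A^H A)|) / n, keeping the system well
// conditioned before the CPU solve below.)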
double lamda = 0.0005;
double trA = std::sqrt(pAHA[0].x*pAHA[0].x + pAHA[0].y*pAHA[0].y);
size_t c;
for ( c=1; c<n; c++ )
{
float x = pAHA[c+c*n].x;
float y = pAHA[c+c*n].y;
trA += std::sqrt(x*x+y*y);
}
double value = trA*lamda/n;
for ( c=0; c<n; c++ )
{
float x = pAHA[c+c*n].x;
float y = pAHA[c+c*n].y;
pAHA[c+c*n].x = std::sqrt(x*x+y*y) + value;
pAHA[c+c*n].y = 0;
}
//timer.stop();
//timer.start("copy the AHA to device");
if (cudaMemcpy(AHA.get_data_ptr(), pAHA, AHA_host.get_number_of_bytes(), cudaMemcpyHostToDevice) != cudaSuccess)
{
std::cerr << "htgrappa_calculate_grappa_unmixing: Failed to copy regularized AHA to device" << std::endl;
std::cerr << "---- cublas error code " << stat << std::endl;
return -1;
}
//timer.stop();
}
AHA_set0 = AHA;
}
else
{
AHA = AHA_set0;
}
// {
// std::string filename = debugFolder+appendix+"AHA.cplx";
//write_cuNDArray_to_disk(&AHA, filename.c_str());
// }
{
//GPUTimer timer("GRAPPA cublas gemm");
//TODO: Sort out arguments for source and target coils here.
stat = cublasCgemm(handle, CUBLAS_OP_C, CUBLAS_OP_N,
n,target_coils,m,(float2*) &alpha,
(float2*) system_matrix.get_data_ptr(), m,
(float2*) b.get_data_ptr(), m,
(float2*) &beta, (float2*)AHrhs.get_data_ptr(), n);
}
// {
// std::string filename = debugFolder+appendix+"AHrhs.cplx";
//write_cuNDArray_to_disk(&AHrhs, filename.c_str());
// }
if (stat != CUBLAS_STATUS_SUCCESS) {
std::cerr << "htgrappa_calculate_grappa_unmixing: Failed to form AHrhs product using cublas gemm" << std::endl;
std::cerr << "---- cublas error code " << stat << std::endl;
return -1;
}
{
//It actually turns out to be faster to do this inversion on the CPU. Problem is probably too small for GPU to make sense
//GPUTimer cpu_invert_time("CPU Inversion time");
ht_grappa_solve_spd_system(AHA, AHrhs);
}
gridDim = dim3((unsigned int) std::ceil((1.0f*n*source_coils)/blockDim.x), 1, 1 );
//TODO: This should be target coils used as argument here.
copy_grappa_coefficients_to_kernel_2d<<< gridDim, blockDim >>>( AHrhs.get_data_ptr(),
gkernel.get_data_ptr(),
source_coils,
target_coils,
dkernel_size,
acceleration_factor,
set);
// {
// std::string filename = debugFolder+appendix+"kernel.cplx";
//write_cuNDArray_to_disk(&gkernel, filename.c_str());
// }
err = cudaGetLastError();
if( err != cudaSuccess ){
std::cerr << "htgrappa_calculate_grappa_unmixing: Failed to copy calculated coefficients to kernel: " <<
cudaGetErrorString(err) << std::endl;
return -1;
}
}
//{
// std::string filename = debugFolder+"kernel_all.cplx";
// write_cuNDArray_to_disk(&gkernel, filename.c_str());
//}
//TODO: This should be source coils
cuNDArray<T> tmp_mixing = cuNDArray<T>(ref_data->get_dimensions());
int kernel_elements = gkernel.get_number_of_elements()/target_coils;
int total_elements = tmp_mixing.get_number_of_elements()/source_coils;
dkernel_size.y *= acceleration_factor;
std::vector<size_t> ft_dims(2,0);ft_dims[1] = 1;
clear(out_mixing_coeff);
unsigned int current_uncombined_index = 0;
//TODO: Loop over target coils.
for (unsigned int c = 0; c < target_coils; c++)
{
clear(&tmp_mixing);
dim3 blockDim(512,1,1);
dim3 gridDim((unsigned int) std::ceil((1.0f*kernel_elements)/blockDim.x), 1, 1 );
//TODO: Take source and target into consideration
copy_grappa_kernel_to_kspace_2d<<< gridDim, blockDim >>>((gkernel.get_data_ptr()+(c*kernel_elements)),
tmp_mixing.get_data_ptr(),
dims,
dkernel_size,
source_coils);
cudaError_t err = cudaGetLastError();
if( err != cudaSuccess ){
std::cerr << "htgrappa_calculate_grappa_unmixing: Unable to pad GRAPPA kernel: " <<
cudaGetErrorString(err) << std::endl;
return -1;
}
cuNDFFT<typename realType<T>::Type>::instance()->ifft(&tmp_mixing, &ft_dims);
float scale_factor = (float)std::sqrt((double)(RO*E1));
gridDim = dim3((unsigned int) std::ceil(1.0f*total_elements/blockDim.x), 1, 1 );
scale_and_add_unmixing_coeffs<<< gridDim, blockDim >>>(tmp_mixing.get_data_ptr(),
(b1->get_data_ptr()+ c*total_elements),
out_mixing_coeff->get_data_ptr(),
total_elements,
source_coils,
scale_factor);
err = cudaGetLastError();
if( err != cudaSuccess ){
std::cerr << "htgrappa_calculate_grappa_unmixing: scale and add mixing coeffs: " <<
cudaGetErrorString(err) << std::endl;
return -1;
}
if (uncombined_channels) {
std::list<unsigned int>::iterator it = std::find((*uncombined_channels).begin(),(*uncombined_channels).end(),c);
if (it != (*uncombined_channels).end()) {
current_uncombined_index++;
scale_and_copy_unmixing_coeffs<<< gridDim, blockDim >>>(tmp_mixing.get_data_ptr(),
(out_mixing_coeff->get_data_ptr()+current_uncombined_index*total_elements*source_coils),
total_elements,
source_coils,
scale_factor);
}
}
}
//GDEBUG_STREAM("**********cublasDestroy()**************" << std::endl);
//cublasDestroy_v2(handle);
return 0;
}
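// --------------------------------------------------------------------------
// Hedged usage sketch (not part of the original file): shows one way the
// routine above might be driven for complext<float> data. The kernel size
// (4 x 5) and acceleration factor (2) are illustrative assumptions only; no
// restricted sampled region or uncombined channels are requested.
inline int example_grappa_unmixing(cuNDArray< complext<float> >* ref_data,
                                   cuNDArray< complext<float> >* b1,
                                   cuNDArray< complext<float> >* out_mixing_coeff)
{
  std::vector<unsigned int> kernel_size;
  kernel_size.push_back(4); // extent along kx
  kernel_size.push_back(5); // sampled lines along ky
  return htgrappa_calculate_grappa_unmixing(ref_data, b1, 2, &kernel_size,
                                            out_mixing_coeff, 0, 0);
}
// --------------------------------------------------------------------------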
template <class T> int inverse_clib_matrix(cuNDArray<T>* A,
cuNDArray<T>* b,
cuNDArray<T>* coeff,
double lamda)
{
// A: M*N
// b: M*K
size_t M = A->get_size(0);
size_t N = A->get_size(1);
size_t K = b->get_size(1);
std::vector<size_t> AHA_dims(2,N);
cuNDArray<T> AHA = cuNDArray<T>(&AHA_dims);
std::vector<size_t> AHrhs_dims;
AHrhs_dims.push_back(N);
AHrhs_dims.push_back(K);
coeff->create(&AHrhs_dims);
cublasHandle_t handle = *CUBLASContextProvider::instance()->getCublasHandle();
complext<float> alpha = complext<float>(1);
complext<float> beta = complext<float>(0);
//{
// std::string filename = debugFolder+"A.cplx";
// write_cuNDArray_to_disk(A, filename.c_str());
//}
//{
// std::string filename = debugFolder+"b.cplx";
// write_cuNDArray_to_disk(b, filename.c_str());
//}
{
//GPUTimer t2("compute AHA ...");
cublasStatus_t stat = cublasCgemm(handle, CUBLAS_OP_C, CUBLAS_OP_N,
N,N,M,(float2*) &alpha,
(float2*) A->get_data_ptr(), M,
(float2*) A->get_data_ptr(), M,
(float2*) &beta, (float2*) AHA.get_data_ptr(), N);
if (stat != CUBLAS_STATUS_SUCCESS)
{
std::cerr << "inverse_clib_matrix: Failed to form AHA product using cublas gemm" << std::endl;
std::cerr << "---- cublas error code " << stat << std::endl;
return -1;
}
}
//{
// std::string filename = debugFolder+"AHA.cplx";
// write_cuNDArray_to_disk(&AHA, filename.c_str());
//}
{
//GPUTimer t2("compute AHrhs ...");
cublasStatus_t stat = cublasCgemm(handle, CUBLAS_OP_C, CUBLAS_OP_N,
N,K,M,(float2*) &alpha,
(float2*) A->get_data_ptr(), M,
(float2*) b->get_data_ptr(), M,
(float2*) &beta, (float2*)coeff->get_data_ptr(), N);
if (stat != CUBLAS_STATUS_SUCCESS)
{
std::cerr << "inverse_clib_matrix: Failed to form AHrhs product using cublas gemm" << std::endl;
std::cerr << "---- cublas error code " << stat << std::endl;
return -1;
}
}
//{
// std::string filename = debugFolder+"AHrhs.cplx";
// write_cuNDArray_to_disk(coeff, filename.c_str());
//}
// apply the regularization
if ( lamda > 0 )
{
hoNDArray<T> AHA_host(N, N);
float2* pAHA = (float2*) AHA_host.get_data_ptr();
//GadgetronTimer timer;
//timer.start("copy AHA to host");
if (cudaMemcpy(pAHA, AHA.get_data_ptr(), AHA_host.get_number_of_bytes(), cudaMemcpyDeviceToHost) != cudaSuccess)
{
std::cerr << "inverse_clib_matrix: Failed to copy AHA to host" << std::endl;
return -1;
}
//timer.stop();
//timer.start("apply the regularization");
// apply the regularization
double trA = std::sqrt(pAHA[0].x*pAHA[0].x + pAHA[0].y*pAHA[0].y);
size_t c;
for ( c=1; c<N; c++ )
{
float x = pAHA[c+c*N].x;
float y = pAHA[c+c*N].y;
trA += std::sqrt(x*x+y*y);
}
double value = trA*lamda/N;
for ( c=0; c<N; c++ )
{
float x = pAHA[c+c*N].x;
float y = pAHA[c+c*N].y;
pAHA[c+c*N].x = std::sqrt(x*x+y*y) + value;
pAHA[c+c*N].y = 0;
}
//timer.stop();
//timer.start("copy the AHA to device");
if (cudaMemcpy(AHA.get_data_ptr(), pAHA, AHA_host.get_number_of_bytes(), cudaMemcpyHostToDevice) != cudaSuccess)
{
std::cerr << "inverse_clib_matrix: Failed to copy regularized AHA to device" << std::endl;
return -1;
}
//timer.stop();
}
/*
culaStatus s;
s = culaDeviceCgels( 'N', N, N, K,
(culaDeviceFloatComplex*)AHA.get_data_ptr(), N,
(culaDeviceFloatComplex*)coeff->get_data_ptr(), N);
*/
{
//It actually turns out to be faster to do this inversion on the CPU. Problem is probably too small for GPU to make sense
//GPUTimer cpu_invert_time("CPU Inversion time");
ht_grappa_solve_spd_system(AHA, *coeff);
}
//{
// std::string filename = debugFolder+"coeff.cplx";
// write_cuNDArray_to_disk(coeff, filename.c_str());
//}
/*
if (s != culaNoError)
{
GDEBUG_STREAM("inverse_clib_matrix: linear solve failed" << std::endl);
return -1;
}
*/
return 0;
}
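// Hedged usage sketch (not part of the original file): regularised least-squares
// solve of A * coeff ~= b for complext<float> data; the lamda value below is an
// assumption chosen purely for illustration.
inline int example_inverse_clib_matrix(cuNDArray< complext<float> >* A,
                                       cuNDArray< complext<float> >* b,
                                       cuNDArray< complext<float> >* coeff)
{
  return inverse_clib_matrix(A, b, coeff, 0.0005);
}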
//Template instantiation
template EXPORTGPUPMRI int htgrappa_calculate_grappa_unmixing(cuNDArray<complext<float> >* ref_data,
cuNDArray<complext<float> >* b1,
unsigned int acceleration_factor,
std::vector<unsigned int> *kernel_size,
cuNDArray<complext<float> >* out_mixing_coeff,
std::vector< std::pair<unsigned int, unsigned int> >* sampled_region,
std::list< unsigned int >* uncombined_channels);
template EXPORTGPUPMRI int inverse_clib_matrix(cuNDArray<complext<float> >* A,
cuNDArray<complext<float> >* b,
cuNDArray<complext<float> >* coeff,
double lamda);
}
|
1b477a7f4f7b1897207803b8afc5756883810715.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "degree.cuh"
namespace cugraph {
/**
* Single node multi-GPU method for degree calculation on a partitioned graph.
* @param x Indicates whether to compute in degree, out degree, or the sum of both.
* 0 = in + out degree
* 1 = in-degree
* 2 = out-degree
* @param part_off The vertex partitioning of the global graph
* @param off The offsets array of the local partition
* @param ind The indices array of the local partition
* @param degree Pointer to pointers to memory on each GPU for the result
* @return Error code
*/
template<typename idx_t>
gdf_error snmg_degree(int x, size_t* part_off, idx_t* off, idx_t* ind, idx_t** degree) {
sync_all();
SNMGinfo env;
auto i = env.get_thread_num();
auto p = env.get_num_threads();
// Getting the global and local vertices and edges
size_t glob_v = part_off[p];
size_t loc_v = part_off[i + 1] - part_off[i];
idx_t tmp;
CUDA_TRY(hipMemcpy(&tmp, &off[loc_v], sizeof(idx_t), hipMemcpyDeviceToHost));
size_t loc_e = tmp;
// Allocating the local result array, and setting all entries to zero.
idx_t* local_result;
ALLOC_TRY((void** )&local_result, glob_v * sizeof(idx_t), nullptr);
thrust::fill(rmm::exec_policy(nullptr)->on(nullptr), local_result, local_result + glob_v, 0);
// In-degree
if (x == 1 || x == 0) {
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<idx_t>(loc_e), static_cast<idx_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<idx_t>((loc_e + nthreads.x - 1) / nthreads.x),
static_cast<idx_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
hipLaunchKernelGGL(( degree_coo<idx_t, idx_t>) , dim3(nblocks), dim3(nthreads), 0, 0, static_cast<idx_t>(loc_e),
static_cast<idx_t>(loc_e),
ind,
local_result);
cudaCheckError();
}
// Out-degree
if (x == 2 || x == 0) {
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<idx_t>(loc_v), static_cast<idx_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<idx_t>((loc_v + nthreads.x - 1) / nthreads.x),
static_cast<idx_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
hipLaunchKernelGGL(( degree_offsets<idx_t, idx_t>) , dim3(nblocks), dim3(nthreads), 0, 0, static_cast<idx_t>(loc_v),
static_cast<idx_t>(loc_e),
off,
local_result + part_off[i]);
cudaCheckError();
}
// Combining the local results into global results
sync_all();
treeReduce<idx_t, thrust::plus<idx_t> >(env, glob_v, local_result, degree);
// Broadcasting the global result to all GPUs
treeBroadcast(env, glob_v, local_result, degree);
return GDF_SUCCESS;
}
template gdf_error snmg_degree<int>(int x, size_t* part_off, int* off, int* ind, int** degree);
template<>
gdf_error snmg_degree<int64_t>(int x,
size_t* part_off,
int64_t* off,
int64_t* ind,
int64_t** degree) {
sync_all();
SNMGinfo env;
auto i = env.get_thread_num();
auto p = env.get_num_threads();
// Getting the global and local vertices and edges
size_t glob_v = part_off[p];
size_t loc_v = part_off[i + 1] - part_off[i];
int64_t tmp;
CUDA_TRY(hipMemcpy(&tmp, &off[loc_v], sizeof(int64_t), hipMemcpyDeviceToHost));
size_t loc_e = tmp;
// Allocating the local result array, and setting all entries to zero.
int64_t* local_result;
ALLOC_TRY((void** )&local_result, glob_v * sizeof(int64_t), nullptr);
thrust::fill(rmm::exec_policy(nullptr)->on(nullptr), local_result, local_result + glob_v, 0);
// In-degree
if (x == 1 || x == 0) {
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<int64_t>(loc_e), static_cast<int64_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<int64_t>((loc_e + nthreads.x - 1) / nthreads.x),
static_cast<int64_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
hipLaunchKernelGGL(( degree_coo<int64_t, double>) , dim3(nblocks), dim3(nthreads), 0, 0, static_cast<int64_t>(loc_e),
static_cast<int64_t>(loc_e),
ind,
reinterpret_cast<double*>(local_result));
cudaCheckError();
}
// Out-degree
if (x == 2 || x == 0) {
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<int64_t>(loc_v), static_cast<int64_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<int64_t>((loc_v + nthreads.x - 1) / nthreads.x),
static_cast<int64_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
hipLaunchKernelGGL(( degree_offsets<int64_t, double>) , dim3(nblocks), dim3(nthreads), 0, 0, static_cast<int64_t>(loc_v),
static_cast<int64_t>(loc_e),
off,
reinterpret_cast<double*>(local_result
+ part_off[i]));
cudaCheckError();
}
// Convert the values written as doubles back to int64:
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<int64_t>(glob_v), static_cast<int64_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<int64_t>((glob_v + nthreads.x - 1) / nthreads.x),
static_cast<int64_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
hipLaunchKernelGGL(( type_convert<double, int64_t>) , dim3(nblocks), dim3(nthreads), 0, 0, reinterpret_cast<double*>(local_result), glob_v);
cudaCheckError();
// Combining the local results into global results
treeReduce<int64_t, thrust::plus<int64_t> >(env, glob_v, local_result, degree);
// Broadcasting the global result to all GPUs
treeBroadcast(env, glob_v, local_result, degree);
return GDF_SUCCESS;
}
}//namespace
template<typename idx_t>
gdf_error gdf_snmg_degree_impl(int x,
size_t* part_offsets,
gdf_column* off,
gdf_column* ind,
gdf_column** x_cols) {
GDF_REQUIRE(off->size > 0, GDF_INVALID_API_CALL);
GDF_REQUIRE(ind->size > 0, GDF_INVALID_API_CALL);
GDF_REQUIRE(off->dtype == ind->dtype, GDF_UNSUPPORTED_DTYPE);
GDF_REQUIRE(off->null_count + ind->null_count == 0, GDF_VALIDITY_UNSUPPORTED);
gdf_error status;
auto p = omp_get_num_threads();
idx_t* degree[p];
for (auto i = 0; i < p; ++i) {
GDF_REQUIRE(x_cols[i] != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(x_cols[i]->size > 0, GDF_INVALID_API_CALL);
degree[i] = static_cast<idx_t*>(x_cols[i]->data);
}
status = cugraph::snmg_degree(x,
part_offsets,
static_cast<idx_t*>(off->data),
static_cast<idx_t*>(ind->data),
degree);
return status;
}
gdf_error gdf_snmg_degree(int x,
size_t* part_offsets,
gdf_column* off,
gdf_column* ind,
gdf_column** x_cols) {
GDF_REQUIRE(part_offsets != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(off != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(ind != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(x_cols != nullptr, GDF_INVALID_API_CALL);
switch (off->dtype) {
case GDF_INT32:
return gdf_snmg_degree_impl<int32_t>(x, part_offsets, off, ind, x_cols);
case GDF_INT64:
return gdf_snmg_degree_impl<int64_t>(x, part_offsets, off, ind, x_cols);
default:
return GDF_INVALID_API_CALL;
}
}
|
1b477a7f4f7b1897207803b8afc5756883810715.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "degree.cuh"
namespace cugraph {
/**
* Single node multi-GPU method for degree calculation on a partitioned graph.
* @param x Indicates whether to compute in degree, out degree, or the sum of both.
* 0 = in + out degree
* 1 = in-degree
* 2 = out-degree
* @param part_off The vertex partitioning of the global graph
* @param off The offsets array of the local partition
* @param ind The indices array of the local partition
* @param degree Pointer to pointers to memory on each GPU for the result
* @return Error code
*/
template<typename idx_t>
gdf_error snmg_degree(int x, size_t* part_off, idx_t* off, idx_t* ind, idx_t** degree) {
sync_all();
SNMGinfo env;
auto i = env.get_thread_num();
auto p = env.get_num_threads();
// Getting the global and local vertices and edges
size_t glob_v = part_off[p];
size_t loc_v = part_off[i + 1] - part_off[i];
idx_t tmp;
CUDA_TRY(cudaMemcpy(&tmp, &off[loc_v], sizeof(idx_t), cudaMemcpyDeviceToHost));
size_t loc_e = tmp;
// Allocating the local result array, and setting all entries to zero.
idx_t* local_result;
ALLOC_TRY((void** )&local_result, glob_v * sizeof(idx_t), nullptr);
thrust::fill(rmm::exec_policy(nullptr)->on(nullptr), local_result, local_result + glob_v, 0);
// In-degree
if (x == 1 || x == 0) {
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<idx_t>(loc_e), static_cast<idx_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<idx_t>((loc_e + nthreads.x - 1) / nthreads.x),
static_cast<idx_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
degree_coo<idx_t, idx_t> <<<nblocks, nthreads>>>(static_cast<idx_t>(loc_e),
static_cast<idx_t>(loc_e),
ind,
local_result);
cudaCheckError();
}
// Out-degree
if (x == 2 || x == 0) {
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<idx_t>(loc_v), static_cast<idx_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<idx_t>((loc_v + nthreads.x - 1) / nthreads.x),
static_cast<idx_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
degree_offsets<idx_t, idx_t> <<<nblocks, nthreads>>>(static_cast<idx_t>(loc_v),
static_cast<idx_t>(loc_e),
off,
local_result + part_off[i]);
cudaCheckError();
}
// Combining the local results into global results
sync_all();
treeReduce<idx_t, thrust::plus<idx_t> >(env, glob_v, local_result, degree);
// Broadcasting the global result to all GPUs
treeBroadcast(env, glob_v, local_result, degree);
return GDF_SUCCESS;
}
template gdf_error snmg_degree<int>(int x, size_t* part_off, int* off, int* ind, int** degree);
template<>
gdf_error snmg_degree<int64_t>(int x,
size_t* part_off,
int64_t* off,
int64_t* ind,
int64_t** degree) {
sync_all();
SNMGinfo env;
auto i = env.get_thread_num();
auto p = env.get_num_threads();
// Getting the global and local vertices and edges
size_t glob_v = part_off[p];
size_t loc_v = part_off[i + 1] - part_off[i];
int64_t tmp;
CUDA_TRY(cudaMemcpy(&tmp, &off[loc_v], sizeof(int64_t), cudaMemcpyDeviceToHost));
size_t loc_e = tmp;
// Allocating the local result array, and setting all entries to zero.
int64_t* local_result;
ALLOC_TRY((void** )&local_result, glob_v * sizeof(int64_t), nullptr);
thrust::fill(rmm::exec_policy(nullptr)->on(nullptr), local_result, local_result + glob_v, 0);
// In-degree
if (x == 1 || x == 0) {
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<int64_t>(loc_e), static_cast<int64_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<int64_t>((loc_e + nthreads.x - 1) / nthreads.x),
static_cast<int64_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
degree_coo<int64_t, double> <<<nblocks, nthreads>>>(static_cast<int64_t>(loc_e),
static_cast<int64_t>(loc_e),
ind,
reinterpret_cast<double*>(local_result));
cudaCheckError();
}
// Out-degree
if (x == 2 || x == 0) {
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<int64_t>(loc_v), static_cast<int64_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<int64_t>((loc_v + nthreads.x - 1) / nthreads.x),
static_cast<int64_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
degree_offsets<int64_t, double> <<<nblocks, nthreads>>>(static_cast<int64_t>(loc_v),
static_cast<int64_t>(loc_e),
off,
reinterpret_cast<double*>(local_result
+ part_off[i]));
cudaCheckError();
}
// Convert the values written as doubles back to int64:
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<int64_t>(glob_v), static_cast<int64_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<int64_t>((glob_v + nthreads.x - 1) / nthreads.x),
static_cast<int64_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
type_convert<double, int64_t> <<<nblocks, nthreads>>>(reinterpret_cast<double*>(local_result), glob_v);
cudaCheckError();
// Combining the local results into global results
treeReduce<int64_t, thrust::plus<int64_t> >(env, glob_v, local_result, degree);
// Broadcasting the global result to all GPUs
treeBroadcast(env, glob_v, local_result, degree);
return GDF_SUCCESS;
}
}//namespace
template<typename idx_t>
gdf_error gdf_snmg_degree_impl(int x,
size_t* part_offsets,
gdf_column* off,
gdf_column* ind,
gdf_column** x_cols) {
GDF_REQUIRE(off->size > 0, GDF_INVALID_API_CALL);
GDF_REQUIRE(ind->size > 0, GDF_INVALID_API_CALL);
GDF_REQUIRE(off->dtype == ind->dtype, GDF_UNSUPPORTED_DTYPE);
GDF_REQUIRE(off->null_count + ind->null_count == 0, GDF_VALIDITY_UNSUPPORTED);
gdf_error status;
auto p = omp_get_num_threads();
idx_t* degree[p];
for (auto i = 0; i < p; ++i) {
GDF_REQUIRE(x_cols[i] != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(x_cols[i]->size > 0, GDF_INVALID_API_CALL);
degree[i] = static_cast<idx_t*>(x_cols[i]->data);
}
status = cugraph::snmg_degree(x,
part_offsets,
static_cast<idx_t*>(off->data),
static_cast<idx_t*>(ind->data),
degree);
return status;
}
gdf_error gdf_snmg_degree(int x,
size_t* part_offsets,
gdf_column* off,
gdf_column* ind,
gdf_column** x_cols) {
GDF_REQUIRE(part_offsets != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(off != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(ind != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(x_cols != nullptr, GDF_INVALID_API_CALL);
switch (off->dtype) {
case GDF_INT32:
return gdf_snmg_degree_impl<int32_t>(x, part_offsets, off, ind, x_cols);
case GDF_INT64:
return gdf_snmg_degree_impl<int64_t>(x, part_offsets, off, ind, x_cols);
default:
return GDF_INVALID_API_CALL;
}
}
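/* Hedged usage sketch (not part of the original cuGraph sources): the SNMG degree
 * entry point is intended to be driven with one host thread per GPU, roughly as
 * below. All names (num_gpus, part_offsets, off, ind, x_cols) are assumptions
 * standing in for data prepared by the caller.
 *
 *   #pragma omp parallel num_threads(num_gpus)
 *   {
 *     cudaSetDevice(omp_get_thread_num());
 *     gdf_error err = gdf_snmg_degree(0, part_offsets, &off, &ind, x_cols);
 *     // err == GDF_SUCCESS when the combined in+out degree has been computed
 *   }
 */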
|
c5a6658b8639ebd4a35a8d81b0353e3a79d1c1e8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
// Struct holding the block/thread index
struct Index{
int block, thread;
};
// Kernel: write the indices into device memory
__global__ void prob_idx(Index id[]){
    int b=blockIdx.x; // block index
    int t=threadIdx.x; // thread index
    int n=blockDim.x; // number of threads in a block
    int x=b*n+t; // this thread's position in the array
    // each thread writes its own block and thread index.
id[x].block=b;
id[x].thread=t;
};
// main function
int main(){
Index* d;
Index h[3000];
    // allocate device memory
hipMalloc((void**) &d, 3000*sizeof(Index));
    // launch the device kernel
int g=1, b=1000, m=g*b;
hipLaunchKernelGGL(( prob_idx), dim3(g),dim3(b), 0, 0, d);
    // copy the device memory contents back to the host
hipMemcpy(h, d, 3000*sizeof(Index), hipMemcpyDeviceToHost);
    // print the contents
for(int i=0; i<m; i++){
printf("h[%d]={block:%d, thread:%d}\n", i,h[i].block,h[i].thread);
}
    // free device memory
hipFree(d);
return 0;
}
|
c5a6658b8639ebd4a35a8d81b0353e3a79d1c1e8.cu
|
#include<stdio.h>
#include<cuda.h>
// Struct holding the block/thread index
struct Index{
int block, thread;
};
// Kernel: write the indices into device memory
__global__ void prob_idx(Index id[]){
    int b=blockIdx.x; // block index
    int t=threadIdx.x; // thread index
    int n=blockDim.x; // number of threads in a block
    int x=b*n+t; // this thread's position in the array
    // each thread writes its own block and thread index.
id[x].block=b;
id[x].thread=t;
};
// main function
int main(){
Index* d;
Index h[3000];
    // allocate device memory
cudaMalloc((void**) &d, 3000*sizeof(Index));
    // launch the device kernel
int g=1, b=1000, m=g*b;
prob_idx<<<g,b>>>(d);
    // copy the device memory contents back to the host
cudaMemcpy(h, d, 3000*sizeof(Index), cudaMemcpyDeviceToHost);
    // print the contents
for(int i=0; i<m; i++){
printf("h[%d]={block:%d, thread:%d}\n", i,h[i].block,h[i].thread);
}
    // free device memory
cudaFree(d);
return 0;
}
|
9dbd5b484e8c5bb1bff58cc22c3739a03c3d5693.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/********************
*
* CUDA Kernel: column gradient computing
*
*/
/* ==================================================
*
* sub2ind - Column-major indexing of 2D arrays
*
*/
template <typename T>
__device__ __forceinline__ T sub2ind( T i, T j, T height) {
return (i + height*j);
} // end function 'sub2ind'
/* ==================================================
*
* core kernel
*
*/
__global__
void column_filtering(double * R,
const double * M,
const int m,
const int n,
const int p){
/* thread indices */
const int j = blockIdx.y*blockDim.y+threadIdx.y;
const int i = blockIdx.x*blockDim.x+threadIdx.x;
/* matrix calculation */
if ((i >= m) || (j >= n*p) ){
return;
}
int page=j/n;
int col=j-page*n;
R[sub2ind(i,j,m)] = M[sub2ind(i,min(col+1,n-1)+page*n,m)]-M[sub2ind(i,j,m)];
return ;
}
|
9dbd5b484e8c5bb1bff58cc22c3739a03c3d5693.cu
|
/********************
*
* CUDA Kernel: column gradient computing
*
*/
/* ==================================================
*
* sub2ind - Column-major indexing of 2D arrays
*
*/
template <typename T>
__device__ __forceinline__ T sub2ind( T i, T j, T height) {
return (i + height*j);
} // end function 'sub2ind'
/* ==================================================
*
* core kernel
*
*/
__global__
void column_filtering(double * R,
const double * M,
const int m,
const int n,
const int p){
/* thread indices */
const int j = blockIdx.y*blockDim.y+threadIdx.y;
const int i = blockIdx.x*blockDim.x+threadIdx.x;
/* matrix calculation */
if ((i >= m) || (j >= n*p) ){
return;
}
int page=j/n;
int col=j-page*n;
R[sub2ind(i,j,m)] = M[sub2ind(i,min(col+1,n-1)+page*n,m)]-M[sub2ind(i,j,m)];
return ;
}
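// Hedged launch sketch (not part of the original file): one thread per element of
// the m x (n*p) stack of pages; d_R, d_M, m, n and p are assumed to be prepared by
// the caller.
//
//   dim3 block(16, 16);
//   dim3 grid((m + block.x - 1) / block.x, (n * p + block.y - 1) / block.y);
//   column_filtering<<<grid, block>>>(d_R, d_M, m, n, p);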
|
d1ab2f06341558efafa853605f472a3c16aaead9.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
============================================================================
Filename : assignment4.c
Author : Arash Pourhabibi, Hussein Kassir
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <fstream>
#include <sys/time.h>
#include <hip/hip_runtime.h>
using namespace std;
#include "utility.h"
void array_process(double *input, double *output, int length, int iterations);
void GPU_array_process(double *input, double *output, int length, int iterations);
int main (int argc, const char *argv[]) {
int length, iterations;
double time;
if (argc != 3) {
cout<<"Invalid input!"<<endl<<"Usage: ./assignment4 <length> <iterations>"<<endl;
return 1;
} else {
length = atoi(argv[1]);
iterations = atoi(argv[2]);
if(length%2!=0)
{
cout<<"Invalid input!"<<endl<<"Array length must be even"<<endl;
return 1;
}
}
//Allocate arrays
double *input = new double[length*length];
double *output = new double[length*length];
//Reset Device
hipDeviceReset();
//Initialize the arrays
init(input, length);
init(output, length);
//Start timer
set_clock();
/*Use either the CPU or the GPU functions*/
//CPU Baseline
//Uncomment the block to use the baseline
array_process(input, output, length, iterations);
if(iterations%2==0)
{
double* temp = input;
input = output;
output = temp;
}
//GPU function
//GPU_array_process(input, output, length, iterations);
//Stop timer
time = elapsed_time();
//Report time required for n iterations
cout<<"Running the algorithm on "<<length<<" by "<<length<<" array for "<<iterations<<" iteration takes "<<setprecision(4)<<time<<"s"<<endl;
    //Save the output array to a file
save(output, length);
//Free allocated memory
delete[] input;
delete[] output;
return 0;
}
|
d1ab2f06341558efafa853605f472a3c16aaead9.cu
|
/*
============================================================================
Filename : assignment4.c
Author : Arash Pourhabibi, Hussein Kassir
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <fstream>
#include <sys/time.h>
#include <cuda_runtime.h>
using namespace std;
#include "utility.h"
void array_process(double *input, double *output, int length, int iterations);
void GPU_array_process(double *input, double *output, int length, int iterations);
int main (int argc, const char *argv[]) {
int length, iterations;
double time;
if (argc != 3) {
cout<<"Invalid input!"<<endl<<"Usage: ./assignment4 <length> <iterations>"<<endl;
return 1;
} else {
length = atoi(argv[1]);
iterations = atoi(argv[2]);
if(length%2!=0)
{
cout<<"Invalid input!"<<endl<<"Array length must be even"<<endl;
return 1;
}
}
//Allocate arrays
double *input = new double[length*length];
double *output = new double[length*length];
//Reset Device
cudaDeviceReset();
//Initialize the arrays
init(input, length);
init(output, length);
//Start timer
set_clock();
/*Use either the CPU or the GPU functions*/
//CPU Baseline
//Uncomment the block to use the baseline
array_process(input, output, length, iterations);
if(iterations%2==0)
{
double* temp = input;
input = output;
output = temp;
}
//GPU function
//GPU_array_process(input, output, length, iterations);
//Stop timer
time = elapsed_time();
//Report time required for n iterations
cout<<"Running the algorithm on "<<length<<" by "<<length<<" array for "<<iterations<<" iteration takes "<<setprecision(4)<<time<<"s"<<endl;
    //Save the output array to a file
save(output, length);
//Free allocated memory
delete[] input;
delete[] output;
return 0;
}
|
9771e0eea15430eaad230132b9f3a48399b1b302.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "decoder_masked_multihead_attention_template.hpp"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention.h"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h"
#include "src/fastertransformer/utils/cuda_bf16_wrapper.h"
#include <assert.h>
#include <float.h>
#include <type_traits>
////////////////////////////////////////////////////////////////////////////////////////////////////
#define MMHA_LAUNCH_KERNEL( \
T, Dh, Dh_MAX, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK, DO_CROSS_ATTENTION, HAS_BEAMS, stream) \
size_t smem_sz = mmha::smem_size_in_bytes<T, DO_CROSS_ATTENTION>(params, THDS_PER_VALUE, THDS_PER_BLOCK); \
dim3 grid(params.num_heads, params.batch_size); \
hipLaunchKernelGGL(( mmha::masked_multihead_attention_kernel<T, \
Dh, \
Dh_MAX, \
THDS_PER_KEY, \
THDS_PER_VALUE, \
THDS_PER_BLOCK, \
DO_CROSS_ATTENTION, \
HAS_BEAMS>), dim3(grid), dim3(THDS_PER_BLOCK), smem_sz, stream, params)
////////////////////////////////////////////////////////////////////////////////////////////////////
// !!! Specialize the launcher for Cross attention
template<typename T, int Dh, int Dh_MAX, typename KERNEL_PARAMS_TYPE>
void mmha_launch_kernel(const KERNEL_PARAMS_TYPE& params, const hipStream_t& stream)
{
constexpr int THREADS_PER_VALUE = threads_per_value_t<T, Dh_MAX>::value;
constexpr bool DO_CROSS_ATTENTION = std::is_same<KERNEL_PARAMS_TYPE, Cross_multihead_attention_params<T>>::value;
int tlength = (DO_CROSS_ATTENTION) ? params.memory_max_len : params.timestep;
if (params.cache_indir == nullptr) {
if (tlength < 32) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, false, stream);
}
else if (tlength < 2048) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, false, stream);
}
else {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, false, stream);
}
}
else {
if (tlength < 32) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, true, stream);
}
else if (tlength < 2048) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, true, stream);
}
else {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, true, stream);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template void mmha_launch_kernel<float, 32, 32, Masked_multihead_attention_params<float>>(
const Masked_multihead_attention_params<float>& params, const hipStream_t& stream);
template void mmha_launch_kernel<uint16_t, 32, 32, Masked_multihead_attention_params<uint16_t>>(
const Masked_multihead_attention_params<uint16_t>& params, const hipStream_t& stream);
#ifdef ENABLE_BF16
template void mmha_launch_kernel<__nv_bfloat16, 32, 32, Masked_multihead_attention_params<__nv_bfloat16>>(
const Masked_multihead_attention_params<__nv_bfloat16>& params, const hipStream_t& stream);
#endif
#ifdef ENABLE_FP8
template void mmha_launch_kernel<__nv_fp8_e4m3, 32, 32, Masked_multihead_attention_params<__nv_fp8_e4m3>>(
const Masked_multihead_attention_params<__nv_fp8_e4m3>& params, const hipStream_t& stream);
#endif
template void mmha_launch_kernel<float, 32, 32, Cross_multihead_attention_params<float>>(
const Cross_multihead_attention_params<float>& params, const hipStream_t& stream);
template void mmha_launch_kernel<uint16_t, 32, 32, Cross_multihead_attention_params<uint16_t>>(
const Cross_multihead_attention_params<uint16_t>& params, const hipStream_t& stream);
#ifdef ENABLE_BF16
template void mmha_launch_kernel<__nv_bfloat16, 32, 32, Cross_multihead_attention_params<__nv_bfloat16>>(
const Cross_multihead_attention_params<__nv_bfloat16>& params, const hipStream_t& stream);
#endif
#ifdef ENABLE_FP8
template void mmha_launch_kernel<__nv_fp8_e4m3, 32, 32, Cross_multihead_attention_params<__nv_fp8_e4m3>>(
const Cross_multihead_attention_params<__nv_fp8_e4m3>& params, const hipStream_t& stream);
#endif
#undef MMHA_LAUNCH_KERNEL
|
9771e0eea15430eaad230132b9f3a48399b1b302.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "decoder_masked_multihead_attention_template.hpp"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention.h"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h"
#include "src/fastertransformer/utils/cuda_bf16_wrapper.h"
#include <assert.h>
#include <float.h>
#include <type_traits>
////////////////////////////////////////////////////////////////////////////////////////////////////
#define MMHA_LAUNCH_KERNEL( \
T, Dh, Dh_MAX, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK, DO_CROSS_ATTENTION, HAS_BEAMS, stream) \
size_t smem_sz = mmha::smem_size_in_bytes<T, DO_CROSS_ATTENTION>(params, THDS_PER_VALUE, THDS_PER_BLOCK); \
dim3 grid(params.num_heads, params.batch_size); \
mmha::masked_multihead_attention_kernel<T, \
Dh, \
Dh_MAX, \
THDS_PER_KEY, \
THDS_PER_VALUE, \
THDS_PER_BLOCK, \
DO_CROSS_ATTENTION, \
HAS_BEAMS><<<grid, THDS_PER_BLOCK, smem_sz, stream>>>(params)
////////////////////////////////////////////////////////////////////////////////////////////////////
// !!! Specialize the launcher for Cross attention
template<typename T, int Dh, int Dh_MAX, typename KERNEL_PARAMS_TYPE>
void mmha_launch_kernel(const KERNEL_PARAMS_TYPE& params, const cudaStream_t& stream)
{
constexpr int THREADS_PER_VALUE = threads_per_value_t<T, Dh_MAX>::value;
constexpr bool DO_CROSS_ATTENTION = std::is_same<KERNEL_PARAMS_TYPE, Cross_multihead_attention_params<T>>::value;
int tlength = (DO_CROSS_ATTENTION) ? params.memory_max_len : params.timestep;
if (params.cache_indir == nullptr) {
if (tlength < 32) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, false, stream);
}
else if (tlength < 2048) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, false, stream);
}
else {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, false, stream);
}
}
else {
if (tlength < 32) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, true, stream);
}
else if (tlength < 2048) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, true, stream);
}
else {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, true, stream);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template void mmha_launch_kernel<float, 32, 32, Masked_multihead_attention_params<float>>(
const Masked_multihead_attention_params<float>& params, const cudaStream_t& stream);
template void mmha_launch_kernel<uint16_t, 32, 32, Masked_multihead_attention_params<uint16_t>>(
const Masked_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream);
#ifdef ENABLE_BF16
template void mmha_launch_kernel<__nv_bfloat16, 32, 32, Masked_multihead_attention_params<__nv_bfloat16>>(
const Masked_multihead_attention_params<__nv_bfloat16>& params, const cudaStream_t& stream);
#endif
#ifdef ENABLE_FP8
template void mmha_launch_kernel<__nv_fp8_e4m3, 32, 32, Masked_multihead_attention_params<__nv_fp8_e4m3>>(
const Masked_multihead_attention_params<__nv_fp8_e4m3>& params, const cudaStream_t& stream);
#endif
template void mmha_launch_kernel<float, 32, 32, Cross_multihead_attention_params<float>>(
const Cross_multihead_attention_params<float>& params, const cudaStream_t& stream);
template void mmha_launch_kernel<uint16_t, 32, 32, Cross_multihead_attention_params<uint16_t>>(
const Cross_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream);
#ifdef ENABLE_BF16
template void mmha_launch_kernel<__nv_bfloat16, 32, 32, Cross_multihead_attention_params<__nv_bfloat16>>(
const Cross_multihead_attention_params<__nv_bfloat16>& params, const cudaStream_t& stream);
#endif
#ifdef ENABLE_FP8
template void mmha_launch_kernel<__nv_fp8_e4m3, 32, 32, Cross_multihead_attention_params<__nv_fp8_e4m3>>(
const Cross_multihead_attention_params<__nv_fp8_e4m3>& params, const cudaStream_t& stream);
#endif
#undef MMHA_LAUNCH_KERNEL
|
ebe704cf90e4779bcccd7217b3f5aefa7921b0a4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <string.h>
#include <gauge_field.h>
#include <face_quda.h>
#include <typeinfo>
#include <misc_helpers.h>
#include <blas_quda.h>
namespace quda {
cudaGaugeField::cudaGaugeField(const GaugeFieldParam ¶m) :
GaugeField(param), gauge(0), even(0), odd(0), backed_up(false)
{
if ((order == QUDA_QDP_GAUGE_ORDER || order == QUDA_QDPJIT_GAUGE_ORDER) &&
create != QUDA_REFERENCE_FIELD_CREATE) {
errorQuda("QDP ordering only supported for reference fields");
}
if (order == QUDA_QDP_GAUGE_ORDER || order == QUDA_MILC_GAUGE_ORDER ||
order == QUDA_TIFR_GAUGE_ORDER || order == QUDA_BQCD_GAUGE_ORDER ||
order == QUDA_CPS_WILSON_GAUGE_ORDER)
errorQuda("Field ordering %d presently disabled for this type", order);
#ifdef MULTI_GPU
if (link_type != QUDA_ASQTAD_MOM_LINKS &&
ghostExchange == QUDA_GHOST_EXCHANGE_PAD &&
isNative()) {
bool pad_check = true;
for (int i=0; i<nDim; i++)
if (pad < nFace*surfaceCB[i]) pad_check = false;
if (!pad_check)
errorQuda("cudaGaugeField being constructed with insufficient padding\n");
}
#endif
if(create != QUDA_NULL_FIELD_CREATE &&
create != QUDA_ZERO_FIELD_CREATE &&
create != QUDA_REFERENCE_FIELD_CREATE){
errorQuda("ERROR: create type(%d) not supported yet\n", create);
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
gauge = pool_device_malloc(bytes);
if (create == QUDA_ZERO_FIELD_CREATE) hipMemset(gauge, 0, bytes);
} else {
gauge = param.gauge;
}
if ( !isNative() ) {
for (int i=0; i<nDim; i++) {
size_t nbytes = nFace * surface[i] * nInternal * precision;
ghost[i] = nbytes ? pool_device_malloc(nbytes) : NULL;
}
}
if (ghostExchange == QUDA_GHOST_EXCHANGE_PAD) {
if (create == QUDA_REFERENCE_FIELD_CREATE) exchangeGhost();
}
even = gauge;
odd = (char*)gauge + bytes/2;
#ifdef USE_TEXTURE_OBJECTS
createTexObject(evenTex, even);
createTexObject(oddTex, odd);
if(reconstruct == QUDA_RECONSTRUCT_13 || reconstruct == QUDA_RECONSTRUCT_9)
{ // Create texture objects for the phases
const int isPhase = 1;
createTexObject(evenPhaseTex, (char*)even + phase_offset, isPhase);
createTexObject(oddPhaseTex, (char*)odd + phase_offset, isPhase);
}
#endif
}
#ifdef USE_TEXTURE_OBJECTS
void cudaGaugeField::createTexObject(hipTextureObject_t &tex, void *field, int isPhase) {
if( isNative() ){
// create the texture for the field components
hipChannelFormatDesc desc;
memset(&desc, 0, sizeof(hipChannelFormatDesc));
if (precision == QUDA_SINGLE_PRECISION) desc.f = hipChannelFormatKindFloat;
else desc.f = hipChannelFormatKindSigned; // half is short, double is int2
if(isPhase){
if(precision == QUDA_DOUBLE_PRECISION){
desc.x = 8*sizeof(int);
desc.y = 8*sizeof(int);
desc.z = 0;
desc.w = 0;
}else{
desc.x = 8*precision;
desc.y = desc.z = desc.w = 0;
}
}else{
// always four components regardless of precision
if (precision == QUDA_DOUBLE_PRECISION) {
desc.x = 8*sizeof(int);
desc.y = 8*sizeof(int);
desc.z = 8*sizeof(int);
desc.w = 8*sizeof(int);
} else {
desc.x = 8*precision;
desc.y = 8*precision;
desc.z = (reconstruct == 18) ? 0 : 8*precision; // float2 or short2 for 18 reconstruct
desc.w = (reconstruct == 18) ? 0 : 8*precision;
}
}
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = field;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = isPhase ? phase_bytes/2 : (bytes-phase_bytes)/2;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
if (precision == QUDA_HALF_PRECISION) texDesc.readMode = hipReadModeNormalizedFloat;
else texDesc.readMode = hipReadModeElementType;
hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
checkCudaError();
}
}
void cudaGaugeField::destroyTexObject() {
if( isNative() ){
hipDestroyTextureObject(evenTex);
hipDestroyTextureObject(oddTex);
if(reconstruct == QUDA_RECONSTRUCT_9 || reconstruct == QUDA_RECONSTRUCT_13){
hipDestroyTextureObject(evenPhaseTex);
hipDestroyTextureObject(oddPhaseTex);
}
checkCudaError();
}
}
#endif
cudaGaugeField::~cudaGaugeField()
{
#ifdef USE_TEXTURE_OBJECTS
destroyTexObject();
#endif
if (create != QUDA_REFERENCE_FIELD_CREATE) {
if (gauge) pool_device_free(gauge);
}
if ( !isNative() ) {
for (int i=0; i<nDim; i++) {
if (ghost[i]) pool_device_free(ghost[i]);
}
}
}
// This does the exchange of the gauge field ghost zone and places it
// into the ghost array.
void cudaGaugeField::exchangeGhost() {
if (ghostExchange != QUDA_GHOST_EXCHANGE_PAD)
errorQuda("Cannot call exchangeGhost with ghostExchange=%d",
ghostExchange);
if (geometry != QUDA_VECTOR_GEOMETRY && geometry != QUDA_COARSE_GEOMETRY)
errorQuda("Cannot exchange for %d geometry gauge field", geometry);
void *ghost_[QUDA_MAX_DIM];
void *send[QUDA_MAX_DIM];
for (int d=0; d<nDim; d++) {
ghost_[d] = isNative() ? pool_device_malloc(nFace*surface[d]*nInternal*precision) : ghost[d];
send[d] = pool_device_malloc(nFace*surface[d]*nInternal*precision);
}
// get the links into contiguous buffers
extractGaugeGhost(*this, send, true);
// communicate between nodes
exchange(ghost_, send, QUDA_FORWARDS);
for (int d=0; d<nDim; d++) pool_device_free(send[d]);
if (isNative()) {
// copy from ghost into the padded region in gauge
copyGenericGauge(*this, *this, QUDA_CUDA_FIELD_LOCATION, 0, 0, 0, ghost_, 1);
for (int d=0; d<nDim; d++) pool_device_free(ghost_[d]);
}
}
// This does the opposite of exchangeGhost and sends back the ghost
// zone to the node from which it came and injects it back into the
// field
void cudaGaugeField::injectGhost() {
if (ghostExchange != QUDA_GHOST_EXCHANGE_PAD)
errorQuda("Cannot call exchangeGhost with ghostExchange=%d",
ghostExchange);
if (geometry != QUDA_VECTOR_GEOMETRY && geometry != QUDA_COARSE_GEOMETRY)
errorQuda("Cannot exchange for %d geometry gauge field", geometry);
void *ghost_[QUDA_MAX_DIM];
void *recv[QUDA_MAX_DIM];
for (int d=0; d<nDim; d++) {
ghost_[d] = isNative() ? pool_device_malloc(nFace*surface[d]*nInternal*precision) : ghost[d];
recv[d] = pool_device_malloc(nFace*surface[d]*nInternal*precision);
}
if (isNative()) {
// copy from padded region in gauge field into ghost
copyGenericGauge(*this, *this, QUDA_CUDA_FIELD_LOCATION, 0, 0, ghost_, 0, 1);
}
// communicate between nodes
exchange(recv, ghost_, QUDA_BACKWARDS);
// get the links into contiguous buffers
extractGaugeGhost(*this, recv, false);
for (int d=0; d<nDim; d++) {
pool_device_free(recv[d]);
if (isNative()) pool_device_free(ghost_[d]);
}
}
void cudaGaugeField::exchangeExtendedGhost(const int *R, bool no_comms_fill) {
void *send[QUDA_MAX_DIM];
void *recv[QUDA_MAX_DIM];
void *send_d[QUDA_MAX_DIM];
void *recv_d[QUDA_MAX_DIM];
size_t bytes[QUDA_MAX_DIM];
for (int d=0; d<nDim; d++) {
if ( !(commDimPartitioned(d) || (no_comms_fill && R[d])) ) continue;
// store both parities and directions in each
bytes[d] = surface[d] * R[d] * geometry * nInternal * precision;
send_d[d] = pool_device_malloc(2 * bytes[d]);
recv_d[d] = pool_device_malloc(2 * bytes[d]);
}
#ifndef GPU_COMMS
void *send_h[QUDA_MAX_DIM];
void *recv_h[QUDA_MAX_DIM];
size_t total_bytes = 0;
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d)) continue;
total_bytes += 4*bytes[d]; // (2 from send/recv) x (2 from fwd/back)
}
void *buffer = total_bytes > 0 ? pool_pinned_malloc(total_bytes) : nullptr;
size_t offset = 0;
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d)) continue;
recv_h[d] = static_cast<char*>(buffer) + offset;
send_h[d] = static_cast<char*>(recv_h[d]) + 2*bytes[d];
offset += 4*bytes[d];
}
#endif
// do the exchange
MsgHandle *mh_recv_back[QUDA_MAX_DIM];
MsgHandle *mh_recv_fwd[QUDA_MAX_DIM];
MsgHandle *mh_send_fwd[QUDA_MAX_DIM];
MsgHandle *mh_send_back[QUDA_MAX_DIM];
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d)) continue;
#ifdef GPU_COMMS
recv[d] = recv_d[d];
send[d] = send_d[d];
#else
recv[d] = recv_h[d];
send[d] = send_h[d];
#endif
// look into storing these for later
mh_recv_back[d] = comm_declare_receive_relative(recv[d], d, -1, bytes[d]);
mh_recv_fwd[d] = comm_declare_receive_relative(static_cast<char*>(recv[d])+bytes[d],
d, +1, bytes[d]);
mh_send_back[d] = comm_declare_send_relative(send[d], d, -1, bytes[d]);
mh_send_fwd[d] = comm_declare_send_relative(static_cast<char*>(send[d])+bytes[d],
d, +1, bytes[d]);
}
for (int d=0; d<nDim; d++) {
if ( !(commDimPartitioned(d) || (no_comms_fill && R[d])) ) continue;
// FIXME why does this break if the order is switched?
// prepost the receives
if (commDimPartitioned(d)) {
comm_start(mh_recv_fwd[d]);
comm_start(mh_recv_back[d]);
}
//extract into a contiguous buffer
extractExtendedGaugeGhost(*this, d, R, send_d, true);
if (commDimPartitioned(d)) {
// pipeline the forwards and backwards sending
#ifndef GPU_COMMS
hipMemcpyAsync(send_h[d], send_d[d], bytes[d], hipMemcpyDeviceToHost, streams[0]);
hipMemcpyAsync(static_cast<char*>(send_h[d])+bytes[d],
static_cast<char*>(send_d[d])+bytes[d], bytes[d], hipMemcpyDeviceToHost, streams[1]);
#endif
#ifndef GPU_COMMS
hipStreamSynchronize(streams[0]);
#endif
comm_start(mh_send_back[d]);
#ifndef GPU_COMMS
hipStreamSynchronize(streams[1]);
#endif
comm_start(mh_send_fwd[d]);
// forwards recv
comm_wait(mh_send_back[d]);
comm_wait(mh_recv_fwd[d]);
#ifndef GPU_COMMS
hipMemcpyAsync(static_cast<char*>(recv_d[d])+bytes[d],
static_cast<char*>(recv_h[d])+bytes[d], bytes[d], hipMemcpyHostToDevice, streams[0]);
#endif
// backwards recv
comm_wait(mh_send_fwd[d]);
comm_wait(mh_recv_back[d]);
#ifndef GPU_COMMS
hipMemcpyAsync(recv_d[d], recv_h[d], bytes[d], hipMemcpyHostToDevice, streams[1]);
#endif
} else { // if just doing a local exchange to fill halo then need to swap faces
qudaMemcpy(static_cast<char*>(recv_d[d])+bytes[d], send_d[d], bytes[d], hipMemcpyDeviceToDevice);
qudaMemcpy(recv_d[d], static_cast<char*>(send_d[d])+bytes[d], bytes[d], hipMemcpyDeviceToDevice);
}
// inject back into the gauge field
extractExtendedGaugeGhost(*this, d, R, recv_d, false);
}
#ifndef GPU_COMMS
if (total_bytes > 0) pool_pinned_free(buffer);
#endif
for (int d=0; d<nDim; d++) {
if ( !(commDimPartitioned(d) || (no_comms_fill && R[d])) ) continue;
if (commDimPartitioned(d)) {
comm_free(mh_send_fwd[d]);
comm_free(mh_send_back[d]);
comm_free(mh_recv_back[d]);
comm_free(mh_recv_fwd[d]);
}
pool_device_free(send_d[d]);
pool_device_free(recv_d[d]);
}
}
void cudaGaugeField::setGauge(void *gauge_)
{
if(create != QUDA_REFERENCE_FIELD_CREATE) {
errorQuda("Setting gauge pointer is only allowed when create="
"QUDA_REFERENCE_FIELD_CREATE type\n");
}
gauge = gauge_;
}
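// Helper for reordering copies: QDP ordering gets one device buffer per
// geometry dimension, any other order gets a single contiguous device buffer.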
void *create_gauge_buffer(size_t bytes, QudaGaugeFieldOrder order, QudaFieldGeometry geometry) {
if (order == QUDA_QDP_GAUGE_ORDER) {
void **buffer = new void*[geometry];
for (int d=0; d<geometry; d++) buffer[d] = pool_device_malloc(bytes/geometry);
return ((void*)buffer);
} else {
return pool_device_malloc(bytes);
}
}
void **create_ghost_buffer(size_t bytes[], QudaGaugeFieldOrder order) {
if (order > 4) {
void **buffer = new void*[4];
for (int d=0; d<4; d++) buffer[d] = pool_device_malloc(bytes[d]);
return buffer;
} else {
return 0;
}
}
void free_gauge_buffer(void *buffer, QudaGaugeFieldOrder order, QudaFieldGeometry geometry) {
if (order == QUDA_QDP_GAUGE_ORDER) {
for (int d=0; d<geometry; d++) pool_device_free(((void**)buffer)[d]);
delete []((void**)buffer);
} else {
pool_device_free(buffer);
}
}
void free_ghost_buffer(void **buffer, QudaGaugeFieldOrder order) {
if (order > 4) {
for (int d=0; d<4; d++) pool_device_free(buffer[d]);
delete []buffer;
}
}
void cudaGaugeField::copy(const GaugeField &src) {
if (this == &src) return;
checkField(src);
if (link_type == QUDA_ASQTAD_FAT_LINKS) {
fat_link_max = src.LinkMax();
if (precision == QUDA_HALF_PRECISION && fat_link_max == 0.0)
errorQuda("fat_link_max has not been computed");
} else {
fat_link_max = 1.0;
}
if (typeid(src) == typeid(cudaGaugeField)) {
// copy field and ghost zone into this field
copyGenericGauge(*this, src, QUDA_CUDA_FIELD_LOCATION, gauge,
static_cast<const cudaGaugeField&>(src).gauge);
} else if (typeid(src) == typeid(cpuGaugeField)) {
if (reorder_location() == QUDA_CPU_FIELD_LOCATION) { // do reorder on the CPU
void *buffer = pool_pinned_malloc(bytes);
// copy field and ghost zone into buffer
copyGenericGauge(*this, src, QUDA_CPU_FIELD_LOCATION, buffer, static_cast<const cpuGaugeField&>(src).gauge);
// this copies over both even and odd
qudaMemcpy(gauge, buffer, bytes, hipMemcpyHostToDevice);
pool_pinned_free(buffer);
} else { // else on the GPU
void *buffer = create_gauge_buffer(src.Bytes(), src.Order(), src.Geometry());
size_t ghost_bytes[4];
int srcNinternal = src.Reconstruct() != QUDA_RECONSTRUCT_NO ? src.Reconstruct() : 2*nColor*nColor;
for (int d=0; d<4; d++) ghost_bytes[d] = nFace * surface[d] * srcNinternal * src.Precision();
void **ghost_buffer = (nFace > 0) ? create_ghost_buffer(ghost_bytes, src.Order()) : nullptr;
if (src.Order() == QUDA_QDP_GAUGE_ORDER) {
for (int d=0; d<geometry; d++) {
qudaMemcpy(((void**)buffer)[d], ((void**)src.Gauge_p())[d], src.Bytes()/geometry, hipMemcpyHostToDevice);
}
} else {
qudaMemcpy(buffer, src.Gauge_p(), src.Bytes(), hipMemcpyHostToDevice);
}
if (src.Order() > 4 && GhostExchange() == QUDA_GHOST_EXCHANGE_PAD &&
src.GhostExchange() == QUDA_GHOST_EXCHANGE_PAD && nFace)
for (int d=0; d<4; d++)
qudaMemcpy(ghost_buffer[d], src.Ghost()[d], ghost_bytes[d], hipMemcpyHostToDevice);
copyGenericGauge(*this, src, QUDA_CUDA_FIELD_LOCATION, gauge, buffer, 0, ghost_buffer);
free_gauge_buffer(buffer, src.Order(), src.Geometry());
if (nFace > 0) free_ghost_buffer(ghost_buffer, src.Order());
} // reorder_location
} else {
errorQuda("Invalid gauge field type");
}
// if we have copied from a source without a pad then we need to exchange
if (ghostExchange == QUDA_GHOST_EXCHANGE_PAD && src.GhostExchange() != QUDA_GHOST_EXCHANGE_PAD) exchangeGhost();
staggeredPhaseApplied = src.StaggeredPhaseApplied();
staggeredPhaseType = src.StaggeredPhase();
checkCudaError();
}
void cudaGaugeField::loadCPUField(const cpuGaugeField &cpu) { copy(cpu); }
void cudaGaugeField::saveCPUField(cpuGaugeField &cpu) const
{
QudaFieldLocation pack_location = reorder_location();
if (pack_location == QUDA_CUDA_FIELD_LOCATION) {
void *buffer = create_gauge_buffer(cpu.Bytes(), cpu.Order(), cpu.Geometry());
// Allocate space for ghost zone if required
size_t ghost_bytes[4];
int cpuNinternal = cpu.Reconstruct() != QUDA_RECONSTRUCT_NO ? cpu.Reconstruct() : 2*nColor*nColor;
for (int d=0; d<4; d++) ghost_bytes[d] = nFace * surface[d] * cpuNinternal * cpu.Precision();
void **ghost_buffer = (nFace > 0) ? create_ghost_buffer(ghost_bytes, cpu.Order()) : nullptr;
copyGenericGauge(cpu, *this, QUDA_CUDA_FIELD_LOCATION, buffer, gauge, ghost_buffer, 0);
if (cpu.Order() == QUDA_QDP_GAUGE_ORDER) {
for (int d=0; d<geometry; d++) qudaMemcpy(((void**)cpu.gauge)[d], ((void**)buffer)[d], cpu.Bytes()/geometry, hipMemcpyDeviceToHost);
} else {
qudaMemcpy(cpu.gauge, buffer, cpu.Bytes(), hipMemcpyDeviceToHost);
}
if (cpu.Order() > 4 && GhostExchange() == QUDA_GHOST_EXCHANGE_PAD &&
cpu.GhostExchange() == QUDA_GHOST_EXCHANGE_PAD && nFace)
for (int d=0; d<4; d++)
qudaMemcpy(cpu.Ghost()[d], ghost_buffer[d], ghost_bytes[d], hipMemcpyDeviceToHost);
free_gauge_buffer(buffer, cpu.Order(), cpu.Geometry());
if (nFace > 0) free_ghost_buffer(ghost_buffer, cpu.Order());
} else if (pack_location == QUDA_CPU_FIELD_LOCATION) { // do copy then host-side reorder
void *buffer = pool_pinned_malloc(bytes);
qudaMemcpy(buffer, gauge, bytes, hipMemcpyDeviceToHost);
copyGenericGauge(cpu, *this, QUDA_CPU_FIELD_LOCATION, cpu.gauge, buffer);
pool_pinned_free(buffer);
} else {
errorQuda("Invalid pack location %d", pack_location);
}
cpu.staggeredPhaseApplied = staggeredPhaseApplied;
cpu.staggeredPhaseType = staggeredPhaseType;
}
void cudaGaugeField::backup() const {
if (backed_up) errorQuda("Gauge field already backed up");
backup_h = new char[bytes];
hipMemcpy(backup_h, gauge, bytes, hipMemcpyDeviceToHost);
checkCudaError();
backed_up = true;
}
void cudaGaugeField::restore() {
if (!backed_up) errorQuda("Cannot restore since not backed up");
hipMemcpy(gauge, backup_h, bytes, hipMemcpyHostToDevice);
delete []backup_h;
checkCudaError();
backed_up = false;
}
void cudaGaugeField::zero() {
hipMemset(gauge, 0, bytes);
}
} // namespace quda
|
ebe704cf90e4779bcccd7217b3f5aefa7921b0a4.cu
|
#include <string.h>
#include <gauge_field.h>
#include <face_quda.h>
#include <typeinfo>
#include <misc_helpers.h>
#include <blas_quda.h>
namespace quda {
cudaGaugeField::cudaGaugeField(const GaugeFieldParam ¶m) :
GaugeField(param), gauge(0), even(0), odd(0), backed_up(false)
{
if ((order == QUDA_QDP_GAUGE_ORDER || order == QUDA_QDPJIT_GAUGE_ORDER) &&
create != QUDA_REFERENCE_FIELD_CREATE) {
errorQuda("QDP ordering only supported for reference fields");
}
if (order == QUDA_QDP_GAUGE_ORDER || order == QUDA_MILC_GAUGE_ORDER ||
order == QUDA_TIFR_GAUGE_ORDER || order == QUDA_BQCD_GAUGE_ORDER ||
order == QUDA_CPS_WILSON_GAUGE_ORDER)
errorQuda("Field ordering %d presently disabled for this type", order);
#ifdef MULTI_GPU
if (link_type != QUDA_ASQTAD_MOM_LINKS &&
ghostExchange == QUDA_GHOST_EXCHANGE_PAD &&
isNative()) {
bool pad_check = true;
for (int i=0; i<nDim; i++)
if (pad < nFace*surfaceCB[i]) pad_check = false;
if (!pad_check)
errorQuda("cudaGaugeField being constructed with insufficient padding\n");
}
#endif
if(create != QUDA_NULL_FIELD_CREATE &&
create != QUDA_ZERO_FIELD_CREATE &&
create != QUDA_REFERENCE_FIELD_CREATE){
errorQuda("ERROR: create type(%d) not supported yet\n", create);
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
gauge = pool_device_malloc(bytes);
if (create == QUDA_ZERO_FIELD_CREATE) cudaMemset(gauge, 0, bytes);
} else {
gauge = param.gauge;
}
if ( !isNative() ) {
for (int i=0; i<nDim; i++) {
size_t nbytes = nFace * surface[i] * nInternal * precision;
ghost[i] = nbytes ? pool_device_malloc(nbytes) : NULL;
}
}
if (ghostExchange == QUDA_GHOST_EXCHANGE_PAD) {
if (create == QUDA_REFERENCE_FIELD_CREATE) exchangeGhost();
}
even = gauge;
odd = (char*)gauge + bytes/2;
#ifdef USE_TEXTURE_OBJECTS
createTexObject(evenTex, even);
createTexObject(oddTex, odd);
if(reconstruct == QUDA_RECONSTRUCT_13 || reconstruct == QUDA_RECONSTRUCT_9)
{ // Create texture objects for the phases
const int isPhase = 1;
createTexObject(evenPhaseTex, (char*)even + phase_offset, isPhase);
createTexObject(oddPhaseTex, (char*)odd + phase_offset, isPhase);
}
#endif
}
#ifdef USE_TEXTURE_OBJECTS
void cudaGaugeField::createTexObject(cudaTextureObject_t &tex, void *field, int isPhase) {
if( isNative() ){
// create the texture for the field components
cudaChannelFormatDesc desc;
memset(&desc, 0, sizeof(cudaChannelFormatDesc));
if (precision == QUDA_SINGLE_PRECISION) desc.f = cudaChannelFormatKindFloat;
else desc.f = cudaChannelFormatKindSigned; // half is short, double is int2
if(isPhase){
if(precision == QUDA_DOUBLE_PRECISION){
desc.x = 8*sizeof(int);
desc.y = 8*sizeof(int);
desc.z = 0;
desc.w = 0;
}else{
desc.x = 8*precision;
desc.y = desc.z = desc.w = 0;
}
}else{
// always four components regardless of precision
if (precision == QUDA_DOUBLE_PRECISION) {
desc.x = 8*sizeof(int);
desc.y = 8*sizeof(int);
desc.z = 8*sizeof(int);
desc.w = 8*sizeof(int);
} else {
desc.x = 8*precision;
desc.y = 8*precision;
desc.z = (reconstruct == 18) ? 0 : 8*precision; // float2 or short2 for 18 reconstruct
desc.w = (reconstruct == 18) ? 0 : 8*precision;
}
}
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = field;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = isPhase ? phase_bytes/2 : (bytes-phase_bytes)/2;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
if (precision == QUDA_HALF_PRECISION) texDesc.readMode = cudaReadModeNormalizedFloat;
else texDesc.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
checkCudaError();
}
}
void cudaGaugeField::destroyTexObject() {
if( isNative() ){
cudaDestroyTextureObject(evenTex);
cudaDestroyTextureObject(oddTex);
if(reconstruct == QUDA_RECONSTRUCT_9 || reconstruct == QUDA_RECONSTRUCT_13){
cudaDestroyTextureObject(evenPhaseTex);
cudaDestroyTextureObject(oddPhaseTex);
}
checkCudaError();
}
}
#endif
cudaGaugeField::~cudaGaugeField()
{
#ifdef USE_TEXTURE_OBJECTS
destroyTexObject();
#endif
if (create != QUDA_REFERENCE_FIELD_CREATE) {
if (gauge) pool_device_free(gauge);
}
if ( !isNative() ) {
for (int i=0; i<nDim; i++) {
if (ghost[i]) pool_device_free(ghost[i]);
}
}
}
// This does the exchange of the gauge field ghost zone and places it
// into the ghost array.
void cudaGaugeField::exchangeGhost() {
if (ghostExchange != QUDA_GHOST_EXCHANGE_PAD)
errorQuda("Cannot call exchangeGhost with ghostExchange=%d",
ghostExchange);
if (geometry != QUDA_VECTOR_GEOMETRY && geometry != QUDA_COARSE_GEOMETRY)
errorQuda("Cannot exchange for %d geometry gauge field", geometry);
void *ghost_[QUDA_MAX_DIM];
void *send[QUDA_MAX_DIM];
for (int d=0; d<nDim; d++) {
ghost_[d] = isNative() ? pool_device_malloc(nFace*surface[d]*nInternal*precision) : ghost[d];
send[d] = pool_device_malloc(nFace*surface[d]*nInternal*precision);
}
// get the links into contiguous buffers
extractGaugeGhost(*this, send, true);
// communicate between nodes
exchange(ghost_, send, QUDA_FORWARDS);
for (int d=0; d<nDim; d++) pool_device_free(send[d]);
if (isNative()) {
// copy from ghost into the padded region in gauge
copyGenericGauge(*this, *this, QUDA_CUDA_FIELD_LOCATION, 0, 0, 0, ghost_, 1);
for (int d=0; d<nDim; d++) pool_device_free(ghost_[d]);
}
}
// This does the opposite of exchangeGhost and sends back the ghost
// zone to the node from which it came and injects it back into the
// field
void cudaGaugeField::injectGhost() {
if (ghostExchange != QUDA_GHOST_EXCHANGE_PAD)
errorQuda("Cannot call exchangeGhost with ghostExchange=%d",
ghostExchange);
if (geometry != QUDA_VECTOR_GEOMETRY && geometry != QUDA_COARSE_GEOMETRY)
errorQuda("Cannot exchange for %d geometry gauge field", geometry);
void *ghost_[QUDA_MAX_DIM];
void *recv[QUDA_MAX_DIM];
for (int d=0; d<nDim; d++) {
ghost_[d] = isNative() ? pool_device_malloc(nFace*surface[d]*nInternal*precision) : ghost[d];
recv[d] = pool_device_malloc(nFace*surface[d]*nInternal*precision);
}
if (isNative()) {
// copy from padded region in gauge field into ghost
copyGenericGauge(*this, *this, QUDA_CUDA_FIELD_LOCATION, 0, 0, ghost_, 0, 1);
}
// communicate between nodes
exchange(recv, ghost_, QUDA_BACKWARDS);
// get the links into contiguous buffers
extractGaugeGhost(*this, recv, false);
for (int d=0; d<nDim; d++) {
pool_device_free(recv[d]);
if (isNative()) pool_device_free(ghost_[d]);
}
}
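// Exchange the extended border region of width R[d] in each dimension.
// Partitioned dimensions are exchanged between nodes (staged through pinned
// host buffers unless GPU_COMMS is enabled); unpartitioned dimensions are
// filled by a local device-to-device face swap when no_comms_fill is set.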
void cudaGaugeField::exchangeExtendedGhost(const int *R, bool no_comms_fill) {
void *send[QUDA_MAX_DIM];
void *recv[QUDA_MAX_DIM];
void *send_d[QUDA_MAX_DIM];
void *recv_d[QUDA_MAX_DIM];
size_t bytes[QUDA_MAX_DIM];
for (int d=0; d<nDim; d++) {
if ( !(commDimPartitioned(d) || (no_comms_fill && R[d])) ) continue;
// store both parities and directions in each
bytes[d] = surface[d] * R[d] * geometry * nInternal * precision;
send_d[d] = pool_device_malloc(2 * bytes[d]);
recv_d[d] = pool_device_malloc(2 * bytes[d]);
}
#ifndef GPU_COMMS
void *send_h[QUDA_MAX_DIM];
void *recv_h[QUDA_MAX_DIM];
size_t total_bytes = 0;
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d)) continue;
total_bytes += 4*bytes[d]; // (2 from send/recv) x (2 from fwd/back)
}
void *buffer = total_bytes > 0 ? pool_pinned_malloc(total_bytes) : nullptr;
size_t offset = 0;
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d)) continue;
recv_h[d] = static_cast<char*>(buffer) + offset;
send_h[d] = static_cast<char*>(recv_h[d]) + 2*bytes[d];
offset += 4*bytes[d];
}
#endif
// do the exchange
MsgHandle *mh_recv_back[QUDA_MAX_DIM];
MsgHandle *mh_recv_fwd[QUDA_MAX_DIM];
MsgHandle *mh_send_fwd[QUDA_MAX_DIM];
MsgHandle *mh_send_back[QUDA_MAX_DIM];
for (int d=0; d<nDim; d++) {
if (!commDimPartitioned(d)) continue;
#ifdef GPU_COMMS
recv[d] = recv_d[d];
send[d] = send_d[d];
#else
recv[d] = recv_h[d];
send[d] = send_h[d];
#endif
// look into storing these for later
mh_recv_back[d] = comm_declare_receive_relative(recv[d], d, -1, bytes[d]);
mh_recv_fwd[d] = comm_declare_receive_relative(static_cast<char*>(recv[d])+bytes[d],
d, +1, bytes[d]);
mh_send_back[d] = comm_declare_send_relative(send[d], d, -1, bytes[d]);
mh_send_fwd[d] = comm_declare_send_relative(static_cast<char*>(send[d])+bytes[d],
d, +1, bytes[d]);
}
for (int d=0; d<nDim; d++) {
if ( !(commDimPartitioned(d) || (no_comms_fill && R[d])) ) continue;
// FIXME why does this break if the order is switched?
// prepost the receives
if (commDimPartitioned(d)) {
comm_start(mh_recv_fwd[d]);
comm_start(mh_recv_back[d]);
}
//extract into a contiguous buffer
extractExtendedGaugeGhost(*this, d, R, send_d, true);
if (commDimPartitioned(d)) {
// pipeline the forwards and backwards sending
#ifndef GPU_COMMS
cudaMemcpyAsync(send_h[d], send_d[d], bytes[d], cudaMemcpyDeviceToHost, streams[0]);
cudaMemcpyAsync(static_cast<char*>(send_h[d])+bytes[d],
static_cast<char*>(send_d[d])+bytes[d], bytes[d], cudaMemcpyDeviceToHost, streams[1]);
#endif
#ifndef GPU_COMMS
cudaStreamSynchronize(streams[0]);
#endif
comm_start(mh_send_back[d]);
#ifndef GPU_COMMS
cudaStreamSynchronize(streams[1]);
#endif
comm_start(mh_send_fwd[d]);
// forwards recv
comm_wait(mh_send_back[d]);
comm_wait(mh_recv_fwd[d]);
#ifndef GPU_COMMS
cudaMemcpyAsync(static_cast<char*>(recv_d[d])+bytes[d],
static_cast<char*>(recv_h[d])+bytes[d], bytes[d], cudaMemcpyHostToDevice, streams[0]);
#endif
// backwards recv
comm_wait(mh_send_fwd[d]);
comm_wait(mh_recv_back[d]);
#ifndef GPU_COMMS
cudaMemcpyAsync(recv_d[d], recv_h[d], bytes[d], cudaMemcpyHostToDevice, streams[1]);
#endif
} else { // if just doing a local exchange to fill halo then need to swap faces
qudaMemcpy(static_cast<char*>(recv_d[d])+bytes[d], send_d[d], bytes[d], cudaMemcpyDeviceToDevice);
qudaMemcpy(recv_d[d], static_cast<char*>(send_d[d])+bytes[d], bytes[d], cudaMemcpyDeviceToDevice);
}
// inject back into the gauge field
extractExtendedGaugeGhost(*this, d, R, recv_d, false);
}
#ifndef GPU_COMMS
if (total_bytes > 0) pool_pinned_free(buffer);
#endif
for (int d=0; d<nDim; d++) {
if ( !(commDimPartitioned(d) || (no_comms_fill && R[d])) ) continue;
if (commDimPartitioned(d)) {
comm_free(mh_send_fwd[d]);
comm_free(mh_send_back[d]);
comm_free(mh_recv_back[d]);
comm_free(mh_recv_fwd[d]);
}
pool_device_free(send_d[d]);
pool_device_free(recv_d[d]);
}
}
void cudaGaugeField::setGauge(void *gauge_)
{
if(create != QUDA_REFERENCE_FIELD_CREATE) {
errorQuda("Setting gauge pointer is only allowed when create="
"QUDA_REFERENCE_FIELD_CREATE type\n");
}
gauge = gauge_;
}
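// Helper for reordering copies: QDP ordering gets one device buffer per
// geometry dimension, any other order gets a single contiguous device buffer.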
void *create_gauge_buffer(size_t bytes, QudaGaugeFieldOrder order, QudaFieldGeometry geometry) {
if (order == QUDA_QDP_GAUGE_ORDER) {
void **buffer = new void*[geometry];
for (int d=0; d<geometry; d++) buffer[d] = pool_device_malloc(bytes/geometry);
return ((void*)buffer);
} else {
return pool_device_malloc(bytes);
}
}
void **create_ghost_buffer(size_t bytes[], QudaGaugeFieldOrder order) {
if (order > 4) {
void **buffer = new void*[4];
for (int d=0; d<4; d++) buffer[d] = pool_device_malloc(bytes[d]);
return buffer;
} else {
return 0;
}
}
void free_gauge_buffer(void *buffer, QudaGaugeFieldOrder order, QudaFieldGeometry geometry) {
if (order == QUDA_QDP_GAUGE_ORDER) {
for (int d=0; d<geometry; d++) pool_device_free(((void**)buffer)[d]);
delete []((void**)buffer);
} else {
pool_device_free(buffer);
}
}
void free_ghost_buffer(void **buffer, QudaGaugeFieldOrder order) {
if (order > 4) {
for (int d=0; d<4; d++) pool_device_free(buffer[d]);
delete []buffer;
}
}
void cudaGaugeField::copy(const GaugeField &src) {
if (this == &src) return;
checkField(src);
if (link_type == QUDA_ASQTAD_FAT_LINKS) {
fat_link_max = src.LinkMax();
if (precision == QUDA_HALF_PRECISION && fat_link_max == 0.0)
errorQuda("fat_link_max has not been computed");
} else {
fat_link_max = 1.0;
}
if (typeid(src) == typeid(cudaGaugeField)) {
// copy field and ghost zone into this field
copyGenericGauge(*this, src, QUDA_CUDA_FIELD_LOCATION, gauge,
static_cast<const cudaGaugeField&>(src).gauge);
} else if (typeid(src) == typeid(cpuGaugeField)) {
if (reorder_location() == QUDA_CPU_FIELD_LOCATION) { // do reorder on the CPU
void *buffer = pool_pinned_malloc(bytes);
// copy field and ghost zone into buffer
copyGenericGauge(*this, src, QUDA_CPU_FIELD_LOCATION, buffer, static_cast<const cpuGaugeField&>(src).gauge);
// this copies over both even and odd
qudaMemcpy(gauge, buffer, bytes, cudaMemcpyHostToDevice);
pool_pinned_free(buffer);
} else { // else on the GPU
void *buffer = create_gauge_buffer(src.Bytes(), src.Order(), src.Geometry());
size_t ghost_bytes[4];
int srcNinternal = src.Reconstruct() != QUDA_RECONSTRUCT_NO ? src.Reconstruct() : 2*nColor*nColor;
for (int d=0; d<4; d++) ghost_bytes[d] = nFace * surface[d] * srcNinternal * src.Precision();
void **ghost_buffer = (nFace > 0) ? create_ghost_buffer(ghost_bytes, src.Order()) : nullptr;
if (src.Order() == QUDA_QDP_GAUGE_ORDER) {
for (int d=0; d<geometry; d++) {
qudaMemcpy(((void**)buffer)[d], ((void**)src.Gauge_p())[d], src.Bytes()/geometry, cudaMemcpyHostToDevice);
}
} else {
qudaMemcpy(buffer, src.Gauge_p(), src.Bytes(), cudaMemcpyHostToDevice);
}
if (src.Order() > 4 && GhostExchange() == QUDA_GHOST_EXCHANGE_PAD &&
src.GhostExchange() == QUDA_GHOST_EXCHANGE_PAD && nFace)
for (int d=0; d<4; d++)
qudaMemcpy(ghost_buffer[d], src.Ghost()[d], ghost_bytes[d], cudaMemcpyHostToDevice);
copyGenericGauge(*this, src, QUDA_CUDA_FIELD_LOCATION, gauge, buffer, 0, ghost_buffer);
free_gauge_buffer(buffer, src.Order(), src.Geometry());
if (nFace > 0) free_ghost_buffer(ghost_buffer, src.Order());
} // reorder_location
} else {
errorQuda("Invalid gauge field type");
}
// if we have copied from a source without a pad then we need to exchange
if (ghostExchange == QUDA_GHOST_EXCHANGE_PAD && src.GhostExchange() != QUDA_GHOST_EXCHANGE_PAD) exchangeGhost();
staggeredPhaseApplied = src.StaggeredPhaseApplied();
staggeredPhaseType = src.StaggeredPhase();
checkCudaError();
}
void cudaGaugeField::loadCPUField(const cpuGaugeField &cpu) { copy(cpu); }
void cudaGaugeField::saveCPUField(cpuGaugeField &cpu) const
{
QudaFieldLocation pack_location = reorder_location();
if (pack_location == QUDA_CUDA_FIELD_LOCATION) {
void *buffer = create_gauge_buffer(cpu.Bytes(), cpu.Order(), cpu.Geometry());
// Allocate space for ghost zone if required
size_t ghost_bytes[4];
int cpuNinternal = cpu.Reconstruct() != QUDA_RECONSTRUCT_NO ? cpu.Reconstruct() : 2*nColor*nColor;
for (int d=0; d<4; d++) ghost_bytes[d] = nFace * surface[d] * cpuNinternal * cpu.Precision();
void **ghost_buffer = (nFace > 0) ? create_ghost_buffer(ghost_bytes, cpu.Order()) : nullptr;
copyGenericGauge(cpu, *this, QUDA_CUDA_FIELD_LOCATION, buffer, gauge, ghost_buffer, 0);
if (cpu.Order() == QUDA_QDP_GAUGE_ORDER) {
for (int d=0; d<geometry; d++) qudaMemcpy(((void**)cpu.gauge)[d], ((void**)buffer)[d], cpu.Bytes()/geometry, cudaMemcpyDeviceToHost);
} else {
qudaMemcpy(cpu.gauge, buffer, cpu.Bytes(), cudaMemcpyDeviceToHost);
}
if (cpu.Order() > 4 && GhostExchange() == QUDA_GHOST_EXCHANGE_PAD &&
cpu.GhostExchange() == QUDA_GHOST_EXCHANGE_PAD && nFace)
for (int d=0; d<4; d++)
qudaMemcpy(cpu.Ghost()[d], ghost_buffer[d], ghost_bytes[d], cudaMemcpyDeviceToHost);
free_gauge_buffer(buffer, cpu.Order(), cpu.Geometry());
if (nFace > 0) free_ghost_buffer(ghost_buffer, cpu.Order());
} else if (pack_location == QUDA_CPU_FIELD_LOCATION) { // do copy then host-side reorder
void *buffer = pool_pinned_malloc(bytes);
qudaMemcpy(buffer, gauge, bytes, cudaMemcpyDeviceToHost);
copyGenericGauge(cpu, *this, QUDA_CPU_FIELD_LOCATION, cpu.gauge, buffer);
pool_pinned_free(buffer);
} else {
errorQuda("Invalid pack location %d", pack_location);
}
cpu.staggeredPhaseApplied = staggeredPhaseApplied;
cpu.staggeredPhaseType = staggeredPhaseType;
}
void cudaGaugeField::backup() const {
if (backed_up) errorQuda("Gauge field already backed up");
backup_h = new char[bytes];
cudaMemcpy(backup_h, gauge, bytes, cudaMemcpyDeviceToHost);
checkCudaError();
backed_up = true;
}
void cudaGaugeField::restore() {
if (!backed_up) errorQuda("Cannot restore since not backed up");
cudaMemcpy(gauge, backup_h, bytes, cudaMemcpyHostToDevice);
delete []backup_h;
checkCudaError();
backed_up = false;
}
void cudaGaugeField::zero() {
cudaMemset(gauge, 0, bytes);
}
} // namespace quda
|
2db008858cf6d14660e4043b58fbedde7ed840ab.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include <iostream>
//Kernel definition
template<typename T>
__global__
void copyKernel (T* out,
T* in,
const unsigned int N)
{
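// Grid-stride loop: each thread copies every (blockDim.x * gridDim.x)-th
// element, so any launch configuration covers all N elements.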
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i= id; i < N; i = i + blockDim.x * gridDim.x)
{
const unsigned el_id = i;
((T*) out)[el_id] = ((T*) in)[el_id];
// ((T*) out)[(1<<29) + 100] = ((T*) in)[0];
}
}
int main () {
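// Sweep grid size M (1..4 blocks) and block size m (32..1024 threads in steps
// of 32); for each configuration, time 10 runs of the copy kernel reading N
// ints from pinned host memory into device memory, after one warm-up launch.
// Results are printed as a nested Python-style tuple.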
using namespace std::chrono;
unsigned int N = 1<<29; //N is the Number of elements in the Array
double lastMeasurementTimeSpan = 100.0f; //we are not expecting measurements greater than 100 s
bool stopMeasurement = false;
std::cout << "np.array("; //output the results so that they can be read easily by python
std::cout << "(";
for (int M = 1; M <= 4; M++)
{
std::cout << "(";
for(int i = 1; i <= 32; i++)
{
if(!stopMeasurement)
{
unsigned int m = 32 * i;
// int* carray;
void* out;
void* in;
// malloc(carray);
auto err1 = hipHostMalloc(&in, N*4);
auto err2 = hipMalloc(&out, N*4);
if (err1 != hipSuccess)
{
std::cout << "Allocation ERROR: " << hipGetErrorString(err1) << std::endl;
}
if (err2 != hipSuccess)
{
std::cout << "Allocation ERROR2: " << hipGetErrorString(err2) << std::endl;
}
//make a warmup
hipLaunchKernelGGL(( copyKernel), dim3(M), dim3(m), 0, 0, static_cast<int*> (out), static_cast<int*> (in), N);
hipDeviceSynchronize();
//Time Measurement Point 1
high_resolution_clock::time_point timeBefore = high_resolution_clock::now();
for(int x = 1; x <= 10; x++)//run 10 times for better measurement accuracy
{
//run kernel here
hipLaunchKernelGGL(( copyKernel), dim3(M), dim3(m), 0, 0, static_cast<int*> (out), static_cast<int*> (in), N);
hipDeviceSynchronize();
auto lstErr = hipGetLastError();
if ( hipSuccess != lstErr )
{
std::cout << lstErr << ": " << hipGetErrorString(lstErr) << std::endl;
}
}
//Time Measurement Point 2
high_resolution_clock::time_point timeAfter = high_resolution_clock::now();
//Output Time Measurement Result
duration<double> time_span = duration_cast<duration<double>>(timeAfter - timeBefore);
hipHostFree(in);
hipFree(out);
//it seems we can't use automatic measurement stops
if(false)// (lastMeasurementTimeSpan- time_span.count() < 0.01 && i=1)
{
stopMeasurement = true;
}
else
{
lastMeasurementTimeSpan = time_span.count();
std::cout << time_span.count();
}
}
else
{
std::cout << 0.0;
}
if( i != 32) {std::cout << ",";} //output a , if we aren't the last element of the for loop
}
std::cout << ")";
if( M != 4) {std::cout << ",";} //output a , if we aren't the last element of the for loop
}
std::cout << ")";
std::cout << ")" << std::endl;
return 0;
}
|
2db008858cf6d14660e4043b58fbedde7ed840ab.cu
|
#include <chrono>
#include <iostream>
//Kernel definition
template<typename T>
__global__
void copyKernel (T* out,
T* in,
const unsigned int N)
{
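// Grid-stride loop: each thread copies every (blockDim.x * gridDim.x)-th
// element, so any launch configuration covers all N elements.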
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i= id; i < N; i = i + blockDim.x * gridDim.x)
{
const unsigned el_id = i;
((T*) out)[el_id] = ((T*) in)[el_id];
// ((T*) out)[(1<<29) + 100] = ((T*) in)[0];
}
}
int main () {
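// Sweep grid size M (1..4 blocks) and block size m (32..1024 threads in steps
// of 32); for each configuration, time 10 runs of the copy kernel reading N
// ints from pinned host memory into device memory, after one warm-up launch.
// Results are printed as a nested Python-style tuple.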
using namespace std::chrono;
unsigned int N = 1<<29; //N is the Number of elements in the Array
double lastMeasurementTimeSpan = 100.0f; //we are not expecting measurements greater than 100 s
bool stopMeasurement = false;
std::cout << "np.array("; //output the results so that they can be read easily by python
std::cout << "(";
for (int M = 1; M <= 4; M++)
{
std::cout << "(";
for(int i = 1; i <= 32; i++)
{
if(!stopMeasurement)
{
unsigned int m = 32 * i;
// int* carray;
void* out;
void* in;
// malloc(carray);
auto err1 = cudaMallocHost(&in, N*4);
auto err2 = cudaMalloc(&out, N*4);
if (err1 != cudaSuccess)
{
std::cout << "Allocation ERROR: " << cudaGetErrorString(err1) << std::endl;
}
if (err2 != cudaSuccess)
{
std::cout << "Allocation ERROR2: " << cudaGetErrorString(err2) << std::endl;
}
//make a warmup
copyKernel<<<M, m>>> (static_cast<int*> (out), static_cast<int*> (in), N);
cudaDeviceSynchronize();
//Time Measurement Point 1
high_resolution_clock::time_point timeBefore = high_resolution_clock::now();
for(int x = 1; x <= 10; x++)//run 10 times for better measurement accuracy
{
//run kernel here
copyKernel<<<M, m>>> (static_cast<int*> (out), static_cast<int*> (in), N);
cudaDeviceSynchronize();
auto lstErr = cudaGetLastError();
if ( cudaSuccess != lstErr )
{
std::cout << lstErr << ": " << cudaGetErrorString(lstErr) << std::endl;
}
}
//Time Measurement Point 2
high_resolution_clock::time_point timeAfter = high_resolution_clock::now();
//Output Time Measurement Result
duration<double> time_span = duration_cast<duration<double>>(timeAfter - timeBefore);
cudaFreeHost(in);
cudaFree(out);
//it seems we can't use automatic measurement stops
if(false)// (lastMeasurementTimeSpan- time_span.count() < 0.01 && i=1)
{
stopMeasurement = true;
}
else
{
lastMeasurementTimeSpan = time_span.count();
std::cout << time_span.count();
}
}
else
{
std::cout << 0.0;
}
if( i != 32) {std::cout << ",";} //output a , if we aren't the last element of the for loop
}
std::cout << ")";
if( M != 4) {std::cout << ",";} //output a , if we aren't the last element of the for loop
}
std::cout << ")";
std::cout << ")" << std::endl;
return 0;
}
|
57e8e7084ad21dc4264842cc3773f5526576fc9a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gnn.h"
#include "cuda_helper.h"
__global__
void norm_coop_kernel(V_ID rowLeft,
V_ID rowRight,
E_ID colLeft,
int hiddenDim,
const NodeStruct* row_ptrs,
const DATATYPE *input,
DATATYPE* output)
{
assert(blockDim.x == CUDA_NUM_THREADS);
__shared__ V_ID inDegree[CUDA_NUM_THREADS];
for (V_ID blkRowStart = blockIdx.x * blockDim.x + rowLeft;
blkRowStart <= rowRight;
blkRowStart += blockDim.x * gridDim.x)
{
if (blkRowStart + threadIdx.x <= rowRight)
{
V_ID curVtx = threadIdx.x + blkRowStart;
E_ID startColIdx, endColIdx = row_ptrs[curVtx-rowLeft].index;
if (curVtx == rowLeft)
startColIdx = colLeft;
else
startColIdx = row_ptrs[curVtx-rowLeft-1].index;
inDegree[threadIdx.x] = endColIdx - startColIdx;
}
__syncthreads();
E_ID todo = min(blockDim.x, rowRight+1-blkRowStart) * hiddenDim;
E_ID done = 0;
while (todo > 0) {
if (threadIdx.x < todo) {
output[(blkRowStart-rowLeft)*hiddenDim+done+threadIdx.x] =
input[(blkRowStart-rowLeft)*hiddenDim+done+threadIdx.x]
/ sqrt((float)inDegree[(done+threadIdx.x)/hiddenDim]);
}
done += blockDim.x;
todo -= (todo > blockDim.x) ? blockDim.x : todo;
}
}
}
__host__
void InDegreeNorm::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
ResourceManager* manager = *((ResourceManager**) task->local_args);
assert(manager->proc_id == task->current_proc.id);
manager->reset();
TensorAccessorR<NodeStruct, 1> accRowPtr(
regions[0], task->regions[0], FID_DATA, ctx, runtime, manager);
TensorAccessorR<EdgeStruct, 1> accColIdx(
regions[1], task->regions[1], FID_DATA, ctx, runtime, manager);
TensorAccessorR<DATATYPE, 2> accInput(
regions[2], task->regions[2], FID_DATA, ctx, runtime, manager);
TensorAccessorW<DATATYPE, 2> accOutput(
regions[3], task->regions[3], FID_DATA, ctx, runtime, manager,
false/*readOutput*/);
// Assert memories are correctly mapped
assert(accRowPtr.memory.kind() == Memory::GPU_FB_MEM);
assert(accColIdx.memory.kind() == Memory::GPU_FB_MEM);
assert(accInput.memory.kind() == Memory::Z_COPY_MEM);
assert(accOutput.memory.kind() == Memory::Z_COPY_MEM);
#ifdef DEADCODE
const AccessorRO<NodeStruct, 1> accRowPtr(regions[0], FID_DATA);
const AccessorRO<EdgeStruct, 1> accColIdx(regions[1], FID_DATA);
const AccessorRO<DATATYPE, 2> accInput(regions[2], FID_DATA);
const AccessorWO<DATATYPE, 2> accOutput(regions[3], FID_DATA);
Rect<1> rectRowPtr = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<1> rectColIdx = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<2> rectInput = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
Rect<2> rectOutput = runtime->get_index_space_domain(
ctx, task->regions[3].region.get_index_space());
assert(accRowPtr.accessor.is_dense_arbitrary(rectRowPtr));
assert(accColIdx.accessor.is_dense_arbitrary(rectColIdx));
assert(accInput.accessor.is_dense_arbitrary(rectInput));
assert(accOutput.accessor.is_dense_arbitrary(rectOutput));
const NodeStruct* rowPtrs = accRowPtr.ptr(rectRowPtr);
const EdgeStruct* colIdxs = accColIdx.ptr(rectColIdx);
const DATATYPE* zcInput = accInput.ptr(rectInput);
DATATYPE* zcOutput = accOutput.ptr(rectOutput);
#endif
V_ID rowLeft = accRowPtr.rect.lo[0], rowRight = accRowPtr.rect.hi[0];
E_ID colLeft = accColIdx.rect.lo[0], colRight = accColIdx.rect.hi[0];
int hiddenDim = accInput.rect.hi[0] - accInput.rect.lo[0] + 1;
assert(accOutput.rect == accInput.rect);
assert(accOutput.rect.lo[1] == accRowPtr.rect.lo[0]);
assert(accOutput.rect.hi[1] == accRowPtr.rect.hi[0]);
hipLaunchKernelGGL(( norm_coop_kernel), dim3(GET_BLOCKS(rowRight-rowLeft+1)), dim3(CUDA_NUM_THREADS), 0, 0,
rowLeft, rowRight, colLeft, hiddenDim, accRowPtr.ptr,
accInput.fbCache, accOutput.fbCache);
checkCUDA(hipMemcpy(accOutput.ptr, accOutput.fbCache,
accOutput.rect.volume() * sizeof(DATATYPE),
hipMemcpyDeviceToHost));
//for (int i = 0; i < 8; i++)
// for (int j = 0; j < 8; j++)
// printf("[InDegreeNorm] Input[%d][%d]: %.4lf\n", i, j, accInput.ptr[i * hiddenDim + j]);
//for (int i = 0; i < 8; i++)
// for (int j = 0; j < 8; j++)
// printf("[InDegreeNorm] Output[%d][%d]: %.4lf\n", i, j, accOutput.ptr[i * hiddenDim + j]);
checkCUDA(hipDeviceSynchronize());
}
__host__
void InDegreeNorm::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
const InDegreeNorm* op = (InDegreeNorm*) task->args;
// assert that we should reset input gradient
assert(op->resetInputGrads[0]);
// Forward and backward do exact same thing
return forward_task(task, regions, ctx, runtime);
}
|
57e8e7084ad21dc4264842cc3773f5526576fc9a.cu
|
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gnn.h"
#include "cuda_helper.h"
__global__
void norm_coop_kernel(V_ID rowLeft,
V_ID rowRight,
E_ID colLeft,
int hiddenDim,
const NodeStruct* row_ptrs,
const DATATYPE *input,
DATATYPE* output)
{
assert(blockDim.x == CUDA_NUM_THREADS);
__shared__ V_ID inDegree[CUDA_NUM_THREADS];
for (V_ID blkRowStart = blockIdx.x * blockDim.x + rowLeft;
blkRowStart <= rowRight;
blkRowStart += blockDim.x * gridDim.x)
{
if (blkRowStart + threadIdx.x <= rowRight)
{
V_ID curVtx = threadIdx.x + blkRowStart;
E_ID startColIdx, endColIdx = row_ptrs[curVtx-rowLeft].index;
if (curVtx == rowLeft)
startColIdx = colLeft;
else
startColIdx = row_ptrs[curVtx-rowLeft-1].index;
inDegree[threadIdx.x] = endColIdx - startColIdx;
}
__syncthreads();
E_ID todo = min(blockDim.x, rowRight+1-blkRowStart) * hiddenDim;
E_ID done = 0;
while (todo > 0) {
if (threadIdx.x < todo) {
output[(blkRowStart-rowLeft)*hiddenDim+done+threadIdx.x] =
input[(blkRowStart-rowLeft)*hiddenDim+done+threadIdx.x]
/ sqrt((float)inDegree[(done+threadIdx.x)/hiddenDim]);
}
done += blockDim.x;
todo -= (todo > blockDim.x) ? blockDim.x : todo;
}
}
}
__host__
void InDegreeNorm::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
ResourceManager* manager = *((ResourceManager**) task->local_args);
assert(manager->proc_id == task->current_proc.id);
manager->reset();
TensorAccessorR<NodeStruct, 1> accRowPtr(
regions[0], task->regions[0], FID_DATA, ctx, runtime, manager);
TensorAccessorR<EdgeStruct, 1> accColIdx(
regions[1], task->regions[1], FID_DATA, ctx, runtime, manager);
TensorAccessorR<DATATYPE, 2> accInput(
regions[2], task->regions[2], FID_DATA, ctx, runtime, manager);
TensorAccessorW<DATATYPE, 2> accOutput(
regions[3], task->regions[3], FID_DATA, ctx, runtime, manager,
false/*readOutput*/);
// Assert memories are correctly mapped
assert(accRowPtr.memory.kind() == Memory::GPU_FB_MEM);
assert(accColIdx.memory.kind() == Memory::GPU_FB_MEM);
assert(accInput.memory.kind() == Memory::Z_COPY_MEM);
assert(accOutput.memory.kind() == Memory::Z_COPY_MEM);
#ifdef DEADCODE
const AccessorRO<NodeStruct, 1> accRowPtr(regions[0], FID_DATA);
const AccessorRO<EdgeStruct, 1> accColIdx(regions[1], FID_DATA);
const AccessorRO<DATATYPE, 2> accInput(regions[2], FID_DATA);
const AccessorWO<DATATYPE, 2> accOutput(regions[3], FID_DATA);
Rect<1> rectRowPtr = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<1> rectColIdx = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<2> rectInput = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
Rect<2> rectOutput = runtime->get_index_space_domain(
ctx, task->regions[3].region.get_index_space());
assert(accRowPtr.accessor.is_dense_arbitrary(rectRowPtr));
assert(accColIdx.accessor.is_dense_arbitrary(rectColIdx));
assert(accInput.accessor.is_dense_arbitrary(rectInput));
assert(accOutput.accessor.is_dense_arbitrary(rectOutput));
const NodeStruct* rowPtrs = accRowPtr.ptr(rectRowPtr);
const EdgeStruct* colIdxs = accColIdx.ptr(rectColIdx);
const DATATYPE* zcInput = accInput.ptr(rectInput);
DATATYPE* zcOutput = accOutput.ptr(rectOutput);
#endif
V_ID rowLeft = accRowPtr.rect.lo[0], rowRight = accRowPtr.rect.hi[0];
E_ID colLeft = accColIdx.rect.lo[0], colRight = accColIdx.rect.hi[0];
int hiddenDim = accInput.rect.hi[0] - accInput.rect.lo[0] + 1;
assert(accOutput.rect == accInput.rect);
assert(accOutput.rect.lo[1] == accRowPtr.rect.lo[0]);
assert(accOutput.rect.hi[1] == accRowPtr.rect.hi[0]);
norm_coop_kernel<<<GET_BLOCKS(rowRight-rowLeft+1), CUDA_NUM_THREADS>>>(
rowLeft, rowRight, colLeft, hiddenDim, accRowPtr.ptr,
accInput.fbCache, accOutput.fbCache);
checkCUDA(cudaMemcpy(accOutput.ptr, accOutput.fbCache,
accOutput.rect.volume() * sizeof(DATATYPE),
cudaMemcpyDeviceToHost));
//for (int i = 0; i < 8; i++)
// for (int j = 0; j < 8; j++)
// printf("[InDegreeNorm] Input[%d][%d]: %.4lf\n", i, j, accInput.ptr[i * hiddenDim + j]);
//for (int i = 0; i < 8; i++)
// for (int j = 0; j < 8; j++)
// printf("[InDegreeNorm] Output[%d][%d]: %.4lf\n", i, j, accOutput.ptr[i * hiddenDim + j]);
checkCUDA(cudaDeviceSynchronize());
}
__host__
void InDegreeNorm::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
const InDegreeNorm* op = (InDegreeNorm*) task->args;
// assert that we should reset input gradient
assert(op->resetInputGrads[0]);
// Forward and backward do exact same thing
return forward_task(task, regions, ctx, runtime);
}
|
94a1d536e0d532f98a7df1441dc103c0e3cd2dbf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* inference-101
*/
#include <math_functions.h>
#include "cudaRGB.h"
//-------------------------------------------------------------------------------------------------------------------------
__global__ void RGBToRGBAf(uint8_t* srcImage,
float4* dstImage,
uint32_t width, uint32_t height)
{
int x, y, pixel;
x = (blockIdx.x * blockDim.x) + threadIdx.x;
y = (blockIdx.y * blockDim.y) + threadIdx.y;
pixel = y * width + x;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
// printf("cuda thread %i %i %i %i pixel %i \n", x, y, width, height, pixel);
const float s = 1;
dstImage[pixel] = make_float4(srcImage[pixel*3] * s, srcImage[ pixel*3 + 1] * s, srcImage[ pixel*3 + 2] * s, 0.0f);
}
hipError_t cudaRGBToRGBAf( uint8_t* srcDev, float4* destDev, size_t width, size_t height )
{
if( !srcDev || !destDev )
return hipErrorInvalidDevicePointer;
const dim3 blockDim(128,1,1);
const dim3 gridDim(width/blockDim.x, height/blockDim.y, 1);
hipLaunchKernelGGL(( RGBToRGBAf), dim3(gridDim), dim3(blockDim), 0, 0, (uint8_t*)srcDev, destDev, width, height );
return CUDA(hipGetLastError());
}
//-------------------------------------------------------------------------------------------------------------------------
__global__ void BAYER_GR8toRGBA(uint8_t* srcImage,
float4* dstImage,
uint32_t width, uint32_t height)
{
int x, y, pixel;
bool lineOdd, pixelOdd;
x = (blockIdx.x * blockDim.x) + threadIdx.x;
y = (blockIdx.y * blockDim.y) + threadIdx.y;
pixel = y * width + x;
pixelOdd = ((pixel) % 2) ? true : false;
double t = floor((double)(pixel / width)) ;
lineOdd = (int)t % 2 ? false : true;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
// printf("cuda thread %i %i %i %i pixel %i \n", x, y, width, height, pixel);
#if 1 // Colour
/* BAYER_GR
* 1 2 3 4 5 6
* 1 G R G R G R
* 2 B G B G B G
* 3 G R G R G R
* 4 B G B G B G
*/
// Odd lines
if ((lineOdd) && (pixelOdd)) // First Pixel
{
int r = (srcImage[pixel-1] + srcImage[pixel+1]) / 2;
int b = srcImage[pixel+width]; // + srcImage[pixel-width+1] + srcImage[pixel-width-1] / 4;
dstImage[pixel] = make_float4(r, srcImage[pixel], b, 0.0f); // Green Info
}
else if ((lineOdd) && (!pixelOdd))
{
int g = (srcImage[pixel-1] + srcImage[pixel+1]) / 2;
int b = (srcImage[pixel+width-1] + srcImage[pixel+width+1]) / 2;
dstImage[pixel] = make_float4(srcImage[pixel], g, b, 0.0f); // Red Info
}
// Even lines
if ((!lineOdd) && (pixelOdd))
{
int g = (srcImage[pixel+1] + srcImage[pixel-1]) / 2;
int r = (srcImage[pixel+width-1] + srcImage[pixel+width+1]) / 2;
dstImage[pixel] = make_float4(r, g, srcImage[pixel], 0.0f); // Blue Info
}
else if ((!lineOdd) && (!pixelOdd))
{
int b = (srcImage[pixel+1] + srcImage[pixel-1]) / 2;
int r = (srcImage[pixel+width] + srcImage[pixel+width]) / 2;
dstImage[pixel] = make_float4(r, srcImage[pixel], b, 0.0f); // Green Info
}
#else
// Monochrome output
dstImage[pixel] = make_float4(srcImage[pixel], srcImage[ pixel], srcImage[ pixel], 0.0f);
#endif
}
hipError_t cudaBAYER_GR8toRGBA( uint8_t* srcDev, float4* destDev, size_t width, size_t height )
{
if( !srcDev || !destDev )
return hipErrorInvalidDevicePointer;
const dim3 blockDim(128,1,1);
const dim3 gridDim(width/blockDim.x, height/blockDim.y, 1);
hipLaunchKernelGGL(( BAYER_GR8toRGBA), dim3(gridDim), dim3(blockDim), 0, 0, (uint8_t*)srcDev, destDev, width, height );
return CUDA(hipGetLastError());
}
|
94a1d536e0d532f98a7df1441dc103c0e3cd2dbf.cu
|
/*
* inference-101
*/
#include <math_functions.h>
#include "cudaRGB.h"
//-------------------------------------------------------------------------------------------------------------------------
__global__ void RGBToRGBAf(uint8_t* srcImage,
float4* dstImage,
uint32_t width, uint32_t height)
{
int x, y, pixel;
x = (blockIdx.x * blockDim.x) + threadIdx.x;
y = (blockIdx.y * blockDim.y) + threadIdx.y;
pixel = y * width + x;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
// printf("cuda thread %i %i %i %i pixel %i \n", x, y, width, height, pixel);
const float s = 1;
dstImage[pixel] = make_float4(srcImage[pixel*3] * s, srcImage[ pixel*3 + 1] * s, srcImage[ pixel*3 + 2] * s, 0.0f);
}
cudaError_t cudaRGBToRGBAf( uint8_t* srcDev, float4* destDev, size_t width, size_t height )
{
if( !srcDev || !destDev )
return cudaErrorInvalidDevicePointer;
const dim3 blockDim(128,1,1);
const dim3 gridDim(width/blockDim.x, height/blockDim.y, 1);
RGBToRGBAf<<<gridDim, blockDim>>>( (uint8_t*)srcDev, destDev, width, height );
return CUDA(cudaGetLastError());
}
//-------------------------------------------------------------------------------------------------------------------------
__global__ void BAYER_GR8toRGBA(uint8_t* srcImage,
float4* dstImage,
uint32_t width, uint32_t height)
{
int x, y, pixel;
bool lineOdd, pixelOdd;
x = (blockIdx.x * blockDim.x) + threadIdx.x;
y = (blockIdx.y * blockDim.y) + threadIdx.y;
pixel = y * width + x;
pixelOdd = ((pixel) % 2) ? true : false;
double t = floor((double)(pixel / width)) ;
lineOdd = (int)t % 2 ? false : true;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
// printf("cuda thread %i %i %i %i pixel %i \n", x, y, width, height, pixel);
#if 1 // Colour
/* BAYER_GR
* 1 2 3 4 5 6
* 1 G R G R G R
* 2 B G B G B G
* 3 G R G R G R
* 4 B G B G B G
*/
// Odd lines
if ((lineOdd) && (pixelOdd)) // First Pixel
{
int r = (srcImage[pixel-1] + srcImage[pixel+1]) / 2;
int b = srcImage[pixel+width]; // + srcImage[pixel-width+1] + srcImage[pixel-width-1] / 4;
dstImage[pixel] = make_float4(r, srcImage[pixel], b, 0.0f); // Green Info
}
else if ((lineOdd) && (!pixelOdd))
{
int g = (srcImage[pixel-1] + srcImage[pixel+1]) / 2;
int b = (srcImage[pixel+width-1] + srcImage[pixel+width+1]) / 2;
dstImage[pixel] = make_float4(srcImage[pixel], g, b, 0.0f); // Red Info
}
// Even lines
if ((!lineOdd) && (pixelOdd))
{
int g = (srcImage[pixel+1] + srcImage[pixel-1]) / 2;
int r = (srcImage[pixel+width-1] + srcImage[pixel+width+1]) / 2;
dstImage[pixel] = make_float4(r, g, srcImage[pixel], 0.0f); // Blue Info
}
else if ((!lineOdd) && (!pixelOdd))
{
int b = (srcImage[pixel+1] + srcImage[pixel-1]) / 2;
int r = (srcImage[pixel+width] + srcImage[pixel+width]) / 2;
dstImage[pixel] = make_float4(r, srcImage[pixel], b, 0.0f); // Green Info
}
#else
// Monochrome output
dstImage[pixel] = make_float4(srcImage[pixel], srcImage[ pixel], srcImage[ pixel], 0.0f);
#endif
}
cudaError_t cudaBAYER_GR8toRGBA( uint8_t* srcDev, float4* destDev, size_t width, size_t height )
{
if( !srcDev || !destDev )
return cudaErrorInvalidDevicePointer;
const dim3 blockDim(128,1,1);
const dim3 gridDim(width/blockDim.x, height/blockDim.y, 1);
BAYER_GR8toRGBA<<<gridDim, blockDim>>>( (uint8_t*)srcDev, destDev, width, height );
return CUDA(cudaGetLastError());
}
|
9c6a3963947e12d8150dbf57bce35d6a0830a39f.hip
|
// !!! This is a file automatically generated by hipify!!!
//A CUDA based implementation of the Smith Waterman Algorithm
//Author: Romil Bhardwaj
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
// #include <fstream>
// #include <cassert>
// #include <cstring>
// #include <string>
// #include <chrono>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#define max(a,b) (((a)>(b))?(a):(b))
//Define the costs here
#define indel -1
#define match 2
#define mismatch -1
//CHANGE THIS VALUE TO CHANGE THE NUMBER OF ELEMENTS
const int arraySize = 500;
//CHANGE THIS VALUE TO CHANGE THE NUMBER OF ELEMENTS
hipError_t SWHelper(int (*c)[arraySize+1], const char *a, const char *b, size_t size);
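// The score matrix is filled one anti-diagonal per kernel launch: cells on the
// same anti-diagonal only depend on earlier anti-diagonals, so they can be
// scored in parallel. The expand kernel handles the growing diagonals, the
// shrink kernel the contracting ones.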
__global__ void SmithWKernelExpand(int (*c)[arraySize+1], const char *a, const char *b, const int *k) //Declared consts to increase access speed
{
int i = threadIdx.x+1; // i,j,k start from 1
int j = ((*k)-i)+1;
int north=c[i][(j)-1]+indel; //Indel
int west=c[i-1][j]+indel;
int northwest;
if (((int) a[i-1])==((int)b[(j)-1]))
northwest=c[i-1][(j)-1]+match; //Match
else
northwest=c[i-1][(j)-1]+mismatch; //Mismatch
c[i][j] = max(max(north, west),max(northwest,0));
//c[i][j]=(*k); //Debugging - Print the antidiag num
}
__global__ void SmithWKernelShrink(int (*c)[arraySize+1], const char *a, const char *b, const int *k)
{
int i = threadIdx.x+((*k)-arraySize)+1;
int j = ((*k)-i)+1;
int north=c[i][(j)-1]+indel; //Indel
int west=c[i-1][j]+indel;
int northwest;
if (((int) a[i-1])==((int)b[(j)-1]))
northwest=c[i-1][(j)-1]+match; //Match
else
northwest=c[i-1][(j)-1]+mismatch; //Mismatch
c[i][j] = max(max(north, west),max(northwest,0));
//c[i][j]=(*k); //Debugging - Print the antidiag num
}
void print(int c[arraySize+1][arraySize+1]){
int j=0,i=0;
for (i = 0; i < arraySize+1; i++) {
for (j = 0; j < arraySize+1; j++) {
printf("%d \t", c[i][j]);
}
printf("\n");
}
}
void traceback(int c[arraySize+1][arraySize+1], char a[], char b[]){
int j=0,i=0;
int maxi=0,maxj=0,max=0;
for (i = 0; i < arraySize+1; i++) {
for (j = 0; j < arraySize+1; j++) {
if(c[i][j]>max){
maxi=i;
maxj=j;
max=c[i][j];
}
}
}
i=maxi;
j=maxj;
printf("The optimal local alignment starts at index %d for a, and index %d for b.\n", i,j);
while (c[i][j]!=0 && i>=0 && j>=0 ){
printf("\n");
if (c[i][j]==c[i-1][(j)-1]+match){ //From match
i--;
j--;
printf("%c -- %c", a[i], b[j]);
}
else if (c[i][j]==c[i-1][(j)-1]+mismatch){ //From mismatch
i--;
j--;
printf("%c -- %c", a[i], b[j]);
}
else if (c[i][j]==c[i][(j)-1]+indel){ //North
j--;
printf("- -- %c", b[j]);
}
else{ //Else has to be from West
i--;
printf("%c -- -", a[i]);
}
}
printf("\n\nThe optimal local alignment ends at index %d for a, and index %d for b.\n", i,j);
}
// Helper function for SmithWaterman
hipError_t SWHelper(int (*c)[arraySize+1], const char *a, const char *b, size_t size)
{
char *dev_a;
char *dev_b;
int (*dev_c)[arraySize+1] = {0};
int (*j)=0;
int *dev_j;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
// cudaStatus = hipSetDevice(0);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, (size+1) * (size+1) * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(char));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(char));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
cudaStatus = hipMalloc((void**)&dev_j, sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(char), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
cudaStatus = hipMemcpy(dev_j, &j, sizeof(int), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(char), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
cudaStatus = hipMemcpy(dev_c, c, (size+1) * (size+1) * sizeof(int), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
int i=0;
clock_t start1=clock();
// Launch a kernel on the GPU with one thread for each element.
//Expanding Phase
for (i=1; i<size+1; i++){ // fred: the dev_j is the rank_dia
cudaStatus = hipMemcpy(dev_j, &i, sizeof(int), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!", cudaStatus);
// goto Error;
// }
hipLaunchKernelGGL(( SmithWKernelExpand), dim3(1), dim3(i), 0, 0, dev_c, dev_a, dev_b, dev_j);
}
//Shrink Phase
for (int k=size-1; k>0; k--, i++){
cudaStatus = hipMemcpy(dev_j, &i, sizeof(int), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
hipLaunchKernelGGL(( SmithWKernelShrink), dim3(1), dim3(k), 0, 0, dev_c, dev_a, dev_b, dev_j);
}
clock_t end1=clock();
printf("\n\nKernel Time taken is %f seconds\n",(double)(end1-start1)/CLOCKS_PER_SEC);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching SmithWKernel!\n", cudaStatus);
// goto Error;
// }
// Copy output vector from GPU buffer to host memory.
//cudaStatus = hipMemcpy2D(c,size * size * sizeof(int),dev_c,size * size * sizeof(int),size * size * sizeof(int),size * size * sizeof(int),hipMemcpyDeviceToHost);
cudaStatus = hipMemcpy(c, dev_c, (size+1) * (size+1) * sizeof(int), hipMemcpyDeviceToHost);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
int main(int argc, char **argv) {
char b[arraySize] = {'a','c','a','c','a','c','t','a'};
char a[arraySize] = {'a','g','c','a','c','a','c','a'};
int i=0;
//Generating the sequences:
srand (time(NULL));
printf("\nString a is: ");
for(i=0;i<arraySize;i++) {
int gen1=rand()%4;
switch(gen1)
{
case 0:a[i]='a';
break;
case 1: a[i]='c';
break;
case 2: a[i]='g';
break;
case 3: a[i]='t';
}
//a[i]='a';
printf("%c ", a[i]);
}
printf("\nString b is: ");
for(i=0;i<arraySize;i++) {
int gen1=rand()%4;
switch(gen1)
{
case 0:b[i]='a';
break;
case 1: b[i]='c';
break;
case 2: b[i]='g';
break;
case 3: b[i]='t';
}
//b[i]='a';
printf("%c ", b[i]);
}
printf("\nOkay, generated the string \n");
int c[arraySize+1][arraySize+1] = { {0} };
clock_t start=clock();
// Run the SW Helper function
hipError_t cudaStatus = SWHelper(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "SWHelper failed!");
return 1;
}
clock_t end=clock();
//Printing the final score matrix. Uncomment this to see the matrix.
//print(c);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
traceback (c,a,b);
printf("\n\nEnter any number to exit.");
printf("\n\nTotal time taken is %f seconds\n",(double)(end-start)/CLOCKS_PER_SEC);
int x;
scanf("%d", &x);
return 0;
}
|
9c6a3963947e12d8150dbf57bce35d6a0830a39f.cu
|
//A CUDA based implementation of the Smith Waterman Algorithm
//Author: Romil Bhardwaj
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
// #include <fstream>
// #include <cassert>
// #include <cstring>
// #include <string>
// #include <chrono>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#define max(a,b) (((a)>(b))?(a):(b))
//Define the costs here
#define indel -1
#define match 2
#define mismatch -1
//CHANGE THIS VALUE TO CHANGE THE NUMBER OF ELEMENTS
const int arraySize = 500;
//CHANGE THIS VALUE TO CHANGE THE NUMBER OF ELEMENTS
cudaError_t SWHelper(int (*c)[arraySize+1], const char *a, const char *b, size_t size);
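// The score matrix is filled one anti-diagonal per kernel launch: cells on the
// same anti-diagonal only depend on earlier anti-diagonals, so they can be
// scored in parallel. The expand kernel handles the growing diagonals, the
// shrink kernel the contracting ones.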
__global__ void SmithWKernelExpand(int (*c)[arraySize+1], const char *a, const char *b, const int *k) //Declared consts to increase access speed
{
int i = threadIdx.x+1; // i,j,k start from 1
int j = ((*k)-i)+1;
int north=c[i][(j)-1]+indel; //Indel
int west=c[i-1][j]+indel;
int northwest;
if (((int) a[i-1])==((int)b[(j)-1]))
northwest=c[i-1][(j)-1]+match; //Match
else
northwest=c[i-1][(j)-1]+mismatch; //Mismatch
c[i][j] = max(max(north, west),max(northwest,0));
//c[i][j]=(*k); //Debugging - Print the antidiag num
}
__global__ void SmithWKernelShrink(int (*c)[arraySize+1], const char *a, const char *b, const int *k)
{
int i = threadIdx.x+((*k)-arraySize)+1;
int j = ((*k)-i)+1;
int north=c[i][(j)-1]+indel; //Indel
int west=c[i-1][j]+indel;
int northwest;
if (((int) a[i-1])==((int)b[(j)-1]))
northwest=c[i-1][(j)-1]+match; //Match
else
northwest=c[i-1][(j)-1]+mismatch; //Mismatch
c[i][j] = max(max(north, west),max(northwest,0));
//c[i][j]=(*k); //Debugging - Print the antidiag num
}
void print(int c[arraySize+1][arraySize+1]){
int j=0,i=0;
for (i = 0; i < arraySize+1; i++) {
for (j = 0; j < arraySize+1; j++) {
printf("%d \t", c[i][j]);
}
printf("\n");
}
}
void traceback(int c[arraySize+1][arraySize+1], char a[], char b[]){
int j=0,i=0;
int maxi=0,maxj=0,max=0;
for (i = 0; i < arraySize+1; i++) {
for (j = 0; j < arraySize+1; j++) {
if(c[i][j]>max){
maxi=i;
maxj=j;
max=c[i][j];
}
}
}
i=maxi;
j=maxj;
printf("The optimal local alignment starts at index %d for a, and index %d for b.\n", i,j);
while (c[i][j]!=0 && i>=0 && j>=0 ){
printf("\n");
if (c[i][j]==c[i-1][(j)-1]+match){ //From match
i--;
j--;
printf("%c -- %c", a[i], b[j]);
}
else if (c[i][j]==c[i-1][(j)-1]+mismatch){ //From mismatch
i--;
j--;
printf("%c -- %c", a[i], b[j]);
}
else if (c[i][j]==c[i][(j)-1]+indel){ //North
j--;
printf("- -- %c", b[j]);
}
else{ //Else has to be from West
i--;
printf("%c -- -", a[i]);
}
}
printf("\n\nThe optimal local alignment ends at index %d for a, and index %d for b.\n", i,j);
}
// Helper function for SmithWaterman
cudaError_t SWHelper(int (*c)[arraySize+1], const char *a, const char *b, size_t size)
{
char *dev_a;
char *dev_b;
int (*dev_c)[arraySize+1] = {0};
int j = 0; // host copy of the anti-diagonal index, copied into dev_j
int *dev_j;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
// cudaStatus = cudaSetDevice(0);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, (size+1) * (size+1) * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(char));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(char));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
cudaStatus = cudaMalloc((void**)&dev_j, sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(char), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
cudaStatus = cudaMemcpy(dev_j, &j, sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(char), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
cudaStatus = cudaMemcpy(dev_c, c, (size+1) * (size+1) * sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
int i=0;
clock_t start1=clock();
// Launch a kernel on the GPU with one thread for each element.
//Expanding Phase
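// Cells on the same anti-diagonal of the score matrix depend only on earlier
// anti-diagonals, so each anti-diagonal is filled by one kernel launch with one
// thread per cell. This expand phase handles the diagonals of growing length
// (1..arraySize); the shrink phase below handles the shortening ones.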
for (i=1; i<size+1; i++){ // fred: the dev_j is the rank_dia
cudaStatus = cudaMemcpy(dev_j, &i, sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!", cudaStatus);
// goto Error;
// }
SmithWKernelExpand<<<1, i>>>(dev_c, dev_a, dev_b, dev_j);
}
//Shrink Phase
for (int k=size-1; k>0; k--, i++){
cudaStatus = cudaMemcpy(dev_j, &i, sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
SmithWKernelShrink<<<1, k>>>(dev_c, dev_a, dev_b, dev_j);
}
clock_t end1=clock();
printf("\n\nKernel Time taken is %f seconds\n",(double)(end1-start1)/CLOCKS_PER_SEC);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching SmithWKernel!\n", cudaStatus);
// goto Error;
// }
// Copy output vector from GPU buffer to host memory.
//cudaStatus = cudaMemcpy2D(c,size * size * sizeof(int),dev_c,size * size * sizeof(int),size * size * sizeof(int),size * size * sizeof(int),cudaMemcpyDeviceToHost);
cudaStatus = cudaMemcpy(c, dev_c, (size+1) * (size+1) * sizeof(int), cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_j);
return cudaStatus;
}
int main(int argc, char **argv) {
char b[arraySize] = {'a','c','a','c','a','c','t','a'};
char a[arraySize] = {'a','g','c','a','c','a','c','a'};
int i=0;
//Generating the sequences:
srand (time(NULL));
printf("\nString a is: ");
for(i=0;i<arraySize;i++) {
int gen1=rand()%4;
switch(gen1)
{
case 0:a[i]='a';
break;
case 1: a[i]='c';
break;
case 2: a[i]='g';
break;
case 3: a[i]='t';
}
//a[i]='a';
printf("%c ", a[i]);
}
printf("\nString b is: ");
for(i=0;i<arraySize;i++) {
int gen1=rand()%4;
switch(gen1)
{
case 0:b[i]='a';
break;
case 1: b[i]='c';
break;
case 2: b[i]='g';
break;
case 3: b[i]='t';
}
//b[i]='a';
printf("%c ", b[i]);
}
printf("\nOkay, generated the string \n");
int c[arraySize+1][arraySize+1] = { {0} };
clock_t start=clock();
// Run the SW Helper function
cudaError_t cudaStatus = SWHelper(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "SWHelper failed!");
return 1;
}
clock_t end=clock();
//Printing the final score matrix. Uncomment this to see the matrix.
//print(c);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
traceback (c,a,b);
printf("\n\nEnter any number to exit.");
printf("\n\nTotal time taken is %f seconds\n",(double)(end-start)/CLOCKS_PER_SEC);
int x;
scanf("%d", &x);
return 0;
}
|
7566d6dec6226cbac3a7bf55803b4f7c10926ab5.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* @brief
* ragged
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <hipcub/hipcub.hpp>
#include <vector>
#include "k2/csrc/array_ops.h"
#include "k2/csrc/math.h"
#include "k2/csrc/ragged.h"
namespace {
// will be used in RaggedShape::MaxSize(int32_t axis) to call
// hipcub::DeviceReduce::Max
struct RowSplitsDiff {
const int32_t *row_splits_data;
explicit RowSplitsDiff(const int32_t *row_splits)
: row_splits_data(row_splits) {}
// operator[] and operator+ are required by hipcub::DeviceReduce::Max
__device__ int32_t operator[](int32_t i) const {
return row_splits_data[i + 1] - row_splits_data[i];
}
__device__ RowSplitsDiff operator+(int32_t n) const {
RowSplitsDiff tmp(*this);
tmp.row_splits_data += n;
return tmp;
}
};
} // namespace
namespace std {
// value_type is required by hipcub::DeviceReduce::Max
template <>
struct iterator_traits<::RowSplitsDiff> {
typedef int32_t value_type;
};
} // namespace std
namespace k2 {
// Recursive function that prints (part of) a ragged shape.
// 0 <= begin_pos <= end_pos < shape.TotSize(axis).
void PrintRaggedShapePart(std::ostream &stream, const RaggedShape &shape,
int32_t axis, int32_t begin_pos, int32_t end_pos) {
K2_CHECK(axis >= 0 && axis < shape.NumAxes() && begin_pos >= 0 &&
begin_pos <= end_pos && end_pos <= shape.TotSize(axis));
for (int32_t d = begin_pos; d < end_pos; ++d) {
if (axis == shape.NumAxes() - 1) {
stream << "x ";
} else {
stream << "[ ";
const int32_t *row_splits = shape.RowSplits(axis + 1).Data();
K2_DCHECK(d < shape.RowSplits(axis + 1).Dim());
int32_t row_start = row_splits[d], row_end = row_splits[d + 1];
PrintRaggedShapePart(stream, shape, axis + 1, row_start, row_end);
stream << "] ";
}
}
}
// prints a RaggedShape as e.g. [ [ 0 1 ] [ 2 ] [] ]. Note, the 'values'
// are just the positions in the array, this is for readability.
std::ostream &operator<<(std::ostream &stream, const RaggedShape &shape) {
if (shape.Context()->GetDeviceType() != kCpu) {
return stream << shape.To(GetCpuContext());
} else {
bool print_warnings = false;
if (shape.Validate(print_warnings)) {
stream << "[ ";
PrintRaggedShapePart(stream, shape, 0, 0, shape.Dim0());
stream << "]";
return stream;
} else {
// For non-valid shapes, print the raw info.
stream << "Invalid RaggedShape: { ";
stream << " num-axes = " << shape.NumAxes();
for (int32_t i = 1; i < shape.NumAxes(); i++) {
const RaggedShapeDim &axis = shape.Axes()[i-1];
if (axis.row_splits.IsValid())
stream << " RowSplits(" << i << ")=" << axis.row_splits;
if (axis.row_ids.IsValid())
stream << "RowIds(" << i << ")=" << axis.row_ids;
stream << "cached_tot_size[" << i << "]=" << axis.cached_tot_size;
}
return stream << " }";
}
}
}
Array1<int32_t> &RaggedShape::RowIds(int32_t axis) {
NVTX_RANGE("RaggedShape::RowIds()");
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, NumAxes());
RaggedShapeDim &rsd = axes_[axis - 1];
auto &row_splits = rsd.row_splits;
auto &row_ids = rsd.row_ids;
// there must be row_splits.Dim() >=1 according to the definition of
// RaggedShapeDim.
K2_CHECK_GE(row_splits.Dim(), 1);
if (!row_ids.IsValid()) {
if (rsd.cached_tot_size < 0)
rsd.cached_tot_size = row_splits[row_splits.Dim() - 1];
// create row_ids as it does not exist
row_ids = Array1<int32_t>(Context(), rsd.cached_tot_size);
const int32_t *row_splits_data = row_splits.Data();
int32_t *row_ids_data = row_ids.Data();
RowSplitsToRowIds(Context(), row_splits.Dim() - 1, row_splits_data,
row_ids.Dim(), row_ids_data);
}
return row_ids;
}
int32_t RaggedShape::MaxSize(int32_t axis) {
NVTX_RANGE(__func__);
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, NumAxes());
const auto &row_splits = axes_[axis - 1].row_splits;
const int32_t num_rows = row_splits.Dim() - 1;
if (num_rows == 0) return 0;
const int32_t *row_splits_data = row_splits.Data();
ContextPtr c = Context();
if (c->GetDeviceType() == kCpu) {
int32_t max_value = 0;
for (int32_t i = 0; i < num_rows; ++i) {
int32_t value = row_splits_data[i + 1] - row_splits_data[i];
if (value > max_value) max_value = value;
}
return max_value;
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
::RowSplitsDiff row_splits_diff(row_splits_data);
Array1<int32_t> max_array(Context(), 1, 0);
int32_t *max_value = max_array.Data();
void *d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
// the first time is to determine temporary device storage requirements
K2_CUDA_SAFE_CALL(hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes,
row_splits_diff, max_value,
num_rows, c->GetCudaStream()));
void *deleter_context;
d_temp_storage = c->Allocate(temp_storage_bytes, &deleter_context);
K2_CUDA_SAFE_CALL(hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes,
row_splits_diff, max_value,
num_rows, c->GetCudaStream()));
c->Deallocate(d_temp_storage, deleter_context);
// this will convert to memory on CPU
return max_array[0];
}
}
RaggedShape RaggedShape::Index(int32_t axis, int32_t i,
int32_t *value_offset /*= nullptr*/) {
NVTX_RANGE(__func__);
// only support `axis == 0` for now
K2_CHECK_EQ(axis, 0);
K2_CHECK_GE(i, 0);
int32_t num_axes = NumAxes();
K2_CHECK_GE(num_axes, 2);
const auto &src_axes = Axes();
K2_CHECK_LT(i + 1, src_axes[0].row_splits.Dim());
if (i == 0 && Dim0() == 1) {
// Just remove first axis. Common case so we make it efficient.
std::vector<RaggedShapeDim> ans_axes(src_axes.begin() + 1, src_axes.end());
if (value_offset) *value_offset = 0;
return RaggedShape(ans_axes, false);
}
int32_t idx_begin = (i != 0 ? src_axes[0].row_splits[i] : 0),
idx_end = src_axes[0].row_splits[i + 1];
std::vector<RaggedShapeDim> axes(src_axes.size() - 1);
ContextPtr c = Context();
for (int32_t i = 2; i < num_axes; ++i) {
const Array1<int32_t> &src_row_splits = RowSplits(i),
&src_row_ids = RowIds(i);
int32_t idx_begin_next = (idx_begin != 0 ? src_row_splits[idx_begin] : 0),
idx_end_next = src_row_splits[idx_end];
axes[i - 2].row_splits =
src_row_splits.Range(idx_begin, idx_end - idx_begin + 1);
if (idx_begin_next != 0)
axes[i - 2].row_splits = Minus(axes[i - 2].row_splits, idx_begin_next);
axes[i - 2].row_ids =
src_row_ids.Range(idx_begin_next, idx_end_next - idx_begin_next);
if (idx_begin != 0)
axes[i - 2].row_ids = Minus(axes[i - 2].row_ids, idx_begin);
axes[i - 2].cached_tot_size = idx_end_next - idx_begin_next;
idx_begin = idx_begin_next;
idx_end = idx_end_next;
}
if (value_offset) *value_offset = idx_begin;
return RaggedShape(axes);
}
void RaggedShape::Populate() {
NVTX_RANGE(__func__);
int32_t num_axes = NumAxes();
ParallelRunner pr(this->Context());
for (int32_t i = 1; i < num_axes; ++i) {
With w(pr.NewStream());
// ignore return values of the following calls.
this->TotSize(i);
this->RowIds(i);
}
}
RaggedShape RaggedShape::To(ContextPtr ctx) const {
NVTX_RANGE(__func__);
if (ctx->IsCompatible(*Context())) return *this;
std::vector<RaggedShapeDim> axes(axes_.size());
int32_t num_axes = NumAxes();
for (int32_t i = 1; i < num_axes; ++i) {
axes[i - 1].row_splits = axes_[i - 1].row_splits.To(ctx);
// leave row_ids and cached_tot_size unset
axes[i - 1].cached_tot_size = -1;
}
return RaggedShape(axes);
}
RaggedShapeIndexIterator RaggedShape::Iterator() {
return RaggedShapeIndexIterator(*this);
}
int32_t RaggedShape::operator[](const std::vector<int32_t> &indexes) {
NVTX_RANGE("RaggedShape::op[](std::vector<int32>)");
K2_CHECK_EQ(static_cast<int32_t>(indexes.size()), NumAxes());
K2_CHECK_EQ(Context()->GetDeviceType(), kCpu);
int32_t cur_idx = indexes[0];
for (int32_t i = 1; i < NumAxes(); i++) {
Array1<int32_t> &row_splits = axes_[i - 1].row_splits;
K2_CHECK(cur_idx >= 0 && cur_idx + 1 < row_splits.Dim());
cur_idx = row_splits[cur_idx];
cur_idx += indexes[i];
}
return cur_idx;
}
int32_t RaggedShape::TotSize(int32_t axis) const {
NVTX_RANGE("RaggedShape::TotSize");
K2_CHECK_GE(axis, 0);
K2_CHECK_LT(axis, NumAxes());
if (axis == 0)
return Dim0();
else {
const RaggedShapeDim &rsd = axes_[axis - 1];
if (rsd.cached_tot_size >= 0) {
return rsd.cached_tot_size;
} else {
// if we had row_ids set up, we should have set cached_tot_size.
K2_CHECK_EQ(rsd.row_ids.Dim(), 0);
K2_CHECK_GT(rsd.row_splits.Dim(), 0);
const_cast<RaggedShapeDim &>(rsd).cached_tot_size = rsd.row_splits.Back();
return rsd.cached_tot_size;
}
}
}
// TODO(dan): change this so that on error it prints a warning if
// print_warnings==true, and then returns false.
bool RaggedShape::Validate(bool print_warnings) const {
NVTX_RANGE("RaggedShape::Validate");
ContextPtr c = Context();
int32_t num_axes = axes_.size();
ParallelRunner pr(c);
for (int32_t axis = 0; axis < num_axes; ++axis) {
With w(pr.NewStream());
const RaggedShapeDim &rsd = axes_[axis];
K2_CHECK_GE(rsd.row_splits.Dim(), 0);
if (rsd.cached_tot_size >= 0) {
if (!(rsd.row_splits.Dim() == 0 ||
rsd.cached_tot_size == rsd.row_splits.Back())) {
if (print_warnings)
K2_LOG(WARNING)
<< "Ragged shape validation failed, row_splits.Back()="
<< rsd.row_splits.Back() << " vs. cached-tot-size="
<< rsd.cached_tot_size;
return false;
}
if (!((rsd.row_ids.Dim() == 0 ||
rsd.cached_tot_size == rsd.row_ids.Dim()))) {
if (print_warnings)
K2_LOG(WARNING)
<< "Ragged shape validation failed, row_ids.Dim()="
<< rsd.row_ids.Dim() << " vs. cached-tot-size="
<< rsd.cached_tot_size;
return false;
}
} else {
if (rsd.cached_tot_size != -1 || rsd.row_ids.Dim() != 0) {
if (print_warnings)
K2_LOG(WARNING)
<< "Ragged shape validation failed, cached_tot_size="
<< rsd.cached_tot_size << ", row-ids.Dim()=" << rsd.row_ids.Dim();
return false;
}
}
int32_t num_elems;
// Check row_splits.
{
// meta[0] is a bool, ok == 1, not-ok == 0.
// meta[1] will contain the number of row_splits.
Array1<int32_t> meta(c, 2, 1);
int32_t *ok_data = meta.Data(), *num_elems_data = ok_data + 1;
const int32_t *row_splits_data = rsd.row_splits.Data();
int32_t num_rows = rsd.row_splits.Dim() - 1;
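// A valid row_splits array starts at 0 and is non-decreasing; its last entry
// is the total number of elements and is written to meta[1] by the lambda.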
auto lambda_check_row_splits =
[=] __host__ __device__(int32_t i) -> void {
int32_t this_idx = row_splits_data[i];
if (i == 0 && this_idx != 0) *ok_data = 0;
if (i < num_rows) {
int32_t next_idx = row_splits_data[i + 1];
if (next_idx < this_idx) *ok_data = 0;
} else {
K2_CHECK(i == num_rows);
*num_elems_data = this_idx;
}
};
Eval(c, num_rows + 1, lambda_check_row_splits);
meta = meta.To(GetCpuContext());
num_elems = meta[1];
int32_t ok = meta[0];
if (!ok) {
K2_LOG(FATAL) << "Problem validating row-splits: for axes_[" << axis
<< "], row_splits = " << rsd.row_splits;
}
if (rsd.cached_tot_size > 0 && rsd.cached_tot_size != num_elems) {
K2_LOG(FATAL) << "Problem validating row-splits: for axes_[" << axis
<< "], row_splits[-1] = " << num_elems
<< " but cached_tot_size == " << rsd.cached_tot_size;
}
}
if (axis + 1 < num_axes) {
int32_t next_num_rows = axes_[axis + 1].row_splits.Dim() - 1;
if (num_elems != next_num_rows) {
K2_LOG(FATAL) << "Ragged shape has num_elems for axes_[" << axis
<< "] == " << num_elems << " and num-rows for axes_["
<< (axis + 1) << "] == " << next_num_rows;
}
}
if (rsd.row_ids.Dim() != 0) { // check row_ids.
K2_CHECK(IsCompatible(rsd.row_ids, rsd.row_splits));
// 1st elem is `ok` (1 or 0); 2nd elem is location of bad index
// into row_splits
Array1<int32_t> meta(c, 2, 1);
int32_t *ok_data = meta.Data(), *bad_index_data = ok_data + 1;
const int32_t *row_splits_data = rsd.row_splits.Data(),
*row_ids_data = rsd.row_ids.Data();
int32_t num_elems_from_row_ids = rsd.row_ids.Dim(),
num_rows = rsd.row_splits.Dim() - 1;
K2_CHECK_EQ(num_elems, num_elems_from_row_ids);
auto lambda_check_row_ids = [=] __host__ __device__(int32_t i) -> void {
int32_t this_row = row_ids_data[i];
if (this_row < 0 || this_row >= num_rows ||
i < row_splits_data[this_row] ||
i >= row_splits_data[this_row + 1]) {
*ok_data = 0;
*bad_index_data = i;
}
};
// TODO: could do this and the other one in separate streams.
Eval(c, num_elems, lambda_check_row_ids);
meta = meta.To(GetCpuContext()); // since we have 2 accesses, this should
// be faster.
int32_t ok = meta[0];
if (!ok) {
K2_LOG(FATAL) << "Problem validating row-ids: for axes_[" << axis
<< "], row_splits = " << rsd.row_splits
<< ", row_ids = " << rsd.row_ids << ", see index "
<< meta[1] << " of row_ids, whose dim is "
<< rsd.row_ids.Dim();
}
}
if (axis + 1 < axes_.size()) {
K2_CHECK(IsCompatible(rsd.row_splits, axes_[axis + 1].row_splits));
}
}
return true;
}
bool Equal(RaggedShape &a, RaggedShape &b) {
NVTX_RANGE("Equal(RaggedShape)");
if (a.NumAxes() != b.NumAxes()) return false;
for (int32_t i = 1; i < a.NumAxes(); i++) {
if (a.RowSplits(i).Dim() != b.RowSplits(i).Dim() ||
!Equal(a.RowSplits(i), b.RowSplits(i)))
return false;
}
return true;
}
std::istream &operator>>(std::istream &is,
RaggedShape &shape) {
NVTX_RANGE("operator>>(RaggedShape)");
// Note: element 0 of 'row_splits' will end up being
// discarded; the others will become the axes of `shape`.
std::vector<std::vector<int32_t> > row_splits;
int32_t cur_level = 0,
num_elems = 0;
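// The parser tracks the bracket nesting level: '[' opens a level, 'x' counts
// one element at the innermost level, and ']' closes the current sublist by
// appending its end position to that level's row_splits.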
while (1) {
is >> std::ws; // eat whitespace
if (!is.good()) {
is.setstate(std::ios::failbit);
return is;
}
int c = is.get();
if (c == static_cast<int32_t>('[')) {
cur_level++;
while (row_splits.size() < static_cast<size_t>(cur_level)) {
if (num_elems != 0) {
is.setstate(std::ios::failbit);
return is;
}
row_splits.push_back(std::vector<int32_t>(1, 0));
}
} else if (c == static_cast<int32_t>(']')) {
cur_level--;
if (cur_level <= 0) { // Done; return...
if (cur_level < 0) { // ']' without '['.
is.setstate(std::ios::failbit);
return is;
}
row_splits.erase(row_splits.begin());
if (row_splits.empty()) {
// Assume 2 axes even though the num-axes is ambiguous from the input.
// row_splits is 0 0.
row_splits.push_back(std::vector<int32_t>(1, 0));
}
std::vector<RaggedShapeDim> axes(row_splits.size());
for (size_t i = 0; i < row_splits.size(); i++) {
axes[i].row_splits = Array1<int32_t>(GetCpuContext(), row_splits[i]);
axes[i].cached_tot_size = -1;
}
shape = RaggedShape(axes);
return is;
}
row_splits[cur_level].push_back(
(cur_level + 1 >= row_splits.size()) ?
num_elems : (row_splits[cur_level+1].size() - 1));
} else if (c == static_cast<int32_t>('x')) {
if (cur_level != static_cast<int32_t>(row_splits.size()) ||
cur_level < 2) {
is.setstate(std::ios::failbit);
return is;
}
num_elems++;
} else {
is.setstate(std::ios::failbit);
return is;
}
}
}
} // namespace k2
|
7566d6dec6226cbac3a7bf55803b4f7c10926ab5.cu
|
/**
* @brief
* ragged
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <cub/cub.cuh>
#include <vector>
#include "k2/csrc/array_ops.h"
#include "k2/csrc/math.h"
#include "k2/csrc/ragged.h"
namespace {
// will be used in RaggedShape::MaxSize(int32_t axis) to call
// cub::DeviceReduce::Max
struct RowSplitsDiff {
const int32_t *row_splits_data;
explicit RowSplitsDiff(const int32_t *row_splits)
: row_splits_data(row_splits) {}
// operator[] and operator+ are required by cub::DeviceReduce::Max
__device__ int32_t operator[](int32_t i) const {
return row_splits_data[i + 1] - row_splits_data[i];
}
__device__ RowSplitsDiff operator+(int32_t n) const {
RowSplitsDiff tmp(*this);
tmp.row_splits_data += n;
return tmp;
}
};
} // namespace
namespace std {
// value_type is required by cub::DeviceReduce::Max
template <>
struct iterator_traits<::RowSplitsDiff> {
typedef int32_t value_type;
};
} // namespace std
namespace k2 {
// Recursive function that prints (part of) a ragged shape.
// 0 <= begin_pos <= end_pos < shape.TotSize(axis).
void PrintRaggedShapePart(std::ostream &stream, const RaggedShape &shape,
int32_t axis, int32_t begin_pos, int32_t end_pos) {
K2_CHECK(axis >= 0 && axis < shape.NumAxes() && begin_pos >= 0 &&
begin_pos <= end_pos && end_pos <= shape.TotSize(axis));
for (int32_t d = begin_pos; d < end_pos; ++d) {
if (axis == shape.NumAxes() - 1) {
stream << "x ";
} else {
stream << "[ ";
const int32_t *row_splits = shape.RowSplits(axis + 1).Data();
K2_DCHECK(d < shape.RowSplits(axis + 1).Dim());
int32_t row_start = row_splits[d], row_end = row_splits[d + 1];
PrintRaggedShapePart(stream, shape, axis + 1, row_start, row_end);
stream << "] ";
}
}
}
// prints a RaggedShape as e.g. [ [ 0 1 ] [ 2 ] [] ]. Note, the 'values'
// are just the positions in the array, this is for readability.
std::ostream &operator<<(std::ostream &stream, const RaggedShape &shape) {
if (shape.Context()->GetDeviceType() != kCpu) {
return stream << shape.To(GetCpuContext());
} else {
bool print_warnings = false;
if (shape.Validate(print_warnings)) {
stream << "[ ";
PrintRaggedShapePart(stream, shape, 0, 0, shape.Dim0());
stream << "]";
return stream;
} else {
// For non-valid shapes, print the raw info.
stream << "Invalid RaggedShape: { ";
stream << " num-axes = " << shape.NumAxes();
for (int32_t i = 1; i < shape.NumAxes(); i++) {
const RaggedShapeDim &axis = shape.Axes()[i-1];
if (axis.row_splits.IsValid())
stream << " RowSplits(" << i << ")=" << axis.row_splits;
if (axis.row_ids.IsValid())
stream << "RowIds(" << i << ")=" << axis.row_ids;
stream << "cached_tot_size[" << i << "]=" << axis.cached_tot_size;
}
return stream << " }";
}
}
}
Array1<int32_t> &RaggedShape::RowIds(int32_t axis) {
NVTX_RANGE("RaggedShape::RowIds()");
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, NumAxes());
RaggedShapeDim &rsd = axes_[axis - 1];
auto &row_splits = rsd.row_splits;
auto &row_ids = rsd.row_ids;
// there must be row_splits.Dim() >=1 according to the definition of
// RaggedShapeDim.
K2_CHECK_GE(row_splits.Dim(), 1);
if (!row_ids.IsValid()) {
if (rsd.cached_tot_size < 0)
rsd.cached_tot_size = row_splits[row_splits.Dim() - 1];
// create row_ids as it does not exist
row_ids = Array1<int32_t>(Context(), rsd.cached_tot_size);
const int32_t *row_splits_data = row_splits.Data();
int32_t *row_ids_data = row_ids.Data();
RowSplitsToRowIds(Context(), row_splits.Dim() - 1, row_splits_data,
row_ids.Dim(), row_ids_data);
}
return row_ids;
}
int32_t RaggedShape::MaxSize(int32_t axis) {
NVTX_RANGE(__func__);
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, NumAxes());
const auto &row_splits = axes_[axis - 1].row_splits;
const int32_t num_rows = row_splits.Dim() - 1;
if (num_rows == 0) return 0;
const int32_t *row_splits_data = row_splits.Data();
ContextPtr c = Context();
if (c->GetDeviceType() == kCpu) {
int32_t max_value = 0;
for (int32_t i = 0; i < num_rows; ++i) {
int32_t value = row_splits_data[i + 1] - row_splits_data[i];
if (value > max_value) max_value = value;
}
return max_value;
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
::RowSplitsDiff row_splits_diff(row_splits_data);
Array1<int32_t> max_array(Context(), 1, 0);
int32_t *max_value = max_array.Data();
void *d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
// the first time is to determine temporary device storage requirements
K2_CUDA_SAFE_CALL(cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes,
row_splits_diff, max_value,
num_rows, c->GetCudaStream()));
void *deleter_context;
d_temp_storage = c->Allocate(temp_storage_bytes, &deleter_context);
K2_CUDA_SAFE_CALL(cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes,
row_splits_diff, max_value,
num_rows, c->GetCudaStream()));
c->Deallocate(d_temp_storage, deleter_context);
// this will convert to memory on CPU
return max_array[0];
}
}
RaggedShape RaggedShape::Index(int32_t axis, int32_t i,
int32_t *value_offset /*= nullptr*/) {
NVTX_RANGE(__func__);
// only support `axis == 0` for now
K2_CHECK_EQ(axis, 0);
K2_CHECK_GE(i, 0);
int32_t num_axes = NumAxes();
K2_CHECK_GE(num_axes, 2);
const auto &src_axes = Axes();
K2_CHECK_LT(i + 1, src_axes[0].row_splits.Dim());
if (i == 0 && Dim0() == 1) {
// Just remove first axis. Common case so we make it efficient.
std::vector<RaggedShapeDim> ans_axes(src_axes.begin() + 1, src_axes.end());
if (value_offset) *value_offset = 0;
return RaggedShape(ans_axes, false);
}
int32_t idx_begin = (i != 0 ? src_axes[0].row_splits[i] : 0),
idx_end = src_axes[0].row_splits[i + 1];
std::vector<RaggedShapeDim> axes(src_axes.size() - 1);
ContextPtr c = Context();
for (int32_t i = 2; i < num_axes; ++i) {
const Array1<int32_t> &src_row_splits = RowSplits(i),
&src_row_ids = RowIds(i);
int32_t idx_begin_next = (idx_begin != 0 ? src_row_splits[idx_begin] : 0),
idx_end_next = src_row_splits[idx_end];
axes[i - 2].row_splits =
src_row_splits.Range(idx_begin, idx_end - idx_begin + 1);
if (idx_begin_next != 0)
axes[i - 2].row_splits = Minus(axes[i - 2].row_splits, idx_begin_next);
axes[i - 2].row_ids =
src_row_ids.Range(idx_begin_next, idx_end_next - idx_begin_next);
if (idx_begin != 0)
axes[i - 2].row_ids = Minus(axes[i - 2].row_ids, idx_begin);
axes[i - 2].cached_tot_size = idx_end_next - idx_begin_next;
idx_begin = idx_begin_next;
idx_end = idx_end_next;
}
if (value_offset) *value_offset = idx_begin;
return RaggedShape(axes);
}
void RaggedShape::Populate() {
NVTX_RANGE(__func__);
int32_t num_axes = NumAxes();
ParallelRunner pr(this->Context());
for (int32_t i = 1; i < num_axes; ++i) {
With w(pr.NewStream());
// ignore return values of the following calls.
this->TotSize(i);
this->RowIds(i);
}
}
RaggedShape RaggedShape::To(ContextPtr ctx) const {
NVTX_RANGE(__func__);
if (ctx->IsCompatible(*Context())) return *this;
std::vector<RaggedShapeDim> axes(axes_.size());
int32_t num_axes = NumAxes();
for (int32_t i = 1; i < num_axes; ++i) {
axes[i - 1].row_splits = axes_[i - 1].row_splits.To(ctx);
// leave row_ids and cached_tot_size unset
axes[i - 1].cached_tot_size = -1;
}
return RaggedShape(axes);
}
RaggedShapeIndexIterator RaggedShape::Iterator() {
return RaggedShapeIndexIterator(*this);
}
int32_t RaggedShape::operator[](const std::vector<int32_t> &indexes) {
NVTX_RANGE("RaggedShape::op[](std::vector<int32>)");
K2_CHECK_EQ(static_cast<int32_t>(indexes.size()), NumAxes());
K2_CHECK_EQ(Context()->GetDeviceType(), kCpu);
int32_t cur_idx = indexes[0];
for (int32_t i = 1; i < NumAxes(); i++) {
Array1<int32_t> &row_splits = axes_[i - 1].row_splits;
K2_CHECK(cur_idx >= 0 && cur_idx + 1 < row_splits.Dim());
cur_idx = row_splits[cur_idx];
cur_idx += indexes[i];
}
return cur_idx;
}
int32_t RaggedShape::TotSize(int32_t axis) const {
NVTX_RANGE("RaggedShape::TotSize");
K2_CHECK_GE(axis, 0);
K2_CHECK_LT(axis, NumAxes());
if (axis == 0)
return Dim0();
else {
const RaggedShapeDim &rsd = axes_[axis - 1];
if (rsd.cached_tot_size >= 0) {
return rsd.cached_tot_size;
} else {
// if we had row_ids set up, we should have set cached_tot_size.
K2_CHECK_EQ(rsd.row_ids.Dim(), 0);
K2_CHECK_GT(rsd.row_splits.Dim(), 0);
const_cast<RaggedShapeDim &>(rsd).cached_tot_size = rsd.row_splits.Back();
return rsd.cached_tot_size;
}
}
}
// TODO(dan): change this so that on error it prints a warning if
// print_warnings==true, and then returns false.
bool RaggedShape::Validate(bool print_warnings) const {
NVTX_RANGE("RaggedShape::Validate");
ContextPtr c = Context();
int32_t num_axes = axes_.size();
ParallelRunner pr(c);
for (int32_t axis = 0; axis < num_axes; ++axis) {
With w(pr.NewStream());
const RaggedShapeDim &rsd = axes_[axis];
K2_CHECK_GE(rsd.row_splits.Dim(), 0);
if (rsd.cached_tot_size >= 0) {
if (!(rsd.row_splits.Dim() == 0 ||
rsd.cached_tot_size == rsd.row_splits.Back())) {
if (print_warnings)
K2_LOG(WARNING)
<< "Ragged shape validation failed, row_splits.Back()="
<< rsd.row_splits.Back() << " vs. cached-tot-size="
<< rsd.cached_tot_size;
return false;
}
if (!((rsd.row_ids.Dim() == 0 ||
rsd.cached_tot_size == rsd.row_ids.Dim()))) {
if (print_warnings)
K2_LOG(WARNING)
<< "Ragged shape validation failed, row_ids.Dim()="
<< rsd.row_ids.Dim() << " vs. cached-tot-size="
<< rsd.cached_tot_size;
return false;
}
} else {
if (rsd.cached_tot_size != -1 || rsd.row_ids.Dim() != 0) {
if (print_warnings)
K2_LOG(WARNING)
<< "Ragged shape validation failed, cached_tot_size="
<< rsd.cached_tot_size << ", row-ids.Dim()=" << rsd.row_ids.Dim();
return false;
}
}
int32_t num_elems;
// Check row_splits.
{
// meta[0] is a bool, ok == 1, not-ok == 0.
// meta[1] will contain the number of row_splits.
Array1<int32_t> meta(c, 2, 1);
int32_t *ok_data = meta.Data(), *num_elems_data = ok_data + 1;
const int32_t *row_splits_data = rsd.row_splits.Data();
int32_t num_rows = rsd.row_splits.Dim() - 1;
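// A valid row_splits array starts at 0 and is non-decreasing; its last entry
// is the total number of elements and is written to meta[1] by the lambda.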
auto lambda_check_row_splits =
[=] __host__ __device__(int32_t i) -> void {
int32_t this_idx = row_splits_data[i];
if (i == 0 && this_idx != 0) *ok_data = 0;
if (i < num_rows) {
int32_t next_idx = row_splits_data[i + 1];
if (next_idx < this_idx) *ok_data = 0;
} else {
K2_CHECK(i == num_rows);
*num_elems_data = this_idx;
}
};
Eval(c, num_rows + 1, lambda_check_row_splits);
meta = meta.To(GetCpuContext());
num_elems = meta[1];
int32_t ok = meta[0];
if (!ok) {
K2_LOG(FATAL) << "Problem validating row-splits: for axes_[" << axis
<< "], row_splits = " << rsd.row_splits;
}
if (rsd.cached_tot_size > 0 && rsd.cached_tot_size != num_elems) {
K2_LOG(FATAL) << "Problem validating row-splits: for axes_[" << axis
<< "], row_splits[-1] = " << num_elems
<< " but cached_tot_size == " << rsd.cached_tot_size;
}
}
if (axis + 1 < num_axes) {
int32_t next_num_rows = axes_[axis + 1].row_splits.Dim() - 1;
if (num_elems != next_num_rows) {
K2_LOG(FATAL) << "Ragged shape has num_elems for axes_[" << axis
<< "] == " << num_elems << " and num-rows for axes_["
<< (axis + 1) << "] == " << next_num_rows;
}
}
if (rsd.row_ids.Dim() != 0) { // check row_ids.
K2_CHECK(IsCompatible(rsd.row_ids, rsd.row_splits));
// 1st elem is `ok` (1 or 0); 2nd elem is location of bad index
// into row_splits
Array1<int32_t> meta(c, 2, 1);
int32_t *ok_data = meta.Data(), *bad_index_data = ok_data + 1;
const int32_t *row_splits_data = rsd.row_splits.Data(),
*row_ids_data = rsd.row_ids.Data();
int32_t num_elems_from_row_ids = rsd.row_ids.Dim(),
num_rows = rsd.row_splits.Dim() - 1;
K2_CHECK_EQ(num_elems, num_elems_from_row_ids);
auto lambda_check_row_ids = [=] __host__ __device__(int32_t i) -> void {
int32_t this_row = row_ids_data[i];
if (this_row < 0 || this_row >= num_rows ||
i < row_splits_data[this_row] ||
i >= row_splits_data[this_row + 1]) {
*ok_data = 0;
*bad_index_data = i;
}
};
// TODO: could do this and the other one in separate streams.
Eval(c, num_elems, lambda_check_row_ids);
meta = meta.To(GetCpuContext()); // since we have 2 accesses, this should
// be faster.
int32_t ok = meta[0];
if (!ok) {
K2_LOG(FATAL) << "Problem validating row-ids: for axes_[" << axis
<< "], row_splits = " << rsd.row_splits
<< ", row_ids = " << rsd.row_ids << ", see index "
<< meta[1] << " of row_ids, whose dim is "
<< rsd.row_ids.Dim();
}
}
if (axis + 1 < axes_.size()) {
K2_CHECK(IsCompatible(rsd.row_splits, axes_[axis + 1].row_splits));
}
}
return true;
}
bool Equal(RaggedShape &a, RaggedShape &b) {
NVTX_RANGE("Equal(RaggedShape)");
if (a.NumAxes() != b.NumAxes()) return false;
for (int32_t i = 1; i < a.NumAxes(); i++) {
if (a.RowSplits(i).Dim() != b.RowSplits(i).Dim() ||
!Equal(a.RowSplits(i), b.RowSplits(i)))
return false;
}
return true;
}
std::istream &operator>>(std::istream &is,
RaggedShape &shape) {
NVTX_RANGE("operator>>(RaggedShape)");
// Note: element 0 of 'row_splits' will end up being
// discarded; the others will become the axes of `shape`.
std::vector<std::vector<int32_t> > row_splits;
int32_t cur_level = 0,
num_elems = 0;
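// The parser tracks the bracket nesting level: '[' opens a level, 'x' counts
// one element at the innermost level, and ']' closes the current sublist by
// appending its end position to that level's row_splits.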
while (1) {
is >> std::ws; // eat whitespace
if (!is.good()) {
is.setstate(std::ios::failbit);
return is;
}
int c = is.get();
if (c == static_cast<int32_t>('[')) {
cur_level++;
while (row_splits.size() < static_cast<size_t>(cur_level)) {
if (num_elems != 0) {
is.setstate(std::ios::failbit);
return is;
}
row_splits.push_back(std::vector<int32_t>(1, 0));
}
} else if (c == static_cast<int32_t>(']')) {
cur_level--;
if (cur_level <= 0) { // Done; return...
if (cur_level < 0) { // ']' without '['.
is.setstate(std::ios::failbit);
return is;
}
row_splits.erase(row_splits.begin());
if (row_splits.empty()) {
// Assume 2 axes even though the num-axes is ambiguous from the input.
// row_splits is 0 0.
row_splits.push_back(std::vector<int32_t>(1, 0));
}
std::vector<RaggedShapeDim> axes(row_splits.size());
for (size_t i = 0; i < row_splits.size(); i++) {
axes[i].row_splits = Array1<int32_t>(GetCpuContext(), row_splits[i]);
axes[i].cached_tot_size = -1;
}
shape = RaggedShape(axes);
return is;
}
row_splits[cur_level].push_back(
(cur_level + 1 >= row_splits.size()) ?
num_elems : (row_splits[cur_level+1].size() - 1));
} else if (c == static_cast<int32_t>('x')) {
if (cur_level != static_cast<int32_t>(row_splits.size()) ||
cur_level < 2) {
is.setstate(std::ios::failbit);
return is;
}
num_elems++;
} else {
is.setstate(std::ios::failbit);
return is;
}
}
}
} // namespace k2
|
398062345898075b57c8e9a45e69ad62a594a8e6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Architektura procesoru (ACH 2018)
* Projekt c.2 (CUDA)
* Login: xpawlu00
*/
#include <cmath>
#include <cfloat>
#include "nbody.h"
__global__ void calculate_gravitation_velocity(t_particles p, t_velocities tmp_vel, int N, float dt)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N)
return;
float r;
float4 d;
float4 p1_pos = p.pos[idx];
float4 vel = {0.0f, 0.0f, 0.0f};
for (int i = 0; i < N; i++) {
if (i == idx)
continue;
float4 p2_pos = p.pos[i];
d.x = p2_pos.x - p1_pos.x;
d.y = p2_pos.y - p1_pos.y;
d.z = p2_pos.z - p1_pos.z;
r = sqrt(d.x*d.x + d.y*d.y + d.z*d.z);
if (r > COLLISION_DISTANCE) {
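// Accumulate the velocity change dv = G * m2 / r^3 * d * dt, i.e. the
// gravitational acceleration toward particle i (whose mass is p.vel[i].w)
// integrated over the time step.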
float p2_weight = p.vel[i].w;
vel.x += (G * p2_weight) / pow(r, 3) * (d.x) * dt;
vel.y += (G * p2_weight) / pow(r, 3) * (d.y) * dt;
vel.z += (G * p2_weight) / pow(r, 3) * (d.z) * dt;
}
}
tmp_vel.vel[idx] = vel;
}
__global__ void calculate_collision_velocity(t_particles p, t_velocities tmp_vel, int N, float dt)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N)
return;
float4 d;
float r;
float4 p1_pos = p.pos[idx];
float4 tmp_vel_local = tmp_vel.vel[idx];
float4 p1_vel = p.vel[idx];
float weight1 = p1_vel.w;
for (int i = 0; i < N; i++) {
if (i == idx)
continue;
float4 p2_pos = p.pos[i];
d.x = p2_pos.x - p1_pos.x;
d.y = p2_pos.y - p1_pos.y;
d.z = p2_pos.z - p1_pos.z;
float4 p2_vel = p.vel[i];
float weight2 = p2_vel.w;
r = sqrt(d.x*d.x + d.y*d.y + d.z*d.z);
if (r > 0.0f && r < COLLISION_DISTANCE) {
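// Per-component 1-D elastic collision: v1' = ((m1 - m2)*v1 + 2*m2*v2) / (m1 + m2),
// expressed here with weight = m1/m2; only the change (v1' - v1) is accumulated
// and later applied in update_particle().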
float weight = weight1 / weight2;
tmp_vel_local.x += (((weight - 1) * p1_vel.x + 2 * p2_vel.x)/(1 + weight) - p1_vel.x);
tmp_vel_local.y += (((weight - 1) * p1_vel.y + 2 * p2_vel.y)/(1 + weight) - p1_vel.y);
tmp_vel_local.z += (((weight - 1) * p1_vel.z + 2 * p2_vel.z)/(1 + weight) - p1_vel.z);
}
}
tmp_vel.vel[idx] = tmp_vel_local;
}
__global__ void update_particle(t_particles p, t_velocities tmp_vel, int N, float dt)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N)
return;
p.vel[idx].x += tmp_vel.vel[idx].x;
p.vel[idx].y += tmp_vel.vel[idx].y;
p.vel[idx].z += tmp_vel.vel[idx].z;
p.pos[idx].x += p.vel[idx].x * dt;
p.pos[idx].y += p.vel[idx].y * dt;
p.pos[idx].z += p.vel[idx].z * dt;
}
__host__ void particles_read(FILE *fp, t_particles &p, int N)
{
for (int i = 0; i < N; i++) {
fscanf(fp, "%f %f %f %f %f %f %f \n",
&p.pos[i].x, &p.pos[i].y, &p.pos[i].z,
&p.vel[i].x, &p.vel[i].y, &p.vel[i].z, &p.vel[i].w);
}
}
__host__ void particles_write(FILE *fp, t_particles &p, int N)
{
for (int i = 0; i < N; i++) {
fprintf(fp, "%f %f %f %f %f %f %f \n",
p.pos[i].x, p.pos[i].y, p.pos[i].z,
p.vel[i].x, p.vel[i].y, p.vel[i].z, p.vel[i].w);
}
}
|
398062345898075b57c8e9a45e69ad62a594a8e6.cu
|
/*
* Architektura procesoru (ACH 2018)
* Projekt c.2 (CUDA)
* Login: xpawlu00
*/
#include <cmath>
#include <cfloat>
#include "nbody.h"
__global__ void calculate_gravitation_velocity(t_particles p, t_velocities tmp_vel, int N, float dt)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N)
return;
float r;
float4 d;
float4 p1_pos = p.pos[idx];
float4 vel = {0.0f, 0.0f, 0.0f};
for (int i = 0; i < N; i++) {
if (i == idx)
continue;
float4 p2_pos = p.pos[i];
d.x = p2_pos.x - p1_pos.x;
d.y = p2_pos.y - p1_pos.y;
d.z = p2_pos.z - p1_pos.z;
r = sqrt(d.x*d.x + d.y*d.y + d.z*d.z);
if (r > COLLISION_DISTANCE) {
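// Accumulate the velocity change dv = G * m2 / r^3 * d * dt, i.e. the
// gravitational acceleration toward particle i (whose mass is p.vel[i].w)
// integrated over the time step.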
float p2_weight = p.vel[i].w;
vel.x += (G * p2_weight) / pow(r, 3) * (d.x) * dt;
vel.y += (G * p2_weight) / pow(r, 3) * (d.y) * dt;
vel.z += (G * p2_weight) / pow(r, 3) * (d.z) * dt;
}
}
tmp_vel.vel[idx] = vel;
}
__global__ void calculate_collision_velocity(t_particles p, t_velocities tmp_vel, int N, float dt)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N)
return;
float4 d;
float r;
float4 p1_pos = p.pos[idx];
float4 tmp_vel_local = tmp_vel.vel[idx];
float4 p1_vel = p.vel[idx];
float weight1 = p1_vel.w;
for (int i = 0; i < N; i++) {
if (i == idx)
continue;
float4 p2_pos = p.pos[i];
d.x = p2_pos.x - p1_pos.x;
d.y = p2_pos.y - p1_pos.y;
d.z = p2_pos.z - p1_pos.z;
float4 p2_vel = p.vel[i];
float weight2 = p2_vel.w;
r = sqrt(d.x*d.x + d.y*d.y + d.z*d.z);
if (r > 0.0f && r < COLLISION_DISTANCE) {
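// Per-component 1-D elastic collision: v1' = ((m1 - m2)*v1 + 2*m2*v2) / (m1 + m2),
// expressed here with weight = m1/m2; only the change (v1' - v1) is accumulated
// and later applied in update_particle().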
float weight = weight1 / weight2;
tmp_vel_local.x += (((weight - 1) * p1_vel.x + 2 * p2_vel.x)/(1 + weight) - p1_vel.x);
tmp_vel_local.y += (((weight - 1) * p1_vel.y + 2 * p2_vel.y)/(1 + weight) - p1_vel.y);
tmp_vel_local.z += (((weight - 1) * p1_vel.z + 2 * p2_vel.z)/(1 + weight) - p1_vel.z);
}
}
tmp_vel.vel[idx] = tmp_vel_local;
}
__global__ void update_particle(t_particles p, t_velocities tmp_vel, int N, float dt)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N)
return;
p.vel[idx].x += tmp_vel.vel[idx].x;
p.vel[idx].y += tmp_vel.vel[idx].y;
p.vel[idx].z += tmp_vel.vel[idx].z;
p.pos[idx].x += p.vel[idx].x * dt;
p.pos[idx].y += p.vel[idx].y * dt;
p.pos[idx].z += p.vel[idx].z * dt;
}
__host__ void particles_read(FILE *fp, t_particles &p, int N)
{
for (int i = 0; i < N; i++) {
fscanf(fp, "%f %f %f %f %f %f %f \n",
&p.pos[i].x, &p.pos[i].y, &p.pos[i].z,
&p.vel[i].x, &p.vel[i].y, &p.vel[i].z, &p.vel[i].w);
}
}
__host__ void particles_write(FILE *fp, t_particles &p, int N)
{
for (int i = 0; i < N; i++) {
fprintf(fp, "%f %f %f %f %f %f %f \n",
p.pos[i].x, p.pos[i].y, p.pos[i].z,
p.vel[i].x, p.vel[i].y, p.vel[i].z, p.vel[i].w);
}
}
|
0d8b885ca8fb50e62477512a5d6d6a893b1237b9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../matrice.h"
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#define DIM_PORTION 32
#define LIGNES_BLOC 8
// Code GPU
__global__ void transpose_device(const float *input, float *output, int n)
{
// One extra column of padding avoids shared-memory bank conflicts on the
// transposed reads below.
__shared__ float matrice_shared[DIM_PORTION][DIM_PORTION + 1];
// Load one DIM_PORTION x DIM_PORTION tile, LIGNES_BLOC rows per iteration.
int x_matrice = blockIdx.x * DIM_PORTION + threadIdx.x;
int y_matrice = blockIdx.y * DIM_PORTION + threadIdx.y;
for (int j = 0; j < DIM_PORTION; j += LIGNES_BLOC)
{
if (x_matrice < n && (y_matrice + j) < n)
{
matrice_shared[threadIdx.y + j][threadIdx.x] = input[(y_matrice + j) * n + x_matrice];
}
}
__syncthreads();
// Write the tile back transposed: swap the block indices and read the
// shared tile with the thread indices exchanged.
x_matrice = blockIdx.y * DIM_PORTION + threadIdx.x;
y_matrice = blockIdx.x * DIM_PORTION + threadIdx.y;
for (int j = 0; j < DIM_PORTION; j += LIGNES_BLOC)
{
if (x_matrice < n && (y_matrice + j) < n)
{
output[(y_matrice + j) * n + x_matrice] = matrice_shared[threadIdx.x][threadIdx.y + j];
}
}
}
int main(int argc, char **argv)
{
int n(0);
bool affiche(false);
user_input(affiche,n,argc,argv);
size_t size = n * n * sizeof(float);
// Matrices CPU
float *h_A = nullptr, *h_B = nullptr;
// Matrices GPU
float *d_A = nullptr, *d_B = nullptr;
// Allocate the matrices in CPU memory
h_A = new float[n * n];
h_B = new float[n * n];
// Allocate the matrices in GPU memory
checkCudaErrors(hipMalloc((void **)&d_A, size));
checkCudaErrors(hipMalloc((void **)&d_B, size));
// Initialize matrix A
srand(time(NULL));
genmat(h_A, n);
// Copy matrix A to GPU memory
checkCudaErrors(hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice));
// Define the block and grid sizes
dim3 threadsPerBlock(DIM_PORTION, LIGNES_BLOC);
dim3 numBlocks(ceil(n / (float)threadsPerBlock.x), ceil(n / (float)threadsPerBlock.x));
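// One block per DIM_PORTION x DIM_PORTION tile; each 32x8 thread block covers
// its tile LIGNES_BLOC rows at a time inside the kernel.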
std::cout << "bx: " << numBlocks.x << " by: " << numBlocks.y << "\n";
hipLaunchKernelGGL(( transpose_device), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_A, d_B, n);
checkCudaErrors(hipPeekAtLastError());
checkCudaErrors(hipDeviceSynchronize());
// Copy the result back to the host
checkCudaErrors(hipMemcpy(h_B, d_B, size, hipMemcpyDeviceToHost));
printf("Erreur max: %e\n", verify(h_A, h_B, n));
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
const int nb = 10;
checkCudaErrors(hipEventRecord(start, 0));
for (int i = 0; i < nb; i++)
hipLaunchKernelGGL(( transpose_device), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_A, d_B, n);
checkCudaErrors(hipEventRecord(stop, 0));
checkCudaErrors(hipEventSynchronize(stop));
float t_ms;
checkCudaErrors(hipEventElapsedTime(&t_ms, start, stop));
t_ms /= nb;
t_ms /= 1000;
float octets_echanges(2 * size / pow(10, 9));
affichage_resultats_du_kernel(h_A, h_B, n, t_ms, octets_echanges, affiche);
free_gpu(d_A);
free_gpu(d_B);
// Free the CPU memory
free_cpu(h_A);
free_cpu(h_B);
return 0;
}
|
0d8b885ca8fb50e62477512a5d6d6a893b1237b9.cu
|
#include "../matrice.h"
#include <cuda_runtime.h>
#include <helper_cuda.h>
#define DIM_PORTION 32
#define LIGNES_BLOC 8
// Code GPU
__global__ void transpose_device(const float *input, float *output, int n)
{
// One extra column of padding avoids shared-memory bank conflicts on the
// transposed reads below.
__shared__ float matrice_shared[DIM_PORTION][DIM_PORTION + 1];
// Load one DIM_PORTION x DIM_PORTION tile, LIGNES_BLOC rows per iteration.
int x_matrice = blockIdx.x * DIM_PORTION + threadIdx.x;
int y_matrice = blockIdx.y * DIM_PORTION + threadIdx.y;
for (int j = 0; j < DIM_PORTION; j += LIGNES_BLOC)
{
if (x_matrice < n && (y_matrice + j) < n)
{
matrice_shared[threadIdx.y + j][threadIdx.x] = input[(y_matrice + j) * n + x_matrice];
}
}
__syncthreads();
// Write the tile back transposed: swap the block indices and read the
// shared tile with the thread indices exchanged.
x_matrice = blockIdx.y * DIM_PORTION + threadIdx.x;
y_matrice = blockIdx.x * DIM_PORTION + threadIdx.y;
for (int j = 0; j < DIM_PORTION; j += LIGNES_BLOC)
{
if (x_matrice < n && (y_matrice + j) < n)
{
output[(y_matrice + j) * n + x_matrice] = matrice_shared[threadIdx.x][threadIdx.y + j];
}
}
}
int main(int argc, char **argv)
{
int n(0);
bool affiche(false);
user_input(affiche,n,argc,argv);
size_t size = n * n * sizeof(float);
// Matrices CPU
float *h_A = nullptr, *h_B = nullptr;
// Matrices GPU
float *d_A = nullptr, *d_B = nullptr;
// Allocate the matrices in CPU memory
h_A = new float[n * n];
h_B = new float[n * n];
// Allocate the matrices in GPU memory
checkCudaErrors(cudaMalloc((void **)&d_A, size));
checkCudaErrors(cudaMalloc((void **)&d_B, size));
// Initialize matrix A
srand(time(NULL));
genmat(h_A, n);
// Copy matrix A to GPU memory
checkCudaErrors(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice));
// Define the block and grid sizes
dim3 threadsPerBlock(DIM_PORTION, LIGNES_BLOC);
dim3 numBlocks(ceil(n / (float)threadsPerBlock.x), ceil(n / (float)threadsPerBlock.x));
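// One block per DIM_PORTION x DIM_PORTION tile; each 32x8 thread block covers
// its tile LIGNES_BLOC rows at a time inside the kernel.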
std::cout << "bx: " << numBlocks.x << " by: " << numBlocks.y << "\n";
transpose_device<<<numBlocks, threadsPerBlock>>>(d_A, d_B, n);
checkCudaErrors(cudaPeekAtLastError());
checkCudaErrors(cudaDeviceSynchronize());
// Copy the result back to the host
checkCudaErrors(cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost));
printf("Erreur max: %e\n", verify(h_A, h_B, n));
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
const int nb = 10;
checkCudaErrors(cudaEventRecord(start, 0));
for (int i = 0; i < nb; i++)
transpose_device<<<numBlocks, threadsPerBlock>>>(d_A, d_B, n);
checkCudaErrors(cudaEventRecord(stop, 0));
checkCudaErrors(cudaEventSynchronize(stop));
float t_ms;
checkCudaErrors(cudaEventElapsedTime(&t_ms, start, stop));
t_ms /= nb;
t_ms /= 1000;
float octets_echanges(2 * size / pow(10, 9));
affichage_resultats_du_kernel(h_A, h_B, n, t_ms, octets_echanges, affiche);
free_gpu(d_A);
free_gpu(d_B);
// Free the CPU memory
free_cpu(h_A);
free_cpu(h_B);
return 0;
}
|
5b5ec3add1adf0c1c780c05a64dae753704eb245.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// This file contains C wrappers around the some of the CUDA API and the
// kernel functions so that they can be called from "particleSystem.cpp"
#include <cutil_inline.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
#if defined(__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#endif
#include <cuda_gl_interop.h>
#include "particles_kernel.cu"
extern "C"
{
void cudaInit(int argc, char **argv)
{
int devID;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) {
devID = cutilDeviceInit(argc, argv);
if (devID < 0) {
printf("exiting...\n");
cutilExit(argc, argv);
exit(0);
}
} else {
devID = cutGetMaxGflopsDeviceId();
hipSetDevice( devID );
}
}
void cudaGLInit(int argc, char **argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) {
cutilDeviceInit(argc, argv);
} else {
hipGLSetGLDevice( cutGetMaxGflopsDeviceId() );
}
}
void allocateArray(void **devPtr, size_t size)
{
cutilSafeCall(hipMalloc(devPtr, size));
}
void freeArray(void *devPtr)
{
cutilSafeCall(hipFree(devPtr));
}
void threadSync()
{
cutilSafeCall(hipDeviceSynchronize());
}
void copyArrayToDevice(void* device, const void* host, int offset, int size)
{
cutilSafeCall(hipMemcpy((char *) device + offset, host, size, hipMemcpyHostToDevice));
}
void registerGLBufferObject(uint vbo, struct cudaGraphicsResource **cuda_vbo_resource)
{
cutilSafeCall(hipGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo,
hipGraphicsMapFlagsNone));
}
void unregisterGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
cutilSafeCall(hipGraphicsUnregisterResource(cuda_vbo_resource));
}
void *mapGLBufferObject(struct cudaGraphicsResource **cuda_vbo_resource)
{
void *ptr;
cutilSafeCall(hipGraphicsMapResources(1, cuda_vbo_resource, 0));
size_t num_bytes;
cutilSafeCall(hipGraphicsResourceGetMappedPointer((void **)&ptr, &num_bytes,
*cuda_vbo_resource));
return ptr;
}
void unmapGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
cutilSafeCall(hipGraphicsUnmapResources(1, &cuda_vbo_resource, 0));
}
void copyArrayFromDevice(void* host, const void* device,
struct cudaGraphicsResource **cuda_vbo_resource, int size)
{
if (cuda_vbo_resource)
device = mapGLBufferObject(cuda_vbo_resource);
cutilSafeCall(hipMemcpy(host, device, size, hipMemcpyDeviceToHost));
if (cuda_vbo_resource)
unmapGLBufferObject(*cuda_vbo_resource);
}
void setParameters(SimParams *hostParams)
{
// copy parameters to constant memory
cutilSafeCall( hipMemcpyToSymbol(params, hostParams, sizeof(SimParams)) );
}
//Round a / b to nearest higher integer value
uint iDivUp(uint a, uint b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
// compute grid and thread block size for a given number of elements
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = iDivUp(n, numThreads);
}
void integrateSystem(float *pos,
float *vel,
float deltaTime,
uint numParticles)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// execute the kernel
hipLaunchKernelGGL(( integrate), dim3(numBlocks), dim3(numThreads) , 0, 0, (float4*)pos,
(float4*)vel,
deltaTime,
numParticles);
// check if kernel invocation generated an error
cutilCheckMsg("integrate kernel execution failed");
}
void calcHash(uint* gridParticleHash,
uint* gridParticleIndex,
float* pos,
int numParticles)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// execute the kernel
hipLaunchKernelGGL(( calcHashD), dim3(numBlocks), dim3(numThreads) , 0, 0, gridParticleHash,
gridParticleIndex,
(float4 *) pos,
numParticles);
// check if kernel invocation generated an error
cutilCheckMsg("Kernel execution failed");
}
void reorderDataAndFindCellStart(uint* cellStart,
uint* cellEnd,
float* sortedPos,
float* sortedVel,
uint* gridParticleHash,
uint* gridParticleIndex,
float* oldPos,
float* oldVel,
uint numParticles,
uint numCells)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// set all cells to empty
cutilSafeCall(hipMemset(cellStart, 0xffffffff, numCells*sizeof(uint)));
#if USE_TEX
cutilSafeCall(hipBindTexture(0, oldPosTex, oldPos, numParticles*sizeof(float4)));
cutilSafeCall(hipBindTexture(0, oldVelTex, oldVel, numParticles*sizeof(float4)));
#endif
uint smemSize = sizeof(uint)*(numThreads+1);
hipLaunchKernelGGL(( reorderDataAndFindCellStartD), dim3(numBlocks), dim3(numThreads), smemSize, 0,
cellStart,
cellEnd,
(float4 *) sortedPos,
(float4 *) sortedVel,
gridParticleHash,
gridParticleIndex,
(float4 *) oldPos,
(float4 *) oldVel,
numParticles);
cutilCheckMsg("Kernel execution failed: reorderDataAndFindCellStartD");
#if USE_TEX
cutilSafeCall(hipUnbindTexture(oldPosTex));
cutilSafeCall(hipUnbindTexture(oldVelTex));
#endif
}
void collide(float* newVel,
float* sortedPos,
float* sortedVel,
uint* gridParticleIndex,
uint* cellStart,
uint* cellEnd,
uint numParticles,
uint numCells)
{
#if USE_TEX
cutilSafeCall(hipBindTexture(0, oldPosTex, sortedPos, numParticles*sizeof(float4)));
cutilSafeCall(hipBindTexture(0, oldVelTex, sortedVel, numParticles*sizeof(float4)));
cutilSafeCall(hipBindTexture(0, cellStartTex, cellStart, numCells*sizeof(uint)));
cutilSafeCall(hipBindTexture(0, cellEndTex, cellEnd, numCells*sizeof(uint)));
#endif
// thread per particle
uint numThreads, numBlocks;
computeGridSize(numParticles, 64, numBlocks, numThreads);
// execute the kernel
hipLaunchKernelGGL(( collideD), dim3(numBlocks), dim3(numThreads) , 0, 0, (float4*)newVel,
(float4*)sortedPos,
(float4*)sortedVel,
gridParticleIndex,
cellStart,
cellEnd,
numParticles);
// check if kernel invocation generated an error
cutilCheckMsg("Kernel execution failed");
#if USE_TEX
cutilSafeCall(hipUnbindTexture(oldPosTex));
cutilSafeCall(hipUnbindTexture(oldVelTex));
cutilSafeCall(hipUnbindTexture(cellStartTex));
cutilSafeCall(hipUnbindTexture(cellEndTex));
#endif
}
} // extern "C"
|
5b5ec3add1adf0c1c780c05a64dae753704eb245.cu
|
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// This file contains C wrappers around the some of the CUDA API and the
// kernel functions so that they can be called from "particleSystem.cpp"
#include <cutil_inline.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
#if defined(__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#endif
#include <cuda_gl_interop.h>
#include "particles_kernel.cu"
extern "C"
{
void cudaInit(int argc, char **argv)
{
int devID;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) {
devID = cutilDeviceInit(argc, argv);
if (devID < 0) {
printf("exiting...\n");
cutilExit(argc, argv);
exit(0);
}
} else {
devID = cutGetMaxGflopsDeviceId();
cudaSetDevice( devID );
}
}
void cudaGLInit(int argc, char **argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) {
cutilDeviceInit(argc, argv);
} else {
cudaGLSetGLDevice( cutGetMaxGflopsDeviceId() );
}
}
void allocateArray(void **devPtr, size_t size)
{
cutilSafeCall(cudaMalloc(devPtr, size));
}
void freeArray(void *devPtr)
{
cutilSafeCall(cudaFree(devPtr));
}
void threadSync()
{
cutilSafeCall(cudaThreadSynchronize());
}
void copyArrayToDevice(void* device, const void* host, int offset, int size)
{
cutilSafeCall(cudaMemcpy((char *) device + offset, host, size, cudaMemcpyHostToDevice));
}
void registerGLBufferObject(uint vbo, struct cudaGraphicsResource **cuda_vbo_resource)
{
cutilSafeCall(cudaGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo,
cudaGraphicsMapFlagsNone));
}
void unregisterGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
cutilSafeCall(cudaGraphicsUnregisterResource(cuda_vbo_resource));
}
void *mapGLBufferObject(struct cudaGraphicsResource **cuda_vbo_resource)
{
void *ptr;
cutilSafeCall(cudaGraphicsMapResources(1, cuda_vbo_resource, 0));
size_t num_bytes;
cutilSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&ptr, &num_bytes,
*cuda_vbo_resource));
return ptr;
}
void unmapGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
cutilSafeCall(cudaGraphicsUnmapResources(1, &cuda_vbo_resource, 0));
}
void copyArrayFromDevice(void* host, const void* device,
struct cudaGraphicsResource **cuda_vbo_resource, int size)
{
if (cuda_vbo_resource)
device = mapGLBufferObject(cuda_vbo_resource);
cutilSafeCall(cudaMemcpy(host, device, size, cudaMemcpyDeviceToHost));
if (cuda_vbo_resource)
unmapGLBufferObject(*cuda_vbo_resource);
}
void setParameters(SimParams *hostParams)
{
// copy parameters to constant memory
cutilSafeCall( cudaMemcpyToSymbol(params, hostParams, sizeof(SimParams)) );
}
//Round a / b to nearest higher integer value
uint iDivUp(uint a, uint b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
// compute grid and thread block size for a given number of elements
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = iDivUp(n, numThreads);
}
void integrateSystem(float *pos,
float *vel,
float deltaTime,
uint numParticles)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// execute the kernel
integrate<<< numBlocks, numThreads >>>((float4*)pos,
(float4*)vel,
deltaTime,
numParticles);
// check if kernel invocation generated an error
cutilCheckMsg("integrate kernel execution failed");
}
void calcHash(uint* gridParticleHash,
uint* gridParticleIndex,
float* pos,
int numParticles)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// execute the kernel
calcHashD<<< numBlocks, numThreads >>>(gridParticleHash,
gridParticleIndex,
(float4 *) pos,
numParticles);
// check if kernel invocation generated an error
cutilCheckMsg("Kernel execution failed");
}
void reorderDataAndFindCellStart(uint* cellStart,
uint* cellEnd,
float* sortedPos,
float* sortedVel,
uint* gridParticleHash,
uint* gridParticleIndex,
float* oldPos,
float* oldVel,
uint numParticles,
uint numCells)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// set all cells to empty
cutilSafeCall(cudaMemset(cellStart, 0xffffffff, numCells*sizeof(uint)));
#if USE_TEX
cutilSafeCall(cudaBindTexture(0, oldPosTex, oldPos, numParticles*sizeof(float4)));
cutilSafeCall(cudaBindTexture(0, oldVelTex, oldVel, numParticles*sizeof(float4)));
#endif
uint smemSize = sizeof(uint)*(numThreads+1);
reorderDataAndFindCellStartD<<< numBlocks, numThreads, smemSize>>>(
cellStart,
cellEnd,
(float4 *) sortedPos,
(float4 *) sortedVel,
gridParticleHash,
gridParticleIndex,
(float4 *) oldPos,
(float4 *) oldVel,
numParticles);
cutilCheckMsg("Kernel execution failed: reorderDataAndFindCellStartD");
#if USE_TEX
cutilSafeCall(cudaUnbindTexture(oldPosTex));
cutilSafeCall(cudaUnbindTexture(oldVelTex));
#endif
}
void collide(float* newVel,
float* sortedPos,
float* sortedVel,
uint* gridParticleIndex,
uint* cellStart,
uint* cellEnd,
uint numParticles,
uint numCells)
{
#if USE_TEX
cutilSafeCall(cudaBindTexture(0, oldPosTex, sortedPos, numParticles*sizeof(float4)));
cutilSafeCall(cudaBindTexture(0, oldVelTex, sortedVel, numParticles*sizeof(float4)));
cutilSafeCall(cudaBindTexture(0, cellStartTex, cellStart, numCells*sizeof(uint)));
cutilSafeCall(cudaBindTexture(0, cellEndTex, cellEnd, numCells*sizeof(uint)));
#endif
// thread per particle
uint numThreads, numBlocks;
computeGridSize(numParticles, 64, numBlocks, numThreads);
// execute the kernel
collideD<<< numBlocks, numThreads >>>((float4*)newVel,
(float4*)sortedPos,
(float4*)sortedVel,
gridParticleIndex,
cellStart,
cellEnd,
numParticles);
// check if kernel invocation generated an error
cutilCheckMsg("Kernel execution failed");
#if USE_TEX
cutilSafeCall(cudaUnbindTexture(oldPosTex));
cutilSafeCall(cudaUnbindTexture(oldVelTex));
cutilSafeCall(cudaUnbindTexture(cellStartTex));
cutilSafeCall(cudaUnbindTexture(cellEndTex));
#endif
}
} // extern "C"
|
3c9008a99296ef499cfaac6496f88c111f8a27ca.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <chrono>
// includes, kernels
#include "matrixmul_kernel.hip"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
bool CompareMatrices(Matrix A, Matrix B);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
// Matrices for the program
Matrix M;
Matrix N;
Matrix P;
// Number of elements in the solution matrix
// Assuming square matrices, so the sizes of M, N and P are equal
unsigned int size_elements = WP * HP;
int errorM = 0, errorN = 0;
srand(2012);
// Check command line for input matrix files
if(argc != 3 && argc != 4)
{
// No inputs provided
// Allocate and initialize the matrices
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
}
else
{
// Inputs provided
// Allocate and read source matrices from disk
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
errorM = ReadFile(&M, argv[1]);
errorN = ReadFile(&N, argv[2]);
// check for read errors
if(errorM != size_elements || errorN != size_elements)
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
computeGold(reference.elements, M.elements, N.elements, HM, WM, WN);
// check if the device result is equivalent to the expected solution
bool res = CompareMatrices(reference, P);
printf("Test %s\n", res ? "PASSED" : "FAILED");
// output result if output file is requested
if(argc == 4)
{
WriteFile(P, argv[3]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free host matrices
free(M.elements);
M.elements = NULL;
free(N.elements);
N.elements = NULL;
free(P.elements);
P.elements = NULL;
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
//Interface host call to the device kernel code and invoke the kernel
//* steps:
	//* 1. allocate device matrices d_M, d_N and d_P with length same as input matrices
Matrix d_M = AllocateDeviceMatrix(M);
Matrix d_N = AllocateDeviceMatrix(N);
Matrix d_P = AllocateDeviceMatrix(P);
//* 2. copy M to d_M, N to d_N
CopyToDeviceMatrix(d_M, M);
CopyToDeviceMatrix(d_N, N);
//* 3. launch kernel to compute d_P = d_M * d_N
dim3 dimGrid(1, 1, 1);
dim3 dimBlock(WP, HP, 1);
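	// note: a single thread block computes the whole product here, so WP*HP
	// must stay within the device's max threads per block (1024 on current GPUs)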
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_M, d_N, d_P);
//* 4. copy d_P back to host vector P
CopyFromDeviceMatrix(P,d_P);
//* 5. free device vectors d_M, d_N and d_P
	hipFree(d_M.elements);
	hipFree(d_N.elements);
	hipFree(d_P.elements);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size,
hipMemcpyDeviceToHost);
}
// Read a floating point matrix in from file
// Returns the number of elements successfully read, which the caller
// compares against M.height * M.width to detect read errors
int ReadFile(Matrix* M, char* file_name)
{
	unsigned int size = M->width * M->height;
	unsigned int data_read = 0;
	FILE* input = fopen(file_name, "r");
	if (input == NULL)
		return 0;
	for (unsigned i = 0; i < size; i++)
		if (fscanf(input, "%f", &(M->elements[i])) == 1)
			data_read++;
	fclose(input);
	return data_read;
}
// Write a floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
	unsigned int size = M.width * M.height;
	FILE* output = fopen(file_name, "w");
	if (output == NULL)
		return;
	for (unsigned i = 0; i < size; i++)
		fprintf(output, "%f ", M.elements[i]);
	fclose(output);
}
// returns true iff A and B have same elements in same order
bool CompareMatrices(Matrix A, Matrix B) {
unsigned int size = A.width * A.height;
if ( (A.width != B.width) || (A.height != B.height) )
return false;
for (unsigned i = 0; i < size; i++)
		if (fabs(A.elements[i] - B.elements[i]) > 0.0001f)
return false;
return true;
}
|
3c9008a99296ef499cfaac6496f88c111f8a27ca.cu
|
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <chrono>
// includes, kernels
#include "matrixmul_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
bool CompareMatrices(Matrix A, Matrix B);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
// Matrices for the program
Matrix M;
Matrix N;
Matrix P;
// Number of elements in the solution matrix
// Assuming square matrices, so the sizes of M, N and P are equal
unsigned int size_elements = WP * HP;
int errorM = 0, errorN = 0;
srand(2012);
// Check command line for input matrix files
if(argc != 3 && argc != 4)
{
// No inputs provided
// Allocate and initialize the matrices
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
}
else
{
// Inputs provided
// Allocate and read source matrices from disk
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
errorM = ReadFile(&M, argv[1]);
errorN = ReadFile(&N, argv[2]);
// check for read errors
if(errorM != size_elements || errorN != size_elements)
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
computeGold(reference.elements, M.elements, N.elements, HM, WM, WN);
// check if the device result is equivalent to the expected solution
bool res = CompareMatrices(reference, P);
printf("Test %s\n", res ? "PASSED" : "FAILED");
// output result if output file is requested
if(argc == 4)
{
WriteFile(P, argv[3]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free host matrices
free(M.elements);
M.elements = NULL;
free(N.elements);
N.elements = NULL;
free(P.elements);
P.elements = NULL;
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
//Interface host call to the device kernel code and invoke the kernel
//* steps:
	//* 1. allocate device matrices d_M, d_N and d_P with length same as input matrices
Matrix d_M = AllocateDeviceMatrix(M);
Matrix d_N = AllocateDeviceMatrix(N);
Matrix d_P = AllocateDeviceMatrix(P);
//* 2. copy M to d_M, N to d_N
CopyToDeviceMatrix(d_M, M);
CopyToDeviceMatrix(d_N, N);
//* 3. launch kernel to compute d_P = d_M * d_N
dim3 dimGrid(1, 1, 1);
dim3 dimBlock(WP, HP, 1);
MatrixMulKernel<<<dimGrid, dimBlock>>>(d_M, d_N, d_P);
//* 4. copy d_P back to host vector P
CopyFromDeviceMatrix(P,d_P);
//* 5. free device vectors d_M, d_N and d_P
	cudaFree(d_M.elements);
	cudaFree(d_N.elements);
	cudaFree(d_P.elements);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,
cudaMemcpyDeviceToHost);
}
// Read a floating point matrix in from file
// Returns the number of elements successfully read, which the caller
// compares against M.height * M.width to detect read errors
int ReadFile(Matrix* M, char* file_name)
{
	unsigned int size = M->width * M->height;
	unsigned int data_read = 0;
	FILE* input = fopen(file_name, "r");
	if (input == NULL)
		return 0;
	for (unsigned i = 0; i < size; i++)
		if (fscanf(input, "%f", &(M->elements[i])) == 1)
			data_read++;
	fclose(input);
	return data_read;
}
// Write a floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
	unsigned int size = M.width * M.height;
	FILE* output = fopen(file_name, "w");
	if (output == NULL)
		return;
	for (unsigned i = 0; i < size; i++)
		fprintf(output, "%f ", M.elements[i]);
	fclose(output);
}
// returns true iff A and B have same elements in same order
bool CompareMatrices(Matrix A, Matrix B) {
unsigned int size = A.width * A.height;
if ( (A.width != B.width) || (A.height != B.height) )
return false;
for (unsigned i = 0; i < size; i++)
		if (fabs(A.elements[i] - B.elements[i]) > 0.0001f)
return false;
return true;
}
|
08b7b6fdc50ce92620cb57057b5492b29351efe8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <iostream>
#include <vector>
#include "CudaContextManager.cuh"
#include "GPUTimer.cuh"
#include "Transform.cuh"
CudaContextManager *CudaContextManager::gCudaContextManager = NULL;
static float TestMemoryAllocate(int times) {
const size_t dev_id = 0;
CUDA_ERROR(hipSetDevice(dev_id));
CudaContext *context =
CudaContextManager::GetCudaContextManager()->GetCudaContext(dev_id);
const size_t small_memory_batch_size = 1024ULL * 1024 * 1024;
const size_t large_memory_batch_size = 2ULL * 1024 * 1024 * 1024;
GPUTimer timer;
timer.StartTimer();
for (int i = 0; i < times; ++i) {
void *mem = NULL;
mem = context->Malloc(small_memory_batch_size);
context->Free(mem, small_memory_batch_size);
mem = context->Malloc(large_memory_batch_size);
context->Free(mem, large_memory_batch_size);
}
timer.EndTimer();
return timer.GetElapsedMilliSeconds();
}
static float TestMemoryAllocateAndAccess(int times) {
const size_t dev_id = 0;
CUDA_ERROR(hipSetDevice(dev_id));
CudaContext *context =
CudaContextManager::GetCudaContextManager()->GetCudaContext(dev_id);
const size_t small_memory_batch_size = 1024ULL * 1024 * 1024;
const size_t large_memory_batch_size = 2ULL * 1024 * 1024 * 1024;
const size_t access_count = 100000000;
GPUTimer timer;
timer.StartTimer();
for (int i = 0; i < times; ++i) {
void *mem = NULL;
char *data = NULL;
mem = context->Malloc(small_memory_batch_size);
data = (char *)mem;
// hipMemset(data, 0, sizeof(char) * small_memory_batch_size);
GpuUtils::Transform::Transform([=] DEVICE(int index) { data[index] = 'a'; },
access_count, context);
context->Free(mem, small_memory_batch_size);
mem = context->Malloc(large_memory_batch_size);
data = (char *)mem;
// hipMemset(data, 0, sizeof(char) * large_memory_batch_size);
GpuUtils::Transform::Transform([=] DEVICE(int index) { data[index] = 'a'; },
access_count, context);
context->Free(mem, large_memory_batch_size);
}
timer.EndTimer();
return timer.GetElapsedMilliSeconds();
}
int main(int argc, char **argv) {
int times = 100;
if (argc == 2) {
times = atoi(argv[1]);
}
const size_t gigabytes = 1024ULL * 1024 * 1024;
std::vector<float> run_times;
for (size_t i = 1; i <= 8; ++i) {
CudaContextManager::CreateCudaContextManager(2, CNMEM_MANAGED);
const size_t dev_id = 0;
CUDA_ERROR(hipSetDevice(dev_id));
CudaContext *context =
CudaContextManager::GetCudaContextManager()->GetCudaContext(dev_id);
size_t consumed = gigabytes * i;
void *mem = context->Malloc(consumed);
// float t = TestMemoryAllocate(times);
float t = TestMemoryAllocateAndAccess(times);
run_times.push_back(t);
context->Free(mem, consumed);
CudaContextManager::FreeCudaContextManager();
}
std::cout << "times=" << times << std::endl;
for (size_t i = 0; i < 8; ++i) {
std::cout << i + 1 << "GB consumed, elapsed_time=" << run_times[i]
<< std::endl;
}
return 0;
}
|
08b7b6fdc50ce92620cb57057b5492b29351efe8.cu
|
#include <cstdlib>
#include <iostream>
#include <vector>
#include "CudaContextManager.cuh"
#include "GPUTimer.cuh"
#include "Transform.cuh"
CudaContextManager *CudaContextManager::gCudaContextManager = NULL;
static float TestMemoryAllocate(int times) {
const size_t dev_id = 0;
CUDA_ERROR(cudaSetDevice(dev_id));
CudaContext *context =
CudaContextManager::GetCudaContextManager()->GetCudaContext(dev_id);
const size_t small_memory_batch_size = 1024ULL * 1024 * 1024;
const size_t large_memory_batch_size = 2ULL * 1024 * 1024 * 1024;
GPUTimer timer;
timer.StartTimer();
for (int i = 0; i < times; ++i) {
void *mem = NULL;
mem = context->Malloc(small_memory_batch_size);
context->Free(mem, small_memory_batch_size);
mem = context->Malloc(large_memory_batch_size);
context->Free(mem, large_memory_batch_size);
}
timer.EndTimer();
return timer.GetElapsedMilliSeconds();
}
static float TestMemoryAllocateAndAccess(int times) {
const size_t dev_id = 0;
CUDA_ERROR(cudaSetDevice(dev_id));
CudaContext *context =
CudaContextManager::GetCudaContextManager()->GetCudaContext(dev_id);
const size_t small_memory_batch_size = 1024ULL * 1024 * 1024;
const size_t large_memory_batch_size = 2ULL * 1024 * 1024 * 1024;
const size_t access_count = 100000000;
GPUTimer timer;
timer.StartTimer();
for (int i = 0; i < times; ++i) {
void *mem = NULL;
char *data = NULL;
mem = context->Malloc(small_memory_batch_size);
data = (char *)mem;
// cudaMemset(data, 0, sizeof(char) * small_memory_batch_size);
GpuUtils::Transform::Transform([=] DEVICE(int index) { data[index] = 'a'; },
access_count, context);
context->Free(mem, small_memory_batch_size);
mem = context->Malloc(large_memory_batch_size);
data = (char *)mem;
// cudaMemset(data, 0, sizeof(char) * large_memory_batch_size);
GpuUtils::Transform::Transform([=] DEVICE(int index) { data[index] = 'a'; },
access_count, context);
context->Free(mem, large_memory_batch_size);
}
timer.EndTimer();
return timer.GetElapsedMilliSeconds();
}
int main(int argc, char **argv) {
int times = 100;
if (argc == 2) {
times = atoi(argv[1]);
}
const size_t gigabytes = 1024ULL * 1024 * 1024;
std::vector<float> run_times;
for (size_t i = 1; i <= 8; ++i) {
CudaContextManager::CreateCudaContextManager(2, CNMEM_MANAGED);
const size_t dev_id = 0;
CUDA_ERROR(cudaSetDevice(dev_id));
CudaContext *context =
CudaContextManager::GetCudaContextManager()->GetCudaContext(dev_id);
size_t consumed = gigabytes * i;
void *mem = context->Malloc(consumed);
// float t = TestMemoryAllocate(times);
float t = TestMemoryAllocateAndAccess(times);
run_times.push_back(t);
context->Free(mem, consumed);
CudaContextManager::FreeCudaContextManager();
}
std::cout << "times=" << times << std::endl;
for (size_t i = 0; i < 8; ++i) {
std::cout << i + 1 << "GB consumed, elapsed_time=" << run_times[i]
<< std::endl;
}
return 0;
}
|
d6aa222cb459ae097b27cc4d5ccffdff1919e792.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.3
 * copyright (c) 2019, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: October 2019
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include "gpu_nn.h"
#include "gpu_nn_kernels.h"
#include "../gpu_hw.h"
#include "../gpu_tensor.h"
#include "../gpu_kernels.h"
#include "../../../tensor/tensor.h"
#include "../../../descriptors/descriptors.h"
void gpu_relu(Tensor *A,Tensor *B){
int device=A->gpu_device;
hipSetDevice(device);
setDims(A);
hipLaunchKernelGGL(( relu), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,A->size);
check_cuda(hipDeviceSynchronize(),"gpu_relu");
}
void gpu_d_relu(Tensor *D,Tensor *I,Tensor *PD) {
int device=D->gpu_device;
hipSetDevice(device);
setDims(D)
hipLaunchKernelGGL(( d_relu), dim3(dimGrid),dim3(dimBlock), 0, 0, D->ptr,I->ptr,PD->ptr,D->size);
check_cuda(hipDeviceSynchronize(),"gpu_d_relu");
}
void gpu_thresholded_relu(Tensor *A,Tensor *B,float param){
int device=A->gpu_device;
hipSetDevice(device);
setDims(A);
hipLaunchKernelGGL(( thresholded_relu), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,param,A->size);
check_cuda(hipDeviceSynchronize(),"gpu_thresholded_relu");
}
void gpu_d_thresholded_relu(Tensor *D,Tensor *I,Tensor *PD,float param) {
int device=D->gpu_device;
hipSetDevice(device);
setDims(D)
hipLaunchKernelGGL(( d_thresholded_relu), dim3(dimGrid),dim3(dimBlock), 0, 0, D->ptr,I->ptr,PD->ptr,param,D->size);
check_cuda(hipDeviceSynchronize(),"gpu_d_thresholded_relu");
}
void gpu_leaky_relu(Tensor *A,Tensor *B,float param){
int device=A->gpu_device;
hipSetDevice(device);
setDims(A);
hipLaunchKernelGGL(( leaky_relu), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,param,A->size);
check_cuda(hipDeviceSynchronize(),"gpu_leaky_relu");
}
void gpu_d_leaky_relu(Tensor *D,Tensor *I,Tensor *PD,float param) {
int device=D->gpu_device;
hipSetDevice(device);
setDims(D)
hipLaunchKernelGGL(( d_leaky_relu), dim3(dimGrid),dim3(dimBlock), 0, 0, D->ptr,I->ptr,PD->ptr,param,D->size);
check_cuda(hipDeviceSynchronize(),"gpu_d_leaky_relu");
}
void gpu_elu(Tensor *A,Tensor *B,float param){
int device=A->gpu_device;
hipSetDevice(device);
setDims(A);
hipLaunchKernelGGL(( elu), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,param,A->size);
check_cuda(hipDeviceSynchronize(),"gpu_elu");
}
void gpu_d_elu(Tensor *D,Tensor *I,Tensor *PD,float param) {
int device=D->gpu_device;
hipSetDevice(device);
setDims(D)
hipLaunchKernelGGL(( d_elu), dim3(dimGrid),dim3(dimBlock), 0, 0, D->ptr,I->ptr,PD->ptr,param,D->size);
check_cuda(hipDeviceSynchronize(),"gpu_d_elu");
}
void gpu_softplus(Tensor *A,Tensor *B){
int device=A->gpu_device;
hipSetDevice(device);
setDims(A);
hipLaunchKernelGGL(( softplus), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,A->size);
check_cuda(hipDeviceSynchronize(),"gpu_softplus");
}
void gpu_d_softplus(Tensor *D,Tensor *I,Tensor *PD){
int device=D->gpu_device;
hipSetDevice(device);
setDims(D)
hipLaunchKernelGGL(( d_softplus), dim3(dimGrid),dim3(dimBlock), 0, 0, D->ptr,I->ptr,PD->ptr,D->size);
check_cuda(hipDeviceSynchronize(),"gpu_d_softplus");
}
void gpu_softsign(Tensor *A,Tensor *B){
int device=A->gpu_device;
hipSetDevice(device);
setDims(A)
hipLaunchKernelGGL(( softsign), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,A->size);
check_cuda(hipDeviceSynchronize(),"gpu_softsign");
}
void gpu_d_softsign(Tensor *D,Tensor *I,Tensor *PD){
int device=D->gpu_device;
hipSetDevice(device);
setDims(D)
hipLaunchKernelGGL(( d_softsign), dim3(dimGrid),dim3(dimBlock), 0, 0, D->ptr,I->ptr,PD->ptr,D->size);
check_cuda(hipDeviceSynchronize(),"gpu_d_softsign");
}
void gpu_linear(Tensor *A,Tensor *B,float param){
int device=A->gpu_device;
hipSetDevice(device);
setDims(A);
hipLaunchKernelGGL(( linear), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,param,A->size);
check_cuda(hipDeviceSynchronize(),"gpu_linear");
}
void gpu_d_linear(Tensor *D,Tensor *I,Tensor *PD,float param) {
int device=D->gpu_device;
hipSetDevice(device);
setDims(D)
hipLaunchKernelGGL(( d_linear), dim3(dimGrid),dim3(dimBlock), 0, 0, D->ptr,I->ptr,PD->ptr,param,D->size);
check_cuda(hipDeviceSynchronize(),"gpu_d_linear");
}
void gpu_sigmoid(Tensor *A,Tensor *B){
int device=A->gpu_device;
hipSetDevice(device);
setDims(A);
hipLaunchKernelGGL(( sigmoid), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,A->size);
check_cuda(hipDeviceSynchronize(),"gpu_sigmoid");
}
void gpu_d_sigmoid(Tensor *D,Tensor *I,Tensor *PD){
int device=D->gpu_device;
hipSetDevice(device);
setDims(D)
hipLaunchKernelGGL(( d_sigmoid), dim3(dimGrid),dim3(dimBlock), 0, 0, D->ptr,I->ptr,PD->ptr,D->size);
check_cuda(hipDeviceSynchronize(),"gpu_d_sigmoid");
}
void gpu_hard_sigmoid(Tensor *A,Tensor *B){
int device=A->gpu_device;
hipSetDevice(device);
setDims(A);
hipLaunchKernelGGL(( hard_sigmoid), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,A->size);
check_cuda(hipDeviceSynchronize(),"gpu_hard_sigmoid");
}
void gpu_d_hard_sigmoid(Tensor *D,Tensor *I,Tensor *PD){
int device=D->gpu_device;
hipSetDevice(device);
setDims(D)
hipLaunchKernelGGL(( d_hard_sigmoid), dim3(dimGrid),dim3(dimBlock), 0, 0, D->ptr,I->ptr,PD->ptr,D->size);
check_cuda(hipDeviceSynchronize(),"gpu_d_hard_sigmoid");
}
void gpu_exp(Tensor *A,Tensor *B){
int device=A->gpu_device;
hipSetDevice(device);
setDims(A);
hipLaunchKernelGGL(( exp), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,A->size);
check_cuda(hipDeviceSynchronize(),"gpu_exp");
}
void gpu_d_exp(Tensor *D,Tensor *I,Tensor *PD){
int device=D->gpu_device;
hipSetDevice(device);
setDims(D)
hipLaunchKernelGGL(( d_exp), dim3(dimGrid),dim3(dimBlock), 0, 0, D->ptr,I->ptr,PD->ptr,D->size);
check_cuda(hipDeviceSynchronize(),"gpu_d_exp");
}
void gpu_tanh(Tensor *A,Tensor *B){
int device=A->gpu_device;
hipSetDevice(device);
setDims(A);
hipLaunchKernelGGL(( tanh), dim3(dimGrid),dim3(dimBlock), 0, 0, A->ptr,B->ptr,A->size);
check_cuda(hipDeviceSynchronize(),"gpu_tanh");
}
void gpu_d_tanh(Tensor *D,Tensor *I,Tensor *PD){
int device=D->gpu_device;
hipSetDevice(device);
setDims(D)
hipLaunchKernelGGL(( d_tanh), dim3(dimGrid),dim3(dimBlock), 0, 0, D->ptr,I->ptr,PD->ptr,D->size);
check_cuda(hipDeviceSynchronize(),"gpu_d_tanh");
}
void gpu_softmax(Tensor *A,Tensor *B){
int device=A->gpu_device;
hipSetDevice(device);
int r,c;
r=A->shape[0];
c=A->shape[1];
dim3 dimGrid(1);
dim3 dimBlock(MAX_TPB);
int i;
for(i=0;i<r/MAX_TPB;i++) {
float *aptr=A->ptr+(i*MAX_TPB*c);
float *bptr=B->ptr+(i*MAX_TPB*c);
int size=MAX_TPB*c;
float* aux=gpu_create_tensor(device,size);
hipLaunchKernelGGL(( softmax), dim3(dimGrid),dim3(dimBlock), 0, 0, aptr,bptr,aux,c,size);
check_cuda(hipDeviceSynchronize(),"gpu_softmax");
gpu_delete_tensor(device,aux);
}
if (r%MAX_TPB) {
dim3 dimGridm(1);
dim3 dimBlockm(r%MAX_TPB);
float *aptr=A->ptr+(i*MAX_TPB*c);
float *bptr=B->ptr+(i*MAX_TPB*c);
int size=(r%MAX_TPB)*c;
float* aux=gpu_create_tensor(device,size);
hipLaunchKernelGGL(( softmax), dim3(dimGridm),dim3(dimBlockm), 0, 0, aptr,bptr,aux,c,size);
check_cuda(hipDeviceSynchronize(),"gpu_softmax");
gpu_delete_tensor(device,aux);
}
}
|
d6aa222cb459ae097b27cc4d5ccffdff1919e792.cu
|
/*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.3
* copyright (c) 2019, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: October 2019
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cublas_v2.h>
#include "gpu_nn.h"
#include "gpu_nn_kernels.h"
#include "../gpu_hw.h"
#include "../gpu_tensor.h"
#include "../gpu_kernels.h"
#include "../../../tensor/tensor.h"
#include "../../../descriptors/descriptors.h"
void gpu_relu(Tensor *A,Tensor *B){
int device=A->gpu_device;
cudaSetDevice(device);
setDims(A);
relu<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,A->size);
check_cuda(cudaDeviceSynchronize(),"gpu_relu");
}
void gpu_d_relu(Tensor *D,Tensor *I,Tensor *PD) {
int device=D->gpu_device;
cudaSetDevice(device);
setDims(D)
d_relu<<<dimGrid,dimBlock>>>(D->ptr,I->ptr,PD->ptr,D->size);
check_cuda(cudaDeviceSynchronize(),"gpu_d_relu");
}
void gpu_thresholded_relu(Tensor *A,Tensor *B,float param){
int device=A->gpu_device;
cudaSetDevice(device);
setDims(A);
thresholded_relu<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,param,A->size);
check_cuda(cudaDeviceSynchronize(),"gpu_thresholded_relu");
}
void gpu_d_thresholded_relu(Tensor *D,Tensor *I,Tensor *PD,float param) {
int device=D->gpu_device;
cudaSetDevice(device);
setDims(D)
d_thresholded_relu<<<dimGrid,dimBlock>>>(D->ptr,I->ptr,PD->ptr,param,D->size);
check_cuda(cudaDeviceSynchronize(),"gpu_d_thresholded_relu");
}
void gpu_leaky_relu(Tensor *A,Tensor *B,float param){
int device=A->gpu_device;
cudaSetDevice(device);
setDims(A);
leaky_relu<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,param,A->size);
check_cuda(cudaDeviceSynchronize(),"gpu_leaky_relu");
}
void gpu_d_leaky_relu(Tensor *D,Tensor *I,Tensor *PD,float param) {
int device=D->gpu_device;
cudaSetDevice(device);
setDims(D)
d_leaky_relu<<<dimGrid,dimBlock>>>(D->ptr,I->ptr,PD->ptr,param,D->size);
check_cuda(cudaDeviceSynchronize(),"gpu_d_leaky_relu");
}
void gpu_elu(Tensor *A,Tensor *B,float param){
int device=A->gpu_device;
cudaSetDevice(device);
setDims(A);
elu<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,param,A->size);
check_cuda(cudaDeviceSynchronize(),"gpu_elu");
}
void gpu_d_elu(Tensor *D,Tensor *I,Tensor *PD,float param) {
int device=D->gpu_device;
cudaSetDevice(device);
setDims(D)
d_elu<<<dimGrid,dimBlock>>>(D->ptr,I->ptr,PD->ptr,param,D->size);
check_cuda(cudaDeviceSynchronize(),"gpu_d_elu");
}
void gpu_softplus(Tensor *A,Tensor *B){
int device=A->gpu_device;
cudaSetDevice(device);
setDims(A);
softplus<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,A->size);
check_cuda(cudaDeviceSynchronize(),"gpu_softplus");
}
void gpu_d_softplus(Tensor *D,Tensor *I,Tensor *PD){
int device=D->gpu_device;
cudaSetDevice(device);
setDims(D)
d_softplus<<<dimGrid,dimBlock>>>(D->ptr,I->ptr,PD->ptr,D->size);
check_cuda(cudaDeviceSynchronize(),"gpu_d_softplus");
}
void gpu_softsign(Tensor *A,Tensor *B){
int device=A->gpu_device;
cudaSetDevice(device);
setDims(A)
softsign<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,A->size);
check_cuda(cudaDeviceSynchronize(),"gpu_softsign");
}
void gpu_d_softsign(Tensor *D,Tensor *I,Tensor *PD){
int device=D->gpu_device;
cudaSetDevice(device);
setDims(D)
d_softsign<<<dimGrid,dimBlock>>>(D->ptr,I->ptr,PD->ptr,D->size);
check_cuda(cudaDeviceSynchronize(),"gpu_d_softsign");
}
void gpu_linear(Tensor *A,Tensor *B,float param){
int device=A->gpu_device;
cudaSetDevice(device);
setDims(A);
linear<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,param,A->size);
check_cuda(cudaDeviceSynchronize(),"gpu_linear");
}
void gpu_d_linear(Tensor *D,Tensor *I,Tensor *PD,float param) {
int device=D->gpu_device;
cudaSetDevice(device);
setDims(D)
d_linear<<<dimGrid,dimBlock>>>(D->ptr,I->ptr,PD->ptr,param,D->size);
check_cuda(cudaDeviceSynchronize(),"gpu_d_linear");
}
void gpu_sigmoid(Tensor *A,Tensor *B){
int device=A->gpu_device;
cudaSetDevice(device);
setDims(A);
sigmoid<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,A->size);
check_cuda(cudaDeviceSynchronize(),"gpu_sigmoid");
}
void gpu_d_sigmoid(Tensor *D,Tensor *I,Tensor *PD){
int device=D->gpu_device;
cudaSetDevice(device);
setDims(D)
d_sigmoid<<<dimGrid,dimBlock>>>(D->ptr,I->ptr,PD->ptr,D->size);
check_cuda(cudaDeviceSynchronize(),"gpu_d_sigmoid");
}
void gpu_hard_sigmoid(Tensor *A,Tensor *B){
int device=A->gpu_device;
cudaSetDevice(device);
setDims(A);
hard_sigmoid<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,A->size);
check_cuda(cudaDeviceSynchronize(),"gpu_hard_sigmoid");
}
void gpu_d_hard_sigmoid(Tensor *D,Tensor *I,Tensor *PD){
int device=D->gpu_device;
cudaSetDevice(device);
setDims(D)
d_hard_sigmoid<<<dimGrid,dimBlock>>>(D->ptr,I->ptr,PD->ptr,D->size);
check_cuda(cudaDeviceSynchronize(),"gpu_d_hard_sigmoid");
}
void gpu_exp(Tensor *A,Tensor *B){
int device=A->gpu_device;
cudaSetDevice(device);
setDims(A);
exp<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,A->size);
check_cuda(cudaDeviceSynchronize(),"gpu_exp");
}
void gpu_d_exp(Tensor *D,Tensor *I,Tensor *PD){
int device=D->gpu_device;
cudaSetDevice(device);
setDims(D)
d_exp<<<dimGrid,dimBlock>>>(D->ptr,I->ptr,PD->ptr,D->size);
check_cuda(cudaDeviceSynchronize(),"gpu_d_exp");
}
void gpu_tanh(Tensor *A,Tensor *B){
int device=A->gpu_device;
cudaSetDevice(device);
setDims(A);
tanh<<<dimGrid,dimBlock>>>(A->ptr,B->ptr,A->size);
check_cuda(cudaDeviceSynchronize(),"gpu_tanh");
}
void gpu_d_tanh(Tensor *D,Tensor *I,Tensor *PD){
int device=D->gpu_device;
cudaSetDevice(device);
setDims(D)
d_tanh<<<dimGrid,dimBlock>>>(D->ptr,I->ptr,PD->ptr,D->size);
check_cuda(cudaDeviceSynchronize(),"gpu_d_tanh");
}
void gpu_softmax(Tensor *A,Tensor *B){
int device=A->gpu_device;
cudaSetDevice(device);
int r,c;
r=A->shape[0];
c=A->shape[1];
dim3 dimGrid(1);
dim3 dimBlock(MAX_TPB);
int i;
for(i=0;i<r/MAX_TPB;i++) {
float *aptr=A->ptr+(i*MAX_TPB*c);
float *bptr=B->ptr+(i*MAX_TPB*c);
int size=MAX_TPB*c;
float* aux=gpu_create_tensor(device,size);
softmax<<<dimGrid,dimBlock>>>(aptr,bptr,aux,c,size);
check_cuda(cudaDeviceSynchronize(),"gpu_softmax");
gpu_delete_tensor(device,aux);
}
if (r%MAX_TPB) {
dim3 dimGridm(1);
dim3 dimBlockm(r%MAX_TPB);
float *aptr=A->ptr+(i*MAX_TPB*c);
float *bptr=B->ptr+(i*MAX_TPB*c);
int size=(r%MAX_TPB)*c;
float* aux=gpu_create_tensor(device,size);
softmax<<<dimGridm,dimBlockm>>>(aptr,bptr,aux,c,size);
check_cuda(cudaDeviceSynchronize(),"gpu_softmax");
gpu_delete_tensor(device,aux);
}
}
|
1e6eca6349e588d8cc96ac1da1e384016e71f233.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Loss::Loss(const std::string& loss)
{
if (loss == "categorical_crossentropy")
loss_type = LOSS_CATEGORICAL_CROSSENTROPY;
else if (loss == "sparse_categorical_crossentropy")
loss_type = LOSS_SPARSE_CATEGORICAL_CROSSENTROPY;
else if (loss == "mean_squared_error")
loss_type = LOSS_MEAN_SQUARED_ERROR_AVG_REDUCE;
else
// Unrecognized loss type
assert(false);
}
Loss::Loss(LossType _loss_type)
: loss_type(_loss_type)
{}
__global__
void sparse_categorical_crossentropy_loss_backward(
float *logit_grad,
const int *label,
coord_t num_samples,
coord_t num_classes)
{
CUDA_KERNEL_LOOP(i, num_samples)
{
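    // subtract 1 from the predicted probability of the true class; combined
    // with the device-to-device copy of logit into logit_grad in backward_task,
    // this gives the softmax + cross-entropy gradient p - onehot(label)
    // (assuming the forward pass left softmax probabilities in logit)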
int label_idx = label[i];
logit_grad[i * num_classes + label_idx] -= 1.0f;
}
}
__global__
void categorical_crossentropy_loss_backward(
float *logit_grad,
const float *logit,
const float *label,
coord_t num_elements)
{
CUDA_KERNEL_LOOP(i, num_elements)
{
logit_grad[i] = logit[i] - label[i];
}
}
__global__
void mean_squared_error_avg_loss_backward(
float *logit_grad,
const float *logit,
const float *label,
coord_t num_elements)
{
CUDA_KERNEL_LOOP(i, num_elements)
{
logit_grad[i] = logit[i] - label[i];
}
}
__host__
void Loss::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 3);
assert(task->regions.size() == 3);
const Loss* loss = (Loss*) task->args;
if (loss->loss_type == LOSS_SPARSE_CATEGORICAL_CROSSENTROPY) {
//sparse_categorical_crossentropy has label of dim: (batch_size, 1)
TensorAccessorW<float, 2> acc_logit_grad(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 2> acc_logit(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<int, 2> acc_label(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
int num_samples = acc_logit.rect.hi[1] - acc_logit.rect.lo[1] + 1;
int num_classes = acc_logit.rect.hi[0] - acc_logit.rect.lo[0] + 1;
assert(acc_logit_grad.rect == acc_logit.rect);
assert(acc_label.rect.hi[1] == acc_logit.rect.hi[1]);
assert(acc_label.rect.lo[1] == acc_logit.rect.lo[1]);
assert(acc_label.rect.lo[0] == acc_label.rect.hi[0]);
checkCUDA(hipMemcpy(acc_logit_grad.ptr, acc_logit.ptr,
acc_logit.rect.volume() * sizeof(float),
hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( sparse_categorical_crossentropy_loss_backward), dim3(GET_BLOCKS(num_samples)), dim3(CUDA_NUM_THREADS), 0, 0,
acc_logit_grad.ptr, acc_label.ptr, num_samples, num_classes);
// Scale logit gradients by op->scale_factor
hipLaunchKernelGGL(( scale_kernel), dim3(GET_BLOCKS(acc_logit_grad.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
acc_logit_grad.ptr, acc_logit_grad.rect.volume(), 0, loss->scale_factor);
} else {
TensorAccessorW<float, 2> acc_logit_grad(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 2> acc_logit(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<float, 2> acc_label(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
// other loss require label and logit have identical shape
assert(acc_logit.rect == acc_label.rect);
assert(acc_logit_grad.rect == acc_logit.rect);
int num_samples = acc_logit.rect.hi[1] - acc_logit.rect.lo[1] + 1;
int num_channels = acc_logit.rect.hi[0] - acc_logit.rect.lo[0] + 1;
if (loss->loss_type == LOSS_CATEGORICAL_CROSSENTROPY) {
hipLaunchKernelGGL(( categorical_crossentropy_loss_backward), dim3(GET_BLOCKS(acc_logit.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
acc_logit_grad.ptr, acc_logit.ptr, acc_label.ptr,
acc_logit.rect.volume());
// Scale logit gradients by loss->scale_factor
hipLaunchKernelGGL(( scale_kernel), dim3(GET_BLOCKS(acc_logit_grad.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
acc_logit_grad.ptr, acc_logit_grad.rect.volume(), 0, loss->scale_factor);
} else if (loss->loss_type == LOSS_MEAN_SQUARED_ERROR_AVG_REDUCE) {
hipLaunchKernelGGL(( mean_squared_error_avg_loss_backward), dim3(GET_BLOCKS(acc_logit.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
acc_logit_grad.ptr, acc_logit.ptr, acc_label.ptr,
acc_logit.rect.volume());
// Scale logit gradients by loss->scale_factor
hipLaunchKernelGGL(( scale_kernel), dim3(GET_BLOCKS(acc_logit_grad.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
acc_logit_grad.ptr, acc_logit_grad.rect.volume(), 0, loss->scale_factor);
} else {
fprintf(stderr, "Unsupported loss --- report this error to the FlexFlow developers\n");
assert(false);
}
}
}
void Loss::backward(FFModel* model,
const Tensor* logit,
const Tensor* label)
{
// Compute scale factor for loss backpropagation
scale_factor = 1.0f/ logit->adim[logit->numDim-1];
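  // (adim[numDim-1] is taken here to be the sample/batch dimension, so this
  //  averages the gradient over the batch; that reading is an assumption
  //  based on how scale_factor is applied in backward_task)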
//scale_factor = 1.0f;
// Use the same parallel strategy as the owner of logit
std::string pcname = logit->owner_op->name;
IndexSpaceT<2> task_is = IndexSpaceT<2>(model->get_or_create_task_is(2, pcname));
Context ctx = model->config.lg_ctx;
Runtime* runtime = model->config.lg_hlr;
Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
Rect<2> logit_rect = runtime->get_index_partition_color_space(
ctx, logit->part.get_index_partition());
Rect<2> label_rect = runtime->get_index_partition_color_space(
ctx, label->part.get_index_partition());
if((logit_rect != part_rect) || (label_rect != part_rect)) {
fprintf(stderr, "Encounter inconsistency in parallelizing loss computation");
assert(false);
}
ArgumentMap argmap;
IndexLauncher launcher(LOSS_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Loss)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(pcname));
launcher.add_region_requirement(
RegionRequirement(logit->part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, logit->region_grad));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(logit->part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, logit->region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(label->part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, label->region));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
|
1e6eca6349e588d8cc96ac1da1e384016e71f233.cu
|
/* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Loss::Loss(const std::string& loss)
{
if (loss == "categorical_crossentropy")
loss_type = LOSS_CATEGORICAL_CROSSENTROPY;
else if (loss == "sparse_categorical_crossentropy")
loss_type = LOSS_SPARSE_CATEGORICAL_CROSSENTROPY;
else if (loss == "mean_squared_error")
loss_type = LOSS_MEAN_SQUARED_ERROR_AVG_REDUCE;
else
// Unrecognized loss type
assert(false);
}
Loss::Loss(LossType _loss_type)
: loss_type(_loss_type)
{}
__global__
void sparse_categorical_crossentropy_loss_backward(
float *logit_grad,
const int *label,
coord_t num_samples,
coord_t num_classes)
{
CUDA_KERNEL_LOOP(i, num_samples)
{
int label_idx = label[i];
logit_grad[i * num_classes + label_idx] -= 1.0f;
}
}
__global__
void categorical_crossentropy_loss_backward(
float *logit_grad,
const float *logit,
const float *label,
coord_t num_elements)
{
CUDA_KERNEL_LOOP(i, num_elements)
{
logit_grad[i] = logit[i] - label[i];
}
}
__global__
void mean_squared_error_avg_loss_backward(
float *logit_grad,
const float *logit,
const float *label,
coord_t num_elements)
{
CUDA_KERNEL_LOOP(i, num_elements)
{
logit_grad[i] = logit[i] - label[i];
}
}
__host__
void Loss::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 3);
assert(task->regions.size() == 3);
const Loss* loss = (Loss*) task->args;
if (loss->loss_type == LOSS_SPARSE_CATEGORICAL_CROSSENTROPY) {
//sparse_categorical_crossentropy has label of dim: (batch_size, 1)
TensorAccessorW<float, 2> acc_logit_grad(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 2> acc_logit(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<int, 2> acc_label(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
int num_samples = acc_logit.rect.hi[1] - acc_logit.rect.lo[1] + 1;
int num_classes = acc_logit.rect.hi[0] - acc_logit.rect.lo[0] + 1;
assert(acc_logit_grad.rect == acc_logit.rect);
assert(acc_label.rect.hi[1] == acc_logit.rect.hi[1]);
assert(acc_label.rect.lo[1] == acc_logit.rect.lo[1]);
assert(acc_label.rect.lo[0] == acc_label.rect.hi[0]);
checkCUDA(cudaMemcpy(acc_logit_grad.ptr, acc_logit.ptr,
acc_logit.rect.volume() * sizeof(float),
cudaMemcpyDeviceToDevice));
sparse_categorical_crossentropy_loss_backward<<<GET_BLOCKS(num_samples), CUDA_NUM_THREADS>>>(
acc_logit_grad.ptr, acc_label.ptr, num_samples, num_classes);
// Scale logit gradients by op->scale_factor
scale_kernel<<<GET_BLOCKS(acc_logit_grad.rect.volume()), CUDA_NUM_THREADS>>>(
acc_logit_grad.ptr, acc_logit_grad.rect.volume(), 0, loss->scale_factor);
} else {
TensorAccessorW<float, 2> acc_logit_grad(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 2> acc_logit(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<float, 2> acc_label(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
// other loss require label and logit have identical shape
assert(acc_logit.rect == acc_label.rect);
assert(acc_logit_grad.rect == acc_logit.rect);
int num_samples = acc_logit.rect.hi[1] - acc_logit.rect.lo[1] + 1;
int num_channels = acc_logit.rect.hi[0] - acc_logit.rect.lo[0] + 1;
if (loss->loss_type == LOSS_CATEGORICAL_CROSSENTROPY) {
categorical_crossentropy_loss_backward<<<GET_BLOCKS(acc_logit.rect.volume()), CUDA_NUM_THREADS>>>(
acc_logit_grad.ptr, acc_logit.ptr, acc_label.ptr,
acc_logit.rect.volume());
// Scale logit gradients by loss->scale_factor
scale_kernel<<<GET_BLOCKS(acc_logit_grad.rect.volume()), CUDA_NUM_THREADS>>>(
acc_logit_grad.ptr, acc_logit_grad.rect.volume(), 0, loss->scale_factor);
} else if (loss->loss_type == LOSS_MEAN_SQUARED_ERROR_AVG_REDUCE) {
mean_squared_error_avg_loss_backward<<<GET_BLOCKS(acc_logit.rect.volume()), CUDA_NUM_THREADS>>>(
acc_logit_grad.ptr, acc_logit.ptr, acc_label.ptr,
acc_logit.rect.volume());
// Scale logit gradients by loss->scale_factor
scale_kernel<<<GET_BLOCKS(acc_logit_grad.rect.volume()), CUDA_NUM_THREADS>>>(
acc_logit_grad.ptr, acc_logit_grad.rect.volume(), 0, loss->scale_factor);
} else {
fprintf(stderr, "Unsupported loss --- report this error to the FlexFlow developers\n");
assert(false);
}
}
}
void Loss::backward(FFModel* model,
const Tensor* logit,
const Tensor* label)
{
// Compute scale factor for loss backpropagation
scale_factor = 1.0f/ logit->adim[logit->numDim-1];
//scale_factor = 1.0f;
// Use the same parallel strategy as the owner of logit
std::string pcname = logit->owner_op->name;
IndexSpaceT<2> task_is = IndexSpaceT<2>(model->get_or_create_task_is(2, pcname));
Context ctx = model->config.lg_ctx;
Runtime* runtime = model->config.lg_hlr;
Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
Rect<2> logit_rect = runtime->get_index_partition_color_space(
ctx, logit->part.get_index_partition());
Rect<2> label_rect = runtime->get_index_partition_color_space(
ctx, label->part.get_index_partition());
if((logit_rect != part_rect) || (label_rect != part_rect)) {
fprintf(stderr, "Encounter inconsistency in parallelizing loss computation");
assert(false);
}
ArgumentMap argmap;
IndexLauncher launcher(LOSS_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Loss)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(pcname));
launcher.add_region_requirement(
RegionRequirement(logit->part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, logit->region_grad));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(logit->part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, logit->region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(label->part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, label->region));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
|
5c046fc99c051d9682d5ce6dbce71b0a65c3f9f8.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double]
 * Sparse/dense linear-system solve Z=CuMatlab_solve(Sparse/Dense(A),Sparse/Dense(Y)).
* AZ=Y -->Z=A\Y (using QR factorization)
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
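/* Illustrative MATLAB usage (a sketch based on the description above; the
   variable names are examples only and the mex build step is assumed):
       A = gpuArray(rand(1000));            % dense double, real
       Y = gpuArray(rand(1000,1));
       Z = CuMatlab_solve(A, Y);            % solves A*Z = Y on the GPU via QR
*/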
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cusparse_v2.h>
#include "CuMatlab_sparseSSR.cu"
#include "CuMatlab_sparseSSC.cu"
#include "CuMatlab_sparseDDR.cu"
#include "CuMatlab_sparseDDC.cu"
#include "CuMatlab_sparseSDR.cu"
#include "CuMatlab_sparseSDC.cu"
#include "CuMatlab_sparseDSR.cu"
#include "CuMatlab_sparseDSC.cu"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
extern "C" static void mexCuMatlab_sparseSSR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseSSC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDDR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDDC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseSDR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseSDC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDSR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDSC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
hipError_t errCode =hipGetDeviceCount(&nDevices);
//int nDevices;
//hipGetDeviceCount(&nDevices);
if (errCode != hipSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
if (nrhs==2 && nlhs==1) {
if (mxIsGPUArray(prhs[0]) && mxIsGPUArray(prhs[1])) {
mxGPUArray const *tempGPU1;
tempGPU1 = mxGPUCreateFromMxArray(prhs[0]);
mxGPUArray const *tempGPU2;
tempGPU2 = mxGPUCreateFromMxArray(prhs[1]);
if ((mxGPUGetClassID(tempGPU1) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU1) == mxREAL) && (mxGPUGetClassID(tempGPU2) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU2) == mxREAL) ){
if ( (mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseSSR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseDDR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
if ( (mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseSDR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseDSR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
}
else if ((mxGPUGetClassID(tempGPU1) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU1) == mxCOMPLEX) && (mxGPUGetClassID(tempGPU2) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU2) == mxCOMPLEX) ){
if ( (mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseSSC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseDDC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
if ( (mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseSDC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseDSC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
}
else{
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
//
else if(!mxIsGPUArray(prhs[0]) && !mxIsGPUArray(prhs[1])) {
if ((mxGetClassID(prhs[0]) == mxDOUBLE_CLASS) && (!mxIsComplex(prhs[0])) && (mxGetClassID(prhs[1]) == mxDOUBLE_CLASS) && (!mxIsComplex(prhs[1]))){
if ( (mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseSSR(nlhs, plhs,
nrhs, prhs);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseDDR(nlhs, plhs,
nrhs, prhs);
return;
}
if ( (mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseSDR(nlhs, plhs,
nrhs, prhs);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseDSR(nlhs, plhs,
nrhs, prhs);
return;
}
}
else if ((mxGetClassID(prhs[0]) == mxDOUBLE_CLASS) && (mxIsComplex(prhs[0])) && (mxGetClassID(prhs[1]) == mxDOUBLE_CLASS) && (mxIsComplex(prhs[1]))){
if ( (mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))){
mexCuMatlab_sparseSSC(nlhs, plhs,
nrhs, prhs);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))){
mexCuMatlab_sparseDDC(nlhs, plhs,
nrhs, prhs);
return;
}
if ( (mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))){
mexCuMatlab_sparseSDC(nlhs, plhs,
nrhs, prhs);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))){
mexCuMatlab_sparseDSC(nlhs, plhs,
nrhs, prhs);
return;
}
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
}
else if ((nrhs<2) || (nrhs>2) || (nlhs<1) || (nlhs>1) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input/output arguments! input arguments must be two and output argument must be one\n");
return;
}
}
|
5c046fc99c051d9682d5ce6dbce71b0a65c3f9f8.cu
|
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double]
 * Sparse/dense linear-system solve Z=CuMatlab_solve(Sparse/Dense(A),Sparse/Dense(Y)).
* AZ=Y -->Z=A\Y (using QR factorization)
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cusparse_v2.h>
#include "CuMatlab_sparseSSR.cu"
#include "CuMatlab_sparseSSC.cu"
#include "CuMatlab_sparseDDR.cu"
#include "CuMatlab_sparseDDC.cu"
#include "CuMatlab_sparseSDR.cu"
#include "CuMatlab_sparseSDC.cu"
#include "CuMatlab_sparseDSR.cu"
#include "CuMatlab_sparseDSC.cu"
#include <cuda.h>
#include <cuda_runtime.h>
extern "C" static void mexCuMatlab_sparseSSR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseSSC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDDR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDDC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseSDR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseSDC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDSR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_sparseDSC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
cudaError_t errCode =cudaGetDeviceCount(&nDevices);
//int nDevices;
//cudaGetDeviceCount(&nDevices);
if (errCode != cudaSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
if (nrhs==2 && nlhs==1) {
if (mxIsGPUArray(prhs[0]) && mxIsGPUArray(prhs[1])) {
mxGPUArray const *tempGPU1;
tempGPU1 = mxGPUCreateFromMxArray(prhs[0]);
mxGPUArray const *tempGPU2;
tempGPU2 = mxGPUCreateFromMxArray(prhs[1]);
if ((mxGPUGetClassID(tempGPU1) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU1) == mxREAL) && (mxGPUGetClassID(tempGPU2) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU2) == mxREAL) ){
if ( (mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseSSR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseDDR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
if ( (mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseSDR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))) {
mexCuMatlab_sparseDSR(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
}
else if ((mxGPUGetClassID(tempGPU1) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU1) == mxCOMPLEX) && (mxGPUGetClassID(tempGPU2) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU2) == mxCOMPLEX) ){
if ( (mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseSSC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseDDC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
if ( (mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseSDC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
if ( (!mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))){
mexCuMatlab_sparseDSC(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
return;
}
}
else{
mxGPUDestroyGPUArray(tempGPU1);
mxGPUDestroyGPUArray(tempGPU2);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
//
else if(!mxIsGPUArray(prhs[0]) && !mxIsGPUArray(prhs[1])) {
if ((mxGetClassID(prhs[0]) == mxDOUBLE_CLASS) && (!mxIsComplex(prhs[0])) && (mxGetClassID(prhs[1]) == mxDOUBLE_CLASS) && (!mxIsComplex(prhs[1]))){
if ( (mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseSSR(nlhs, plhs,
nrhs, prhs);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseDDR(nlhs, plhs,
nrhs, prhs);
return;
}
if ( (mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseSDR(nlhs, plhs,
nrhs, prhs);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))) {
mexCuMatlab_sparseDSR(nlhs, plhs,
nrhs, prhs);
return;
}
}
else if ((mxGetClassID(prhs[0]) == mxDOUBLE_CLASS) && (mxIsComplex(prhs[0])) && (mxGetClassID(prhs[1]) == mxDOUBLE_CLASS) && (mxIsComplex(prhs[1]))){
if ( (mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))){
mexCuMatlab_sparseSSC(nlhs, plhs,
nrhs, prhs);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))){
mexCuMatlab_sparseDDC(nlhs, plhs,
nrhs, prhs);
return;
}
if ( (mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))){
mexCuMatlab_sparseSDC(nlhs, plhs,
nrhs, prhs);
return;
}
if ( (!mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))){
mexCuMatlab_sparseDSC(nlhs, plhs,
nrhs, prhs);
return;
}
}
else{
             mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
                      "Incorrect input arguments! Both inputs must be double precision, and both real or both complex.\n");
}
    }
    else {
        mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
                 "Incorrect input arguments! Both inputs must reside on the GPU or both on the host.\n");
    }
}
    else {
      mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
                 "Incorrect input/output arguments! There must be exactly two input arguments and exactly one output argument.\n");
return;
}
}
|
9f0907fb438ce0fcfaf790fd540a1c786ef3e5f4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*--------------------------------------------------------------------------- */
/* tri_gpu: compute the number of triangles in a graph (GPU method) */
/*--------------------------------------------------------------------------- */
// READ THIS:
// This code is way over-commented because I'm giving you lots of instruction
// on how to write a CUDA kernel and its CPU driver. Please delete ALL C++
// style comments in this file (and only this file) that used the '//' comment
// style! Replace them with your own that describe how you solved each part of
// this problem. Keep the comments in the old style /* like this */, since
// those are useful. Feel free to rewrite those /*comments*/ if you like.
#include "tri_def.h"
// I recommend using a 2D array of threads, x-by-y, since you have two nested
// for loops in the code below. I recommend a single 1D array of threadblocks.
// Each threadblock must do only one column (or node) j at a time, since it
// needs to use the Mark array of size n to mark the neighbors of j, for use in
// the two nested loops. I will let you figure out the dimensions to use. If
// you are having trouble getting the code to work, try one block with a single
// thread (1-by-1 thread grid). You won't have any synchronization problems,
// but of course your code will be exceedingly slow.
// However, if you want to use a 1D array of threads, feel free to do so. Just
// be sure to keep things parallel. Don't force one thread in the threadblock
// to do just one iteration of the "for (p = ...)" iteration below, for
// example. That will be painfully slow (and points taken off for a bad
// algorithm).
// NBLOCKS needs to be large enough to keep the 13 SMs on the GPU busy. Don't
// make NBLOCKS too high, however. Your kernel will need a Marks array of size
// NBLOCKS*n, so that each threadblock can have its own private Mark array of
// size n. If NBLOCKS is high you will use all the GPU memory for the Marks
// array, and you won't be able to solve the problems on the GPU.
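// A quick back-of-the-envelope example (illustrative numbers only, not values
// required by this assignment): with NBLOCKS = 512 and a graph with
// n = 10,000,000 nodes, the Marks workspace alone is
// 512 * 10,000,000 * sizeof(bool) = 5.12 GB, which already exceeds the memory
// of many GPUs; halving NBLOCKS halves that footprint.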
/* -------------------------------------------------------------------------- */
/* tri_kernel: GPU kernel */
/* -------------------------------------------------------------------------- */
/* launched with <<<NBLOCKS, dim3(NX,NY)>>> */
// or modify it to launch with <<<NBLOCKS,NTHREADS>>> as you prefer
#define NBLOCKS 512 //TODO /* gridDim.x */
#define NX 28 //TODO /* blockDim.x (# of threads in x dimension) */
#define NY 29 //TODO /* blockDim.y (# of threads in y dimension) */
#define NTHREADS (NY * NX)
__global__ void tri_kernel
(
/* inputs, not modified: */
const int *Ap, /* column pointers, size n+1 */
const int *Ai, /* row indices */
    const int n,        /* A has n nodes */
/* workspace */
bool *Marks, /* size NBLOCKS*n so each threadblock has */
/* its own array of size n */
/* output: */
int64_t Ntri_result [NBLOCKS] /* # triangles found by each threadblock */
)
{
    //POINT Mark AT THIS BLOCK'S PRIVATE SLICE OF THE MARKS ARRAY (OFFSET n*blockIdx.x)
    bool *Mark = Marks + (n*blockIdx.x); //TODO
    //CREATE A FLATTENED ID FOR EACH THREAD IN THE BLOCK AND GIVE EACH THREAD ITS OWN STRIDED SLICE
    //THE LOOP RUNS FROM id UP TO n IN STEPS OF NTHREADS, SO TOGETHER THE THREADS COVER Mark[0..n-1]
    //THE THREADS DO NOT WORK IN SEQUENCE; EACH ONE GRABS ITS OWN ID AND CLEARS ITS OWN ENTRIES
    //HERE WE ZERO OUT THE Mark ARRAY FOR THIS BLOCK
int id = threadIdx.y * blockDim.x + threadIdx.x ; //TODO
for (int i = id ; i < n ; i+=NTHREADS) //TODO
{
Mark [i] = 0 ;
}
/* ensure all threads have cleared the Mark array */
// What happens if some threads in this threadblock finish clearing their
// part of Mark, and then they start the work below before some other
// threads have finished clearing their part of Mark? Race condition! I
// put this sync threads here for you. You will need more elsewhere in
// this kernel. I will let you figure out where. When in doubt, extra
// syncthreads are not usually harmful to code correctness (too many can
// slow your code down however).
__syncthreads ( ) ;
/* each thread counts its own triangles in nt */
// This variable is local to each thread.
int64_t nt = 0 ;
/* count the triangles for node j, aka A(:,j) of the adjacency matrix */
    //ASSIGN EACH BLOCK ITS OWN SET OF COLUMNS (NODES) OF THE MATRIX
    //IF n IS SMALLER THAN NBLOCKS, THE BLOCKS WITH blockIdx.x >= n SIMPLY NEVER ENTER THIS LOOP
    //EACH BLOCK HANDLES EVERY NBLOCKS-th COLUMN, STARTING FROM ITS OWN blockIdx.x
for (int j = blockIdx.x ; j < n; j+=NBLOCKS) //TODO
{
/* All threads in a threadblock are working on this node j, */
/* equivalently, column A(:,j) of the adjacency matrix A */
/* scatter A(:,j) into Mark, marking all nodes adjacent to node j */
        //THIS LOOP WALKS THE SLICE OF Ai[] HOLDING COLUMN j, FROM Ap[j] UP TO (BUT NOT INCLUDING) Ap[j+1]
        //WE SET Mark[i] = 1 FOR EVERY NEIGHBOR i OF NODE j SO THE TRIANGLES CAN BE COUNTED NEXT
        //USE THE FLATTENED id (BUILT FROM threadIdx.x AND threadIdx.y) SO EACH THREAD SCATTERS ITS OWN PART OF THE COLUMN
for (int p = Ap[j]+id ; p< Ap[j+1] ; p=p+NTHREADS) //TODO
{
int i = Ai [p] ;
/* edge (i,j) exists in the graph, mark node i */
Mark [i] = 1 ;
}
__syncthreads( );
/* compute sum(C(:,j)) where C=(A*A(:,j))*.(A(:,j)) */
        //THESE TWO NESTED LOOPS ARE THE HEART OF THE KERNEL
        //THE OUTER LOOP IS STRIDED BY threadIdx.x (STEP NX) AND THE INNER LOOP BY threadIdx.y (STEP NY)
        //FOR EACH NEIGHBOR k OF NODE j (OUTER LOOP), WE WALK THE NEIGHBORS i OF k (INNER LOOP)
        //IF SUCH AN i IS ALSO MARKED AS A NEIGHBOR OF j, THEN (i, j, k) FORMS A TRIANGLE AND nt IS INCREMENTED
for (int p = Ap[j]+threadIdx.x ; p< Ap[j+1] ; p=p+NX) //TODO
{
int k = Ai [p] ;
/* edge (k,j) exists in the graph */
for (int pa = Ap[k]+threadIdx.y ; pa < Ap[k+1] ; pa=pa+NY) //TODO
{
int i = Ai [pa] ;
/* edge (i,k) exists, count a triangle if (i,j) also exists */
nt += Mark [i] ;
}
}
__syncthreads ( ) ;
/* clear Mark for the next iteration */
        //CLEAR Mark[] THE SAME WAY WE FILLED IT, SO ONLY THE ENTRIES TOUCHED FOR THIS j ARE RESET
        //THIS LOOP MIRRORS THE SCATTER LOOP ABOVE, EXCEPT IT WRITES 0 INSTEAD OF 1
for (int p = Ap[j]+id ; p< Ap[j+1] ; p=p+NTHREADS) //TODO
{
int i = Ai [p] ;
/* edge (i,j) exists in the graph, mark node i */
Mark [i] = 0 ;
}
__syncthreads ( ) ;
/* now all of Mark[0..n-1] is all zero again */
// only a few of the entries in Mark have been used in this jth
// iteration.
}
/* each thread copies its result, nt, into a shared array, Ntri */
// Ntri is a shared array of size Ntri[blockDim.y][blockDim.x] ; but size
// must be constant so NY and NX are used. Every thread saves its triangle
// count in this __shared__ array so the results can be summed up for this
// threadblock. This part is done for you:
__shared__ int Ntri [NY][NX] ;
Ntri [threadIdx.y][threadIdx.x] = nt ;
__syncthreads ( ) ;
/* sum up all of Ntri and then one thread writes result to */
/* Ntri_result [blockIdx.x] */
// Now sum up all the triangles found by this threadblock,
// Ntri_result [blockIdx.x] = sum (Ntri). In your first attempt,
// I recommend using thread (0,0) to do this work all by itself.
// But don't stop there, do this reduction in parallel.
// Figure this out yourself.
//TODO
    //THIS REDUCTION IS DONE SERIALLY BY THREAD (0,0); A PARALLEL TREE VERSION IS SKETCHED (DISABLED) RIGHT BELOW
    //IT SUMS THE PER-THREAD COUNTS FROM Ntri[][] INTO Ntri_result[], ONE TOTAL PER BLOCK
    if(id==0){
        Ntri_result[blockIdx.x] = 0;
        for(int y = 0; y < NY; y++){
            for(int x = 0; x < NX; x++){
                Ntri_result [blockIdx.x] += Ntri [y][x];
            }
        }
        //printf("-----The Ntri_result[] for block %d is %lld \n", blockIdx.x, (long long) Ntri_result[blockIdx.x]);
}
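    /* A minimal sketch of the parallel tree reduction hinted at above, left
       disabled (#if 0) so the serial version above stays in effect. It reuses
       the flattened id computed earlier and works whether or not NTHREADS is a
       power of two. */
#if 0
    int *flat = &Ntri[0][0] ;          /* view the NY-by-NX tile as a flat array */
    int count = NTHREADS ;
    while (count > 1)
    {
        int half = (count + 1) / 2 ;   /* fold the upper half onto the lower half */
        if (id < count - half)
        {
            flat [id] += flat [id + half] ;
        }
        __syncthreads ( ) ;            /* count is uniform across threads, so no divergence here */
        count = half ;
    }
    if (id == 0)
    {
        Ntri_result [blockIdx.x] = flat [0] ;
    }
#endif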
}
/* call a cuda method and check its error code */
// This is written for you already.
#define OK(method) \
{ \
err = method ; \
if (err != hipSuccess) \
{ \
printf ("ERROR: line %d\n%s\n", __LINE__, \
hipGetErrorString (err)) ; \
exit (1) ; \
} \
}
/* -------------------------------------------------------------------------- */
/* tri_gpu: driver function that runs on the host CPU */
/* -------------------------------------------------------------------------- */
int64_t tri_gpu /* # of triangles */
(
const int *Ap, /* node pointers, size n+1 */
const int *Ai, /* adjacency lists, size ne = Ap [n] */
const int n /* number of nodes in the graph */
)
{
hipError_t err = hipSuccess ;
/* allocate the graph on the GPU */
// This is written for you already.
int ne = Ap [n] ;
int *d_Ap, *d_Ai ;
OK (hipMalloc (&d_Ap, (n+1) * sizeof (int))) ;
OK (hipMalloc (&d_Ai, (ne ) * sizeof (int))) ;
/* copy the graph to the GPU */
    //COPY THE GRAPH FROM THE CPU TO THE GPU
OK (hipMemcpy (d_Ap, Ap, (n+1) * sizeof(int), hipMemcpyHostToDevice)); //TODO
OK (hipMemcpy (d_Ai, Ai, (ne ) * sizeof(int), hipMemcpyHostToDevice)); //TODO
/* allocate workspace on the GPU */
/* Marks array of size NBLOCKS * n * sizeof (bool), so each */
/* threadblock has its own bool Mark array of size n. */
bool *d_Marks ;
    // CREATE THE d_Marks ARRAY OF SIZE NBLOCKS*n*sizeof(bool) ON THE GPU
OK (hipMalloc (&d_Marks, (NBLOCKS * n * sizeof (bool)))) ; //TODO
/* allocate the result on the GPU */
int64_t *d_ntri ;
    // ALLOCATE THE d_ntri RESULT ARRAY (ONE int64_t PER THREADBLOCK, NBLOCKS TOTAL) ON THE GPU
OK (hipMalloc (&d_ntri, (NBLOCKS*sizeof(int64_t)))); //TODO
// start the timer (optional, if you want to time just the kernel):
// hipEvent_t start, stop ;
// OK (hipEventCreate (&start)) ;
// OK (hipEventCreate (&stop)) ;
// OK (hipEventRecord (start)) ;
/* launch the kernel */
// this is written for you
hipLaunchKernelGGL(( tri_kernel) , dim3(NBLOCKS), dim3(dim3(NX,NY)), 0, 0, d_Ap, d_Ai, n, d_Marks, d_ntri) ;
OK (hipGetLastError ( )) ;
// stop the timer (optional, if you want to time just the kernel)
// OK (hipEventRecord (stop)) ;
// OK (hipEventSynchronize (stop)) ;
// float milliseconds = 0;
// OK (hipEventElapsedTime (&milliseconds, start, stop)) ;
// printf ("GPU kernel time: %g sec\n", milliseconds / 1000) ;
/* get the result from the GPU: one value for each threadblock */
int64_t ntri = 0, ntris [NBLOCKS] ;
// GETTING THE D_NTRI ARRAY OF SIZE NBLOCKS FROM THE GPU
OK (hipMemcpy (ntris, d_ntri, (NBLOCKS*sizeof(int64_t)), hipMemcpyDeviceToHost )); //TODO
/* free space on the GPU */
// use hipFree to free all the things you hipMalloc'd.
// if you fail to do this some problems will run out of memory
    //FREE ALL THE DEVICE MEMORY ALLOCATED ABOVE: d_Ap, d_Ai, d_Marks AND d_ntri
    OK (hipFree (d_Ap)) ;
    OK (hipFree (d_Ai)) ;
    OK (hipFree (d_Marks)) ;
    OK (hipFree (d_ntri)) ;
/* sum up the results for all threadblocks */
// the host has the result of each threadblock in ntris[NBLOCKS].
// sum them up here into ntri.
//TODO
    //FINAL REDUCTION: SUM THE PER-BLOCK TRIANGLE COUNTS ON THE HOST
ntri = 0;
for(int x = 0 ; x < NBLOCKS ; x++){
ntri+= ntris[x];
}
/* return the result */
return (ntri) ;
}
|
9f0907fb438ce0fcfaf790fd540a1c786ef3e5f4.cu
|
/*--------------------------------------------------------------------------- */
/* tri_gpu: compute the number of triangles in a graph (GPU method) */
/*--------------------------------------------------------------------------- */
// READ THIS:
// This code is way over-commented because I'm giving you lots of instruction
// on how to write a CUDA kernel and its CPU driver. Please delete ALL C++
// style comments in this file (and only this file) that used the '//' comment
// style! Replace them with your own that describe how you solved each part of
// this problem. Keep the comments in the old style /* like this */, since
// those are useful. Feel free to rewrite those /*comments*/ if you like.
#include "tri_def.h"
// I recommend using a 2D array of threads, x-by-y, since you have two nested
// for loops in the code below. I recommend a single 1D array of threadblocks.
// Each threadblock must do only one column (or node) j at a time, since it
// needs to use the Mark array of size n to mark the neighbors of j, for use in
// the two nested loops. I will let you figure out the dimensions to use. If
// you are having trouble getting the code to work, try one block with a single
// thread (1-by-1 thread grid). You won't have any synchronization problems,
// but of course your code will be exceedingly slow.
// However, if you want to use a 1D array of threads, feel free to do so. Just
// be sure to keep things parallel. Don't force one thread in the threadblock
// to do just one iteration of the "for (p = ...)" iteration below, for
// example. That will be painfully slow (and points taken off for a bad
// algorithm).
// NBLOCKS needs to be large enough to keep the 13 SMs on the GPU busy. Don't
// make NBLOCKS too high, however. Your kernel will need a Marks array of size
// NBLOCKS*n, so that each threadblock can have its own private Mark array of
// size n. If NBLOCKS is high you will use all the GPU memory for the Marks
// array, and you won't be able to solve the problems on the GPU.
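// A quick back-of-the-envelope example (illustrative numbers only, not values
// required by this assignment): with NBLOCKS = 512 and a graph with
// n = 10,000,000 nodes, the Marks workspace alone is
// 512 * 10,000,000 * sizeof(bool) = 5.12 GB, which already exceeds the memory
// of many GPUs; halving NBLOCKS halves that footprint.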
/* -------------------------------------------------------------------------- */
/* tri_kernel: GPU kernel */
/* -------------------------------------------------------------------------- */
/* launched with <<<NBLOCKS, dim3(NX,NY)>>> */
// or modify it to launch with <<<NBLOCKS,NTHREADS>>> as you prefer
#define NBLOCKS 512 //TODO /* gridDim.x */
#define NX 28 //TODO /* blockDim.x (# of threads in x dimension) */
#define NY 29 //TODO /* blockDim.y (# of threads in y dimension) */
#define NTHREADS (NY * NX)
__global__ void tri_kernel
(
/* inputs, not modified: */
const int *Ap, /* column pointers, size n+1 */
const int *Ai, /* row indices */
    const int n,        /* A has n nodes */
/* workspace */
bool *Marks, /* size NBLOCKS*n so each threadblock has */
/* its own array of size n */
/* output: */
int64_t Ntri_result [NBLOCKS] /* # triangles found by each threadblock */
)
{
    //POINT Mark AT THIS BLOCK'S PRIVATE SLICE OF THE MARKS ARRAY (OFFSET n*blockIdx.x)
    bool *Mark = Marks + (n*blockIdx.x); //TODO
    //CREATE A FLATTENED ID FOR EACH THREAD IN THE BLOCK AND GIVE EACH THREAD ITS OWN STRIDED SLICE
    //THE LOOP RUNS FROM id UP TO n IN STEPS OF NTHREADS, SO TOGETHER THE THREADS COVER Mark[0..n-1]
    //THE THREADS DO NOT WORK IN SEQUENCE; EACH ONE GRABS ITS OWN ID AND CLEARS ITS OWN ENTRIES
    //HERE WE ZERO OUT THE Mark ARRAY FOR THIS BLOCK
int id = threadIdx.y * blockDim.x + threadIdx.x ; //TODO
for (int i = id ; i < n ; i+=NTHREADS) //TODO
{
Mark [i] = 0 ;
}
/* ensure all threads have cleared the Mark array */
// What happens if some threads in this threadblock finish clearing their
// part of Mark, and then they start the work below before some other
// threads have finished clearing their part of Mark? Race condition! I
// put this sync threads here for you. You will need more elsewhere in
// this kernel. I will let you figure out where. When in doubt, extra
// syncthreads are not usually harmful to code correctness (too many can
// slow your code down however).
__syncthreads ( ) ;
/* each thread counts its own triangles in nt */
// This variable is local to each thread.
int64_t nt = 0 ;
/* count the triangles for node j, aka A(:,j) of the adjacency matrix */
    //ASSIGN EACH BLOCK ITS OWN SET OF COLUMNS (NODES) OF THE MATRIX
    //IF n IS SMALLER THAN NBLOCKS, THE BLOCKS WITH blockIdx.x >= n SIMPLY NEVER ENTER THIS LOOP
    //EACH BLOCK HANDLES EVERY NBLOCKS-th COLUMN, STARTING FROM ITS OWN blockIdx.x
for (int j = blockIdx.x ; j < n; j+=NBLOCKS) //TODO
{
/* All threads in a threadblock are working on this node j, */
/* equivalently, column A(:,j) of the adjacency matrix A */
/* scatter A(:,j) into Mark, marking all nodes adjacent to node j */
        //THIS LOOP WALKS THE SLICE OF Ai[] HOLDING COLUMN j, FROM Ap[j] UP TO (BUT NOT INCLUDING) Ap[j+1]
        //WE SET Mark[i] = 1 FOR EVERY NEIGHBOR i OF NODE j SO THE TRIANGLES CAN BE COUNTED NEXT
        //USE THE FLATTENED id (BUILT FROM threadIdx.x AND threadIdx.y) SO EACH THREAD SCATTERS ITS OWN PART OF THE COLUMN
for (int p = Ap[j]+id ; p< Ap[j+1] ; p=p+NTHREADS) //TODO
{
int i = Ai [p] ;
/* edge (i,j) exists in the graph, mark node i */
Mark [i] = 1 ;
}
__syncthreads( );
/* compute sum(C(:,j)) where C=(A*A(:,j))*.(A(:,j)) */
        //THESE TWO NESTED LOOPS ARE THE HEART OF THE KERNEL
        //THE OUTER LOOP IS STRIDED BY threadIdx.x (STEP NX) AND THE INNER LOOP BY threadIdx.y (STEP NY)
        //FOR EACH NEIGHBOR k OF NODE j (OUTER LOOP), WE WALK THE NEIGHBORS i OF k (INNER LOOP)
        //IF SUCH AN i IS ALSO MARKED AS A NEIGHBOR OF j, THEN (i, j, k) FORMS A TRIANGLE AND nt IS INCREMENTED
for (int p = Ap[j]+threadIdx.x ; p< Ap[j+1] ; p=p+NX) //TODO
{
int k = Ai [p] ;
/* edge (k,j) exists in the graph */
for (int pa = Ap[k]+threadIdx.y ; pa < Ap[k+1] ; pa=pa+NY) //TODO
{
int i = Ai [pa] ;
/* edge (i,k) exists, count a triangle if (i,j) also exists */
nt += Mark [i] ;
}
}
__syncthreads ( ) ;
/* clear Mark for the next iteration */
        //CLEAR Mark[] THE SAME WAY WE FILLED IT, SO ONLY THE ENTRIES TOUCHED FOR THIS j ARE RESET
        //THIS LOOP MIRRORS THE SCATTER LOOP ABOVE, EXCEPT IT WRITES 0 INSTEAD OF 1
for (int p = Ap[j]+id ; p< Ap[j+1] ; p=p+NTHREADS) //TODO
{
int i = Ai [p] ;
/* edge (i,j) exists in the graph, mark node i */
Mark [i] = 0 ;
}
__syncthreads ( ) ;
/* now all of Mark[0..n-1] is all zero again */
// only a few of the entries in Mark have been used in this jth
// iteration.
}
/* each thread copies its result, nt, into a shared array, Ntri */
// Ntri is a shared array of size Ntri[blockDim.y][blockDim.x] ; but size
// must be constant so NY and NX are used. Every thread saves its triangle
// count in this __shared__ array so the results can be summed up for this
// threadblock. This part is done for you:
__shared__ int Ntri [NY][NX] ;
Ntri [threadIdx.y][threadIdx.x] = nt ;
__syncthreads ( ) ;
/* sum up all of Ntri and then one thread writes result to */
/* Ntri_result [blockIdx.x] */
// Now sum up all the triangles found by this threadblock,
// Ntri_result [blockIdx.x] = sum (Ntri). In your first attempt,
// I recommend using thread (0,0) to do this work all by itself.
// But don't stop there, do this reduction in parallel.
// Figure this out yourself.
//TODO
    //THIS REDUCTION IS DONE SERIALLY BY THREAD (0,0); A PARALLEL TREE VERSION IS SKETCHED (DISABLED) RIGHT BELOW
    //IT SUMS THE PER-THREAD COUNTS FROM Ntri[][] INTO Ntri_result[], ONE TOTAL PER BLOCK
    if(id==0){
        Ntri_result[blockIdx.x] = 0;
        for(int y = 0; y < NY; y++){
            for(int x = 0; x < NX; x++){
                Ntri_result [blockIdx.x] += Ntri [y][x];
            }
        }
        //printf("-----The Ntri_result[] for block %d is %lld \n", blockIdx.x, (long long) Ntri_result[blockIdx.x]);
}
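    /* A minimal sketch of the parallel tree reduction hinted at above, left
       disabled (#if 0) so the serial version above stays in effect. It reuses
       the flattened id computed earlier and works whether or not NTHREADS is a
       power of two. */
#if 0
    int *flat = &Ntri[0][0] ;          /* view the NY-by-NX tile as a flat array */
    int count = NTHREADS ;
    while (count > 1)
    {
        int half = (count + 1) / 2 ;   /* fold the upper half onto the lower half */
        if (id < count - half)
        {
            flat [id] += flat [id + half] ;
        }
        __syncthreads ( ) ;            /* count is uniform across threads, so no divergence here */
        count = half ;
    }
    if (id == 0)
    {
        Ntri_result [blockIdx.x] = flat [0] ;
    }
#endif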
}
/* call a cuda method and check its error code */
// This is written for you already.
#define OK(method) \
{ \
err = method ; \
if (err != cudaSuccess) \
{ \
printf ("ERROR: line %d\n%s\n", __LINE__, \
cudaGetErrorString (err)) ; \
exit (1) ; \
} \
}
/* -------------------------------------------------------------------------- */
/* tri_gpu: driver function that runs on the host CPU */
/* -------------------------------------------------------------------------- */
int64_t tri_gpu /* # of triangles */
(
const int *Ap, /* node pointers, size n+1 */
const int *Ai, /* adjacency lists, size ne = Ap [n] */
const int n /* number of nodes in the graph */
)
{
cudaError_t err = cudaSuccess ;
/* allocate the graph on the GPU */
// This is written for you already.
int ne = Ap [n] ;
int *d_Ap, *d_Ai ;
OK (cudaMalloc (&d_Ap, (n+1) * sizeof (int))) ;
OK (cudaMalloc (&d_Ai, (ne ) * sizeof (int))) ;
/* copy the graph to the GPU */
    //COPY THE GRAPH FROM THE CPU TO THE GPU
OK (cudaMemcpy (d_Ap, Ap, (n+1) * sizeof(int), cudaMemcpyHostToDevice)); //TODO
OK (cudaMemcpy (d_Ai, Ai, (ne ) * sizeof(int), cudaMemcpyHostToDevice)); //TODO
/* allocate workspace on the GPU */
/* Marks array of size NBLOCKS * n * sizeof (bool), so each */
/* threadblock has its own bool Mark array of size n. */
bool *d_Marks ;
    // CREATE THE d_Marks ARRAY OF SIZE NBLOCKS*n*sizeof(bool) ON THE GPU
OK (cudaMalloc (&d_Marks, (NBLOCKS * n * sizeof (bool)))) ; //TODO
/* allocate the result on the GPU */
int64_t *d_ntri ;
    // ALLOCATE THE d_ntri RESULT ARRAY (ONE int64_t PER THREADBLOCK, NBLOCKS TOTAL) ON THE GPU
OK (cudaMalloc (&d_ntri, (NBLOCKS*sizeof(int64_t)))); //TODO
// start the timer (optional, if you want to time just the kernel):
// cudaEvent_t start, stop ;
// OK (cudaEventCreate (&start)) ;
// OK (cudaEventCreate (&stop)) ;
// OK (cudaEventRecord (start)) ;
/* launch the kernel */
// this is written for you
tri_kernel <<<NBLOCKS, dim3(NX,NY)>>> (d_Ap, d_Ai, n, d_Marks, d_ntri) ;
OK (cudaGetLastError ( )) ;
// stop the timer (optional, if you want to time just the kernel)
// OK (cudaEventRecord (stop)) ;
// OK (cudaEventSynchronize (stop)) ;
// float milliseconds = 0;
// OK (cudaEventElapsedTime (&milliseconds, start, stop)) ;
// printf ("GPU kernel time: %g sec\n", milliseconds / 1000) ;
/* get the result from the GPU: one value for each threadblock */
int64_t ntri = 0, ntris [NBLOCKS] ;
// GETTING THE D_NTRI ARRAY OF SIZE NBLOCKS FROM THE GPU
OK (cudaMemcpy (ntris, d_ntri, (NBLOCKS*sizeof(int64_t)), cudaMemcpyDeviceToHost )); //TODO
/* free space on the GPU */
// use cudaFree to free all the things you cudaMalloc'd.
// if you fail to do this some problems will run out of memory
    //FREE ALL THE DEVICE MEMORY ALLOCATED ABOVE: d_Ap, d_Ai, d_Marks AND d_ntri
    OK (cudaFree (d_Ap)) ;
    OK (cudaFree (d_Ai)) ;
    OK (cudaFree (d_Marks)) ;
    OK (cudaFree (d_ntri)) ;
/* sum up the results for all threadblocks */
// the host has the result of each threadblock in ntris[NBLOCKS].
// sum them up here into ntri.
//TODO
    //FINAL REDUCTION: SUM THE PER-BLOCK TRIANGLE COUNTS ON THE HOST
ntri = 0;
for(int x = 0 ; x < NBLOCKS ; x++){
ntri+= ntris[x];
}
/* return the result */
return (ntri) ;
}
|
7d1b5078534d904f9378d5f70a59bc800b578f56.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/focal_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void FocalLossForwardGPU(const int nthreads,
const Dtype* input_data, const Dtype* sigmoid_data, const Dtype* target, Dtype* scale, Dtype* oriloss,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts, float alpha, float gamma) {
CUDA_KERNEL_LOOP(i, nthreads) {
const int target_value = static_cast<int>( target[ i ] );
if ( has_ignore_label_ && target_value == ignore_label_ ) {
scale[ i ] = 0;
oriloss[ i ] = 0;
counts[ i ] = 0;
}
else {
scale[ i ] = alpha * powf(1 - ( target_value == 1 ? sigmoid_data[ i ] : ( 1 - sigmoid_data[ i ] ) ), gamma);
oriloss[ i ] = -input_data[ i ] * ( target[ i ] - ( input_data[ i ] >= 0 ) ) -
log(1 + exp(input_data[ i ] - 2 * input_data[ i ] *
( input_data[ i ] >= 0 )));
counts[ i ] = 1;
}
}
}
template <typename Dtype>
__global__ void FocalLossBackwardSecondItemGPU(const int nthreads,
const Dtype* input_data, const Dtype* sigmoid_data, const Dtype* target, float alpha, float gamma, Dtype* secondItem) {
CUDA_KERNEL_LOOP(i, nthreads) {
const int target_value = static_cast<int>( target[ i ] );
Dtype expabsx = expf(input_data[ i ] > 0 ? -input_data[ i ] : input_data[ i ]);
secondItem[ i ] = alpha*gamma*
powf(1 - ( target_value == 1 ? sigmoid_data[ i ] : ( 1 - sigmoid_data[ i ] ) ), gamma - 1) *
expabsx / ( powf(expabsx, 2) + 2 * expabsx + 1 ) *
( target_value == 1 ? -1 : 1 );
}
}
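/* For reference (my reading of the kernel above, not a derivation shipped with
   the original file): with x = input_data[i], p = sigmoid(x) and
   p_t = (target == 1 ? p : 1 - p), the kernel computes
       secondItem = d/dx [ alpha * (1 - p_t)^gamma ]
                  = alpha * gamma * (1 - p_t)^(gamma - 1) * p * (1 - p) * (target == 1 ? -1 : +1),
   using the numerically stable identity
       p * (1 - p) = exp(-|x|) / (1 + exp(-|x|))^2,
   which is exactly the expabsx / (expabsx^2 + 2*expabsx + 1) factor. */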
template <typename Dtype>
__global__ void FocalLossIgnoreDiffGPU(const int count,
const int ignore_label, const Dtype* target, Dtype* diff) {
CUDA_KERNEL_LOOP(i, count) {
const int target_value = static_cast<int>( target[ i ] );
if ( target_value == ignore_label ) {
diff[ i ] = 0;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// The forward pass computes the sigmoid outputs.
sigmoid_bottom_vec_[ 0 ] = bottom[ 0 ];
sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
// Compute the loss (negative log likelihood)
const int count = bottom[ 0 ]->count();
// Stable version of loss computation from input data
const Dtype* input_data = bottom[ 0 ]->gpu_data();
const Dtype* target = bottom[ 1 ]->gpu_data();
const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[ 0 ]->mutable_gpu_diff();
Dtype* count_data = bottom[ 1 ]->mutable_gpu_diff();
Dtype valid_count;
// NOLINT_NEXT_LINE(whitespace/operators)
FocalLossForwardGPU<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >( count, input_data, sigmoid_output_data,
target, scaler_.mutable_gpu_data(), scaler_.mutable_gpu_diff(),
has_ignore_label_, ignore_label_, count_data, alpha_, gamma_ );
caffe_gpu_mul(count, scaler_.gpu_data(), scaler_.gpu_diff() , loss_data);
// Only launch another CUDA kernel if we actually need the valid count.
if ( normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_ ) {
caffe_gpu_asum(count, count_data, &valid_count);
}
else {
valid_count = count;
}
Dtype loss;
caffe_gpu_asum(count, loss_data, &loss);
normalizer_ = get_normalizer(normalization_, valid_count);
top[ 0 ]->mutable_cpu_data()[ 0 ] = loss / normalizer_;
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
  // scaler_.gpu_data() holds the focal scale factor; scaler_.gpu_diff() holds the unscaled base loss (oriloss)
if ( propagate_down[ 1 ] ) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if ( propagate_down[ 0 ] ) {
// First, compute the diff
const int count = bottom[ 0 ]->count();
const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
const Dtype* target = bottom[ 1 ]->gpu_data();
const Dtype* input_data = bottom[ 0 ]->gpu_data();
Dtype* bottom_diff = bottom[ 0 ]->mutable_gpu_diff();
// First item: d(oriloss)*scale
caffe_copy(count, sigmoid_output_data, bottom_diff);
caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff);
caffe_gpu_mul(count, scaler_.gpu_data(), bottom[ 0 ]->gpu_diff(), bottom_diff);
// Second item: oriloss*d(scale)
// save result in scaler_.data
FocalLossBackwardSecondItemGPU<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >( count,
input_data, sigmoid_output_data, target, alpha_, gamma_, scaler_.mutable_gpu_data() );
caffe_gpu_mul(count, scaler_.gpu_data(), scaler_.gpu_diff(), scaler_.mutable_gpu_data());
caffe_gpu_add(count, scaler_.gpu_data(), bottom[ 0 ]->gpu_diff(), bottom_diff);
// Zero out gradient of ignored targets.
if ( has_ignore_label_ ) {
// NOLINT_NEXT_LINE(whitespace/operators)
FocalLossIgnoreDiffGPU<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >( count, ignore_label_, target, bottom_diff );
}
// Scale down gradient
Dtype loss_weight = top[ 0 ]->cpu_diff()[ 0 ] / normalizer_;
caffe_gpu_scal(count, loss_weight, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FocalLossLayer);
} // namespace caffe
|
7d1b5078534d904f9378d5f70a59bc800b578f56.cu
|
#include <vector>
#include "caffe/layers/focal_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void FocalLossForwardGPU(const int nthreads,
const Dtype* input_data, const Dtype* sigmoid_data, const Dtype* target, Dtype* scale, Dtype* oriloss,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts, float alpha, float gamma) {
CUDA_KERNEL_LOOP(i, nthreads) {
const int target_value = static_cast<int>( target[ i ] );
if ( has_ignore_label_ && target_value == ignore_label_ ) {
scale[ i ] = 0;
oriloss[ i ] = 0;
counts[ i ] = 0;
}
else {
scale[ i ] = alpha * powf(1 - ( target_value == 1 ? sigmoid_data[ i ] : ( 1 - sigmoid_data[ i ] ) ), gamma);
oriloss[ i ] = -input_data[ i ] * ( target[ i ] - ( input_data[ i ] >= 0 ) ) -
log(1 + exp(input_data[ i ] - 2 * input_data[ i ] *
( input_data[ i ] >= 0 )));
counts[ i ] = 1;
}
}
}
template <typename Dtype>
__global__ void FocalLossBackwardSecondItemGPU(const int nthreads,
const Dtype* input_data, const Dtype* sigmoid_data, const Dtype* target, float alpha, float gamma, Dtype* secondItem) {
CUDA_KERNEL_LOOP(i, nthreads) {
const int target_value = static_cast<int>( target[ i ] );
Dtype expabsx = expf(input_data[ i ] > 0 ? -input_data[ i ] : input_data[ i ]);
secondItem[ i ] = alpha*gamma*
powf(1 - ( target_value == 1 ? sigmoid_data[ i ] : ( 1 - sigmoid_data[ i ] ) ), gamma - 1) *
expabsx / ( powf(expabsx, 2) + 2 * expabsx + 1 ) *
( target_value == 1 ? -1 : 1 );
}
}
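/* For reference (my reading of the kernel above, not a derivation shipped with
   the original file): with x = input_data[i], p = sigmoid(x) and
   p_t = (target == 1 ? p : 1 - p), the kernel computes
       secondItem = d/dx [ alpha * (1 - p_t)^gamma ]
                  = alpha * gamma * (1 - p_t)^(gamma - 1) * p * (1 - p) * (target == 1 ? -1 : +1),
   using the numerically stable identity
       p * (1 - p) = exp(-|x|) / (1 + exp(-|x|))^2,
   which is exactly the expabsx / (expabsx^2 + 2*expabsx + 1) factor. */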
template <typename Dtype>
__global__ void FocalLossIgnoreDiffGPU(const int count,
const int ignore_label, const Dtype* target, Dtype* diff) {
CUDA_KERNEL_LOOP(i, count) {
const int target_value = static_cast<int>( target[ i ] );
if ( target_value == ignore_label ) {
diff[ i ] = 0;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// The forward pass computes the sigmoid outputs.
sigmoid_bottom_vec_[ 0 ] = bottom[ 0 ];
sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
// Compute the loss (negative log likelihood)
const int count = bottom[ 0 ]->count();
// Stable version of loss computation from input data
const Dtype* input_data = bottom[ 0 ]->gpu_data();
const Dtype* target = bottom[ 1 ]->gpu_data();
const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[ 0 ]->mutable_gpu_diff();
Dtype* count_data = bottom[ 1 ]->mutable_gpu_diff();
Dtype valid_count;
// NOLINT_NEXT_LINE(whitespace/operators)
FocalLossForwardGPU<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >( count, input_data, sigmoid_output_data,
target, scaler_.mutable_gpu_data(), scaler_.mutable_gpu_diff(),
has_ignore_label_, ignore_label_, count_data, alpha_, gamma_ );
caffe_gpu_mul(count, scaler_.gpu_data(), scaler_.gpu_diff() , loss_data);
// Only launch another CUDA kernel if we actually need the valid count.
if ( normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_ ) {
caffe_gpu_asum(count, count_data, &valid_count);
}
else {
valid_count = count;
}
Dtype loss;
caffe_gpu_asum(count, loss_data, &loss);
normalizer_ = get_normalizer(normalization_, valid_count);
top[ 0 ]->mutable_cpu_data()[ 0 ] = loss / normalizer_;
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
  // scaler_.gpu_data() holds the focal scale factor; scaler_.gpu_diff() holds the unscaled base loss (oriloss)
if ( propagate_down[ 1 ] ) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if ( propagate_down[ 0 ] ) {
// First, compute the diff
const int count = bottom[ 0 ]->count();
const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
const Dtype* target = bottom[ 1 ]->gpu_data();
const Dtype* input_data = bottom[ 0 ]->gpu_data();
Dtype* bottom_diff = bottom[ 0 ]->mutable_gpu_diff();
// First item: d(oriloss)*scale
caffe_copy(count, sigmoid_output_data, bottom_diff);
caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff);
caffe_gpu_mul(count, scaler_.gpu_data(), bottom[ 0 ]->gpu_diff(), bottom_diff);
// Second item: oriloss*d(scale)
// save result in scaler_.data
FocalLossBackwardSecondItemGPU<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >( count,
input_data, sigmoid_output_data, target, alpha_, gamma_, scaler_.mutable_gpu_data() );
caffe_gpu_mul(count, scaler_.gpu_data(), scaler_.gpu_diff(), scaler_.mutable_gpu_data());
caffe_gpu_add(count, scaler_.gpu_data(), bottom[ 0 ]->gpu_diff(), bottom_diff);
// Zero out gradient of ignored targets.
if ( has_ignore_label_ ) {
// NOLINT_NEXT_LINE(whitespace/operators)
FocalLossIgnoreDiffGPU<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >( count, ignore_label_, target, bottom_diff );
}
// Scale down gradient
Dtype loss_weight = top[ 0 ]->cpu_diff()[ 0 ] / normalizer_;
caffe_gpu_scal(count, loss_weight, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FocalLossLayer);
} // namespace caffe
|
0860279cf35f7415629d81297ce324ce54ef1023.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include "./dummy_helper.cuh.cu"
namespace RayTracing
{
template<typename Derived, typename Base, typename ... Args>
__global__
void __AllocInstance(
Base** _texture,
Args ... args
)
{
if (threadIdx.x == 0 && blockIdx.x == 0)
*_texture = new Derived(args...);
}
template<typename Base>
__global__
void __DeallocInstance(Base** _texture)
{
if (threadIdx.x == 0 && blockIdx.x == 0)
delete *_texture;
}
template<typename Derived, typename Base, typename ... Args>
void AllocInstance(
Base** _texture,
Args ... args
)
{
CudaKernelChecker checker;
hipLaunchKernelGGL(( __AllocInstance<Derived, Base, Args...>), dim3(1), dim3(32), 0, 0, _texture, args...);
checker.check("AllocInstance");
}
template<typename Base>
void DeallocInstance(Base** _texture)
{
CudaKernelChecker checker;
hipLaunchKernelGGL(( __DeallocInstance), dim3(1), dim3(32), 0, 0, _texture);
checker.check("DeallocInstance");
}
template<typename Derived, typename Base, typename ... Args>
class CudaHeapObject
{
public:
mutable Base **ptr = nullptr;
public:
CudaHeapObject(Args ... args)
{
        checkCudaErrors(hipMalloc(&ptr, sizeof(Base*)));
AllocInstance<Derived>(ptr, args...);
}
~CudaHeapObject()
{
if (ptr == nullptr)
return;
DeallocInstance(ptr);
checkCudaErrors(hipFree(ptr));
}
};
template<typename Derived, typename Base, typename ... Args>
class HeapObject
{
public:
mutable Base **ptr = nullptr;
public:
HeapObject(Args ... args)
{
ptr = new Base*;
*ptr = new Derived(args...);
}
~HeapObject()
{
if (ptr == nullptr)
return;
delete *ptr;
delete ptr;
}
};
} // namespace RayTracing
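/* A minimal usage sketch (Shape, Sphere and someKernel are hypothetical names,
   not part of this header): CudaHeapObject constructs a polymorphic object on
   the device heap via __AllocInstance, so the resulting Base** may only be
   dereferenced from device code, where virtual dispatch on it is valid.

   struct Shape { __device__ virtual float Area() const = 0; __device__ virtual ~Shape() {} };
   struct Sphere : public Shape {
       float r;
       __device__ Sphere(float r) : r(r) {}
       __device__ float Area() const override { return 4.0f * 3.14159265f * r * r; }
   };

   // Host side: builds a Sphere(2.0f) on the device; pass sphere.ptr to a
   // kernel and read (*base_ptr)->Area() there.
   RayTracing::CudaHeapObject<Sphere, Shape, float> sphere(2.0f);
   hipLaunchKernelGGL(someKernel, dim3(1), dim3(1), 0, 0, sphere.ptr);
*/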
|
0860279cf35f7415629d81297ce324ce54ef1023.cu
|
#pragma once
#include "./dummy_helper.cuh.cu"
namespace RayTracing
{
template<typename Derived, typename Base, typename ... Args>
__global__
void __AllocInstance(
Base** _texture,
Args ... args
)
{
if (threadIdx.x == 0 && blockIdx.x == 0)
*_texture = new Derived(args...);
}
template<typename Base>
__global__
void __DeallocInstance(Base** _texture)
{
if (threadIdx.x == 0 && blockIdx.x == 0)
delete *_texture;
}
template<typename Derived, typename Base, typename ... Args>
void AllocInstance(
Base** _texture,
Args ... args
)
{
CudaKernelChecker checker;
__AllocInstance<Derived, Base, Args...><<<1, 32>>>(_texture, args...);
checker.check("AllocInstance");
}
template<typename Base>
void DeallocInstance(Base** _texture)
{
CudaKernelChecker checker;
__DeallocInstance<<<1, 32>>>(_texture);
checker.check("DeallocInstance");
}
template<typename Derived, typename Base, typename ... Args>
class CudaHeapObject
{
public:
mutable Base **ptr = nullptr;
public:
CudaHeapObject(Args ... args)
{
        checkCudaErrors(cudaMalloc(&ptr, sizeof(Base*)));
AllocInstance<Derived>(ptr, args...);
}
~CudaHeapObject()
{
if (ptr == nullptr)
return;
DeallocInstance(ptr);
checkCudaErrors(cudaFree(ptr));
}
};
template<typename Derived, typename Base, typename ... Args>
class HeapObject
{
public:
mutable Base **ptr = nullptr;
public:
HeapObject(Args ... args)
{
ptr = new Base*;
*ptr = new Derived(args...);
}
~HeapObject()
{
if (ptr == nullptr)
return;
delete *ptr;
delete ptr;
}
};
} // namespace RayTracing
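/* A minimal usage sketch (Shape, Sphere and someKernel are hypothetical names,
   not part of this header): CudaHeapObject constructs a polymorphic object on
   the device heap via __AllocInstance, so the resulting Base** may only be
   dereferenced from device code, where virtual dispatch on it is valid.

   struct Shape { __device__ virtual float Area() const = 0; __device__ virtual ~Shape() {} };
   struct Sphere : public Shape {
       float r;
       __device__ Sphere(float r) : r(r) {}
       __device__ float Area() const override { return 4.0f * 3.14159265f * r * r; }
   };

   // Host side: builds a Sphere(2.0f) on the device; pass sphere.ptr to a
   // kernel and read (*base_ptr)->Area() there.
   RayTracing::CudaHeapObject<Sphere, Shape, float> sphere(2.0f);
   someKernel<<<1, 1>>>(sphere.ptr);
*/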
|
7856beb42942320a7c6699ce3eb0406c6523eb2c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/cudnn_ndconv_layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
__global__ void sync_ndconv_groups() { }
template <typename Dtype>
void CudnnNdConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
size_t workspace_limit_bytes = this->channels_*sizeof(int);
for (int j = 0; j < this->kernel_shape_.size(); ++j) {
workspace_limit_bytes *= kernel_shape_[j];
}
++workspace_limit_bytes;
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
cudnnConvolutionFwdAlgo_t algo;
// pick the convolution algorithm
// TODO(shelhamer) this should be done during reshape
// TODO(shelhamer) the choice of automatic or manual algorithm picking
// should be exposed in proto
CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(handle_[g],
bottom_descs_[i],
filter_desc_,
conv_descs_[i],
top_descs_[i],
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_limit_bytes, // memoryLimitInBytes,
&algo));
// get minimum size of the workspace needed for the desired algorithm
size_t workspaceSizeInBytes_temp = 0;
CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle_[g],
bottom_descs_[i],
filter_desc_,
conv_descs_[i],
top_descs_[i],
algo,
&workspaceSizeInBytes_temp));
if (workspaceSizeInBytes_temp > workspaceSizeInBytes) {
workspaceSizeInBytes = workspaceSizeInBytes_temp;
// free the existing workspace and allocate a new (larger) one
hipFree(this->workspace);
hipError_t err = hipMalloc(&(this->workspace),
workspaceSizeInBytes);
if (err != hipSuccess) {
// force zero memory path
algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM;
workspace = NULL;
workspaceSizeInBytes = 0;
}
}
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + weight_offset_ * g,
conv_descs_[i],
algo, workspace, workspaceSizeInBytes,
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor_v3(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_ndconv_groups), dim3(1), dim3(1), 0, 0, );
}
}
template <typename Dtype>
void CudnnNdConvolutionLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>&
top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ +
g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_ndconv_groups), dim3(1), dim3(1), 0, 0, );
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CudnnNdConvolutionLayer);
} // namespace caffe
#endif
|
7856beb42942320a7c6699ce3eb0406c6523eb2c.cu
|
#ifdef USE_CUDNN
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/cudnn_ndconv_layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
__global__ void sync_ndconv_groups() { }
template <typename Dtype>
void CudnnNdConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
size_t workspace_limit_bytes = this->channels_*sizeof(int);
for (int j = 0; j < this->kernel_shape_.size(); ++j) {
workspace_limit_bytes *= kernel_shape_[j];
}
++workspace_limit_bytes;
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
cudnnConvolutionFwdAlgo_t algo;
// pick the convolution algorithm
// TODO(shelhamer) this should be done during reshape
// TODO(shelhamer) the choice of automatic or manual algorithm picking
// should be exposed in proto
CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(handle_[g],
bottom_descs_[i],
filter_desc_,
conv_descs_[i],
top_descs_[i],
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_limit_bytes, // memoryLimitInBytes,
&algo));
// get minimum size of the workspace needed for the desired algorithm
size_t workspaceSizeInBytes_temp = 0;
CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle_[g],
bottom_descs_[i],
filter_desc_,
conv_descs_[i],
top_descs_[i],
algo,
&workspaceSizeInBytes_temp));
if (workspaceSizeInBytes_temp > workspaceSizeInBytes) {
workspaceSizeInBytes = workspaceSizeInBytes_temp;
// free the existing workspace and allocate a new (larger) one
cudaFree(this->workspace);
cudaError_t err = cudaMalloc(&(this->workspace),
workspaceSizeInBytes);
if (err != cudaSuccess) {
// force zero memory path
algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM;
workspace = NULL;
workspaceSizeInBytes = 0;
}
}
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + weight_offset_ * g,
conv_descs_[i],
algo, workspace, workspaceSizeInBytes,
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor_v3(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_ndconv_groups<<<1, 1>>>();
}
}
template <typename Dtype>
void CudnnNdConvolutionLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>&
top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ +
g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_ndconv_groups<<<1, 1>>>();
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CudnnNdConvolutionLayer);
} // namespace caffe
#endif
|
SOR_host.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "SOR_kernel.hu"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<sys/time.h>
#include<sys/stat.h>
#include<fcntl.h>
#include<string.h>
#include<errno.h>
const int n1 = 4096, n2 = 4096;
const int nn1 = 4108, nn2 = 4108;
int _jacobi_square(int arr1[nn1][nn2], int arr2[nn1][nn2], int stride, int r, int c){
int total = 0;
for (int row = r - stride; row <= r + stride; row++){
for (int col = c - stride; col <= c + stride; col++){
total += arr1[row][col];
}
}
return total / (stride + stride + 1) / (stride + stride + 1);
}
int _jacobi_cross(int arr1[nn1][nn2], int arr2[nn1][nn2], int stride, int r, int c){
int total = 0;
    for (int row = r - stride; row < r; row++){
total += arr1[row][c];
}
    for (int row = r + 1; row <= r + stride; row++){
total += arr1[row][c];
}
for (int col = c - stride; col <= c + stride; col++){
total += arr1[r][col];
}
return total / ((stride + stride + 1) * 2 - 1);
}
void SOR(int len1, int len2, int arr1[nn1][nn2], int arr2[nn1][nn2], int padd, int trial, int stride){
struct timeval tbegin, tend;
gettimeofday(&tbegin, NULL);
if (trial >= 1 && len1 >= padd + 1 && len2 >= padd + 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
int *dev_arr1;
int *dev_arr2;
cudaCheckReturn(hipMalloc((void **) &dev_arr1, (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647 ? 2147483648 : stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647 ? len1 + stride : len1) * (4108) * sizeof(int)));
cudaCheckReturn(hipMalloc((void **) &dev_arr2, (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647 ? 2147483648 : stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647 ? len1 + stride : len1) * (4108) * sizeof(int)));
if ((padd <= 4107 && padd + stride >= 2147483648) || (padd <= 4107 && stride <= -1) || (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647) || (stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647)) {
cudaCheckReturn(hipMemcpy(dev_arr1, arr1, (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647 ? 2147483648 : stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647 ? len1 + stride : len1) * (4108) * sizeof(int), hipMemcpyHostToDevice));
cudaCheckReturn(hipMemcpy(dev_arr2, arr2, (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647 ? 2147483648 : stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647 ? len1 + stride : len1) * (4108) * sizeof(int), hipMemcpyHostToDevice));
}
for (int c0 = 0; c0 < trial; c0 += 2) {
{
dim3 k0_dimBlock(16, 32);
dim3 k0_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), len1 + 30 >= ((len1 + 31) % 8192) + padd ? 256 : (len1 + 31) / 32 - 256 * ((len1 + 31) / 8192));
hipLaunchKernelGGL(( kernel0) , dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_arr1, dev_arr2, trial, padd, len1, len2, stride, c0);
cudaCheckKernel();
}
{
dim3 k1_dimBlock(16, 32);
dim3 k1_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), len1 + 30 >= ((len1 + 31) % 8192) + padd ? 256 : (len1 + 31) / 32 - 256 * ((len1 + 31) / 8192));
hipLaunchKernelGGL(( kernel1) , dim3(k1_dimGrid), dim3(k1_dimBlock), 0, 0, dev_arr1, dev_arr2, trial, padd, len1, len2, stride, c0);
cudaCheckKernel();
}
}
if ((padd <= 4107 && padd + stride >= 2147483648) || (padd <= 4107 && stride <= -1) || (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647) || (stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647)) {
cudaCheckReturn(hipMemcpy(arr1, dev_arr1, (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647 ? 2147483648 : stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647 ? len1 + stride : len1) * (4108) * sizeof(int), hipMemcpyDeviceToHost));
cudaCheckReturn(hipMemcpy(arr2, dev_arr2, (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647 ? 2147483648 : stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647 ? len1 + stride : len1) * (4108) * sizeof(int), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_arr1));
cudaCheckReturn(hipFree(dev_arr2));
}
gettimeofday(&tend, NULL);
double tt = (double)(tend.tv_sec - tbegin.tv_sec) + (double)(tend.tv_usec - tbegin.tv_usec) / 1000000.0;
printf("execution time: %lf s\n", tt);
}
int main(){
int trial = 64;
int stride = 3;
int padd = stride * 2;
static int arr1[nn1][nn2];
static int arr2[nn1][nn2];
for (int row = 0; row < nn1; row++){
for (int col = 0; col < nn2; col++){
arr1[row][col] = rand() % 100;
arr2[row][col] = arr1[row][col];
}
}
SOR(n1 + padd, n2 + padd, arr1, arr2, padd, trial, stride);
return 0;
}
|
SOR_host.cu
|
#include <assert.h>
#include <stdio.h>
#include "SOR_kernel.hu"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<sys/time.h>
#include<sys/stat.h>
#include<fcntl.h>
#include<string.h>
#include<errno.h>
const int n1 = 4096, n2 = 4096;
const int nn1 = 4108, nn2 = 4108;
int _jacobi_square(int arr1[nn1][nn2], int arr2[nn1][nn2], int stride, int r, int c){
int total = 0;
for (int row = r - stride; row <= r + stride; row++){
for (int col = c - stride; col <= c + stride; col++){
total += arr1[row][col];
}
}
return total / (stride + stride + 1) / (stride + stride + 1);
}
int _jacobi_cross(int arr1[nn1][nn2], int arr2[nn1][nn2], int stride, int r, int c){
int total = 0;
    for (int row = r - stride; row < r; row++){
total += arr1[row][c];
}
    for (int row = r + 1; row <= r + stride; row++){
total += arr1[row][c];
}
for (int col = c - stride; col <= c + stride; col++){
total += arr1[r][col];
}
return total / ((stride + stride + 1) * 2 - 1);
}
void SOR(int len1, int len2, int arr1[nn1][nn2], int arr2[nn1][nn2], int padd, int trial, int stride){
struct timeval tbegin, tend;
gettimeofday(&tbegin, NULL);
if (trial >= 1 && len1 >= padd + 1 && len2 >= padd + 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
int *dev_arr1;
int *dev_arr2;
cudaCheckReturn(cudaMalloc((void **) &dev_arr1, (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647 ? 2147483648 : stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647 ? len1 + stride : len1) * (4108) * sizeof(int)));
cudaCheckReturn(cudaMalloc((void **) &dev_arr2, (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647 ? 2147483648 : stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647 ? len1 + stride : len1) * (4108) * sizeof(int)));
if ((padd <= 4107 && padd + stride >= 2147483648) || (padd <= 4107 && stride <= -1) || (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647) || (stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647)) {
cudaCheckReturn(cudaMemcpy(dev_arr1, arr1, (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647 ? 2147483648 : stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647 ? len1 + stride : len1) * (4108) * sizeof(int), cudaMemcpyHostToDevice));
cudaCheckReturn(cudaMemcpy(dev_arr2, arr2, (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647 ? 2147483648 : stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647 ? len1 + stride : len1) * (4108) * sizeof(int), cudaMemcpyHostToDevice));
}
for (int c0 = 0; c0 < trial; c0 += 2) {
{
dim3 k0_dimBlock(16, 32);
dim3 k0_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), len1 + 30 >= ((len1 + 31) % 8192) + padd ? 256 : (len1 + 31) / 32 - 256 * ((len1 + 31) / 8192));
kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_arr1, dev_arr2, trial, padd, len1, len2, stride, c0);
cudaCheckKernel();
}
{
dim3 k1_dimBlock(16, 32);
dim3 k1_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), len1 + 30 >= ((len1 + 31) % 8192) + padd ? 256 : (len1 + 31) / 32 - 256 * ((len1 + 31) / 8192));
kernel1 <<<k1_dimGrid, k1_dimBlock>>> (dev_arr1, dev_arr2, trial, padd, len1, len2, stride, c0);
cudaCheckKernel();
}
}
if ((padd <= 4107 && padd + stride >= 2147483648) || (padd <= 4107 && stride <= -1) || (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647) || (stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647)) {
cudaCheckReturn(cudaMemcpy(arr1, dev_arr1, (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647 ? 2147483648 : stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647 ? len1 + stride : len1) * (4108) * sizeof(int), cudaMemcpyDeviceToHost));
cudaCheckReturn(cudaMemcpy(arr2, dev_arr2, (len1 + stride >= 2147483648 && stride + 4107 >= padd && padd + stride <= 2147483647 ? 2147483648 : stride >= 0 && stride + 4107 >= padd && len1 + stride <= 2147483647 ? len1 + stride : len1) * (4108) * sizeof(int), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_arr1));
cudaCheckReturn(cudaFree(dev_arr2));
}
gettimeofday(&tend, NULL);
double tt = (double)(tend.tv_sec - tbegin.tv_sec) + (double)(tend.tv_usec - tbegin.tv_usec) / 1000000.0;
printf("execution time: %lf s\n", tt);
}
int main(){
int trial = 64;
int stride = 3;
int padd = stride * 2;
static int arr1[nn1][nn2];
static int arr2[nn1][nn2];
for (int row = 0; row < nn1; row++){
for (int col = 0; col < nn2; col++){
arr1[row][col] = rand() % 100;
arr2[row][col] = arr1[row][col];
}
}
SOR(n1 + padd, n2 + padd, arr1, arr2, padd, trial, stride);
return 0;
}
|
DistributionRandomKernel.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/DistributionTemplates.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {
void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_from_to_kernel(iter, range, base, gen);
}
void random_full_64_bits_range_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_full_64_bits_range_kernel(iter, gen);
}
void random_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_kernel(iter, gen);
}
REGISTER_DISPATCH(random_from_to_stub, &random_from_to_kernel);
REGISTER_DISPATCH(random_stub, &random_kernel);
REGISTER_DISPATCH(random_full_64_bits_range_stub, &random_full_64_bits_range_kernel);
}} // namespace at::native
|
DistributionRandomKernel.cu
|
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {
void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_from_to_kernel(iter, range, base, gen);
}
void random_full_64_bits_range_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_full_64_bits_range_kernel(iter, gen);
}
void random_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_kernel(iter, gen);
}
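// Register the CUDA implementations with ATen's dispatch stubs; ops such as Tensor::random_()
// on CUDA tensors are routed to the kernels above through these stubs.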
REGISTER_DISPATCH(random_from_to_stub, &random_from_to_kernel);
REGISTER_DISPATCH(random_stub, &random_kernel);
REGISTER_DISPATCH(random_full_64_bits_range_stub, &random_full_64_bits_range_kernel);
}} // namespace at::native
|
891c7b68a325556c3b8bcd4451c52bb3e9025d46.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Copyright 2011-2012 Karsten Ahnert
Copyright 2011-2013 Mario Mulansky
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or
copy at http://www.boost.org/LICENSE_1_0.txt)
*/
#include <iostream>
#include <cmath>
#include <utility>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <boost/numeric/odeint.hpp>
#include <boost/numeric/odeint/external/thrust/thrust.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_real.hpp>
#include <boost/random/variate_generator.hpp>
using namespace std;
using namespace boost::numeric::odeint;
//change this to float if your device does not support double computation
typedef double value_type;
//change this to host_vector< ... > if you want to run on CPU
typedef thrust::device_vector< value_type > state_type;
typedef thrust::device_vector< size_t > index_vector_type;
// typedef thrust::host_vector< value_type > state_type;
// typedef thrust::host_vector< size_t > index_vector_type;
const value_type sigma = 10.0;
const value_type b = 8.0 / 3.0;
//[ thrust_lorenz_parameters_define_simple_system
struct lorenz_system
{
struct lorenz_functor
{
template< class T >
__host__ __device__
void operator()( T t ) const
{
// unpack the parameter we want to vary and the Lorenz variables
value_type R = thrust::get< 3 >( t );
value_type x = thrust::get< 0 >( t );
value_type y = thrust::get< 1 >( t );
value_type z = thrust::get< 2 >( t );
thrust::get< 4 >( t ) = sigma * ( y - x );
thrust::get< 5 >( t ) = R * x - y - x * z;
thrust::get< 6 >( t ) = -b * z + x * y ;
}
};
lorenz_system( size_t N , const state_type &beta )
: m_N( N ) , m_beta( beta ) { }
template< class State , class Deriv >
void operator()( const State &x , Deriv &dxdt , value_type t ) const
{
thrust::for_each(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) ,
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
m_beta.begin() ,
boost::begin( dxdt ) ,
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ) ) ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
boost::begin( x ) + 3 * m_N ,
m_beta.begin() ,
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ,
boost::begin( dxdt ) + 3 * m_N ) ) ,
lorenz_functor() );
}
size_t m_N;
const state_type &m_beta;
};
//]
struct lorenz_perturbation_system
{
struct lorenz_perturbation_functor
{
template< class T >
__host__ __device__
void operator()( T t ) const
{
value_type R = thrust::get< 1 >( t );
value_type x = thrust::get< 0 >( thrust::get< 0 >( t ) );
value_type y = thrust::get< 1 >( thrust::get< 0 >( t ) );
value_type z = thrust::get< 2 >( thrust::get< 0 >( t ) );
value_type dx = thrust::get< 3 >( thrust::get< 0 >( t ) );
value_type dy = thrust::get< 4 >( thrust::get< 0 >( t ) );
value_type dz = thrust::get< 5 >( thrust::get< 0 >( t ) );
thrust::get< 0 >( thrust::get< 2 >( t ) ) = sigma * ( y - x );
thrust::get< 1 >( thrust::get< 2 >( t ) ) = R * x - y - x * z;
thrust::get< 2 >( thrust::get< 2 >( t ) ) = -b * z + x * y ;
thrust::get< 3 >( thrust::get< 2 >( t ) ) = sigma * ( dy - dx );
thrust::get< 4 >( thrust::get< 2 >( t ) ) = ( R - z ) * dx - dy - x * dz;
thrust::get< 5 >( thrust::get< 2 >( t ) ) = y * dx + x * dy - b * dz;
}
};
lorenz_perturbation_system( size_t N , const state_type &beta )
: m_N( N ) , m_beta( beta ) { }
template< class State , class Deriv >
void operator()( const State &x , Deriv &dxdt , value_type t ) const
{
thrust::for_each(
thrust::make_zip_iterator( thrust::make_tuple(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) ,
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
boost::begin( x ) + 3 * m_N ,
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ) ) ,
m_beta.begin() ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( dxdt ) ,
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ,
boost::begin( dxdt ) + 3 * m_N ,
boost::begin( dxdt ) + 4 * m_N ,
boost::begin( dxdt ) + 5 * m_N ) )
) ) ,
thrust::make_zip_iterator( thrust::make_tuple(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
boost::begin( x ) + 3 * m_N ,
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ,
boost::begin( x ) + 6 * m_N ) ) ,
m_beta.begin() ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ,
boost::begin( dxdt ) + 3 * m_N ,
boost::begin( dxdt ) + 4 * m_N ,
boost::begin( dxdt ) + 5 * m_N ,
boost::begin( dxdt ) + 6 * m_N ) )
) ) ,
lorenz_perturbation_functor() );
}
size_t m_N;
const state_type &m_beta;
};
struct lyap_observer
{
//[thrust_lorenz_parameters_observer_functor
struct lyap_functor
{
template< class T >
__host__ __device__
void operator()( T t ) const
{
value_type &dx = thrust::get< 0 >( t );
value_type &dy = thrust::get< 1 >( t );
value_type &dz = thrust::get< 2 >( t );
value_type norm = sqrt( dx * dx + dy * dy + dz * dz );
dx /= norm;
dy /= norm;
dz /= norm;
thrust::get< 3 >( t ) += log( norm );
}
};
//]
lyap_observer( size_t N , size_t every = 100 )
: m_N( N ) , m_lyap( N ) , m_every( every ) , m_count( 0 )
{
thrust::fill( m_lyap.begin() , m_lyap.end() , 0.0 );
}
template< class Lyap >
void fill_lyap( Lyap &lyap )
{
thrust::copy( m_lyap.begin() , m_lyap.end() , lyap.begin() );
for( size_t i=0 ; i<lyap.size() ; ++i )
lyap[i] /= m_t_overall;
}
template< class State >
void operator()( State &x , value_type t )
{
if( ( m_count != 0 ) && ( ( m_count % m_every ) == 0 ) )
{
thrust::for_each(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + 3 * m_N ,
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ,
m_lyap.begin() ) ) ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ,
boost::begin( x ) + 6 * m_N ,
m_lyap.end() ) ) ,
lyap_functor() );
clog << t << "\n";
}
++m_count;
m_t_overall = t;
}
size_t m_N;
state_type m_lyap;
size_t m_every;
size_t m_count;
value_type m_t_overall;
};
const size_t N = 1024*2;
const value_type dt = 0.01;
int main( int argc , char* argv[] )
{
int driver_version , runtime_version;
hipDriverGetVersion( &driver_version );
hipRuntimeGetVersion ( &runtime_version );
cout << driver_version << "\t" << runtime_version << endl;
//[ thrust_lorenz_parameters_define_beta
vector< value_type > beta_host( N );
const value_type beta_min = 0.0 , beta_max = 56.0;
for( size_t i=0 ; i<N ; ++i )
beta_host[i] = beta_min + value_type( i ) * ( beta_max - beta_min ) / value_type( N - 1 );
state_type beta = beta_host;
//]
//[ thrust_lorenz_parameters_integration
state_type x( 6 * N );
// initialize x,y,z
thrust::fill( x.begin() , x.begin() + 3 * N , 10.0 );
// initial dx
thrust::fill( x.begin() + 3 * N , x.begin() + 4 * N , 1.0 );
// initialize dy,dz
thrust::fill( x.begin() + 4 * N , x.end() , 0.0 );
// create error stepper, can be used with make_controlled or make_dense_output
typedef runge_kutta_dopri5< state_type , value_type , state_type , value_type > stepper_type;
lorenz_system lorenz( N , beta );
lorenz_perturbation_system lorenz_perturbation( N , beta );
lyap_observer obs( N , 1 );
// calculate transients
integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , stepper_type() ) , lorenz , std::make_pair( x.begin() , x.begin() + 3 * N ) , 0.0 , 10.0 , dt );
// calculate the Lyapunov exponents -- the main loop
double t = 0.0;
while( t < 10000.0 )
{
integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , stepper_type() ) , lorenz_perturbation , x , t , t + 1.0 , 0.1 );
t += 1.0;
obs( x , t );
}
vector< value_type > lyap( N );
obs.fill_lyap( lyap );
for( size_t i=0 ; i<N ; ++i )
cout << beta_host[i] << "\t" << lyap[i] << "\n";
//]
return 0;
}
|
891c7b68a325556c3b8bcd4451c52bb3e9025d46.cu
|
/*
Copyright 2011-2012 Karsten Ahnert
Copyright 2011-2013 Mario Mulansky
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or
copy at http://www.boost.org/LICENSE_1_0.txt)
*/
#include <iostream>
#include <cmath>
#include <utility>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <boost/numeric/odeint.hpp>
#include <boost/numeric/odeint/external/thrust/thrust.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_real.hpp>
#include <boost/random/variate_generator.hpp>
using namespace std;
using namespace boost::numeric::odeint;
//change this to float if your device does not support double computation
typedef double value_type;
//change this to host_vector< ... > if you want to run on CPU
typedef thrust::device_vector< value_type > state_type;
typedef thrust::device_vector< size_t > index_vector_type;
// typedef thrust::host_vector< value_type > state_type;
// typedef thrust::host_vector< size_t > index_vector_type;
const value_type sigma = 10.0;
const value_type b = 8.0 / 3.0;
//[ thrust_lorenz_parameters_define_simple_system
struct lorenz_system
{
struct lorenz_functor
{
template< class T >
__host__ __device__
void operator()( T t ) const
{
// unpack the parameter we want to vary and the Lorenz variables
value_type R = thrust::get< 3 >( t );
value_type x = thrust::get< 0 >( t );
value_type y = thrust::get< 1 >( t );
value_type z = thrust::get< 2 >( t );
thrust::get< 4 >( t ) = sigma * ( y - x );
thrust::get< 5 >( t ) = R * x - y - x * z;
thrust::get< 6 >( t ) = -b * z + x * y ;
}
};
lorenz_system( size_t N , const state_type &beta )
: m_N( N ) , m_beta( beta ) { }
template< class State , class Deriv >
void operator()( const State &x , Deriv &dxdt , value_type t ) const
{
thrust::for_each(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) ,
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
m_beta.begin() ,
boost::begin( dxdt ) ,
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ) ) ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
boost::begin( x ) + 3 * m_N ,
m_beta.begin() ,
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ,
boost::begin( dxdt ) + 3 * m_N ) ) ,
lorenz_functor() );
}
size_t m_N;
const state_type &m_beta;
};
//]
struct lorenz_perturbation_system
{
struct lorenz_perturbation_functor
{
template< class T >
__host__ __device__
void operator()( T t ) const
{
value_type R = thrust::get< 1 >( t );
value_type x = thrust::get< 0 >( thrust::get< 0 >( t ) );
value_type y = thrust::get< 1 >( thrust::get< 0 >( t ) );
value_type z = thrust::get< 2 >( thrust::get< 0 >( t ) );
value_type dx = thrust::get< 3 >( thrust::get< 0 >( t ) );
value_type dy = thrust::get< 4 >( thrust::get< 0 >( t ) );
value_type dz = thrust::get< 5 >( thrust::get< 0 >( t ) );
thrust::get< 0 >( thrust::get< 2 >( t ) ) = sigma * ( y - x );
thrust::get< 1 >( thrust::get< 2 >( t ) ) = R * x - y - x * z;
thrust::get< 2 >( thrust::get< 2 >( t ) ) = -b * z + x * y ;
thrust::get< 3 >( thrust::get< 2 >( t ) ) = sigma * ( dy - dx );
thrust::get< 4 >( thrust::get< 2 >( t ) ) = ( R - z ) * dx - dy - x * dz;
thrust::get< 5 >( thrust::get< 2 >( t ) ) = y * dx + x * dy - b * dz;
}
};
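// The last three components integrate the variational (linearised) equations,
// d(delta)/dt = J(x) * delta, with J the Jacobian of the Lorenz flow evaluated along the trajectory.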
lorenz_perturbation_system( size_t N , const state_type &beta )
: m_N( N ) , m_beta( beta ) { }
template< class State , class Deriv >
void operator()( const State &x , Deriv &dxdt , value_type t ) const
{
thrust::for_each(
thrust::make_zip_iterator( thrust::make_tuple(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) ,
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
boost::begin( x ) + 3 * m_N ,
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ) ) ,
m_beta.begin() ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( dxdt ) ,
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ,
boost::begin( dxdt ) + 3 * m_N ,
boost::begin( dxdt ) + 4 * m_N ,
boost::begin( dxdt ) + 5 * m_N ) )
) ) ,
thrust::make_zip_iterator( thrust::make_tuple(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
boost::begin( x ) + 3 * m_N ,
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ,
boost::begin( x ) + 6 * m_N ) ) ,
m_beta.begin() ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ,
boost::begin( dxdt ) + 3 * m_N ,
boost::begin( dxdt ) + 4 * m_N ,
boost::begin( dxdt ) + 5 * m_N ,
boost::begin( dxdt ) + 6 * m_N ) )
) ) ,
lorenz_perturbation_functor() );
}
size_t m_N;
const state_type &m_beta;
};
struct lyap_observer
{
//[thrust_lorenz_parameters_observer_functor
struct lyap_functor
{
template< class T >
__host__ __device__
void operator()( T t ) const
{
value_type &dx = thrust::get< 0 >( t );
value_type &dy = thrust::get< 1 >( t );
value_type &dz = thrust::get< 2 >( t );
value_type norm = sqrt( dx * dx + dy * dy + dz * dz );
dx /= norm;
dy /= norm;
dz /= norm;
thrust::get< 3 >( t ) += log( norm );
}
};
//]
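// Benettin-style estimate of the largest Lyapunov exponent: at every observation the
// perturbation (dx,dy,dz) is renormalised and the log of its growth accumulated per parameter
// value; fill_lyap() then divides the accumulated sum by the total integration time.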
lyap_observer( size_t N , size_t every = 100 )
: m_N( N ) , m_lyap( N ) , m_every( every ) , m_count( 0 )
{
thrust::fill( m_lyap.begin() , m_lyap.end() , 0.0 );
}
template< class Lyap >
void fill_lyap( Lyap &lyap )
{
thrust::copy( m_lyap.begin() , m_lyap.end() , lyap.begin() );
for( size_t i=0 ; i<lyap.size() ; ++i )
lyap[i] /= m_t_overall;
}
template< class State >
void operator()( State &x , value_type t )
{
if( ( m_count != 0 ) && ( ( m_count % m_every ) == 0 ) )
{
thrust::for_each(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + 3 * m_N ,
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ,
m_lyap.begin() ) ) ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ,
boost::begin( x ) + 6 * m_N ,
m_lyap.end() ) ) ,
lyap_functor() );
clog << t << "\n";
}
++m_count;
m_t_overall = t;
}
size_t m_N;
state_type m_lyap;
size_t m_every;
size_t m_count;
value_type m_t_overall;
};
const size_t N = 1024*2;
const value_type dt = 0.01;
int main( int argc , char* argv[] )
{
int driver_version , runtime_version;
cudaDriverGetVersion( &driver_version );
cudaRuntimeGetVersion ( &runtime_version );
cout << driver_version << "\t" << runtime_version << endl;
//[ thrust_lorenz_parameters_define_beta
vector< value_type > beta_host( N );
const value_type beta_min = 0.0 , beta_max = 56.0;
for( size_t i=0 ; i<N ; ++i )
beta_host[i] = beta_min + value_type( i ) * ( beta_max - beta_min ) / value_type( N - 1 );
state_type beta = beta_host;
//]
//[ thrust_lorenz_parameters_integration
state_type x( 6 * N );
// initialize x,y,z
thrust::fill( x.begin() , x.begin() + 3 * N , 10.0 );
// initial dx
thrust::fill( x.begin() + 3 * N , x.begin() + 4 * N , 1.0 );
// initialize dy,dz
thrust::fill( x.begin() + 4 * N , x.end() , 0.0 );
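// Layout of x: six contiguous blocks of length N -- [ x | y | z | dx | dy | dz ] -- so each
// of the N parameter values gets one Lorenz trajectory plus one perturbation vector.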
// create error stepper, can be used with make_controlled or make_dense_output
typedef runge_kutta_dopri5< state_type , value_type , state_type , value_type > stepper_type;
lorenz_system lorenz( N , beta );
lorenz_perturbation_system lorenz_perturbation( N , beta );
lyap_observer obs( N , 1 );
// calculate transients
integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , stepper_type() ) , lorenz , std::make_pair( x.begin() , x.begin() + 3 * N ) , 0.0 , 10.0 , dt );
// calculate the Lyapunov exponents -- the main loop
double t = 0.0;
while( t < 10000.0 )
{
integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , stepper_type() ) , lorenz_perturbation , x , t , t + 1.0 , 0.1 );
t += 1.0;
obs( x , t );
}
vector< value_type > lyap( N );
obs.fill_lyap( lyap );
for( size_t i=0 ; i<N ; ++i )
cout << beta_host[i] << "\t" << lyap[i] << "\n";
//]
return 0;
}
|
fe830764f3ce71794005a78bc29f8c6f5069fd8f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************
* CUDALERP.cu
* CUDALERP
*
* Author: Kareem Omar
* [email protected]
* https://github.com/komrad36
*
* Last updated Jan 7, 2016
*******************************************************************/
//
// The file CUDALERP.h exposes two extremely high performance GPU
// resize operations,
// CUDALERP (bilinear interpolation), and
// CUDANERP (nearest neighbor interpolation), for 8-bit unsigned
// integer (i.e. grayscale) data.
//
// For 32-bit float data, see the CUDAFLERP project instead.
//
// CUDALERP offers superior accuracy to CUDA's built-in texture
// interpolator at comparable performance. The accuracy if compiled
// with -use-fast-math off is nearly equivalent to my CPU interpolator,
// KLERP, while still being as fast as the built-in interpolation.
//
// Particularly for large images, CUDALERP dramatically outperforms
// even the highly tuned CPU AVX2 versions.
//
// All functionality is contained in the header 'CUDALERP.h' and
// the source file 'CUDALERP.cu' and has no external dependencies at all.
//
// Note that these are intended for computer vision use (hence the speed)
// and are designed for grayscale images.
//
// The file 'main.cpp' is an example and speed test driver.
//
#include "CUDALERP.h"
__global__ void
#ifndef __INTELLISENSE__
__launch_bounds__(256, 0)
#endif
CUDANERP_kernel(const hipTextureObject_t d_img_tex, const float gxs, const float gys, uint8_t* __restrict const d_out, const int neww) {
uint32_t x = (blockIdx.x << 9) + (threadIdx.x << 1);
const uint32_t y = blockIdx.y;
const float fy = y*gys;
#pragma unroll
for (int i = 0; i < 2; ++i, ++x) {
const float fx = x*gxs;
float res = 255.0f*tex2D<float>(d_img_tex, fx, fy);
if (x < neww) d_out[y*neww + x] = res;
}
}
__global__ void
#ifndef __INTELLISENSE__
__launch_bounds__(256, 0)
#endif
CUDALERP_kernel(const hipTextureObject_t d_img_tex, const float gxs, const float gys, uint8_t* __restrict const d_out, const int neww) {
uint32_t x = (blockIdx.x << 9) + (threadIdx.x << 1);
const uint32_t y = blockIdx.y;
const float fy = (y + 0.5f)*gys - 0.5f;
const float wt_y = fy - floor(fy);
const float invwt_y = 1.0f - wt_y;
#pragma unroll
for (int i = 0; i < 2; ++i, ++x) {
const float fx = (x + 0.5f)*gxs - 0.5f;
// less accurate and not really much (or any) faster
// -----------------
// const float res = tex2D<float>(d_img_tex, fx, fy);
// -----------------
const float4 f = tex2Dgather<float4>(d_img_tex, fx + 0.5f, fy + 0.5f);
const float wt_x = fx - floor(fx);
const float invwt_x = 1.0f - wt_x;
const float xa = invwt_x*f.w + wt_x*f.z;
const float xb = invwt_x*f.x + wt_x*f.y;
const float res = 255.0f*(invwt_y*xa + wt_y*xb) + 0.5f;
// -----------------
if (x < neww) d_out[y*neww + x] = res;
}
}
void CUDANERP(const hipTextureObject_t d_img_tex, const int oldw, const int oldh, uint8_t* __restrict const d_out, const uint32_t neww, const uint32_t newh) {
const float gxs = static_cast<float>(oldw) / static_cast<float>(neww);
const float gys = static_cast<float>(oldh) / static_cast<float>(newh);
hipLaunchKernelGGL(( CUDANERP_kernel), dim3(((neww - 1) >> 9) + 1, newh), dim3(256), 0, 0, d_img_tex, gxs, gys, d_out, neww);
hipDeviceSynchronize();
}
void CUDALERP(const hipTextureObject_t d_img_tex, const int oldw, const int oldh, uint8_t* __restrict const d_out, const uint32_t neww, const uint32_t newh) {
const float gxs = static_cast<float>(oldw) / static_cast<float>(neww);
const float gys = static_cast<float>(oldh) / static_cast<float>(newh);
hipLaunchKernelGGL(( CUDALERP_kernel), dim3(((neww - 1) >> 9) + 1, newh), dim3(256), 0, 0, d_img_tex, gxs, gys, d_out, neww);
hipDeviceSynchronize();
}
|
fe830764f3ce71794005a78bc29f8c6f5069fd8f.cu
|
/*******************************************************************
* CUDALERP.cu
* CUDALERP
*
* Author: Kareem Omar
* [email protected]
* https://github.com/komrad36
*
* Last updated Jan 7, 2016
*******************************************************************/
//
// The file CUDALERP.h exposes two extremely high performance GPU
// resize operations,
// CUDALERP (bilinear interpolation), and
// CUDANERP (nearest neighbor interpolation), for 8-bit unsigned
// integer (i.e. grayscale) data.
//
// For 32-bit float data, see the CUDAFLERP project instead.
//
// CUDALERP offers superior accuracy to CUDA's built-in texture
// interpolator at comparable performance. The accuracy if compiled
// with -use-fast-math off is nearly equivalent to my CPU interpolator,
// KLERP, while still being as fast as the built-in interpolation.
//
// Particularly for large images, CUDALERP dramatically outperforms
// even the highly tuned CPU AVX2 versions.
//
// All functionality is contained in the header 'CUDALERP.h' and
// the source file 'CUDALERP.cu' and has no external dependencies at all.
//
// Note that these are intended for computer vision use (hence the speed)
// and are designed for grayscale images.
//
// The file 'main.cpp' is an example and speed test driver.
//
#include "CUDALERP.h"
__global__ void
#ifndef __INTELLISENSE__
__launch_bounds__(256, 0)
#endif
CUDANERP_kernel(const cudaTextureObject_t d_img_tex, const float gxs, const float gys, uint8_t* __restrict const d_out, const int neww) {
uint32_t x = (blockIdx.x << 9) + (threadIdx.x << 1);
const uint32_t y = blockIdx.y;
const float fy = y*gys;
#pragma unroll
for (int i = 0; i < 2; ++i, ++x) {
const float fx = x*gxs;
float res = 255.0f*tex2D<float>(d_img_tex, fx, fy);
if (x < neww) d_out[y*neww + x] = res;
}
}
__global__ void
#ifndef __INTELLISENSE__
__launch_bounds__(256, 0)
#endif
CUDALERP_kernel(const cudaTextureObject_t d_img_tex, const float gxs, const float gys, uint8_t* __restrict const d_out, const int neww) {
uint32_t x = (blockIdx.x << 9) + (threadIdx.x << 1);
const uint32_t y = blockIdx.y;
const float fy = (y + 0.5f)*gys - 0.5f;
const float wt_y = fy - floor(fy);
const float invwt_y = 1.0f - wt_y;
#pragma unroll
for (int i = 0; i < 2; ++i, ++x) {
const float fx = (x + 0.5f)*gxs - 0.5f;
// less accurate and not really much (or any) faster
// -----------------
// const float res = tex2D<float>(d_img_tex, fx, fy);
// -----------------
const float4 f = tex2Dgather<float4>(d_img_tex, fx + 0.5f, fy + 0.5f);
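// Assuming CUDA's tex2Dgather component ordering, f.w/f.z hold the two texels of row y0 and
// f.x/f.y those of row y1; xa and xb below are the horizontal lerps of each row and res
// blends them vertically (scaled back to [0,255] with +0.5f for rounding).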
const float wt_x = fx - floor(fx);
const float invwt_x = 1.0f - wt_x;
const float xa = invwt_x*f.w + wt_x*f.z;
const float xb = invwt_x*f.x + wt_x*f.y;
const float res = 255.0f*(invwt_y*xa + wt_y*xb) + 0.5f;
// -----------------
if (x < neww) d_out[y*neww + x] = res;
}
}
void CUDANERP(const cudaTextureObject_t d_img_tex, const int oldw, const int oldh, uint8_t* __restrict const d_out, const uint32_t neww, const uint32_t newh) {
const float gxs = static_cast<float>(oldw) / static_cast<float>(neww);
const float gys = static_cast<float>(oldh) / static_cast<float>(newh);
CUDANERP_kernel<<<{((neww - 1) >> 9) + 1, newh}, 256>>>(d_img_tex, gxs, gys, d_out, neww);
cudaDeviceSynchronize();
}
void CUDALERP(const cudaTextureObject_t d_img_tex, const int oldw, const int oldh, uint8_t* __restrict const d_out, const uint32_t neww, const uint32_t newh) {
const float gxs = static_cast<float>(oldw) / static_cast<float>(neww);
const float gys = static_cast<float>(oldh) / static_cast<float>(newh);
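// Each 256-thread block writes two output pixels per thread (512 columns), so the grid uses
// ((neww - 1) >> 9) + 1 blocks in x and one block row per output row.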
CUDALERP_kernel<<<{((neww - 1) >> 9) + 1, newh}, 256>>>(d_img_tex, gxs, gys, d_out, neww);
cudaDeviceSynchronize();
}
|
bc2a1fc86973594c7e2af1f06fc34470a6a7e75b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ssqmm --- Part of the project OPLib 1.0, a high performance pricing library
// based on operator methods, higher level BLAS and multicore architectures
// Author: 2009 Claudio Albanese
// Maintainer: Claudio Albanese <[email protected]>
// Created: April-July 2009
// Version: 1.0.0
// Credits: The CUDA code for SGEMM4, SGEMV4 and SSQMM were inspired by
// Vasily Volkov's implementation of SGEMM
// We use several variations of the multi-threaded Mersenne Twister algorithm of
// period 2203 due to Makoto Matsumoto.
// The Monte Carlo routine in SMC includes code by Victor Podlozhnyuk
// included in the CUDA SDK.
// CPU-side BLAS and random number generators link to primitives in the
// Intel Math Kernel Libraries.
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; see the file COPYING. If not, write to
// the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
// Boston, MA 02111-1307, USA.
#define NCOLS 4
#ifdef LINUX
#define __declspec(x)
#define __stdcall
#endif
__device__ void rank1_update( const float a, const float *b, float *c )
{
c[0] += a*b[0];
c[1] += a*b[1];
c[2] += a*b[2];
c[3] += a*b[3];
c[4] += a*b[4];
c[5] += a*b[5];
c[6] += a*b[6];
c[7] += a*b[7];
c[8] += a*b[8];
c[9] += a*b[9];
c[10] += a*b[10];
c[11] += a*b[11];
c[12] += a*b[12];
c[13] += a*b[13];
c[14] += a*b[14];
c[15] += a*b[15];
}
__device__ void rankk_update( int k, const float *A0, int lda, const float *b, int ldb, float *c )
{
if( k <= 0 ) return;
const float *A = A0;
int i = 0;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c );
}
__device__ void store_block2( int num, float *c, float *C0, int ldc )
{
if( num <= 0 ) return;
int i = 0;
float *C = C0;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++];
}
static __global__ void global_ssqmm(const int nb, const int d, const int ni, const unsigned A_i, const unsigned B_i, const unsigned C_i)
{
const int i = blockIdx.x / nb;
const int inx = threadIdx.x;
const int iny = threadIdx.y;
const int ibx = (blockIdx.x % nb) * 64;
const int iby = blockIdx.y * 16;
const int row = ibx + inx + iny*16;
const int lda = d;
const int ldb = d;
const int ldc = d;
const int m = d;
const int n = d;
int k = d;
const unsigned * Au_i = (unsigned *) A_i;
const unsigned * Bu_i = (unsigned *) B_i;
const unsigned * Cu_i = (unsigned *) C_i;
float * A = (float *)(Au_i[i]);
float * B = (float *)(Bu_i[i]);
float * C = (float *)(Cu_i[i]);
A += ibx + inx + iny * 16;
B += inx + (iby + iny) * ldb;
C += ibx + inx + iny * 16 + iby * ldc;
float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
__shared__ float b[16][17];
for( ; k > 0; k -= 16 )
{
#pragma unroll
for(int j = 0; j < 16; j += 4 ) b[inx][iny+j] = B[j*ldb];
__syncthreads();
if( k < 16 ) break;
#pragma unroll
for(int j = 0; j < 16; j++, A += lda ) rank1_update( A[0], &b[j][0], c );
__syncthreads();
B += 16;
};
rankk_update( k, A, lda, &b[0][0], 17, c );
if( row >= m ) return;
store_block2( n - iby, c, C, ldc);
};
extern "C" void __declspec( dllexport ) opcuda_ssqmm(int d, int ni, unsigned A_i, unsigned B_i, unsigned C_i)
{
const int nb = ((d+63)/64);
dim3 grid( ni * nb, (d+15)/16), threads( 16, 4 );
hipLaunchKernelGGL(( global_ssqmm), dim3(grid), dim3(threads), 0, 0, nb, d, ni, A_i, B_i, C_i);
}
|
bc2a1fc86973594c7e2af1f06fc34470a6a7e75b.cu
|
// ssqmm --- Part of the project OPLib 1.0, a high performance pricing library
// based on operator methods, higher level BLAS and multicore architectures
// Author: 2009 Claudio Albanese
// Maintainer: Claudio Albanese <[email protected]>
// Created: April-July 2009
// Version: 1.0.0
// Credits: The CUDA code for SGEMM4, SGEMV4 and SSQMM were inspired by
// Vasily Volkov's implementation of SGEMM
// We use several variations of the multi-threaded Mersenne Twister algorithm of
// period 2203 due to Makoto Matsumoto.
// The Monte Carlo routine in SMC includes code by Victor Podlozhnyuk
// included in the CUDA SDK.
// CPU-side BLAS and random number generators link to primitives in the
// Intel Math Kernel Libraries.
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; see the file COPYING. If not, write to
// the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
// Boston, MA 02111-1307, USA.
#define NCOLS 4
#ifdef LINUX
#define __declspec(x)
#define __stdcall
#endif
__device__ void rank1_update( const float a, const float *b, float *c )
{
c[0] += a*b[0];
c[1] += a*b[1];
c[2] += a*b[2];
c[3] += a*b[3];
c[4] += a*b[4];
c[5] += a*b[5];
c[6] += a*b[6];
c[7] += a*b[7];
c[8] += a*b[8];
c[9] += a*b[9];
c[10] += a*b[10];
c[11] += a*b[11];
c[12] += a*b[12];
c[13] += a*b[13];
c[14] += a*b[14];
c[15] += a*b[15];
}
__device__ void rankk_update( int k, const float *A0, int lda, const float *b, int ldb, float *c )
{
if( k <= 0 ) return;
const float *A = A0;
int i = 0;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c ); if( ++i >= k ) return; A += lda;
rank1_update( A[0], &b[i*ldb], c );
}
__device__ void store_block2( int num, float *c, float *C0, int ldc )
{
if( num <= 0 ) return;
int i = 0;
float *C = C0;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++]; if( i >= num ) return; C += ldc;
C[0] = c[i++];
}
static __global__ void global_ssqmm(const int nb, const int d, const int ni, const unsigned A_i, const unsigned B_i, const unsigned C_i)
{
const int i = blockIdx.x / nb;
const int inx = threadIdx.x;
const int iny = threadIdx.y;
const int ibx = (blockIdx.x % nb) * 64;
const int iby = blockIdx.y * 16;
const int row = ibx + inx + iny*16;
const int lda = d;
const int ldb = d;
const int ldc = d;
const int m = d;
const int n = d;
int k = d;
const unsigned * Au_i = (unsigned *) A_i;
const unsigned * Bu_i = (unsigned *) B_i;
const unsigned * Cu_i = (unsigned *) C_i;
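// A_i/B_i/C_i appear to be device addresses of tables holding one base pointer per matrix in
// the batch (stored as 32-bit handles); i = blockIdx.x / nb selects which of the ni square
// d x d multiplications this block contributes to.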
float * A = (float *)(Au_i[i]);
float * B = (float *)(Bu_i[i]);
float * C = (float *)(Cu_i[i]);
A += ibx + inx + iny * 16;
B += inx + (iby + iny) * ldb;
C += ibx + inx + iny * 16 + iby * ldc;
float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
__shared__ float b[16][17];
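// A 16x16 tile of B is staged in shared memory; the extra column (17) pads each row so that
// column accesses avoid shared-memory bank conflicts. Each thread keeps a 16-element strip of
// C in registers (c[16]), in the style of Volkov's SGEMM.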
for( ; k > 0; k -= 16 )
{
#pragma unroll
for(int j = 0; j < 16; j += 4 ) b[inx][iny+j] = B[j*ldb];
__syncthreads();
if( k < 16 ) break;
#pragma unroll
for(int j = 0; j < 16; j++, A += lda ) rank1_update( A[0], &b[j][0], c );
__syncthreads();
B += 16;
};
rankk_update( k, A, lda, &b[0][0], 17, c );
if( row >= m ) return;
store_block2( n - iby, c, C, ldc);
};
extern "C" void __declspec( dllexport ) opcuda_ssqmm(int d, int ni, unsigned A_i, unsigned B_i, unsigned C_i)
{
const int nb = ((d+63)/64);
dim3 grid( ni * nb, (d+15)/16), threads( 16, 4 );
global_ssqmm<<<grid, threads>>>(nb, d, ni, A_i, B_i, C_i);
}
|
sac_model_1point_plane.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id$
*
*/
#include <pcl/pcl_exports.h>
#include <pcl/cuda/sample_consensus/sac_model_1point_plane.h>
#include <pcl/cuda/common/eigen.h>
#include <pcl/cuda/cutil_math.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/random.h>
#include <hip/hip_vector_types.h>
#include <stdio.h>
#include <limits>
// specify inlier computation method
//#define KINECT_NORMALS
#define KINECT
namespace pcl
{
namespace cuda
{
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
SampleConsensusModel1PointPlane<Storage>::SampleConsensusModel1PointPlane (
const PointCloudConstPtr &cloud) :
SampleConsensusModel<Storage> (cloud)
{
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> void
SampleConsensusModel1PointPlane<Storage>::getSamples (int &iterations, Indices &samples)
{
samples.resize (1);
float trand = indices_->size () / (RAND_MAX + 1.0f);
int idx = (int)(rngl_ () * trand);
samples[0] = (*indices_)[idx];
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> bool
SampleConsensusModel1PointPlane<Storage>::computeModelCoefficients (
const Indices &samples, Coefficients &model_coefficients)
{
if (samples.size () != 1)
return (false);
/* if (isnan ((PointXYZRGB)input_->points[samples[0]]).x ||
isnan ((PointXYZRGB)input_->points[samples[1]]).x ||
isnan ((PointXYZRGB)input_->points[samples[2]]).x)
return (false);*/
float3 normal;
normal.x = 0;
normal.y = 0;
normal.z = -1;
// Compute the plane coefficients
// calculate the plane normal n = (p2-p1) x (p3-p1) = cross (p2-p1, p3-p1)
float3 mc = normalize (normal);
if (model_coefficients.size () != 4)
model_coefficients.resize (4);
model_coefficients[0] = mc.x;
model_coefficients[1] = mc.y;
model_coefficients[2] = mc.z;
// ... + d = 0
model_coefficients[3] = -1 * dot (mc, ((PointXYZRGB)input_->points[samples[0]]).xyz);
return (true);
}
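// Note: this 1-point variant fixes the plane normal to (0,0,-1) and estimates only d from the
// sampled point; Create1PointPlaneSampleHypothesis below uses the precomputed per-point normal
// instead (the #else branch).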
__host__ __device__
unsigned int hash(unsigned int a)
{
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
//template <typename Tuple>
thrust::tuple <int, float4>
Create1PointPlaneSampleHypothesis<Storage>::operator () (int t)
{
float4 coeff;
coeff.x = coeff.y = coeff.z = coeff.w = 5;
float trand = (float) nr_indices / (thrust::default_random_engine::max + 1.0f);
//rng.seed (hash (t));
//int sample_point = indices[(int)(rng () * trand)];
int sample_point = indices[(int)(t * trand)];
if (isnan (input[sample_point].x))
return (thrust::make_tuple (sample_point, coeff));
#if 0
//TODO:: kind of important: get normal! :D
int xIdx = sample_point % width_;
int yIdx = sample_point / width_;
//int counter = 1;
int window_size = 3;
int left_index = 0;
int top_index = 0;
// West
if (xIdx >= window_size)
{
left_index = sample_point - window_size;
}
else
{
left_index = sample_point + window_size;
}
// North
if (yIdx >= window_size)
{
top_index = sample_point - window_size * width_;
}
else
{
top_index = sample_point + window_size * width_;
}
float3 left_point;
left_point.x = input[left_index].x - input[sample_point].x;
left_point.y = input[left_index].y - input[sample_point].y;
left_point.z = input[left_index].z - input[sample_point].z;
float3 top_point;
top_point.x = input[top_index].x - input[sample_point].x;
top_point.y = input[top_index].y - input[sample_point].y;
top_point.z = input[top_index].z - input[sample_point].z;
float3 normal = cross (top_point, left_point);
// Compute the plane coefficients from the 3 given points in a straightforward manner
// calculate the plane normal n = (p2-p1) x (p3-p1) = cross (p2-p1, p3-p1)
float3 mc = normalize (normal);
if (0 == (normal.x) && 0 == (normal.y) && 0 == (normal.z))
{
//mc.x = mc.y = 0;
if (top_point.x == 0 && top_point.y == 0 && top_point.z == 0)
{
mc.x = 999999;
mc.y = input[top_index].x;
mc.z = input[sample_point].x;
//mc.z = top_index - sample_point;
//mc.z = 999999;
}
else
{
if (left_point.x == 0 && left_point.y == 0 && left_point.z == 0)
{
mc.x = mc.y = 888888;
mc.z = left_index - sample_point;
//mc.z = 888888;
}
}
}
#else
float3 mc = make_float3 (normals_[sample_point].x, normals_[sample_point].y, normals_[sample_point].z);
#endif
coeff.x = mc.x;
coeff.y = mc.y;
coeff.z = mc.z;
// ... + d = 0
coeff.w = -1 * dot (mc, input[sample_point].xyz);
return (thrust::make_tuple (sample_point, coeff));
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
//template <typename Tuple>
float4
Create1PointPlaneHypothesis<Storage>::operator () (int t)
{
float4 coeff;
coeff.x = coeff.y = coeff.z = coeff.w = bad_value;
float trand = nr_indices / (RAND_MAX + 1.0f);
thrust::default_random_engine rng (t);
int sample_point = indices[(int)(rng () * trand)];
if (isnan (input[sample_point].x))
return (coeff);
//TODO:: kind of important: get normal! :D
//int xIdx = sample_point % width;
//int yIdx = sample_point / width;
//float3 b = input[sample_point];
//int counter = 1;
//// West
//if (xIdx < width-window_size)
//{
// b += input[sample_point + window_size];
// counter += 1
//}
//// North
//if (yIdx >= window_size)
//{
// b += input[sample_point - window_size * width];
//}
//// South
//if (yIdx < height-window_size)
//{
// b += input[sample_point + window_size * width];
//}
//// East
//if (xIdx >= window_size)
//{
// b += input[sample_point + window_size];
//}
//// Estimate the XYZ centroid
//compute3DCentroid (cloud, xyz_centroid);
//// Compute the 3x3 covariance matrix
//computeCovarianceMatrix (cloud, xyz_centroid, covariance_matrix);
//// Get the plane normal and surface curvature
//solvePlaneParameters (covariance_matrix, xyz_centroid, plane_parameters, curvature);
//int[5] idxs;
//idxs[0] = sample_point;
// west = sample_point - window_size;
//else
// west = -1;
float3 normal;
normal.x = 0;
normal.y = 0;
normal.z = -1;
// Compute the plane coefficients from the 3 given points in a straightforward manner
// calculate the plane normal n = (p2-p1) x (p3-p1) = cross (p2-p1, p3-p1)
float3 mc = normalize (normal);
coeff.x = mc.x;
coeff.y = mc.y;
coeff.z = mc.z;
// ... + d = 0
coeff.w = -1 * dot (mc, input[sample_point].xyz);
return (coeff);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> bool
SampleConsensusModel1PointPlane<Storage>::generateModelHypotheses (
Hypotheses &h, int max_iterations)
{
using namespace thrust;
// Create a vector of how many samples/coefficients do we want to get
h.resize (max_iterations);
typename Storage<int>::type randoms (max_iterations);
// a sequence counting up from 0
thrust::counting_iterator<int> index_sequence_begin (0);
// transform the range [0,1,2,...N]
// to a range of random numbers
thrust::transform (index_sequence_begin,
index_sequence_begin + max_iterations,
randoms.begin (),
parallel_random_generator (0));
//thrust::counting_iterator<int> first (0);
// Input: Point Cloud, Indices
// Output: Hypotheses
transform (//first, first + max_iterations,
//index_sequence_begin,
//index_sequence_begin + max_iterations,
randoms.begin (), randoms.begin () + max_iterations,
h.begin (),
Create1PointPlaneHypothesis<Storage> (thrust::raw_pointer_cast (&input_->points[0]),
thrust::raw_pointer_cast (&(*indices_)[0]),
indices_->size (), std::numeric_limits<float>::quiet_NaN ()));
return (true);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> bool
SampleConsensusModel1PointPlane<Storage>::generateModelHypotheses (
Hypotheses &h, Samples &samples, int max_iterations)
{
using namespace thrust;
// Create a vector of how many samples/coefficients do we want to get
h.resize (max_iterations);
samples.resize (max_iterations);
typename Storage<int>::type randoms (max_iterations);
// a sequence counting up from 0
thrust::counting_iterator<int> index_sequence_begin (0);
// transform the range [0,1,2,...N]
// to a range of random numbers
thrust::transform (index_sequence_begin,
index_sequence_begin + max_iterations,
randoms.begin (),
parallel_random_generator (0));
//thrust::counting_iterator<int> first (0);
// Input: Point Cloud, Indices
// Output: Hypotheses
transform (//first, first + max_iterations,
//index_sequence_begin,
//index_sequence_begin + max_iterations,
randoms.begin (), randoms.begin () + max_iterations,
//index_sequence_begin, index_sequence_begin + max_iterations,
thrust::make_zip_iterator (thrust::make_tuple (samples.begin (), h.begin())),
// h.begin (),
Create1PointPlaneSampleHypothesis<Storage> (thrust::raw_pointer_cast (&input_->points[0]),
thrust::raw_pointer_cast (&(*normals_)[0]),
thrust::raw_pointer_cast (&(*indices_)[0]),
input_->width, input_->height,
indices_->size (), std::numeric_limits<float>::quiet_NaN ()));
return (true);
}
//////////////////////////////////////////////////////////////////////////
template <typename Tuple> bool
CountPlanarInlier::operator () (const Tuple &t)
{
if (!isfinite (thrust::raw_reference_cast(thrust::get<0>(t)).x))
return (false);
//TODO: make threshold adaptive, depending on z
return (std::abs (thrust::raw_reference_cast(thrust::get<0>(t)).x * coefficients.x +
thrust::raw_reference_cast(thrust::get<0>(t)).y * coefficients.y +
thrust::raw_reference_cast(thrust::get<0>(t)).z * coefficients.z + coefficients.w) < threshold);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
NewCheckPlanarInlier<Storage>::operator () (const int &idx)
{
if (idx == -1)
return -1;
PointXYZRGB p = input_[idx];
if (isnan (p.x))
return -1;
if (std::abs (p.x * coefficients.x +
p.y * coefficients.y +
p.z * coefficients.z + coefficients.w) < threshold)
// If inlier, return its position in the vector
return idx;
else
// If outlier, return -1
return -1;
}
//////////////////////////////////////////////////////////////////////////
template <typename Tuple> int
CheckPlanarInlier::operator () (const Tuple &t)
{
if (thrust::get<1>(t) == -1)
return (-1);
if (isnan (thrust::get<0>(t).x))
return (-1);
// Fill in XYZ (and copy NaNs with it)
float4 pt;
pt.x = thrust::get<0>(t).x;
pt.y = thrust::get<0>(t).y;
pt.z = thrust::get<0>(t).z;
pt.w = 1;
//TODO: make threshold adaptive, depending on z
if (std::abs (dot (pt, coefficients)) < threshold)
// If inlier, return its position in the vector
return (thrust::get<1>(t));
else
// If outlier, return -1
return (-1);
}
int CheckPlanarInlierKinectIndices::operator () (const PointXYZRGB &pt, const int &idx)
{
//if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z) | (idx == -1))
// return (-1);
const float b = 0.075f;
const float f = 580.0f/2.0f;
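// Kinect-style disparity error model: b is presumably the stereo baseline in metres and f the
// focal length in pixels (halved for the 320x240 disparity image). The point is projected onto
// the candidate plane along its viewing ray and accepted if the implied change in disparity
// stays within 1/6 of a disparity unit.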
float length_pt = sqrtf (dot (pt, pt));
float dot_n_p = pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z;
float D = - coefficients.w * length_pt / dot_n_p - length_pt;
float orig_disparity = b * f / pt.z;
float actual_disparity = orig_disparity * length_pt / (length_pt + D);
if ((std::abs (actual_disparity - orig_disparity) <= 1.0/6.0) & idx != -1)
return (idx);
else
return -1;
}
template <typename Tuple>
int CheckPlanarInlierKinectNormalIndices::operator () (const Tuple &t, const int &idx)
{
//if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z) | (idx == -1))
// return (-1);
const PointXYZRGB &pt = thrust::get<0>(t);
float4 &normal = thrust::get<1>(t);
const float b = 0.075f;
const float f = 580.0f/2.0f;
float length_pt = sqrtf (dot (pt, pt));
float dot_n_p = pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z;
float D = - coefficients.w * length_pt / dot_n_p - length_pt;
float orig_disparity = b * f / pt.z;
float actual_disparity = orig_disparity * length_pt / (length_pt + D);
if ((std::abs (actual_disparity - orig_disparity) <= 1.0/2.0) & (idx != -1)
&
(
std::abs (std::acos (normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z)) < angle_threshold
|
std::abs (std::acos (-(normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z))) < angle_threshold
)
)
return (idx);
else
return -1;
}
template <typename Tuple>
int CheckPlanarInlierNormalIndices::operator () (const Tuple &t, const int &idx)
{
const PointXYZRGB &pt = thrust::get<0>(t);
if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z) | (idx == -1))
return (-1);
float4 &normal = thrust::get<1>(t);
//TODO: make threshold adaptive, depending on z
if (std::abs (pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z + coefficients.w) < threshold
&
(
std::abs (std::acos (normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z)) < angle_threshold
|
std::abs (std::acos (-(normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z))) < angle_threshold
)
)
// If inlier, return its position in the vector
return (idx);
else
// If outlier, return -1
return (-1);
}
int CheckPlanarInlierIndices::operator () (const PointXYZRGB &pt, const int &idx)
{
if (idx == -1)
return (-1);
if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z))
return (-1);
//TODO: make threshold adaptive, depending on z
if (std::abs (pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z + coefficients.w) < threshold)
// If inlier, return its position in the vector
return (idx);
else
// If outlier, return -1
return (-1);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::countWithinDistance (
const Coefficients &model_coefficients, float threshold)
{
using namespace thrust;
// Needs a valid set of model coefficients
if (model_coefficients.size () != 4)
{
fprintf (stderr, "[pcl::cuda::SampleConsensusModel1PointPlane::countWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return 0;
}
float4 coefficients;
coefficients.x = model_coefficients[0];
coefficients.y = model_coefficients[1];
coefficients.z = model_coefficients[2];
coefficients.w = model_coefficients[3];
return (int) count_if (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
indices_->size (),
CountPlanarInlier (coefficients, threshold));
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::countWithinDistance (
const Hypotheses &h, int idx, float threshold)
{
if (isnan (((float4)h[idx]).x))
return (0);
return (int)
(thrust::count_if (
thrust::make_zip_iterator (thrust::make_tuple (input_->points.begin (), indices_->begin ())),
thrust::make_zip_iterator (thrust::make_tuple (input_->points.begin (), indices_->begin ())) +
indices_->size (),
CountPlanarInlier (h[idx], threshold)));
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::selectWithinDistance (
const Coefficients &model_coefficients, float threshold, IndicesPtr &inliers, IndicesPtr &inliers_stencil)
{
using namespace thrust;
// Needs a valid set of model coefficients
if (model_coefficients.size () != 4)
{
fprintf (stderr, "[SampleConsensusModel1PointPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return 0;
}
int nr_points = (int) indices_->size ();
{
// pcl::ScopeTime t ("Resize inl");
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
}
float4 coefficients;
coefficients.x = model_coefficients[0];
coefficients.y = model_coefficients[1];
coefficients.z = model_coefficients[2];
coefficients.w = model_coefficients[3];
{
// pcl::ScopeTime t ("transform");
// Send the data to the device
transform (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
nr_points,
inliers_stencil->begin (),
CheckPlanarInlier (coefficients, threshold));
}
{
// pcl::ScopeTime t ("Resize all");
if (!inliers)
inliers.reset (new Indices());
inliers->resize (nr_points);
}
typename Indices::iterator it;
{
// pcl::ScopeTime t ("copy-if");
// Copy data
it = copy_if (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), isInlier ());
//it = remove_copy (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), -1);
}
{
// pcl::ScopeTime t ("Resize");
inliers->resize (it - inliers->begin ());
}
return (int) inliers->size();
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::selectWithinDistance (
const Hypotheses &h, int idx, float threshold, IndicesPtr &inliers, IndicesPtr &inliers_stencil)
{
using namespace thrust;
// Needs a valid set of model coefficients
/* if (model_coefficients.size () != 4)
{
fprintf (stderr, "[SampleConsensusModel1PointPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return;
}*/
int nr_points = (int) indices_->size ();
{
// pcl::ScopeTime t ("Resize inl");
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
}
float4 coefficients;
coefficients.x = ((float4)h[idx]).x;
coefficients.y = ((float4)h[idx]).y;
coefficients.z = ((float4)h[idx]).z;
coefficients.w = ((float4)h[idx]).w;
{
// pcl::ScopeTime t ("transform");
// Send the data to the device
transform (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
nr_points,
inliers_stencil->begin (),
CheckPlanarInlier (coefficients, threshold));
}
{
// pcl::ScopeTime t ("Resize all");
if (!inliers)
inliers.reset (new Indices());
inliers->resize (nr_points);
}
typename Indices::iterator it;
{
// pcl::ScopeTime t ("copy-if");
// Copy data
it = copy_if (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), isInlier ());
}
{
// pcl::ScopeTime t ("Resize");
inliers->resize (it - inliers->begin ());
}
return (int) inliers->size ();
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::selectWithinDistance (
Hypotheses &h, int idx, float threshold, IndicesPtr &inliers_stencil, float3 &c)
{
float angle_threshold = 0.26f;
using namespace thrust;
int nr_points = (int) indices_stencil_->size ();
float bad_point = std::numeric_limits<float>::quiet_NaN ();
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
// necessary for the transform_if call below (since not all elements get written, we init with -1)..
//inliers_stencil->resize (nr_points, -1);
float4 coefficients;
coefficients.x = ((float4)h[idx]).x;
coefficients.y = ((float4)h[idx]).y;
coefficients.z = ((float4)h[idx]).z;
coefficients.w = ((float4)h[idx]).w;
if (isnan (coefficients.x) |
isnan (coefficients.y) |
isnan (coefficients.z) |
isnan (coefficients.w) )
{
c.x = c.y = c.z = 0;
return 0;
}
float3 best_centroid;
IndicesPtr best_inliers_stencil;
float3 centroid;
centroid.x = centroid.y = centroid.z = 0;
best_centroid = centroid;
//ORIG
// transform (
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
// nr_points,
// inliers_stencil->begin (),
// CheckPlanarInlier (coefficients, threshold));
// this is just as fast as the ORIG version, but requires initialization to -1 (see above) --> much slower
// transform_if (
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
// nr_points,
// indices_->begin(),
// inliers_stencil->begin (),
// CheckPlanarInlier (coefficients, threshold),
// isInlier ()
// );
// i forgot why this was slow. but it was. :)
// transform (
// indices_stencil_->begin (),
// indices_stencil_->end(),
// inliers_stencil->begin (),
// NewCheckPlanarInlier<Storage> (coefficients, (float)threshold, input_->points));
// compute inliers
// fastest
#ifdef KINECT
// NOTE: this performs inlier checks with kinect disparity error model, without normal check
transform (
input_->points.begin (), input_->points.end (),
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectIndices (coefficients, threshold, angle_threshold));
#endif
#ifdef KINECT_NORMALS
// NOTE: this performs inlier checks with kinect disparity error model, with normal check
transform (
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())),
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())) + nr_points,
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectNormalIndices (coefficients, threshold, angle_threshold));
#endif
// store inliers here
Indices inliers;
inliers.resize (indices_->size ()); // is this necessary?
typename Indices::iterator last = thrust::remove_copy (inliers_stencil->begin (), inliers_stencil->end (), inliers.begin (), -1);
inliers.erase (last, inliers.end ());
if (inliers.size () < 1)
return (int) inliers.size ();
best_inliers_stencil = inliers_stencil;
int best_nr_inliers = (int) inliers.size ();
int nr_inliers_after_refit = (int) inliers.size ();
int nr_inliers_before_refit;
int nr_refit_iterations = 0;
do {
nr_inliers_before_refit = nr_inliers_after_refit;
compute3DCentroid (make_permutation_iterator (input_->points.begin (), inliers.begin ()),
make_permutation_iterator (input_->points.begin (), inliers.end ()),
centroid);
if (isnan (centroid.x) | isnan (centroid.y) | isnan (centroid.z))
{
std::cerr << "Wow, centroid contains nans!" << std::endl;
inliers_stencil = best_inliers_stencil;
c = make_float3 (bad_point, bad_point, bad_point);
//best_centroid;
return best_nr_inliers;
}
// Note: centroid contains centroid * inliers.size() at this point !
#if 0
std::cerr << "----------------------------------------------------------------------------" << std::endl;
std::cerr << "inliers before: " << inliers.size () << std::endl;
std::cerr << "Centroid: " <<
centroid.x << ", " << centroid.y << ", " << centroid.z << ", " << std::endl;
#endif
CovarianceMatrix covariance_matrix;
computeCovariance (make_permutation_iterator (input_->points.begin (), inliers.begin ()),
make_permutation_iterator (input_->points.begin (), inliers.end ()),
covariance_matrix, centroid);
if (isnan (covariance_matrix.data[0].x))
{
std::cerr << "Wow, covariance matrix contains nans!" << std::endl;
inliers_stencil = best_inliers_stencil;
c = make_float3 (bad_point, bad_point, bad_point);
//best_centroid;
return best_nr_inliers;
}
#if 0
std::cerr << "Covariance: " <<
covariance_matrix.data[0].x << ", " << covariance_matrix.data[0].y << ", " << covariance_matrix.data[0].z << std::endl <<
covariance_matrix.data[1].x << ", " << covariance_matrix.data[1].y << ", " << covariance_matrix.data[1].z << std::endl <<
covariance_matrix.data[2].x << ", " << covariance_matrix.data[2].y << ", " << covariance_matrix.data[2].z << std::endl;
#endif
CovarianceMatrix evecs;
float3 evals;
// compute eigenvalues and -vectors
eigen33 (covariance_matrix, evecs, evals);
float3 mc = normalize (evecs.data[0]);
#if 0
std::cerr << "Eigenvectors: " <<
evecs.data[0].x << ", " << evecs.data[0].y << ", " << evecs.data[0].z << std::endl <<
evecs.data[1].x << ", " << evecs.data[1].y << ", " << evecs.data[1].z << std::endl <<
evecs.data[2].x << ", " << evecs.data[2].y << ", " << evecs.data[2].z << std::endl;
std::cerr << "Coefficients before: " <<
coefficients.x << ", " << coefficients.y << ", " << coefficients.z << ", " << coefficients.w << ", " << std::endl;
#endif
// compute plane coefficients from eigenvector corr. to smallest eigenvalue and centroid
coefficients.x = mc.x;
coefficients.y = mc.y;
coefficients.z = mc.z;
// ... + d = 0
coefficients.w = -1 * dot (mc, centroid);
#if 0
std::cerr << "Coefficients after: " <<
coefficients.x << ", " << coefficients.y << ", " << coefficients.z << ", " << coefficients.w << ", " << std::endl;
#endif
// finally, another inlier check:
#ifdef KINECT
transform (
input_->points.begin (), input_->points.end (),
//make_zip_iterator (make_tuple (input_->points.begin (), normals_.begin())),
//make_zip_iterator (make_tuple (input_->points.begin (), normals_.begin())) + nr_points,
// input_->points.begin (),
// input_->points.end (),
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectIndices (coefficients, threshold, angle_threshold));
#endif
#ifdef KINECT_NORMALS
transform (
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())),
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())) + nr_points,
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectNormalIndices (coefficients, threshold, angle_threshold));
#endif
// copy inliers from stencil to inlier vector
inliers.resize (inliers_stencil->size ()); // is this necessary?
last = thrust::remove_copy (inliers_stencil->begin (), inliers_stencil->end (), inliers.begin (), -1);
inliers.erase (last, inliers.end ());
nr_inliers_after_refit = (int) inliers.size ();
compute3DCentroid (make_permutation_iterator (input_->points.begin (), inliers.begin ()),
make_permutation_iterator (input_->points.begin (), inliers.end ()),
centroid);
if (nr_inliers_after_refit > best_nr_inliers)
{
best_nr_inliers = nr_inliers_after_refit;
best_inliers_stencil = inliers_stencil;
best_centroid = centroid;
h[idx] = coefficients;
}
//fprintf (stderr, "iteration %i: %f, %f, %f, %f ---> %i\n", nr_refit_iterations, coefficients.x, coefficients.y, coefficients.z, coefficients.w, best_nr_inliers);
} while (nr_inliers_after_refit > nr_inliers_before_refit & ++nr_refit_iterations < 120);
#if 0
std::cerr << "inliers after: " << nr_inliers_after_refit << std::endl;
#endif
//std::cerr << "--> refitting steps: " << nr_refit_iterations << std::endl;
inliers_stencil = best_inliers_stencil;
c = best_centroid;
return best_nr_inliers;
}
// explicit template instantiation for device and host
template class PCL_EXPORTS SampleConsensusModel1PointPlane<Device>;
template class PCL_EXPORTS SampleConsensusModel1PointPlane<Host>;
} // namespace
} // namespace
|
sac_model_1point_plane.cu
|
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id$
*
*/
#include <pcl/pcl_exports.h>
#include <pcl/cuda/sample_consensus/sac_model_1point_plane.h>
#include <pcl/cuda/common/eigen.h>
#include <pcl/cuda/cutil_math.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/random.h>
#include <vector_types.h>
#include <stdio.h>
#include <limits>
// specify inlier computation method
//#define KINECT_NORMALS
#define KINECT
namespace pcl
{
namespace cuda
{
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
SampleConsensusModel1PointPlane<Storage>::SampleConsensusModel1PointPlane (
const PointCloudConstPtr &cloud) :
SampleConsensusModel<Storage> (cloud)
{
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> void
SampleConsensusModel1PointPlane<Storage>::getSamples (int &iterations, Indices &samples)
{
samples.resize (1);
float trand = indices_->size () / (RAND_MAX + 1.0f);
int idx = (int)(rngl_ () * trand);
samples[0] = (*indices_)[idx];
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> bool
SampleConsensusModel1PointPlane<Storage>::computeModelCoefficients (
const Indices &samples, Coefficients &model_coefficients)
{
if (samples.size () != 1)
return (false);
/* if (isnan ((PointXYZRGB)input_->points[samples[0]]).x ||
isnan ((PointXYZRGB)input_->points[samples[1]]).x ||
isnan ((PointXYZRGB)input_->points[samples[2]]).x)
return (false);*/
float3 normal;
normal.x = 0;
normal.y = 0;
normal.z = -1;
// Compute the plane coefficients
// calculate the plane normal n = (p2-p1) x (p3-p1) = cross (p2-p1, p3-p1)
float3 mc = normalize (normal);
if (model_coefficients.size () != 4)
model_coefficients.resize (4);
model_coefficients[0] = mc.x;
model_coefficients[1] = mc.y;
model_coefficients[2] = mc.z;
// ... + d = 0
model_coefficients[3] = -1 * dot (mc, ((PointXYZRGB)input_->points[samples[0]]).xyz);
return (true);
}
__host__ __device__
unsigned int hash(unsigned int a)
{
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
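// hash() is a Thomas Wang style 32-bit integer mix. Its intended use here is to
// decorrelate per-thread RNG seeds before drawing a sample index (see the
// commented-out rng.seed (hash (t)) in the functor below). A minimal sketch of
// that pattern, assuming thrust's uniform_int_distribution -- illustration only,
// not the code path that is active in this file:
//
//   thrust::default_random_engine rng (hash (t));            // t = hypothesis id
//   thrust::uniform_int_distribution<int> pick (0, nr_indices - 1);
//   int sample_point = indices[pick (rng)];
//
// The active code instead maps t linearly onto the index range via trand.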
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
//template <typename Tuple>
thrust::tuple <int, float4>
Create1PointPlaneSampleHypothesis<Storage>::operator () (int t)
{
float4 coeff;
coeff.x = coeff.y = coeff.z = coeff.w = 5;
float trand = (float) nr_indices / (thrust::default_random_engine::max + 1.0f);
//rng.seed (hash (t));
//int sample_point = indices[(int)(rng () * trand)];
int sample_point = indices[(int)(t * trand)];
if (isnan (input[sample_point].x))
return (thrust::make_tuple (sample_point, coeff));
#if 0
//TODO:: kind of important: get normal! :D
int xIdx = sample_point % width_;
int yIdx = sample_point / width_;
//int counter = 1;
int window_size = 3;
int left_index = 0;
int top_index = 0;
// West
if (xIdx >= window_size)
{
left_index = sample_point - window_size;
}
else
{
left_index = sample_point + window_size;
}
// North
if (yIdx >= window_size)
{
top_index = sample_point - window_size * width_;
}
else
{
top_index = sample_point + window_size * width_;
}
float3 left_point;
left_point.x = input[left_index].x - input[sample_point].x;
left_point.y = input[left_index].y - input[sample_point].y;
left_point.z = input[left_index].z - input[sample_point].z;
float3 top_point;
top_point.x = input[top_index].x - input[sample_point].x;
top_point.y = input[top_index].y - input[sample_point].y;
top_point.z = input[top_index].z - input[sample_point].z;
float3 normal = cross (top_point, left_point);
// Compute the plane coefficients from the 3 given points in a straightforward manner
// calculate the plane normal n = (p2-p1) x (p3-p1) = cross (p2-p1, p3-p1)
float3 mc = normalize (normal);
if (0 == (normal.x) && 0 == (normal.y) && 0 == (normal.z))
{
//mc.x = mc.y = 0;
if (top_point.x == 0 && top_point.y == 0 && top_point.z == 0)
{
mc.x = 999999;
mc.y = input[top_index].x;
mc.z = input[sample_point].x;
//mc.z = top_index - sample_point;
//mc.z = 999999;
}
else
{
if (left_point.x == 0 && left_point.y == 0 && left_point.z == 0)
{
mc.x = mc.y = 888888;
mc.z = left_index - sample_point;
//mc.z = 888888;
}
}
}
#else
float3 mc = make_float3 (normals_[sample_point].x, normals_[sample_point].y, normals_[sample_point].z);
#endif
coeff.x = mc.x;
coeff.y = mc.y;
coeff.z = mc.z;
// ... + d = 0
coeff.w = -1 * dot (mc, input[sample_point].xyz);
return (thrust::make_tuple (sample_point, coeff));
}
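// Summary: each pre-generated random number t is mapped to one point index, the
// precomputed normal n at that point is taken as the plane normal, and the
// hypothesis is stored as (n.x, n.y, n.z, d) with d = -dot (n, p), so that
// dot (n, x) + d = 0 holds for the sample point p. A NaN sample point returns the
// coefficients still at their initial placeholder value (5, 5, 5, 5).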
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
//template <typename Tuple>
float4
Create1PointPlaneHypothesis<Storage>::operator () (int t)
{
float4 coeff;
coeff.x = coeff.y = coeff.z = coeff.w = bad_value;
float trand = nr_indices / (RAND_MAX + 1.0f);
thrust::default_random_engine rng (t);
int sample_point = indices[(int)(rng () * trand)];
if (isnan (input[sample_point].x))
return (coeff);
//TODO:: kind of important: get normal! :D
//int xIdx = sample_point % width;
//int yIdx = sample_point / width;
//float3 b = input[sample_point];
//int counter = 1;
//// West
//if (xIdx < width-window_size)
//{
// b += input[sample_point + window_size];
// counter += 1
//}
//// North
//if (yIdx >= window_size)
//{
// b += input[sample_point - window_size * width];
//}
//// South
//if (yIdx < height-window_size)
//{
// b += input[sample_point + window_size * width];
//}
//// East
//if (xIdx >= window_size)
//{
// b += input[sample_point + window_size];
//}
//// Estimate the XYZ centroid
//compute3DCentroid (cloud, xyz_centroid);
//// Compute the 3x3 covariance matrix
//computeCovarianceMatrix (cloud, xyz_centroid, covariance_matrix);
//// Get the plane normal and surface curvature
//solvePlaneParameters (covariance_matrix, xyz_centroid, plane_parameters, curvature);
//int[5] idxs;
//idxs[0] = sample_point;
// west = sample_point - window_size;
//else
// west = -1;
float3 normal;
normal.x = 0;
normal.y = 0;
normal.z = -1;
// Compute the plane coefficients from the 3 given points in a straightforward manner
// calculate the plane normal n = (p2-p1) x (p3-p1) = cross (p2-p1, p3-p1)
float3 mc = normalize (normal);
coeff.x = mc.x;
coeff.y = mc.y;
coeff.z = mc.z;
// ... + d = 0
coeff.w = -1 * dot (mc, input[sample_point].xyz);
return (coeff);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> bool
SampleConsensusModel1PointPlane<Storage>::generateModelHypotheses (
Hypotheses &h, int max_iterations)
{
using namespace thrust;
// Create a vector of how many samples/coefficients do we want to get
h.resize (max_iterations);
typename Storage<int>::type randoms (max_iterations);
// a sequence counting up from 0
thrust::counting_iterator<int> index_sequence_begin (0);
// transform the range [0,1,2,...N]
// to a range of random numbers
thrust::transform (index_sequence_begin,
index_sequence_begin + max_iterations,
randoms.begin (),
parallel_random_generator (0));
//thrust::counting_iterator<int> first (0);
// Input: Point Cloud, Indices
// Output: Hypotheses
transform (//first, first + max_iterations,
//index_sequence_begin,
//index_sequence_begin + max_iterations,
randoms.begin (), randoms.begin () + max_iterations,
h.begin (),
Create1PointPlaneHypothesis<Storage> (thrust::raw_pointer_cast (&input_->points[0]),
thrust::raw_pointer_cast (&(*indices_)[0]),
indices_->size (), std::numeric_limits<float>::quiet_NaN ()));
return (true);
}
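// The pattern above generates all max_iterations hypotheses on the device without
// a host-side RNG loop: a counting_iterator over [0, N) is turned into N random
// numbers by parallel_random_generator, and those are transformed into plane
// hypotheses by the Create1PointPlaneHypothesis functor. Condensed sketch of the
// same idiom (make_hypothesis is a hypothetical functor name, not from this file):
//
//   thrust::counting_iterator<int> first (0);
//   thrust::transform (first, first + N, randoms.begin (),
//                      parallel_random_generator (0));
//   thrust::transform (randoms.begin (), randoms.end (), hypotheses.begin (),
//                      make_hypothesis);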
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> bool
SampleConsensusModel1PointPlane<Storage>::generateModelHypotheses (
Hypotheses &h, Samples &samples, int max_iterations)
{
using namespace thrust;
// Create a vector of how many samples/coefficients do we want to get
h.resize (max_iterations);
samples.resize (max_iterations);
typename Storage<int>::type randoms (max_iterations);
// a sequence counting up from 0
thrust::counting_iterator<int> index_sequence_begin (0);
// transform the range [0,1,2,...N]
// to a range of random numbers
thrust::transform (index_sequence_begin,
index_sequence_begin + max_iterations,
randoms.begin (),
parallel_random_generator (0));
//thrust::counting_iterator<int> first (0);
// Input: Point Cloud, Indices
// Output: Hypotheses
transform (//first, first + max_iterations,
//index_sequence_begin,
//index_sequence_begin + max_iterations,
randoms.begin (), randoms.begin () + max_iterations,
//index_sequence_begin, index_sequence_begin + max_iterations,
thrust::make_zip_iterator (thrust::make_tuple (samples.begin (), h.begin())),
// h.begin (),
Create1PointPlaneSampleHypothesis<Storage> (thrust::raw_pointer_cast (&input_->points[0]),
thrust::raw_pointer_cast (&(*normals_)[0]),
thrust::raw_pointer_cast (&(*indices_)[0]),
input_->width, input_->height,
indices_->size (), std::numeric_limits<float>::quiet_NaN ()));
return (true);
}
//////////////////////////////////////////////////////////////////////////
template <typename Tuple> bool
CountPlanarInlier::operator () (const Tuple &t)
{
if (!isfinite (thrust::raw_reference_cast(thrust::get<0>(t)).x))
return (false);
//TODO: make threshold adaptive, depending on z
return (std::abs (thrust::raw_reference_cast(thrust::get<0>(t)).x * coefficients.x +
thrust::raw_reference_cast(thrust::get<0>(t)).y * coefficients.y +
thrust::raw_reference_cast(thrust::get<0>(t)).z * coefficients.z + coefficients.w) < threshold);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
NewCheckPlanarInlier<Storage>::operator () (const int &idx)
{
if (idx == -1)
return -1;
PointXYZRGB p = input_[idx];
if (isnan (p.x))
return -1;
if (std::abs (p.x * coefficients.x +
p.y * coefficients.y +
p.z * coefficients.z + coefficients.w) < threshold)
// If inlier, return its position in the vector
return idx;
else
// If outlier, return -1
return -1;
}
//////////////////////////////////////////////////////////////////////////
template <typename Tuple> int
CheckPlanarInlier::operator () (const Tuple &t)
{
if (thrust::get<1>(t) == -1)
return (-1);
if (isnan (thrust::get<0>(t).x))
return (-1);
// Fill in XYZ (and copy NaNs with it)
float4 pt;
pt.x = thrust::get<0>(t).x;
pt.y = thrust::get<0>(t).y;
pt.z = thrust::get<0>(t).z;
pt.w = 1;
//TODO: make threshold adaptive, depending on z
if (std::abs (dot (pt, coefficients)) < threshold)
// If inlier, return its position in the vector
return (thrust::get<1>(t));
else
// If outlier, return -1
return (-1);
}
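// The test above evaluates |dot ((x, y, z, 1), (a, b, c, d))| = |a*x + b*y + c*z + d|,
// which is the exact point-to-plane distance when (a, b, c) has unit length (the
// hypotheses generated in this file use normalized normals). The functor writes a
// stencil: the original point index for inliers, -1 for outliers, so a later
// copy_if / remove_copy pass can compact the indices entirely on the device.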
int CheckPlanarInlierKinectIndices::operator () (const PointXYZRGB &pt, const int &idx)
{
//if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z) | (idx == -1))
// return (-1);
const float b = 0.075f;
const float f = 580.0f/2.0f;
float length_pt = sqrtf (dot (pt, pt));
float dot_n_p = pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z;
float D = - coefficients.w * length_pt / dot_n_p - length_pt;
float orig_disparity = b * f / pt.z;
float actual_disparity = orig_disparity * length_pt / (length_pt + D);
if ((std::abs (actual_disparity - orig_disparity) <= 1.0/6.0) & idx != -1)
return (idx);
else
return -1;
}
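// Inlier test using an (approximate) Kinect disparity error model instead of a
// fixed metric threshold: with baseline b = 0.075 m and focal length f = 290 px,
// a point at depth z produces disparity b*f/z. D is the signed distance along the
// viewing ray from the point to the plane, so length_pt + D is the range the point
// would have if it lay exactly on the plane, and actual_disparity is the disparity
// it would then produce. The point is accepted when the disparity shift is at most
// a fixed tolerance of 1/6 of a pixel.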
template <typename Tuple>
int CheckPlanarInlierKinectNormalIndices::operator () (const Tuple &t, const int &idx)
{
//if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z) | (idx == -1))
// return (-1);
const PointXYZRGB &pt = thrust::get<0>(t);
float4 &normal = thrust::get<1>(t);
const float b = 0.075f;
const float f = 580.0f/2.0f;
float length_pt = sqrtf (dot (pt, pt));
float dot_n_p = pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z;
float D = - coefficients.w * length_pt / dot_n_p - length_pt;
float orig_disparity = b * f / pt.z;
float actual_disparity = orig_disparity * length_pt / (length_pt + D);
if ((std::abs (actual_disparity - orig_disparity) <= 1.0/2.0) & (idx != -1)
&
(
std::abs (std::acos (normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z)) < angle_threshold
|
std::abs (std::acos (-(normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z))) < angle_threshold
)
)
return (idx);
else
return -1;
}
template <typename Tuple>
int CheckPlanarInlierNormalIndices::operator () (const Tuple &t, const int &idx)
{
const PointXYZRGB &pt = thrust::get<0>(t);
if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z) | (idx == -1))
return (-1);
float4 &normal = thrust::get<1>(t);
//TODO: make threshold adaptive, depending on z
if (std::abs (pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z + coefficients.w) < threshold
&
(
std::abs (std::acos (normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z)) < angle_threshold
|
std::abs (std::acos (-(normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z))) < angle_threshold
)
)
// If inlier, return its position in the vector
return (idx);
else
// If outlier, return -1
return (-1);
}
int CheckPlanarInlierIndices::operator () (const PointXYZRGB &pt, const int &idx)
{
if (idx == -1)
return (-1);
if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z))
return (-1);
//TODO: make threshold adaptive, depending on z
if (std::abs (pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z + coefficients.w) < threshold)
// If inlier, return its position in the vector
return (idx);
else
// If outlier, return -1
return (-1);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::countWithinDistance (
const Coefficients &model_coefficients, float threshold)
{
using namespace thrust;
// Needs a valid set of model coefficients
if (model_coefficients.size () != 4)
{
fprintf (stderr, "[pcl::cuda::SampleConsensusModel1PointPlane::countWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return 0;
}
float4 coefficients;
coefficients.x = model_coefficients[0];
coefficients.y = model_coefficients[1];
coefficients.z = model_coefficients[2];
coefficients.w = model_coefficients[3];
return (int) count_if (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
indices_->size (),
CountPlanarInlier (coefficients, threshold));
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::countWithinDistance (
const Hypotheses &h, int idx, float threshold)
{
if (isnan (((float4)h[idx]).x))
return (0);
return (int)
(thrust::count_if (
thrust::make_zip_iterator (thrust::make_tuple (input_->points.begin (), indices_->begin ())),
thrust::make_zip_iterator (thrust::make_tuple (input_->points.begin (), indices_->begin ())) +
indices_->size (),
CountPlanarInlier (h[idx], threshold)));
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::selectWithinDistance (
const Coefficients &model_coefficients, float threshold, IndicesPtr &inliers, IndicesPtr &inliers_stencil)
{
using namespace thrust;
// Needs a valid set of model coefficients
if (model_coefficients.size () != 4)
{
fprintf (stderr, "[SampleConsensusModel1PointPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return 0;
}
int nr_points = (int) indices_->size ();
{
// pcl::ScopeTime t ("Resize inl");
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
}
float4 coefficients;
coefficients.x = model_coefficients[0];
coefficients.y = model_coefficients[1];
coefficients.z = model_coefficients[2];
coefficients.w = model_coefficients[3];
{
// pcl::ScopeTime t ("transform");
// Send the data to the device
transform (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
nr_points,
inliers_stencil->begin (),
CheckPlanarInlier (coefficients, threshold));
}
{
// pcl::ScopeTime t ("Resize all");
if (!inliers)
inliers.reset (new Indices());
inliers->resize (nr_points);
}
typename Indices::iterator it;
{
// pcl::ScopeTime t ("copy-if");
// Copy data
it = copy_if (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), isInlier ());
//it = remove_copy (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), -1);
}
{
// pcl::ScopeTime t ("Resize");
inliers->resize (it - inliers->begin ());
}
return (int) inliers->size();
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::selectWithinDistance (
const Hypotheses &h, int idx, float threshold, IndicesPtr &inliers, IndicesPtr &inliers_stencil)
{
using namespace thrust;
// Needs a valid set of model coefficients
/* if (model_coefficients.size () != 4)
{
fprintf (stderr, "[SampleConsensusModel1PointPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return;
}*/
int nr_points = (int) indices_->size ();
{
// pcl::ScopeTime t ("Resize inl");
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
}
float4 coefficients;
coefficients.x = ((float4)h[idx]).x;
coefficients.y = ((float4)h[idx]).y;
coefficients.z = ((float4)h[idx]).z;
coefficients.w = ((float4)h[idx]).w;
{
// pcl::ScopeTime t ("transform");
// Send the data to the device
transform (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
nr_points,
inliers_stencil->begin (),
CheckPlanarInlier (coefficients, threshold));
}
{
// pcl::ScopeTime t ("Resize all");
if (!inliers)
inliers.reset (new Indices());
inliers->resize (nr_points);
}
typename Indices::iterator it;
{
// pcl::ScopeTime t ("copy-if");
// Copy data
it = copy_if (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), isInlier ());
}
{
// pcl::ScopeTime t ("Resize");
inliers->resize (it - inliers->begin ());
}
return (int) inliers->size ();
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::selectWithinDistance (
Hypotheses &h, int idx, float threshold, IndicesPtr &inliers_stencil, float3 &c)
{
float angle_threshold = 0.26f;
using namespace thrust;
int nr_points = (int) indices_stencil_->size ();
float bad_point = std::numeric_limits<float>::quiet_NaN ();
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
// necessary for the transform_if call below (since not all elements get written, we init with -1)..
//inliers_stencil->resize (nr_points, -1);
float4 coefficients;
coefficients.x = ((float4)h[idx]).x;
coefficients.y = ((float4)h[idx]).y;
coefficients.z = ((float4)h[idx]).z;
coefficients.w = ((float4)h[idx]).w;
if (isnan (coefficients.x) |
isnan (coefficients.y) |
isnan (coefficients.z) |
isnan (coefficients.w) )
{
c.x = c.y = c.z = 0;
return 0;
}
float3 best_centroid;
IndicesPtr best_inliers_stencil;
float3 centroid;
centroid.x = centroid.y = centroid.z = 0;
best_centroid = centroid;
//ORIG
// transform (
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
// nr_points,
// inliers_stencil->begin (),
// CheckPlanarInlier (coefficients, threshold));
// this is just as fast as the ORIG version, but requires initialization to -1 (see above) --> much slower
// transform_if (
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
// nr_points,
// indices_->begin(),
// inliers_stencil->begin (),
// CheckPlanarInlier (coefficients, threshold),
// isInlier ()
// );
// i forgot why this was slow. but it was. :)
// transform (
// indices_stencil_->begin (),
// indices_stencil_->end(),
// inliers_stencil->begin (),
// NewCheckPlanarInlier<Storage> (coefficients, (float)threshold, input_->points));
// compute inliers
// fastest
#ifdef KINECT
// NOTE: this performs inlier checks with kinect disparity error model, without normal check
transform (
input_->points.begin (), input_->points.end (),
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectIndices (coefficients, threshold, angle_threshold));
#endif
#ifdef KINECT_NORMALS
// NOTE: this performs inlier checks with kinect disparity error model, with normal check
transform (
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())),
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())) + nr_points,
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectNormalIndices (coefficients, threshold, angle_threshold));
#endif
// store inliers here
Indices inliers;
inliers.resize (indices_->size ()); // is this necessary?
typename Indices::iterator last = thrust::remove_copy (inliers_stencil->begin (), inliers_stencil->end (), inliers.begin (), -1);
inliers.erase (last, inliers.end ());
if (inliers.size () < 1)
return (int) inliers.size ();
best_inliers_stencil = inliers_stencil;
int best_nr_inliers = (int) inliers.size ();
int nr_inliers_after_refit = (int) inliers.size ();
int nr_inliers_before_refit;
int nr_refit_iterations = 0;
do {
nr_inliers_before_refit = nr_inliers_after_refit;
compute3DCentroid (make_permutation_iterator (input_->points.begin (), inliers.begin ()),
make_permutation_iterator (input_->points.begin (), inliers.end ()),
centroid);
if (isnan (centroid.x) | isnan (centroid.y) | isnan (centroid.z))
{
std::cerr << "Wow, centroid contains nans!" << std::endl;
inliers_stencil = best_inliers_stencil;
c = make_float3 (bad_point, bad_point, bad_point);
//best_centroid;
return best_nr_inliers;
}
// Note: centroid contains centroid * inliers.size() at this point !
#if 0
std::cerr << "----------------------------------------------------------------------------" << std::endl;
std::cerr << "inliers before: " << inliers.size () << std::endl;
std::cerr << "Centroid: " <<
centroid.x << ", " << centroid.y << ", " << centroid.z << ", " << std::endl;
#endif
CovarianceMatrix covariance_matrix;
computeCovariance (make_permutation_iterator (input_->points.begin (), inliers.begin ()),
make_permutation_iterator (input_->points.begin (), inliers.end ()),
covariance_matrix, centroid);
if (isnan (covariance_matrix.data[0].x))
{
std::cerr << "Wow, covariance matrix contains nans!" << std::endl;
inliers_stencil = best_inliers_stencil;
c = make_float3 (bad_point, bad_point, bad_point);
//best_centroid;
return best_nr_inliers;
}
#if 0
std::cerr << "Covariance: " <<
covariance_matrix.data[0].x << ", " << covariance_matrix.data[0].y << ", " << covariance_matrix.data[0].z << std::endl <<
covariance_matrix.data[1].x << ", " << covariance_matrix.data[1].y << ", " << covariance_matrix.data[1].z << std::endl <<
covariance_matrix.data[2].x << ", " << covariance_matrix.data[2].y << ", " << covariance_matrix.data[2].z << std::endl;
#endif
CovarianceMatrix evecs;
float3 evals;
// compute eigenvalues and -vectors
eigen33 (covariance_matrix, evecs, evals);
float3 mc = normalize (evecs.data[0]);
#if 0
std::cerr << "Eigenvectors: " <<
evecs.data[0].x << ", " << evecs.data[0].y << ", " << evecs.data[0].z << std::endl <<
evecs.data[1].x << ", " << evecs.data[1].y << ", " << evecs.data[1].z << std::endl <<
evecs.data[2].x << ", " << evecs.data[2].y << ", " << evecs.data[2].z << std::endl;
std::cerr << "Coefficients before: " <<
coefficients.x << ", " << coefficients.y << ", " << coefficients.z << ", " << coefficients.w << ", " << std::endl;
#endif
// compute plane coefficients from eigenvector corr. to smallest eigenvalue and centroid
coefficients.x = mc.x;
coefficients.y = mc.y;
coefficients.z = mc.z;
// ... + d = 0
coefficients.w = -1 * dot (mc, centroid);
#if 0
std::cerr << "Coefficients after: " <<
coefficients.x << ", " << coefficients.y << ", " << coefficients.z << ", " << coefficients.w << ", " << std::endl;
#endif
// finally, another inlier check:
#ifdef KINECT
transform (
input_->points.begin (), input_->points.end (),
//make_zip_iterator (make_tuple (input_->points.begin (), normals_.begin())),
//make_zip_iterator (make_tuple (input_->points.begin (), normals_.begin())) + nr_points,
// input_->points.begin (),
// input_->points.end (),
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectIndices (coefficients, threshold, angle_threshold));
#endif
#ifdef KINECT_NORMALS
transform (
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())),
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())) + nr_points,
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectNormalIndices (coefficients, threshold, angle_threshold));
#endif
// copy inliers from stencil to inlier vector
inliers.resize (inliers_stencil->size ()); // is this necessary?
last = thrust::remove_copy (inliers_stencil->begin (), inliers_stencil->end (), inliers.begin (), -1);
inliers.erase (last, inliers.end ());
nr_inliers_after_refit = (int) inliers.size ();
compute3DCentroid (make_permutation_iterator (input_->points.begin (), inliers.begin ()),
make_permutation_iterator (input_->points.begin (), inliers.end ()),
centroid);
if (nr_inliers_after_refit > best_nr_inliers)
{
best_nr_inliers = nr_inliers_after_refit;
best_inliers_stencil = inliers_stencil;
best_centroid = centroid;
h[idx] = coefficients;
}
//fprintf (stderr, "iteration %i: %f, %f, %f, %f ---> %i\n", nr_refit_iterations, coefficients.x, coefficients.y, coefficients.z, coefficients.w, best_nr_inliers);
} while (nr_inliers_after_refit > nr_inliers_before_refit & ++nr_refit_iterations < 120);
#if 0
std::cerr << "inliers after: " << nr_inliers_after_refit << std::endl;
#endif
//std::cerr << "--> refitting steps: " << nr_refit_iterations << std::endl;
inliers_stencil = best_inliers_stencil;
c = best_centroid;
return best_nr_inliers;
}
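// Refit summary: starting from the inliers of the initial 1-point hypothesis, the
// loop above repeatedly (1) compacts the stencil into an index list, (2) computes
// the inlier centroid and covariance on the device, (3) takes the eigenvector of
// the smallest eigenvalue (evecs.data[0] after eigen33) as the refined normal,
// (4) rebuilds d = -dot (n, centroid), and (5) re-runs the inlier check with the
// refined plane. It keeps iterating while the inlier count still grows, with a
// hard cap of 120 refit iterations, and returns the best stencil/centroid seen.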
// explicit template instantiation for device and host
template class PCL_EXPORTS SampleConsensusModel1PointPlane<Device>;
template class PCL_EXPORTS SampleConsensusModel1PointPlane<Host>;
} // namespace
} // namespace
|
93636aa26eb208536c41e2a3e7d26ffab535d02f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "NvInfer.h"
#include "bertCommon.h"
#include "common_hip.cuh"
#include "qkvToContextPlugin.h"
#include "serialize.hpp"
#include <cassert>
#include <cstring>
#include <iostream>
#include <tuple>
#include <vector>
#include "fused_multihead_attention_v2.h"
using namespace nvinfer1;
namespace bert
{
template <typename T, int TPB, int VPT>
__global__ void maskedSoftmax(const float rsqrtHeadSize, const T* input, T* output, const int* maskIdx)
{
using BlockReduce = hipcub::BlockReduce<float, TPB>;
union SMem
{
T shm[VPT * TPB];
typename BlockReduce::TempStorage reduce;
SMem() {}
};
__shared__ SMem tmp;
// grid: (NxS, B)
const int b = blockIdx.y;
const int blockOffset = (b * gridDim.x + blockIdx.x) * TPB;
__shared__ int lastValid;
if (threadIdx.x == 0)
{
lastValid = min(TPB, maskIdx[b]);
}
__syncthreads();
float local[VPT];
__shared__ float rZ;
__shared__ float fMax[VPT];
const int idx = (blockOffset + threadIdx.x) * VPT;
T* myshm = &tmp.shm[threadIdx.x * VPT];
copy<sizeof(T) * VPT>(&input[idx], myshm);
__syncthreads();
#pragma unroll
for (int it = 0; it < VPT; it++)
{
local[it] = (threadIdx.x < lastValid) ? float(tmp.shm[it * TPB + threadIdx.x]) : -FLT_MAX;
}
__syncthreads();
#pragma unroll
for (int it = 0; it < VPT; it++)
{
float maxElem = BlockReduce(tmp.reduce).Reduce(local[it], hipcub::Max());
if (threadIdx.x == 0)
{
fMax[it] = maxElem;
}
__syncthreads();
}
#pragma unroll
for (int it = 0; it < VPT; it++)
{
local[it] = (threadIdx.x < lastValid) ? myExp<float>(rsqrtHeadSize * (local[it] - fMax[it])) : 0.f;
}
__syncthreads();
#pragma unroll
for (int it = 0; it < VPT; it++)
{
const auto Z = BlockReduce(tmp.reduce).Reduce(local[it], hipcub::Sum());
if (threadIdx.x == 0)
{
rZ = (1.f) / Z;
}
__syncthreads();
local[it] *= rZ;
}
#pragma unroll
for (int it = 0; it < VPT; it++)
{
tmp.shm[it * TPB + threadIdx.x] = local[it];
}
__syncthreads();
copy<sizeof(T) * VPT>(myshm, &output[idx]);
}
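// maskedSoftmax: each block processes VPT attention rows of length TPB (the
// sequence length). The vectorized copy plus the shared-memory transpose
// (shm[it * TPB + threadIdx.x]) leaves thread tx holding element tx of each of the
// VPT rows, so the hipcub::BlockReduce calls produce the per-row max and per-row
// sum. Columns at or beyond lastValid = min (TPB, maskIdx[b]) are treated as -inf
// before the max and forced to probability 0 afterwards. Per row this is the
// numerically stable softmax of the scaled scores, with r = rsqrtHeadSize:
//   y_i = exp (r * (x_i - max_j x_j)) / sum_k exp (r * (x_k - max_j x_j))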
template <typename T, int TPB, int VPT>
__global__ void softmax(const float rsqrtHeadSize, const T* input, T* output)
{
float local[VPT];
using BlockReduce = hipcub::BlockReduce<float, TPB>;
union SMem
{
T shm[VPT * TPB];
typename BlockReduce::TempStorage reduce;
SMem() {}
};
__shared__ SMem tmp;
__shared__ float rZ;
__shared__ float fMax[VPT];
const int idx = (TPB * blockIdx.x + threadIdx.x) * VPT;
T* myshm = &tmp.shm[threadIdx.x * VPT];
copy<sizeof(T) * VPT>(&input[idx], myshm);
__syncthreads();
#pragma unroll
for (int it = 0; it < VPT; it++)
{
local[it] = float(tmp.shm[it * TPB + threadIdx.x]);
}
__syncthreads();
#pragma unroll
for (int it = 0; it < VPT; it++)
{
float maxElem = BlockReduce(tmp.reduce).Reduce(local[it], hipcub::Max());
if (threadIdx.x == 0)
{
fMax[it] = maxElem;
}
__syncthreads();
}
#pragma unroll
for (int it = 0; it < VPT; it++)
{
local[it] = myExp<float>(rsqrtHeadSize * (local[it] - fMax[it]));
}
__syncthreads();
#pragma unroll
for (int it = 0; it < VPT; it++)
{
const auto Z = BlockReduce(tmp.reduce).Reduce(local[it], hipcub::Sum());
if (threadIdx.x == 0)
{
rZ = 1.f / Z;
}
__syncthreads();
local[it] *= rZ;
}
#pragma unroll
for (int it = 0; it < VPT; it++)
{
tmp.shm[it * TPB + threadIdx.x] = local[it];
}
__syncthreads();
copy<sizeof(T) * VPT>(myshm, &output[idx]);
}
template <typename T, unsigned TPB>
__global__ void scaledSoftmaxKernelSmall(const int ld, const float rsqrtHeadSize, const T* input, T* output)
{
scaledSoftmaxSmall<T, TPB>(ld, ld, rsqrtHeadSize, input, output);
}
template <typename T, unsigned TPB>
__global__ void scaledSoftmaxKernel(const int ld, const float rsqrtHeadSize, const T* input, T* output)
{
scaledSoftmax<T, TPB>(ld, ld, rsqrtHeadSize, input, output);
}
template <typename T>
int computeScaledSoftmax(
hipStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const T* input, T* output)
{
constexpr int VPT = 16 / sizeof(T);
const dim3 grid(ld * N, B, 1);
if (ld <= 32)
{
const int blockSize = 32;
hipLaunchKernelGGL(( scaledSoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, input, output);
}
else if (ld < 128)
{
const int blockSize = 128;
hipLaunchKernelGGL(( scaledSoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, input, output);
}
else if (ld == 128)
{
const int grid = B * N * ld / (VPT);
hipLaunchKernelGGL(( softmax<T, 128, VPT>), dim3(grid), dim3(128), 0, stream, rsqrtHeadSize, input, output);
}
else if (ld == 384)
{
const int grid = B * N * ld / (VPT);
hipLaunchKernelGGL(( softmax<T, 384, VPT>), dim3(grid), dim3(384), 0, stream, rsqrtHeadSize, input, output);
}
else
{
const int blockSize = 256;
hipLaunchKernelGGL(( scaledSoftmaxKernel<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, input, output);
}
CHECK(hipPeekAtLastError());
return 0;
}
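// Launch selection: for short rows (ld <= 32 or ld < 128) a "small" kernel with one
// block per attention row and 32 or 128 threads is used; for the common BERT
// sequence lengths 128 and 384 the vectorized softmax<T, TPB, VPT> kernel runs with
// VPT = 16 / sizeof(T) elements per thread (grid = B * N * ld / VPT blocks, i.e.
// VPT rows per block); anything else falls back to the generic 256-thread kernel.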
template <typename T, unsigned TPB>
__global__ void maskedScaledSoftmaxKernelSmall(
const int ld, const float rsqrtHeadSize, const int* maskIdx, const T* input, T* output)
{
__shared__ int lastValid;
if (threadIdx.x == 0)
{
lastValid = min(ld, maskIdx[blockIdx.y]);
}
__syncthreads();
scaledSoftmaxSmall<T, TPB>(ld, lastValid, rsqrtHeadSize, input, output);
}
template <typename T, unsigned TPB>
__global__ void maskedScaledSoftmaxKernel(
const int ld, const float rsqrtHeadSize, const int* maskIdx, const T* input, T* output)
{
__shared__ int lastValid;
if (threadIdx.x == 0)
{
lastValid = min(ld, maskIdx[blockIdx.y]);
}
__syncthreads();
scaledSoftmax<T, TPB>(ld, lastValid, rsqrtHeadSize, input, output);
}
template <typename T>
int computeMaskedScaledSoftmax(hipStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize,
const int* maskIdx, const T* input, T* output)
{
// Mask idx is of length B and assumes the valid region is contiguous starting
// from the beginning of the sequence
const dim3 grid(ld * N, B, 1);
// for smaller problems, e.g. BERT base B=1, this is not optimal
if (ld <= 32)
{
constexpr int blockSize = 32;
hipLaunchKernelGGL(( maskedScaledSoftmaxKernelSmall<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, maskIdx, input, output);
}
else if (ld < 128)
{
constexpr int blockSize = 128;
hipLaunchKernelGGL(( maskedScaledSoftmaxKernelSmall<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, maskIdx, input, output);
}
else if (ld == 128)
{
if (B == 1)
{
constexpr int VPT = 4 / sizeof(T);
constexpr int blockSize = 128;
const dim3 grid(ld * N / VPT, B, 1);
hipLaunchKernelGGL(( maskedSoftmax<T, blockSize, VPT>), dim3(grid), dim3(blockSize), 0, stream, rsqrtHeadSize, input, output, maskIdx);
}
else
{
constexpr int VPT = 16 / sizeof(T);
constexpr int blockSize = 128;
const dim3 grid(ld * N / VPT, B, 1);
hipLaunchKernelGGL(( maskedSoftmax<T, blockSize, VPT>), dim3(grid), dim3(blockSize), 0, stream, rsqrtHeadSize, input, output, maskIdx);
}
}
else if (ld == 384)
{
if (B == 1)
{
constexpr int VPT = 4 / sizeof(T);
constexpr int blockSize = 384;
const dim3 grid(ld * N / VPT, B, 1);
hipLaunchKernelGGL(( maskedSoftmax<T, blockSize, VPT>), dim3(grid), dim3(blockSize), 0, stream, rsqrtHeadSize, input, output, maskIdx);
}
else
{
constexpr int VPT = 16 / sizeof(T);
constexpr int blockSize = 384;
const dim3 grid(ld * N / VPT, B, 1);
hipLaunchKernelGGL(( maskedSoftmax<T, blockSize, VPT>), dim3(grid), dim3(blockSize), 0, stream, rsqrtHeadSize, input, output, maskIdx);
}
}
else
{
constexpr int blockSize = 256;
hipLaunchKernelGGL(( maskedScaledSoftmaxKernel<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, maskIdx, input, output);
}
CHECK(hipPeekAtLastError());
return 0;
}
std::pair<int, int> tuneBatchedGemm(const int B, const int S, const int numHeads, const int headSize)
{
const int nruns = 500;
hipblasHandle_t cublas;
hipblasCreate(&cublas);
hipStream_t stream;
hipStreamCreate(&stream);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipblasSetStream(cublas, stream);
cublasSetMathMode(cublas, CUBLAS_TENSOR_OP_MATH);
using T = half;
const int omatSize = S * S;
const int numMats = B * numHeads;
const int ldQKV = 3 * B * numHeads * headSize;
const int strideQKV = 3 * headSize;
const int ldOut = B * numHeads * headSize;
const int strideOut = headSize;
const size_t inBytes = S * B * 3 * numHeads * headSize * sizeof(T);
const size_t qkBytes = S * S * B * numHeads * sizeof(T);
const size_t outBytes = S * B * numHeads * headSize * sizeof(T);
T* input = nullptr;
T* qkptr = nullptr;
T* output = nullptr;
hipMalloc(&input, inBytes);
hipMalloc(&qkptr, qkBytes);
hipMalloc(&output, outBytes);
hipMemset(input, 1, inBytes);
hipMemset(qkptr, 1, qkBytes);
// input: SxBx3xNxH
const T* qptr = input;
const T* kptr = qptr + headSize;
const T* vptr = kptr + headSize;
const int startAlgo = (int) CUBLAS_GEMM_DEFAULT_TENSOR_OP;
const int endAlgo = (int) CUBLAS_GEMM_ALGO15_TENSOR_OP;
int best1 = startAlgo;
int best2 = startAlgo;
float ms1 = 1000000;
float ms2 = 1000000;
for (int a = startAlgo; a <= endAlgo; a++)
{
hipblasGemmAlgo_t algo = static_cast<hipblasGemmAlgo_t>(a);
float ms1_, ms2_;
// qkptr: BxNxSxS
hipEventRecord(start, stream);
for (int r = 0; r < nruns; r++)
{
CUBLASASSERT(hipblasGemmStridedBatchedEx<T>(cublas, HIPBLAS_OP_T, HIPBLAS_OP_N, S, S, headSize, T(1.f), kptr,
ldQKV, strideQKV, qptr, ldQKV, strideQKV, T(0.f), qkptr, S, omatSize, numMats, algo));
}
hipEventRecord(stop, stream);
hipStreamSynchronize(stream);
hipEventElapsedTime(&ms1_, start, stop);
if (ms1_ < ms1)
{
best1 = algo;
ms1 = ms1_;
}
// pptr: BxNxSxS
// output: SxBxNxH
hipEventRecord(start, stream);
for (int r = 0; r < nruns; r++)
{
CUBLASASSERT(hipblasGemmStridedBatchedEx<T>(cublas, HIPBLAS_OP_N, HIPBLAS_OP_N, headSize, S, S, 1.f, vptr,
ldQKV, strideQKV, qkptr, S, omatSize, 0.f, output, ldOut, strideOut, numMats, algo));
}
hipEventRecord(stop, stream);
hipStreamSynchronize(stream);
hipEventElapsedTime(&ms2_, start, stop);
if (ms2_ < ms2)
{
best2 = algo;
ms2 = ms2_;
}
}
hipFree(input);
hipFree(qkptr);
hipFree(output);
hipEventDestroy(start);
hipEventDestroy(stop);
hipStreamDestroy(stream);
hipblasDestroy(cublas);
return std::make_pair(best1, best2);
}
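// tuneBatchedGemm brute-forces the batched-GEMM algorithm ids for the two GEMMs of
// unfused attention (Q*K' and P*V): for every algo between GEMM_DEFAULT_TENSOR_OP
// and GEMM_ALGO15_TENSOR_OP it times 500 back-to-back calls with events on dummy
// half-precision buffers of the requested (B, S, N, H) shape and keeps the fastest
// id per GEMM. The pair is consumed in UnfusedMHARunner::setup () below and cached
// through (de)serialization, so the sweep only runs when no previously tuned result
// is available.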
template int computeScaledSoftmax<float>(hipStream_t stream, const int ld, const int B, const int N,
const float rsqrtHeadSize, const float* input, float* output);
template int computeScaledSoftmax<half>(hipStream_t stream, const int ld, const int B, const int N,
const float rsqrtHeadSize, const half* input, half* output);
template int computeMaskedScaledSoftmax<float>(hipStream_t stream, const int ld, const int B, const int N,
const float rsqrtHeadSize, const int* maskIdx, const float* input, float* output);
template int computeMaskedScaledSoftmax<half>(hipStream_t stream, const int ld, const int B, const int N,
const float rsqrtHeadSize, const int* maskIdx, const half* input, half* output);
size_t MHARunner::getSerializationSize() const
{
return sizeof(mS) + sizeof(mB);
}
void MHARunner::serialize(void* buffer) const
{
serialize_value(&buffer, mS);
serialize_value(&buffer, mB);
}
void MHARunner::deserialize(const void* data, size_t length)
{
deserialize_value(&data, &length, &mS);
deserialize_value(&data, &length, &mB);
setup(mS, mB);
}
UnfusedMHARunner::UnfusedMHARunner(const nvinfer1::DataType type, const int numHeads, const int headSize)
: MHARunner(type, numHeads, headSize)
, mIsBestAlgoFound(false)
, mAlgoBatchedEx1(CUBLAS_GEMM_DEFAULT_TENSOR_OP)
, mAlgoBatchedEx2(CUBLAS_GEMM_DEFAULT_TENSOR_OP)
{
CUBLASASSERT(hipblasCreate(&mCublas));
}
UnfusedMHARunner::~UnfusedMHARunner()
{
CUBLASASSERT(hipblasDestroy(mCublas));
}
size_t UnfusedMHARunner::getSerializationSize() const
{
return sizeof(mAlgoBatchedEx1) + sizeof(mAlgoBatchedEx2) + MHARunner::getSerializationSize();
}
void UnfusedMHARunner::serialize(void* buffer) const
{
serialize_value(&buffer, mAlgoBatchedEx1);
serialize_value(&buffer, mAlgoBatchedEx2);
MHARunner::serialize(buffer);
}
void UnfusedMHARunner::deserialize(const void* data, size_t length)
{
mIsBestAlgoFound = true;
deserialize_value(&data, &length, &mAlgoBatchedEx1);
deserialize_value(&data, &length, &mAlgoBatchedEx2);
MHARunner::deserialize(data, length);
}
void UnfusedMHARunner::setup(const int S, const int B)
{
MHARunner::setup(S, B);
if (mType == DataType::kHALF && !mIsBestAlgoFound)
{
std::tie(mAlgoBatchedEx1, mAlgoBatchedEx2) = tuneBatchedGemm(B, S, mNumHeads, mHeadSize);
mIsBestAlgoFound = true;
gLogVerbose << "QKV Plugin - Selected Algos for batch gemms: " << mAlgoBatchedEx1 << ", " << mAlgoBatchedEx2
<< "\n";
}
}
size_t UnfusedMHARunner::getWorkspaceSize() const
{
return 2UL * mWordSize * mOmatSize * mNumMats;
}
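// Workspace layout for the unfused path: two scratch tensors of BxNxSxS elements
// each (mOmatSize * mNumMats) -- the raw Q*K' scores and the softmax probabilities
// -- hence 2 * wordSize * omatSize * numMats bytes.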
void UnfusedMHARunner::run(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc,
const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream)
{
this->run(inputDesc[0], outputDesc[0], inputs[0], inputs[1], outputs[0], workspace, stream);
}
void UnfusedMHARunner::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr,
const void* maskPtr, void* output, void* workspace, hipStream_t stream)
{
const int* maskIdx = static_cast<const int*>(maskPtr);
hipblasSetStream(mCublas, stream);
// Q, K, V: BxNxSxH (inputs)
// Q * K': BxNxSxS (-> scratch1)
// P: BxNxSxS (-> scratch2)
// P * V: BxNxSxH (output)
if (mType == DataType::kHALF)
{
CublasConfigHelper helper(mCublas);
const half* qptr = static_cast<const half*>(qkvPtr);
const half* kptr = qptr + mHeadSize;
const half* vptr = kptr + mHeadSize;
half* qkptr = static_cast<half*>(workspace);
half* pptr = qkptr + mOmatSize * mNumMats;
half alpha = 1.f;
half beta = 0.f;
CUBLASASSERT(::hipblasGemmStridedBatchedEx(mCublas, HIPBLAS_OP_T, HIPBLAS_OP_N, mS, mS, mHeadSize, &alpha, kptr,
HIP_R_16F, mLdQKV, mStrideQKV, qptr, HIP_R_16F, mLdQKV, mStrideQKV, &beta, qkptr, HIP_R_16F, mS,
mOmatSize, mNumMats, HIP_R_16F, static_cast<hipblasGemmAlgo_t>(mAlgoBatchedEx1)));
// apply softmax
if (maskIdx)
{ // if we have a mask
computeMaskedScaledSoftmax<half>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, maskIdx, qkptr, pptr);
}
else
{ // if we don't have a mask
computeScaledSoftmax<half>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, qkptr, pptr);
}
// compute P*V (as V*P)
CUBLASASSERT(hipblasGemmStridedBatchedEx(mCublas, HIPBLAS_OP_N, HIPBLAS_OP_N, mHeadSize, mS, mS, &alpha, vptr,
HIP_R_16F, mLdQKV, mStrideQKV, pptr, HIP_R_16F, mS, mOmatSize, &beta, output, HIP_R_16F, mLdOut,
mStrideOut, mNumMats, HIP_R_16F, static_cast<hipblasGemmAlgo_t>(mAlgoBatchedEx2)));
}
else
{
const float* qptr = static_cast<const float*>(qkvPtr);
const float* kptr = qptr + mHeadSize;
const float* vptr = kptr + mHeadSize;
float* qkptr = static_cast<float*>(workspace);
float* pptr = qkptr + mOmatSize * mNumMats;
float* outptr = static_cast<float*>(output);
CUBLASASSERT(cublasGemmStridedBatched<float>(mCublas, HIPBLAS_OP_T, HIPBLAS_OP_N, mS, mS, mHeadSize, 1.f, kptr,
mLdQKV, mStrideQKV, qptr, mLdQKV, mStrideQKV, 0.f, qkptr, mS, mOmatSize, mNumMats));
// apply softmax
if (maskIdx)
{ // if we have a mask
computeMaskedScaledSoftmax<float>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, maskIdx, qkptr, pptr);
}
else
{ // if we don't have a mask
computeScaledSoftmax<float>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, qkptr, pptr);
}
CUBLASASSERT(cublasGemmStridedBatched<float>(mCublas, HIPBLAS_OP_N, HIPBLAS_OP_N, mHeadSize, mS, mS, 1.f, vptr,
mLdQKV, mStrideQKV, pptr, mS, mOmatSize, 0.f, outptr, mLdOut, mStrideOut, mNumMats));
}
}
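// Unfused attention, step by step (both precisions follow the same structure):
//   1. batched GEMM #1: for every (batch, head) pair, build the SxS score matrix
//      from Q and K, reading them in place from the interleaved SxBx3xNxH QKV
//      buffer via ldQKV / strideQKV (qptr, kptr = qptr + H, vptr = kptr + H);
//   2. softmax: computeMaskedScaledSoftmax / computeScaledSoftmax applies the
//      1/sqrt(H) scaling and, when a mask is present, zeroes columns past the
//      valid sequence length;
//   3. batched GEMM #2: multiply the probabilities by V and write the SxBxNxH
//      context tensor directly into the output.
// The FP16 path additionally pins the algorithm ids tuned in tuneBatchedGemm.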
bool UnfusedMHARunner::isValid(int s) const
{
return mType != DataType::kINT8;
}
static inline void set_alpha(uint32_t& alpha, float norm, Data_type dtype)
{
if (dtype == DATA_TYPE_FP16)
{
half2 h2 = __float2half2_rn(norm);
alpha = reinterpret_cast<const uint32_t&>(h2);
}
else if (dtype == DATA_TYPE_FP32)
{
alpha = reinterpret_cast<const uint32_t&>(norm);
}
else if (dtype == DATA_TYPE_INT32)
{
int32_t inorm = static_cast<int32_t>(norm);
alpha = reinterpret_cast<const uint32_t&>(inorm);
}
else
{
assert(false);
}
}
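// set_alpha packs a scaling factor into the 32-bit immediate the fused kernels
// expect: for FP16 the value is replicated into both halves of a half2 and the
// bits are reinterpreted as uint32_t, for FP32 the raw float bits are stored, and
// for INT32 the value is truncated to int32_t first. Typical use, as in
// mhaImpl::setup () below:
//
//   set_alpha (params.scale_bmm1, interface->mRsqrtHeadSize, DATA_TYPE_FP16);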
class FusedMHARunnerFP16::mhaImpl
{
public:
mhaImpl(FusedMHARunnerFP16* interface)
: interface(interface)
, sm(interface->mSm)
, xmmaKernel(getXMMAKernels(DATA_TYPE_FP16, sm))
{
memset(¶ms, 0, sizeof(params));
}
~mhaImpl() {}
size_t getPackedMaskSizeInBytes() const
{
// check that we initialized
assert(xmmas_m > 0);
assert(threads_per_cta > 0);
assert(interface->mB > 0);
return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t);
}
void setup(const int S, const int B)
{
// TODO these implementation details might be better centralized into the XMMA code, since they are needed in
// several places (also outside of this plugin)
size_t warps_m, warps_n, warps_k = 1;
if (S == 64 || S == 96 || S == 128)
{
warps_m = 2;
warps_n = 2;
}
else if (S == 384)
{
warps_m = 1;
warps_n = 8;
}
else
{
assert(false && "Unsupported seqlen");
assert(false && "Unsupported seqlen");
}
// The number of threads per CTA.
threads_per_cta = warps_m * warps_n * warps_k * 32;
// The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension.
xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);
// The number of xmmas in the N dimension.
xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n);
const float scale_bmm1 = interface->mRsqrtHeadSize;
const float scale_softmax = 1.f; // Seems to be only required for int8
const float scale_bmm2 = 1.f;
Data_type scale_type = DATA_TYPE_FP16;
set_alpha(params.scale_bmm1, scale_bmm1, scale_type);
set_alpha(params.scale_softmax, scale_softmax, scale_type);
set_alpha(params.scale_bmm2, scale_bmm2, scale_type);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
params.qkv_stride_in_bytes = get_size_in_bytes(interface->mLdQKV, DATA_TYPE_FP16);
params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t);
params.o_stride_in_bytes = get_size_in_bytes(interface->mLdOut, DATA_TYPE_FP16);
}
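// Setup notes: the warp layout is fixed per supported sequence length (2x2 warps
// for S in {64, 96, 128}, 1x8 for S = 384), giving threads_per_cta =
// 32 * warps_m * warps_n. xmmas_m = ceil (S / (16 * warps_m)) counts the 16-row MMA
// tiles per CTA in the M dimension; the packed attention mask stores one uint32_t
// per (M-tile, thread), which is where both packed_mask_stride_in_bytes and
// getPackedMaskSizeInBytes () above come from.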
void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr,
const void* maskPtr, void* output, void* workspace, hipStream_t stream)
{
params.qkv_ptr = const_cast<void*>(qkvPtr);
params.packed_mask_ptr = const_cast<void*>(maskPtr);
params.o_ptr = output;
xmmaKernel->run(params, stream);
CHECK(hipPeekAtLastError());
}
bool isValid(int s) const
{
return xmmaKernel->isValid(s);
}
private:
FusedMHARunnerFP16* interface;
Fused_multihead_attention_params params;
int sm;
const FusedMultiHeadAttentionXMMAKernel* xmmaKernel;
size_t xmmas_m;
size_t xmmas_n;
size_t threads_per_cta;
};
FusedMHARunnerFP16::FusedMHARunnerFP16(const int numHeads, const int headSize, const int sm)
: MHARunner(DataType::kHALF, numHeads, headSize)
, mSm(sm)
, pimpl(new mhaImpl(this))
{
}
void FusedMHARunnerFP16::setup(const int S, const int B)
{
MHARunner::setup(S, B);
pimpl->setup(S, B);
}
size_t FusedMHARunnerFP16::getWorkspaceSize() const
{
return 0;
}
void FusedMHARunnerFP16::deserialize(const void* data, size_t length)
{
MHARunner::deserialize(data, length);
setup(mS, mB);
}
void FusedMHARunnerFP16::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr,
const void* maskPtr, void* output, void* workspace, hipStream_t stream)
{
pimpl->run(inputDesc, outputDesc, qkvPtr, maskPtr, output, workspace, stream);
}
void FusedMHARunnerFP16::run(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc,
const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream)
{
assert(false && "not implemented");
}
bool FusedMHARunnerFP16::isValid(int s) const
{
return pimpl->isValid(s);
}
// Int8 starts here: TODO refactor the duplicate stuff
class FusedMHARunnerInt8::mhaImpl
{
public:
mhaImpl(FusedMHARunnerInt8* interface)
: interface(interface)
, sm(interface->mSm)
, xmmaKernel(getXMMAKernels(DATA_TYPE_INT8, sm))
, mDqProbs(interface->mDqProbs)
{
memset(&params, 0, sizeof(params));
}
~mhaImpl() {}
size_t getPackedMaskSizeInBytes() const
{
assert(xmmas_m > 0);
assert(threads_per_cta > 0);
assert(interface->mB > 0);
return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t);
}
void setup(const int S, const int B)
{
size_t warps_m, warps_n, warps_k = 1;
if (S == 128)
{
warps_m = 2;
warps_n = 2;
}
else if (S == 384)
{
warps_m = 1;
warps_n = 8;
}
else
{
assert(false && "Unsupporte seqlen");
}
// The number of threads per CTA.
threads_per_cta = warps_m * warps_n * warps_k * 32;
// The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension.
xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);
// The number of xmmas in the N dimension.
xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
params.qkv_stride_in_bytes = get_size_in_bytes(interface->mLdQKV, DATA_TYPE_INT8);
params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t);
params.o_stride_in_bytes = get_size_in_bytes(interface->mLdOut, DATA_TYPE_INT8);
}
void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr,
const void* maskPtr, void* output, void* workspace, hipStream_t stream)
{
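// Fold the quantization scales into the three fused-kernel scale factors computed below:
// bmm1 dequantizes Q and K (scaleQkv each) and applies 1/sqrt(headSize), softmax
// re-quantizes the probabilities with 1/dqProbs, and bmm2 dequantizes the probabilities
// and V while quantizing the context output with 1/scaleCtx.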
float scaleQkv = inputDesc.scale;
float scaleCtx = outputDesc.scale;
float scaleBmm1 = scaleQkv * scaleQkv * interface->mRsqrtHeadSize;
float scaleBmm2 = mDqProbs * scaleQkv / scaleCtx;
float scaleSoftmax = 1.f / mDqProbs;
params.scale_bmm1 = reinterpret_cast<const uint32_t&>(scaleBmm1);
params.scale_bmm2 = reinterpret_cast<const uint32_t&>(scaleBmm2);
params.scale_softmax = reinterpret_cast<const uint32_t&>(scaleSoftmax);
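// The int-to-float trick is only enabled when 2^22 * scaleBmm2 covers the full int8
// output range [-128, 127]; otherwise the kernel presumably falls back to a regular
// int-to-float conversion.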
params.enable_i2f_trick = -double(1 << 22) * double(scaleBmm2) <= -128.f
&& double(1 << 22) * double(scaleBmm2) >= 127.f;
params.qkv_ptr = const_cast<void*>(qkvPtr);
params.packed_mask_ptr = const_cast<void*>(maskPtr);
params.o_ptr = output;
xmmaKernel->run(params, stream);
CHECK(hipPeekAtLastError());
}
bool isValid(int s) const
{
return xmmaKernel->isValid(s);
}
private:
float mDqProbs;
FusedMHARunnerInt8* interface;
Fused_multihead_attention_params params;
int sm;
const FusedMultiHeadAttentionXMMAKernel* xmmaKernel;
size_t xmmas_m;
size_t xmmas_n;
size_t threads_per_cta;
};
FusedMHARunnerInt8::FusedMHARunnerInt8(const int numHeads, const int headSize, const int sm, const float dqProbs)
: MHARunner(DataType::kINT8, numHeads, headSize)
, mSm(sm)
, pimpl(new mhaImpl(this))
, mDqProbs(dqProbs)
{
}
void FusedMHARunnerInt8::setup(const int S, const int B)
{
MHARunner::setup(S, B);
pimpl->setup(S, B);
}
size_t FusedMHARunnerInt8::getWorkspaceSize() const
{
return 0;
}
void FusedMHARunnerInt8::deserialize(const void* data, size_t length)
{
MHARunner::deserialize(data, length);
setup(mS, mB);
}
void FusedMHARunnerInt8::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr,
const void* maskPtr, void* output, void* workspace, hipStream_t stream)
{
pimpl->run(inputDesc, outputDesc, qkvPtr, maskPtr, output, workspace, stream);
}
void FusedMHARunnerInt8::run(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc,
const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream)
{
assert(false && "not implemented");
}
bool FusedMHARunnerInt8::isValid(int s) const
{
return pimpl->isValid(s);
}
class FusedMHARunnerFP16v2::mhaImpl
{
public:
mhaImpl(FusedMHARunnerFP16v2* interface)
: interface(interface)
, sm(interface->mSm)
, xmmaKernel(getXMMAKernelsV2(DATA_TYPE_FP16, sm))
{
assert((sm == kSM_72 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86) && "Unsupported architecture");
params.clear();
}
~mhaImpl() {}
size_t getPackedMaskSizeInBytes() const
{
// check that we initialized
assert(xmmas_m > 0);
assert(threads_per_cta > 0);
assert(interface->mB > 0);
return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t);
}
void setup(const int S, const int B)
{
// TODO these implementation details might be better centralized into the XMMA code, since they are needed in
// several places (also outside of this plugin)
size_t warps_m, warps_n, warps_k = 1;
if (S == 64 || S == 96 || S == 128)
{
warps_m = 2;
warps_n = 2;
}
else if (S == 256 || S == 192)
{
warps_m = 1;
warps_n = 4;
}
else if (S == 384)
{
warps_m = 1;
warps_n = 8;
}
else
{
assert(false && "Unsupporte seqlen");
}
// The number of threads per CTA.
threads_per_cta = warps_m * warps_n * warps_k * 32;
// The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension.
xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);
// The number of xmmas in the N dimension.
xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n);
const float scale_bmm1 = interface->mRsqrtHeadSize;
const float scale_softmax = 1.f; // Seems to be only required for int8
const float scale_bmm2 = 1.f;
Data_type scale_type = DATA_TYPE_FP16;
set_alpha(params.scale_bmm1, scale_bmm1, scale_type);
set_alpha(params.scale_softmax, scale_softmax, scale_type);
set_alpha(params.scale_bmm2, scale_bmm2, scale_type);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
// mLdQKV = 3 * B * mNumHeads * mHeadSize;
// mLdOut = B * mNumHeads * mHeadSize;
params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half);
params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t);
params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half);
}
void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr,
const void* maskPtr, const void* cuSeqlenPtr, void* output, void* workspace, hipStream_t stream)
{
params.qkv_ptr = const_cast<void*>(qkvPtr);
params.packed_mask_ptr = const_cast<void*>(maskPtr);
params.o_ptr = output;
params.cu_seqlens = static_cast<int*>(const_cast<void*>(cuSeqlenPtr));
xmmaKernel->run(params, stream);
CHECK(hipPeekAtLastError());
}
bool isValid(int s) const
{
return xmmaKernel->isValid(s);
}
private:
FusedMHARunnerFP16v2* interface;
Fused_multihead_attention_params_v2 params;
int sm;
const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel;
size_t xmmas_m;
size_t xmmas_n;
size_t threads_per_cta;
};
FusedMHARunnerFP16v2::FusedMHARunnerFP16v2(const int numHeads, const int headSize, const int sm)
: MHARunner(DataType::kHALF, numHeads, headSize)
, mSm(sm)
, pimpl(new mhaImpl(this))
{
}
void FusedMHARunnerFP16v2::setup(const int S, const int B)
{
MHARunner::setup(S, B);
pimpl->setup(S, B);
}
size_t FusedMHARunnerFP16v2::getWorkspaceSize() const
{
return 0;
}
void FusedMHARunnerFP16v2::deserialize(const void* data, size_t length)
{
MHARunner::deserialize(data, length);
setup(mS, mB);
}
void FusedMHARunnerFP16v2::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc,
const void* qkvPtr, const void* maskPtr, void* output, void* workspace, hipStream_t stream)
{
assert(false && "not implemented");
// pimpl->run(inputDesc, outputDesc, qkvPtr, maskPtr, output, workspace, stream);
}
void FusedMHARunnerFP16v2::run(const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace,
hipStream_t stream)
{
pimpl->run(inputDesc[0], outputDesc[0], inputs[0], inputs[1], inputs[2], outputs[0], workspace, stream);
}
bool FusedMHARunnerFP16v2::isValid(int s) const
{
return pimpl->isValid(s);
}
// Int8 starts here: TODO refactor the duplicate stuff
class FusedMHARunnerInt8v2::mhaImpl
{
public:
mhaImpl(FusedMHARunnerInt8v2* interface)
: interface(interface)
, sm(interface->mSm)
, xmmaKernel(getXMMAKernelsV2(DATA_TYPE_INT8, sm))
, mDqProbs(interface->mDqProbs)
{
assert((sm == kSM_72 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86) && "Unsupported architecture");
params.clear();
}
~mhaImpl() {}
size_t getPackedMaskSizeInBytes() const
{
assert(xmmas_m > 0);
assert(threads_per_cta > 0);
assert(interface->mB > 0);
return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t);
}
void setup(const int S, const int B)
{
size_t warps_m, warps_n, warps_k = 1;
if (S == 128)
{
warps_m = 2;
warps_n = 2;
}
else if (S == 256 || S == 192)
{
warps_m = 1;
warps_n = 4;
}
else if (S == 384)
{
warps_m = 1;
warps_n = 8;
}
else
{
assert(false && "Unsupported seqlen.");
}
// The number of threads per CTA.
threads_per_cta = warps_m * warps_n * warps_k * 32;
// The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension.
xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);
// The number of xmmas in the N dimension.
xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
params.use_int8_scale_max = true;
params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t);
params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(int8_t);
params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(int8_t);
}
void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr,
const void* maskPtr, const void* cuSeqlenPtr, void* output, void* workspace, hipStream_t stream)
{
float scaleQkv = inputDesc.scale;
float scaleCtx = outputDesc.scale;
float scaleBmm1 = scaleQkv * scaleQkv * interface->mRsqrtHeadSize;
float scaleBmm2 = mDqProbs * scaleQkv / scaleCtx;
float scaleSoftmax = 1.f / mDqProbs;
params.scale_bmm1 = reinterpret_cast<const uint32_t&>(scaleBmm1);
params.scale_bmm2 = reinterpret_cast<const uint32_t&>(scaleBmm2);
params.scale_softmax = reinterpret_cast<const uint32_t&>(scaleSoftmax);
params.enable_i2f_trick
= -double(1 << 22) * double(scaleBmm2) <= -128.f && double(1 << 22) * double(scaleBmm2) >= 127.f;
params.qkv_ptr = const_cast<void*>(qkvPtr);
params.packed_mask_ptr = const_cast<void*>(maskPtr);
params.use_int8_scale_max = true;
params.o_ptr = output;
params.cu_seqlens = static_cast<int*>(const_cast<void*>(cuSeqlenPtr));
xmmaKernel->run(params, stream);
CHECK(hipPeekAtLastError());
}
bool isValid(int s) const
{
return xmmaKernel->isValid(s);
}
private:
float mDqProbs;
FusedMHARunnerInt8v2* interface;
Fused_multihead_attention_params_v2 params;
int sm;
const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel;
size_t xmmas_m;
size_t xmmas_n;
size_t threads_per_cta;
};
FusedMHARunnerInt8v2::FusedMHARunnerInt8v2(const int numHeads, const int headSize, const int sm, const float dqProbs)
: MHARunner(DataType::kINT8, numHeads, headSize)
, mSm(sm)
, pimpl(new mhaImpl(this))
, mDqProbs(dqProbs)
{
}
void FusedMHARunnerInt8v2::setup(const int S, const int B)
{
MHARunner::setup(S, B);
pimpl->setup(S, B);
}
size_t FusedMHARunnerInt8v2::getWorkspaceSize() const
{
return 0;
}
void FusedMHARunnerInt8v2::deserialize(const void* data, size_t length)
{
MHARunner::deserialize(data, length);
setup(mS, mB);
}
void FusedMHARunnerInt8v2::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc,
const void* qkvPtr, const void* maskPtr, void* output, void* workspace, hipStream_t stream)
{
assert(false && "Not implemented");
}
void FusedMHARunnerInt8v2::run(const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace,
hipStream_t stream)
{
pimpl->run(inputDesc[0], outputDesc[0], inputs[0], inputs[1], inputs[2], outputs[0], workspace, stream);
}
bool FusedMHARunnerInt8v2::isValid(int s) const
{
return pimpl->isValid(s);
}
} // namespace bert
|
93636aa26eb208536c41e2a3e7d26ffab535d02f.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "NvInfer.h"
#include "bertCommon.h"
#include "common.cuh"
#include "qkvToContextPlugin.h"
#include "serialize.hpp"
#include <cassert>
#include <cstring>
#include <iostream>
#include <tuple>
#include <vector>
#include "fused_multihead_attention_v2.h"
using namespace nvinfer1;
namespace bert
{
template <typename T, int TPB, int VPT>
__global__ void maskedSoftmax(const float rsqrtHeadSize, const T* input, T* output, const int* maskIdx)
{
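// Each block covers VPT consecutive rows of the BxNxSxS attention scores; this kernel is
// only launched when the row length equals TPB. Threads load VPT contiguous values, restage
// them through shared memory so that in pass `it` thread x holds element x of row `it`,
// then use BlockReduce to obtain each row's max and sum for a numerically stable softmax.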
using BlockReduce = cub::BlockReduce<float, TPB>;
union SMem
{
T shm[VPT * TPB];
typename BlockReduce::TempStorage reduce;
SMem() {}
};
__shared__ SMem tmp;
// grid: (NxS, B)
const int b = blockIdx.y;
const int blockOffset = (b * gridDim.x + blockIdx.x) * TPB;
__shared__ int lastValid;
if (threadIdx.x == 0)
{
lastValid = min(TPB, maskIdx[b]);
}
__syncthreads();
float local[VPT];
__shared__ float rZ;
__shared__ float fMax[VPT];
const int idx = (blockOffset + threadIdx.x) * VPT;
T* myshm = &tmp.shm[threadIdx.x * VPT];
copy<sizeof(T) * VPT>(&input[idx], myshm);
__syncthreads();
#pragma unroll
for (int it = 0; it < VPT; it++)
{
local[it] = (threadIdx.x < lastValid) ? float(tmp.shm[it * TPB + threadIdx.x]) : -FLT_MAX;
}
__syncthreads();
#pragma unroll
for (int it = 0; it < VPT; it++)
{
float maxElem = BlockReduce(tmp.reduce).Reduce(local[it], cub::Max());
if (threadIdx.x == 0)
{
fMax[it] = maxElem;
}
__syncthreads();
}
#pragma unroll
for (int it = 0; it < VPT; it++)
{
local[it] = (threadIdx.x < lastValid) ? myExp<float>(rsqrtHeadSize * (local[it] - fMax[it])) : 0.f;
}
__syncthreads();
#pragma unroll
for (int it = 0; it < VPT; it++)
{
const auto Z = BlockReduce(tmp.reduce).Reduce(local[it], cub::Sum());
if (threadIdx.x == 0)
{
rZ = (1.f) / Z;
}
__syncthreads();
local[it] *= rZ;
}
#pragma unroll
for (int it = 0; it < VPT; it++)
{
tmp.shm[it * TPB + threadIdx.x] = local[it];
}
__syncthreads();
copy<sizeof(T) * VPT>(myshm, &output[idx]);
}
template <typename T, int TPB, int VPT>
__global__ void softmax(const float rsqrtHeadSize, const T* input, T* output)
{
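// Unmasked variant of maskedSoftmax above: same VPT-vectorized layout, but every element
// of the row participates in the reduction.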
float local[VPT];
using BlockReduce = cub::BlockReduce<float, TPB>;
union SMem
{
T shm[VPT * TPB];
typename BlockReduce::TempStorage reduce;
SMem() {}
};
__shared__ SMem tmp;
__shared__ float rZ;
__shared__ float fMax[VPT];
const int idx = (TPB * blockIdx.x + threadIdx.x) * VPT;
T* myshm = &tmp.shm[threadIdx.x * VPT];
copy<sizeof(T) * VPT>(&input[idx], myshm);
__syncthreads();
#pragma unroll
for (int it = 0; it < VPT; it++)
{
local[it] = float(tmp.shm[it * TPB + threadIdx.x]);
}
__syncthreads();
#pragma unroll
for (int it = 0; it < VPT; it++)
{
float maxElem = BlockReduce(tmp.reduce).Reduce(local[it], cub::Max());
if (threadIdx.x == 0)
{
fMax[it] = maxElem;
}
__syncthreads();
}
#pragma unroll
for (int it = 0; it < VPT; it++)
{
local[it] = myExp<float>(rsqrtHeadSize * (local[it] - fMax[it]));
}
__syncthreads();
#pragma unroll
for (int it = 0; it < VPT; it++)
{
const auto Z = BlockReduce(tmp.reduce).Reduce(local[it], cub::Sum());
if (threadIdx.x == 0)
{
rZ = 1.f / Z;
}
__syncthreads();
local[it] *= rZ;
}
#pragma unroll
for (int it = 0; it < VPT; it++)
{
tmp.shm[it * TPB + threadIdx.x] = local[it];
}
__syncthreads();
copy<sizeof(T) * VPT>(myshm, &output[idx]);
}
template <typename T, unsigned TPB>
__global__ void scaledSoftmaxKernelSmall(const int ld, const float rsqrtHeadSize, const T* input, T* output)
{
scaledSoftmaxSmall<T, TPB>(ld, ld, rsqrtHeadSize, input, output);
}
template <typename T, unsigned TPB>
__global__ void scaledSoftmaxKernel(const int ld, const float rsqrtHeadSize, const T* input, T* output)
{
scaledSoftmax<T, TPB>(ld, ld, rsqrtHeadSize, input, output);
}
template <typename T>
int computeScaledSoftmax(
cudaStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const T* input, T* output)
{
constexpr int VPT = 16 / sizeof(T);
const dim3 grid(ld * N, B, 1);
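// Dispatch on sequence length: the "Small" kernels use a block at least as wide as the row
// for short sequences, the vectorized softmax kernels handle the common BERT lengths 128
// and 384, and the generic scaledSoftmaxKernel covers everything else.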
if (ld <= 32)
{
const int blockSize = 32;
scaledSoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, input, output);
}
else if (ld < 128)
{
const int blockSize = 128;
scaledSoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, input, output);
}
else if (ld == 128)
{
const int grid = B * N * ld / (VPT);
softmax<T, 128, VPT><<<grid, 128, 0, stream>>>(rsqrtHeadSize, input, output);
}
else if (ld == 384)
{
const int grid = B * N * ld / (VPT);
softmax<T, 384, VPT><<<grid, 384, 0, stream>>>(rsqrtHeadSize, input, output);
}
else
{
const int blockSize = 256;
scaledSoftmaxKernel<T, blockSize><<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, input, output);
}
CHECK(cudaPeekAtLastError());
return 0;
}
template <typename T, unsigned TPB>
__global__ void maskedScaledSoftmaxKernelSmall(
const int ld, const float rsqrtHeadSize, const int* maskIdx, const T* input, T* output)
{
__shared__ int lastValid;
if (threadIdx.x == 0)
{
lastValid = min(ld, maskIdx[blockIdx.y]);
}
__syncthreads();
scaledSoftmaxSmall<T, TPB>(ld, lastValid, rsqrtHeadSize, input, output);
}
template <typename T, unsigned TPB>
__global__ void maskedScaledSoftmaxKernel(
const int ld, const float rsqrtHeadSize, const int* maskIdx, const T* input, T* output)
{
__shared__ int lastValid;
if (threadIdx.x == 0)
{
lastValid = min(ld, maskIdx[blockIdx.y]);
}
__syncthreads();
scaledSoftmax<T, TPB>(ld, lastValid, rsqrtHeadSize, input, output);
}
template <typename T>
int computeMaskedScaledSoftmax(cudaStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize,
const int* maskIdx, const T* input, T* output)
{
// Mask idx is of length B and assumes the valid region is contiguous starting
// from the beginning of the sequence
const dim3 grid(ld * N, B, 1);
// for smaller problems, e.g. BERT base B=1, this is not optimal
if (ld <= 32)
{
constexpr int blockSize = 32;
maskedScaledSoftmaxKernelSmall<T, blockSize>
<<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, maskIdx, input, output);
}
else if (ld < 128)
{
constexpr int blockSize = 128;
maskedScaledSoftmaxKernelSmall<T, blockSize>
<<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, maskIdx, input, output);
}
else if (ld == 128)
{
if (B == 1)
{
constexpr int VPT = 4 / sizeof(T);
constexpr int blockSize = 128;
const dim3 grid(ld * N / VPT, B, 1);
maskedSoftmax<T, blockSize, VPT><<<grid, blockSize, 0, stream>>>(rsqrtHeadSize, input, output, maskIdx);
}
else
{
constexpr int VPT = 16 / sizeof(T);
constexpr int blockSize = 128;
const dim3 grid(ld * N / VPT, B, 1);
maskedSoftmax<T, blockSize, VPT><<<grid, blockSize, 0, stream>>>(rsqrtHeadSize, input, output, maskIdx);
}
}
else if (ld == 384)
{
if (B == 1)
{
constexpr int VPT = 4 / sizeof(T);
constexpr int blockSize = 384;
const dim3 grid(ld * N / VPT, B, 1);
maskedSoftmax<T, blockSize, VPT><<<grid, blockSize, 0, stream>>>(rsqrtHeadSize, input, output, maskIdx);
}
else
{
constexpr int VPT = 16 / sizeof(T);
constexpr int blockSize = 384;
const dim3 grid(ld * N / VPT, B, 1);
maskedSoftmax<T, blockSize, VPT><<<grid, blockSize, 0, stream>>>(rsqrtHeadSize, input, output, maskIdx);
}
}
else
{
constexpr int blockSize = 256;
maskedScaledSoftmaxKernel<T, blockSize>
<<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, maskIdx, input, output);
}
CHECK(cudaPeekAtLastError());
return 0;
}
std::pair<int, int> tuneBatchedGemm(const int B, const int S, const int numHeads, const int headSize)
{
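// Benchmark every cuBLAS tensor-op algorithm for the two strided-batched GEMMs of the
// unfused path (Q*K^T and P*V) on dummy data, and return the fastest algorithm id for each.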
const int nruns = 500;
cublasHandle_t cublas;
cublasCreate(&cublas);
cudaStream_t stream;
cudaStreamCreate(&stream);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cublasSetStream(cublas, stream);
cublasSetMathMode(cublas, CUBLAS_TENSOR_OP_MATH);
using T = half;
const int omatSize = S * S;
const int numMats = B * numHeads;
const int ldQKV = 3 * B * numHeads * headSize;
const int strideQKV = 3 * headSize;
const int ldOut = B * numHeads * headSize;
const int strideOut = headSize;
const size_t inBytes = S * B * 3 * numHeads * headSize * sizeof(T);
const size_t qkBytes = S * S * B * numHeads * sizeof(T);
const size_t outBytes = S * B * numHeads * headSize * sizeof(T);
T* input = nullptr;
T* qkptr = nullptr;
T* output = nullptr;
cudaMalloc(&input, inBytes);
cudaMalloc(&qkptr, qkBytes);
cudaMalloc(&output, outBytes);
cudaMemset(input, 1, inBytes);
cudaMemset(qkptr, 1, qkBytes);
// input: SxBx3xNxH
const T* qptr = input;
const T* kptr = qptr + headSize;
const T* vptr = kptr + headSize;
const int startAlgo = (int) CUBLAS_GEMM_DEFAULT_TENSOR_OP;
const int endAlgo = (int) CUBLAS_GEMM_ALGO15_TENSOR_OP;
int best1 = startAlgo;
int best2 = startAlgo;
float ms1 = 1000000;
float ms2 = 1000000;
for (int a = startAlgo; a <= endAlgo; a++)
{
cublasGemmAlgo_t algo = static_cast<cublasGemmAlgo_t>(a);
float ms1_, ms2_;
// qkptr: BxNxSxS
cudaEventRecord(start, stream);
for (int r = 0; r < nruns; r++)
{
CUBLASASSERT(cublasGemmStridedBatchedEx<T>(cublas, CUBLAS_OP_T, CUBLAS_OP_N, S, S, headSize, T(1.f), kptr,
ldQKV, strideQKV, qptr, ldQKV, strideQKV, T(0.f), qkptr, S, omatSize, numMats, algo));
}
cudaEventRecord(stop, stream);
cudaStreamSynchronize(stream);
cudaEventElapsedTime(&ms1_, start, stop);
if (ms1_ < ms1)
{
best1 = algo;
ms1 = ms1_;
}
// pptr: BxNxSxS
// output: SxBxNxH
cudaEventRecord(start, stream);
for (int r = 0; r < nruns; r++)
{
CUBLASASSERT(cublasGemmStridedBatchedEx<T>(cublas, CUBLAS_OP_N, CUBLAS_OP_N, headSize, S, S, 1.f, vptr,
ldQKV, strideQKV, qkptr, S, omatSize, 0.f, output, ldOut, strideOut, numMats, algo));
}
cudaEventRecord(stop, stream);
cudaStreamSynchronize(stream);
cudaEventElapsedTime(&ms2_, start, stop);
if (ms2_ < ms2)
{
best2 = algo;
ms2 = ms2_;
}
}
cudaFree(input);
cudaFree(qkptr);
cudaFree(output);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaStreamDestroy(stream);
cublasDestroy(cublas);
return std::make_pair(best1, best2);
}
template int computeScaledSoftmax<float>(cudaStream_t stream, const int ld, const int B, const int N,
const float rsqrtHeadSize, const float* input, float* output);
template int computeScaledSoftmax<half>(cudaStream_t stream, const int ld, const int B, const int N,
const float rsqrtHeadSize, const half* input, half* output);
template int computeMaskedScaledSoftmax<float>(cudaStream_t stream, const int ld, const int B, const int N,
const float rsqrtHeadSize, const int* maskIdx, const float* input, float* output);
template int computeMaskedScaledSoftmax<half>(cudaStream_t stream, const int ld, const int B, const int N,
const float rsqrtHeadSize, const int* maskIdx, const half* input, half* output);
size_t MHARunner::getSerializationSize() const
{
return sizeof(mS) + sizeof(mB);
}
void MHARunner::serialize(void* buffer) const
{
serialize_value(&buffer, mS);
serialize_value(&buffer, mB);
}
void MHARunner::deserialize(const void* data, size_t length)
{
deserialize_value(&data, &length, &mS);
deserialize_value(&data, &length, &mB);
setup(mS, mB);
}
UnfusedMHARunner::UnfusedMHARunner(const nvinfer1::DataType type, const int numHeads, const int headSize)
: MHARunner(type, numHeads, headSize)
, mIsBestAlgoFound(false)
, mAlgoBatchedEx1(CUBLAS_GEMM_DEFAULT_TENSOR_OP)
, mAlgoBatchedEx2(CUBLAS_GEMM_DEFAULT_TENSOR_OP)
{
CUBLASASSERT(cublasCreate(&mCublas));
}
UnfusedMHARunner::~UnfusedMHARunner()
{
CUBLASASSERT(cublasDestroy(mCublas));
}
size_t UnfusedMHARunner::getSerializationSize() const
{
return sizeof(mAlgoBatchedEx1) + sizeof(mAlgoBatchedEx2) + MHARunner::getSerializationSize();
}
void UnfusedMHARunner::serialize(void* buffer) const
{
serialize_value(&buffer, mAlgoBatchedEx1);
serialize_value(&buffer, mAlgoBatchedEx2);
MHARunner::serialize(buffer);
}
void UnfusedMHARunner::deserialize(const void* data, size_t length)
{
mIsBestAlgoFound = true;
deserialize_value(&data, &length, &mAlgoBatchedEx1);
deserialize_value(&data, &length, &mAlgoBatchedEx2);
MHARunner::deserialize(data, length);
}
void UnfusedMHARunner::setup(const int S, const int B)
{
MHARunner::setup(S, B);
if (mType == DataType::kHALF && !mIsBestAlgoFound)
{
std::tie(mAlgoBatchedEx1, mAlgoBatchedEx2) = tuneBatchedGemm(B, S, mNumHeads, mHeadSize);
mIsBestAlgoFound = true;
gLogVerbose << "QKV Plugin - Selected Algos for batch gemms: " << mAlgoBatchedEx1 << ", " << mAlgoBatchedEx2
<< "\n";
}
}
size_t UnfusedMHARunner::getWorkspaceSize() const
{
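// Two scratch buffers of B*N*S*S elements each: one for the Q*K^T scores and one for the
// softmax probabilities P.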
return 2UL * mWordSize * mOmatSize * mNumMats;
}
void UnfusedMHARunner::run(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc,
const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream)
{
this->run(inputDesc[0], outputDesc[0], inputs[0], inputs[1], outputs[0], workspace, stream);
}
void UnfusedMHARunner::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr,
const void* maskPtr, void* output, void* workspace, cudaStream_t stream)
{
const int* maskIdx = static_cast<const int*>(maskPtr);
cublasSetStream(mCublas, stream);
// Q, K, V: BxNxSxH (inputs)
// Q * K': BxNxSxS (-> scratch1)
// P: BxNxSxS (-> scratch2)
// P * V: BxNxSxH (output)
if (mType == DataType::kHALF)
{
CublasConfigHelper helper(mCublas);
const half* qptr = static_cast<const half*>(qkvPtr);
const half* kptr = qptr + mHeadSize;
const half* vptr = kptr + mHeadSize;
half* qkptr = static_cast<half*>(workspace);
half* pptr = qkptr + mOmatSize * mNumMats;
half alpha = 1.f;
half beta = 0.f;
CUBLASASSERT(::cublasGemmStridedBatchedEx(mCublas, CUBLAS_OP_T, CUBLAS_OP_N, mS, mS, mHeadSize, &alpha, kptr,
CUDA_R_16F, mLdQKV, mStrideQKV, qptr, CUDA_R_16F, mLdQKV, mStrideQKV, &beta, qkptr, CUDA_R_16F, mS,
mOmatSize, mNumMats, CUDA_R_16F, static_cast<cublasGemmAlgo_t>(mAlgoBatchedEx1)));
// apply softmax
if (maskIdx)
{ // if we have a mask
computeMaskedScaledSoftmax<half>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, maskIdx, qkptr, pptr);
}
else
{ // if we don't have a mask
computeScaledSoftmax<half>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, qkptr, pptr);
}
// compute P*V (as V*P)
CUBLASASSERT(cublasGemmStridedBatchedEx(mCublas, CUBLAS_OP_N, CUBLAS_OP_N, mHeadSize, mS, mS, &alpha, vptr,
CUDA_R_16F, mLdQKV, mStrideQKV, pptr, CUDA_R_16F, mS, mOmatSize, &beta, output, CUDA_R_16F, mLdOut,
mStrideOut, mNumMats, CUDA_R_16F, static_cast<cublasGemmAlgo_t>(mAlgoBatchedEx2)));
}
else
{
const float* qptr = static_cast<const float*>(qkvPtr);
const float* kptr = qptr + mHeadSize;
const float* vptr = kptr + mHeadSize;
float* qkptr = static_cast<float*>(workspace);
float* pptr = qkptr + mOmatSize * mNumMats;
float* outptr = static_cast<float*>(output);
CUBLASASSERT(cublasGemmStridedBatched<float>(mCublas, CUBLAS_OP_T, CUBLAS_OP_N, mS, mS, mHeadSize, 1.f, kptr,
mLdQKV, mStrideQKV, qptr, mLdQKV, mStrideQKV, 0.f, qkptr, mS, mOmatSize, mNumMats));
// apply softmax
if (maskIdx)
{ // if we have a mask
computeMaskedScaledSoftmax<float>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, maskIdx, qkptr, pptr);
}
else
{ // if we don't have a mask
computeScaledSoftmax<float>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, qkptr, pptr);
}
CUBLASASSERT(cublasGemmStridedBatched<float>(mCublas, CUBLAS_OP_N, CUBLAS_OP_N, mHeadSize, mS, mS, 1.f, vptr,
mLdQKV, mStrideQKV, pptr, mS, mOmatSize, 0.f, outptr, mLdOut, mStrideOut, mNumMats));
}
}
bool UnfusedMHARunner::isValid(int s) const
{
return mType != DataType::kINT8;
}
static inline void set_alpha(uint32_t& alpha, float norm, Data_type dtype)
{
if (dtype == DATA_TYPE_FP16)
{
half2 h2 = __float2half2_rn(norm);
alpha = reinterpret_cast<const uint32_t&>(h2);
}
else if (dtype == DATA_TYPE_FP32)
{
alpha = reinterpret_cast<const uint32_t&>(norm);
}
else if (dtype == DATA_TYPE_INT32)
{
int32_t inorm = static_cast<int32_t>(norm);
alpha = reinterpret_cast<const uint32_t&>(inorm);
}
else
{
assert(false);
}
}
class FusedMHARunnerFP16::mhaImpl
{
public:
mhaImpl(FusedMHARunnerFP16* interface)
: interface(interface)
, sm(interface->mSm)
, xmmaKernel(getXMMAKernels(DATA_TYPE_FP16, sm))
{
memset(&params, 0, sizeof(params));
}
~mhaImpl() {}
size_t getPackedMaskSizeInBytes() const
{
// check that we initialized
assert(xmmas_m > 0);
assert(threads_per_cta > 0);
assert(interface->mB > 0);
return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t);
}
void setup(const int S, const int B)
{
// TODO these implementation details might be better centralized into the XMMA code, since they are needed in
// several places (also outside of this plugin)
size_t warps_m, warps_n, warps_k = 1;
if (S == 64 || S == 96 || S == 128)
{
warps_m = 2;
warps_n = 2;
}
else if (S == 384)
{
warps_m = 1;
warps_n = 8;
}
else
{
assert(false && "Unsupporte seqlen");
}
// The number of threads per CTA.
threads_per_cta = warps_m * warps_n * warps_k * 32;
// The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension.
xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);
// The number of xmmas in the N dimension.
xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n);
const float scale_bmm1 = interface->mRsqrtHeadSize;
const float scale_softmax = 1.f; // Seems to be only required for int8
const float scale_bmm2 = 1.f;
Data_type scale_type = DATA_TYPE_FP16;
set_alpha(params.scale_bmm1, scale_bmm1, scale_type);
set_alpha(params.scale_softmax, scale_softmax, scale_type);
set_alpha(params.scale_bmm2, scale_bmm2, scale_type);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
params.qkv_stride_in_bytes = get_size_in_bytes(interface->mLdQKV, DATA_TYPE_FP16);
params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t);
params.o_stride_in_bytes = get_size_in_bytes(interface->mLdOut, DATA_TYPE_FP16);
}
void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr,
const void* maskPtr, void* output, void* workspace, cudaStream_t stream)
{
params.qkv_ptr = const_cast<void*>(qkvPtr);
params.packed_mask_ptr = const_cast<void*>(maskPtr);
params.o_ptr = output;
xmmaKernel->run(params, stream);
CHECK(cudaPeekAtLastError());
}
bool isValid(int s) const
{
return xmmaKernel->isValid(s);
}
private:
FusedMHARunnerFP16* interface;
Fused_multihead_attention_params params;
int sm;
const FusedMultiHeadAttentionXMMAKernel* xmmaKernel;
size_t xmmas_m;
size_t xmmas_n;
size_t threads_per_cta;
};
FusedMHARunnerFP16::FusedMHARunnerFP16(const int numHeads, const int headSize, const int sm)
: MHARunner(DataType::kHALF, numHeads, headSize)
, mSm(sm)
, pimpl(new mhaImpl(this))
{
}
void FusedMHARunnerFP16::setup(const int S, const int B)
{
MHARunner::setup(S, B);
pimpl->setup(S, B);
}
size_t FusedMHARunnerFP16::getWorkspaceSize() const
{
return 0;
}
void FusedMHARunnerFP16::deserialize(const void* data, size_t length)
{
MHARunner::deserialize(data, length);
setup(mS, mB);
}
void FusedMHARunnerFP16::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr,
const void* maskPtr, void* output, void* workspace, cudaStream_t stream)
{
pimpl->run(inputDesc, outputDesc, qkvPtr, maskPtr, output, workspace, stream);
}
void FusedMHARunnerFP16::run(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc,
const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream)
{
assert(false && "not implemented");
}
bool FusedMHARunnerFP16::isValid(int s) const
{
return pimpl->isValid(s);
}
// Int8 starts here: TODO refactor the duplicate stuff
class FusedMHARunnerInt8::mhaImpl
{
public:
mhaImpl(FusedMHARunnerInt8* interface)
: interface(interface)
, sm(interface->mSm)
, xmmaKernel(getXMMAKernels(DATA_TYPE_INT8, sm))
, mDqProbs(interface->mDqProbs)
{
memset(&params, 0, sizeof(params));
}
~mhaImpl() {}
size_t getPackedMaskSizeInBytes() const
{
assert(xmmas_m > 0);
assert(threads_per_cta > 0);
assert(interface->mB > 0);
return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t);
}
void setup(const int S, const int B)
{
size_t warps_m, warps_n, warps_k = 1;
if (S == 128)
{
warps_m = 2;
warps_n = 2;
}
else if (S == 384)
{
warps_m = 1;
warps_n = 8;
}
else
{
assert(false && "Unsupporte seqlen");
}
// The number of threads per CTA.
threads_per_cta = warps_m * warps_n * warps_k * 32;
// The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension.
xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);
// The number of xmmas in the N dimension.
xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
params.qkv_stride_in_bytes = get_size_in_bytes(interface->mLdQKV, DATA_TYPE_INT8);
params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t);
params.o_stride_in_bytes = get_size_in_bytes(interface->mLdOut, DATA_TYPE_INT8);
}
void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr,
const void* maskPtr, void* output, void* workspace, cudaStream_t stream)
{
float scaleQkv = inputDesc.scale;
float scaleCtx = outputDesc.scale;
float scaleBmm1 = scaleQkv * scaleQkv * interface->mRsqrtHeadSize;
float scaleBmm2 = mDqProbs * scaleQkv / scaleCtx;
float scaleSoftmax = 1.f / mDqProbs;
params.scale_bmm1 = reinterpret_cast<const uint32_t&>(scaleBmm1);
params.scale_bmm2 = reinterpret_cast<const uint32_t&>(scaleBmm2);
params.scale_softmax = reinterpret_cast<const uint32_t&>(scaleSoftmax);
params.enable_i2f_trick = -double(1 << 22) * double(scaleBmm2) <= -128.f
&& double(1 << 22) * double(scaleBmm2) >= 127.f;
params.qkv_ptr = const_cast<void*>(qkvPtr);
params.packed_mask_ptr = const_cast<void*>(maskPtr);
params.o_ptr = output;
xmmaKernel->run(params, stream);
CHECK(cudaPeekAtLastError());
}
bool isValid(int s) const
{
return xmmaKernel->isValid(s);
}
private:
float mDqProbs;
FusedMHARunnerInt8* interface;
Fused_multihead_attention_params params;
int sm;
const FusedMultiHeadAttentionXMMAKernel* xmmaKernel;
size_t xmmas_m;
size_t xmmas_n;
size_t threads_per_cta;
};
FusedMHARunnerInt8::FusedMHARunnerInt8(const int numHeads, const int headSize, const int sm, const float dqProbs)
: MHARunner(DataType::kINT8, numHeads, headSize)
, mSm(sm)
, pimpl(new mhaImpl(this))
, mDqProbs(dqProbs)
{
}
void FusedMHARunnerInt8::setup(const int S, const int B)
{
MHARunner::setup(S, B);
pimpl->setup(S, B);
}
size_t FusedMHARunnerInt8::getWorkspaceSize() const
{
return 0;
}
void FusedMHARunnerInt8::deserialize(const void* data, size_t length)
{
MHARunner::deserialize(data, length);
setup(mS, mB);
}
void FusedMHARunnerInt8::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr,
const void* maskPtr, void* output, void* workspace, cudaStream_t stream)
{
pimpl->run(inputDesc, outputDesc, qkvPtr, maskPtr, output, workspace, stream);
}
void FusedMHARunnerInt8::run(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc,
const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream)
{
assert(false && "not implemented");
}
bool FusedMHARunnerInt8::isValid(int s) const
{
return pimpl->isValid(s);
}
class FusedMHARunnerFP16v2::mhaImpl
{
public:
mhaImpl(FusedMHARunnerFP16v2* interface)
: interface(interface)
, sm(interface->mSm)
, xmmaKernel(getXMMAKernelsV2(DATA_TYPE_FP16, sm))
{
assert((sm == kSM_72 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86) && "Unsupported architecture");
params.clear();
}
~mhaImpl() {}
size_t getPackedMaskSizeInBytes() const
{
// check that we initialized
assert(xmmas_m > 0);
assert(threads_per_cta > 0);
assert(interface->mB > 0);
return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t);
}
void setup(const int S, const int B)
{
// TODO these implementation details might be better centralized into the XMMA code, since they are needed in
// several places (also outside of this plugin)
size_t warps_m, warps_n, warps_k = 1;
if (S == 64 || S == 96 || S == 128)
{
warps_m = 2;
warps_n = 2;
}
else if (S == 256 || S == 192)
{
warps_m = 1;
warps_n = 4;
}
else if (S == 384)
{
warps_m = 1;
warps_n = 8;
}
else
{
assert(false && "Unsupporte seqlen");
}
// The number of threads per CTA.
threads_per_cta = warps_m * warps_n * warps_k * 32;
// The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension.
xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);
// The number of xmmas in the N dimension.
xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n);
const float scale_bmm1 = interface->mRsqrtHeadSize;
const float scale_softmax = 1.f; // Seems to be only required for int8
const float scale_bmm2 = 1.f;
Data_type scale_type = DATA_TYPE_FP16;
set_alpha(params.scale_bmm1, scale_bmm1, scale_type);
set_alpha(params.scale_softmax, scale_softmax, scale_type);
set_alpha(params.scale_bmm2, scale_bmm2, scale_type);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
// mLdQKV = 3 * B * mNumHeads * mHeadSize;
// mLdOut = B * mNumHeads * mHeadSize;
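// Strides are per token here (3*N*H halfs for the packed QKV input, N*H halfs for the
// output) rather than the padded leading dimensions mLdQKV/mLdOut, presumably because the
// v2 kernels consume the variable-seqlen packed layout indexed by cu_seqlens.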
params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half);
params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t);
params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half);
}
void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr,
const void* maskPtr, const void* cuSeqlenPtr, void* output, void* workspace, cudaStream_t stream)
{
params.qkv_ptr = const_cast<void*>(qkvPtr);
params.packed_mask_ptr = const_cast<void*>(maskPtr);
params.o_ptr = output;
params.cu_seqlens = static_cast<int*>(const_cast<void*>(cuSeqlenPtr));
xmmaKernel->run(params, stream);
CHECK(cudaPeekAtLastError());
}
bool isValid(int s) const
{
return xmmaKernel->isValid(s);
}
private:
FusedMHARunnerFP16v2* interface;
Fused_multihead_attention_params_v2 params;
int sm;
const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel;
size_t xmmas_m;
size_t xmmas_n;
size_t threads_per_cta;
};
FusedMHARunnerFP16v2::FusedMHARunnerFP16v2(const int numHeads, const int headSize, const int sm)
: MHARunner(DataType::kHALF, numHeads, headSize)
, mSm(sm)
, pimpl(new mhaImpl(this))
{
}
void FusedMHARunnerFP16v2::setup(const int S, const int B)
{
MHARunner::setup(S, B);
pimpl->setup(S, B);
}
size_t FusedMHARunnerFP16v2::getWorkspaceSize() const
{
return 0;
}
void FusedMHARunnerFP16v2::deserialize(const void* data, size_t length)
{
MHARunner::deserialize(data, length);
setup(mS, mB);
}
void FusedMHARunnerFP16v2::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc,
const void* qkvPtr, const void* maskPtr, void* output, void* workspace, cudaStream_t stream)
{
assert(false && "not implemented");
// pimpl->run(inputDesc, outputDesc, qkvPtr, maskPtr, output, workspace, stream);
}
void FusedMHARunnerFP16v2::run(const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace,
cudaStream_t stream)
{
pimpl->run(inputDesc[0], outputDesc[0], inputs[0], inputs[1], inputs[2], outputs[0], workspace, stream);
}
bool FusedMHARunnerFP16v2::isValid(int s) const
{
return pimpl->isValid(s);
}
// Int8 starts here: TODO refactor the duplicate stuff
class FusedMHARunnerInt8v2::mhaImpl
{
public:
mhaImpl(FusedMHARunnerInt8v2* interface)
: interface(interface)
, sm(interface->mSm)
, xmmaKernel(getXMMAKernelsV2(DATA_TYPE_INT8, sm))
, mDqProbs(interface->mDqProbs)
{
assert((sm == kSM_72 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86) && "Unsupported architecture");
params.clear();
}
~mhaImpl() {}
size_t getPackedMaskSizeInBytes() const
{
assert(xmmas_m > 0);
assert(threads_per_cta > 0);
assert(interface->mB > 0);
return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t);
}
void setup(const int S, const int B)
{
size_t warps_m, warps_n, warps_k = 1;
if (S == 128)
{
warps_m = 2;
warps_n = 2;
}
else if (S == 256 || S == 192)
{
warps_m = 1;
warps_n = 4;
}
else if (S == 384)
{
warps_m = 1;
warps_n = 8;
}
else
{
assert(false && "Unsupported seqlen.");
}
// The number of threads per CTA.
threads_per_cta = warps_m * warps_n * warps_k * 32;
// The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension.
xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);
// The number of xmmas in the N dimension.
xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
params.use_int8_scale_max = true;
params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t);
params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(int8_t);
params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(int8_t);
}
void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr,
const void* maskPtr, const void* cuSeqlenPtr, void* output, void* workspace, cudaStream_t stream)
{
float scaleQkv = inputDesc.scale;
float scaleCtx = outputDesc.scale;
float scaleBmm1 = scaleQkv * scaleQkv * interface->mRsqrtHeadSize;
float scaleBmm2 = mDqProbs * scaleQkv / scaleCtx;
float scaleSoftmax = 1.f / mDqProbs;
params.scale_bmm1 = reinterpret_cast<const uint32_t&>(scaleBmm1);
params.scale_bmm2 = reinterpret_cast<const uint32_t&>(scaleBmm2);
params.scale_softmax = reinterpret_cast<const uint32_t&>(scaleSoftmax);
params.enable_i2f_trick
= -double(1 << 22) * double(scaleBmm2) <= -128.f && double(1 << 22) * double(scaleBmm2) >= 127.f;
params.qkv_ptr = const_cast<void*>(qkvPtr);
params.packed_mask_ptr = const_cast<void*>(maskPtr);
params.use_int8_scale_max = true;
params.o_ptr = output;
params.cu_seqlens = static_cast<int*>(const_cast<void*>(cuSeqlenPtr));
xmmaKernel->run(params, stream);
CHECK(cudaPeekAtLastError());
}
bool isValid(int s) const
{
return xmmaKernel->isValid(s);
}
private:
float mDqProbs;
FusedMHARunnerInt8v2* interface;
Fused_multihead_attention_params_v2 params;
int sm;
const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel;
size_t xmmas_m;
size_t xmmas_n;
size_t threads_per_cta;
};
FusedMHARunnerInt8v2::FusedMHARunnerInt8v2(const int numHeads, const int headSize, const int sm, const float dqProbs)
: MHARunner(DataType::kINT8, numHeads, headSize)
, mSm(sm)
, pimpl(new mhaImpl(this))
, mDqProbs(dqProbs)
{
}
void FusedMHARunnerInt8v2::setup(const int S, const int B)
{
MHARunner::setup(S, B);
pimpl->setup(S, B);
}
size_t FusedMHARunnerInt8v2::getWorkspaceSize() const
{
return 0;
}
void FusedMHARunnerInt8v2::deserialize(const void* data, size_t length)
{
MHARunner::deserialize(data, length);
setup(mS, mB);
}
void FusedMHARunnerInt8v2::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc,
const void* qkvPtr, const void* maskPtr, void* output, void* workspace, cudaStream_t stream)
{
assert(false && "Not implemented");
}
void FusedMHARunnerInt8v2::run(const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace,
cudaStream_t stream)
{
pimpl->run(inputDesc[0], outputDesc[0], inputs[0], inputs[1], inputs[2], outputs[0], workspace, stream);
}
bool FusedMHARunnerInt8v2::isValid(int s) const
{
return pimpl->isValid(s);
}
} // namespace bert
|
fa0e95c42cfbac605aaecc5350cd744c9da2a5f7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stddef.h>
#include <limits.h>
#include <stdalign.h>
__global__ void f_kernel(int *d_flag){
int tid = blockIdx.x*blockDim.x+threadIdx.x;
int flag = 1;
if(threadIdx.x == 0)
*d_flag = 0;
}
void f(void){
int *d_flag;
int flag = 1;
hipMalloc((void**)&d_flag, sizeof(int));
hipMemcpy(d_flag, &flag, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( f_kernel), dim3(1),dim3(1), 0, 0, d_flag);
hipMemcpy(&flag, d_flag, sizeof(int), hipMemcpyDeviceToHost);
}
int main(void)
{
f();
//printf("%d\n", f());
return 0;
}
// Compiles successfully.
|
fa0e95c42cfbac605aaecc5350cd744c9da2a5f7.cu
|
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stddef.h>
#include <limits.h>
#include <stdalign.h>
__global__ void f_kernel(int *d_flag){
int tid = blockIdx.x*blockDim.x+threadIdx.x;
int flag = 1;
if(threadIdx.x == 0)
*d_flag = 0;
}
void f(void){
int *d_flag;
int flag = 1;
cudaMalloc((void**)&d_flag, sizeof(int));
cudaMemcpy(d_flag, &flag, sizeof(int), cudaMemcpyHostToDevice);
f_kernel<<<1,1>>>(d_flag);
cudaMemcpy(&flag, d_flag, sizeof(int), cudaMemcpyDeviceToHost);
}
int main(void)
{
f();
//printf("%d\n", f());
return 0;
}
// Compiles successfully.
|
435bf01227a1be12558f70b43584e982391c5de4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define M 512
#define P 256
#define N 128
#define K 8
// Implementation of the Matrix Multiplication taken from the Lecture Notes
__global__ void globalMatrixMultiplication(double A[M][P], double B[P][N], double C[M][N]) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
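// One thread per output element: the host launches M/K x N/K blocks of KxK threads, so M and
// N must be divisible by K and no bounds check is needed. C is accumulated with +=, so it
// relies on the zero-initialized C copied in by matrixMultiplicationHost.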
for (int k = 0; k < P; k++)
C[i][j] += A[i][k] * B[k][j];
}
void verifyMatrixMultiplication(double C[M][N], double check) {
int maxError = 0;
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
maxError += (int)abs(C[i][j] - check);
}
}
printf("Maximum Error = %d\n", maxError);
}
float matrixMultiplicationHost(bool verbose) {
double A[M][P], B[P][N], C[M][N];
for (int i = 0; i < M; i++) {
for (int j = 0; j < P; j++)
A[i][j] = 1.0;
}
for (int i = 0; i < P; i++) {
for (int j = 0; j < N; j++)
B[i][j] = 2.0;
}
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++)
C[i][j] = 0.0;
}
double (*dA)[P], (*dB)[N], (*dC)[N];
hipMalloc((void**)&dA, sizeof(double) * M * P);
hipMalloc((void**)&dB, sizeof(double) * P * N);
hipMalloc((void**)&dC, sizeof(double) * M * N);
hipMemcpy(dA, A, sizeof(double) * M * P, hipMemcpyHostToDevice);
hipMemcpy(dB, B, sizeof(double) * P * N, hipMemcpyHostToDevice);
hipMemcpy(dC, C, sizeof(double) * M * N, hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
dim3 threadPerBlocks(K, K);
dim3 numBlocks(M/K, N/K);
hipEventRecord(start);
hipLaunchKernelGGL(( globalMatrixMultiplication), dim3(numBlocks), dim3(threadPerBlocks), 0, 0, dA, dB, dC);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipMemcpy(C, dC, sizeof(double) * M * N, hipMemcpyDeviceToHost);
hipFree(dA);
hipFree(dB);
hipFree(dC);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
if (verbose) {
printf("Elapsed Time = %f milliseconds\n", elapsedTime);
verifyMatrixMultiplication(C, P * 1.0 * 2.0);
}
hipEventDestroy(start);
hipEventDestroy(stop);
return elapsedTime;
}
int main() {
int count = 100;
float averageTime = 0;
for (int i = 0; i < count; i++)
averageTime += matrixMultiplicationHost(false);
averageTime /= count;
printf("[GPU - Double] (Matrix Multiplication 2D - Global) Average Elapsed Time = %f ms\n", averageTime);
return 0;
}
|
435bf01227a1be12558f70b43584e982391c5de4.cu
|
#include <stdio.h>
#define M 512
#define P 256
#define N 128
#define K 8
// Implementation of the Matrix Multiplication taken from the Lecture Notes
__global__ void globalMatrixMultiplication(double A[M][P], double B[P][N], double C[M][N]) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
for (int k = 0; k < P; k++)
C[i][j] += A[i][k] * B[k][j];
}
void verifyMatrixMultiplication(double C[M][N], double check) {
int maxError = 0;
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
maxError += (int)abs(C[i][j] - check);
}
}
printf("Maximum Error = %d\n", maxError);
}
float matrixMultiplicationHost(bool verbose) {
double A[M][P], B[P][N], C[M][N];
for (int i = 0; i < M; i++) {
for (int j = 0; j < P; j++)
A[i][j] = 1.0;
}
for (int i = 0; i < P; i++) {
for (int j = 0; j < N; j++)
B[i][j] = 2.0;
}
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++)
C[i][j] = 0.0;
}
double (*dA)[P], (*dB)[N], (*dC)[N];
cudaMalloc((void**)&dA, sizeof(double) * M * P);
cudaMalloc((void**)&dB, sizeof(double) * P * N);
cudaMalloc((void**)&dC, sizeof(double) * M * N);
cudaMemcpy(dA, A, sizeof(double) * M * P, cudaMemcpyHostToDevice);
cudaMemcpy(dB, B, sizeof(double) * P * N, cudaMemcpyHostToDevice);
cudaMemcpy(dC, C, sizeof(double) * M * N, cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
dim3 threadPerBlocks(K, K);
dim3 numBlocks(M/K, N/K);
cudaEventRecord(start);
globalMatrixMultiplication<<<numBlocks, threadPerBlocks>>>(dA, dB, dC);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaMemcpy(C, dC, sizeof(double) * M * N, cudaMemcpyDeviceToHost);
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
if (verbose) {
printf("Elapsed Time = %f milliseconds\n", elapsedTime);
verifyMatrixMultiplication(C, P * 1.0 * 2.0);
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
return elapsedTime;
}
int main() {
int count = 100;
float averageTime = 0;
for (int i = 0; i < count; i++)
averageTime += matrixMultiplicationHost(false);
averageTime /= count;
printf("[GPU - Double] (Matrix Multiplication 2D - Global) Average Elapsed Time = %f ms\n", averageTime);
return 0;
}
|
cbd013211ae6499c9becc74c4fb4e4cb735eeb32.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <numa-interface.h>
//#include <migration.h>
#include <iostream>
#include <chrono>
#include <numaif.h>
#include <hwloc.h>
#include <cassert>
#include <cstdlib>
using namespace std;
using namespace chrono;
__global__ void doSomethingKernel(float *in, int sz) {
for(int inx = 0; inx < sz; inx+= 1024)
in[inx + threadIdx.x] += 5.0f;
}
int main(int argc, char* argv[]) {
assert(!(argc < 2));
int n_devices;
hipGetDeviceCount(&n_devices);
float* data;
int size = atoi(argv[1]);
//cout << "Cuda Devices: " << n_devices << " ";
// cout << "NUMA Devices: " << _get_num_nodes() << endl;
//numaGetDeviceCount(&n_devices);
//cout << "Total Devices: "<< n_devices << endl;
dim3 block(1024);
dim3 grid(1);
cout << size << " " << size*sizeof(float);
if ( numaMallocManaged((void**)&data, (size_t)size*sizeof(float), hipMemAttachGlobal, 0) != hipSuccess){
std::cout << "Malloc Fail: " << hipGetLastError() << std::endl;
return 0;
}
hipError_t e = hipGetLastError();
if (e != hipSuccess) cout << "ERROR1: " << e <<endl;
cout << " " << get_pos(data);
numaMemPrefetchAsync(data, size*sizeof(float),0);
e = hipGetLastError();
if (e != hipSuccess) cout << "ERROR2: " << e <<endl;
hipLaunchKernelGGL(( doSomethingKernel), dim3(grid), dim3(block), 0, 0, data, size);
hipDeviceSynchronize();
e = hipGetLastError();
if (e != hipSuccess) cout << "ERROR3: " << e <<endl;
//cout << "Result: " << data[3] << endl;
auto t1 = system_clock::now();
e = numaMemPrefetchAsync(data, size*sizeof(float), 2);
if ( e != hipSuccess) {
cout << "prefetch Fail: " << hipGetLastError() << endl;
} //D2H
hipDeviceSynchronize();
auto t2 = system_clock::now();
double mt = duration_cast<nanoseconds>(t2-t1).count();
cout <<";" << (size*sizeof(float))/mt << " " << get_pos(data);
t1 = system_clock::now();
e = numaMemPrefetchAsync(data, size*sizeof(float), 3);
if ( e != hipSuccess) {
cout << "prefetch Fail: " << hipGetLastError() << endl;
} //D2H
hipDeviceSynchronize();
t2 = system_clock::now();
mt = duration_cast<nanoseconds>(t2-t1).count();
cout <<";" << (size*sizeof(float))/mt << " " << get_pos(data);
numaMemPrefetchAsync(data, size*sizeof(float),0);
hipLaunchKernelGGL(( doSomethingKernel), dim3(grid), dim3(block), 0, 0, data, size);
hipDeviceSynchronize();
t1 = system_clock::now();
e = numaMemPrefetchAsync(data, size*sizeof(float), 3);
if ( e != hipSuccess) {
cout << "prefetch Fail: " << hipGetLastError() << endl;
} //D2H
hipDeviceSynchronize();
t2 = system_clock::now();
mt = duration_cast<nanoseconds>(t2-t1).count();
cout <<";" << (size*sizeof(float))/mt << " " << get_pos(data) << ";" << data[0] << endl;
numaFree(data);
}
|
cbd013211ae6499c9becc74c4fb4e4cb735eeb32.cu
|
#include <cuda_runtime.h>
#include <numa-interface.h>
//#include <migration.h>
#include <iostream>
#include <chrono>
#include <numaif.h>
#include <hwloc.h>
#include <cassert>
#include <cstdlib>
using namespace std;
using namespace chrono;
__global__ void doSomethingKernel(float *in, int sz) {
for(int inx = 0; inx < sz; inx+= 1024)
in[inx + threadIdx.x] += 5.0f;
}
int main(int argc, char* argv[]) {
assert(!(argc < 2));
int n_devices;
cudaGetDeviceCount(&n_devices);
float* data;
int size = atoi(argv[1]);
//cout << "Cuda Devices: " << n_devices << " ";
// cout << "NUMA Devices: " << _get_num_nodes() << endl;
//numaGetDeviceCount(&n_devices);
//cout << "Total Devices: "<< n_devices << endl;
dim3 block(1024);
dim3 grid(1);
cout << size << " " << size*sizeof(float);
if ( numaMallocManaged((void**)&data, (size_t)size*sizeof(float), cudaMemAttachGlobal, 0) != cudaSuccess){
std::cout << "Malloc Fail: " << cudaGetLastError() << std::endl;
return 0;
}
cudaError_t e = cudaGetLastError();
if (e != cudaSuccess) cout << "ERROR1: " << e <<endl;
cout << " " << get_pos(data);
numaMemPrefetchAsync(data, size*sizeof(float),0);
e = cudaGetLastError();
if (e != cudaSuccess) cout << "ERROR2: " << e <<endl;
doSomethingKernel<<<grid, block>>>(data, size);
cudaDeviceSynchronize();
e = cudaGetLastError();
if (e != cudaSuccess) cout << "ERROR3: " << e <<endl;
//cout << "Result: " << data[3] << endl;
auto t1 = system_clock::now();
e = numaMemPrefetchAsync(data, size*sizeof(float), 2);
if ( e != cudaSuccess) {
cout << "prefetch Fail: " << cudaGetLastError() << endl;
} //D2H
cudaDeviceSynchronize();
auto t2 = system_clock::now();
double mt = duration_cast<nanoseconds>(t2-t1).count();
cout <<";" << (size*sizeof(float))/mt << " " << get_pos(data);
t1 = system_clock::now();
e = numaMemPrefetchAsync(data, size*sizeof(float), 3);
if ( e != cudaSuccess) {
cout << "prefetch Fail: " << cudaGetLastError() << endl;
} //D2H
cudaDeviceSynchronize();
t2 = system_clock::now();
mt = duration_cast<nanoseconds>(t2-t1).count();
cout <<";" << (size*sizeof(float))/mt << " " << get_pos(data);
numaMemPrefetchAsync(data, size*sizeof(float),0);
doSomethingKernel<<<grid, block>>>(data, size);
cudaDeviceSynchronize();
t1 = system_clock::now();
e = numaMemPrefetchAsync(data, size*sizeof(float), 3);
if ( e != cudaSuccess) {
cout << "prefetch Fail: " << cudaGetLastError() << endl;
} //D2H
cudaDeviceSynchronize();
t2 = system_clock::now();
mt = duration_cast<nanoseconds>(t2-t1).count();
cout <<";" << (size*sizeof(float))/mt << " " << get_pos(data) << ";" << data[0] << endl;
numaFree(data);
}
|
321352f445d4c10383f393605508e9e9e42ad877.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/thresholded_relu_op.h"
namespace caffe2 {
namespace {
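// Thresholded ReLU: y = x if x > alpha, else 0. The gradient kernel passes dY through
// wherever the forward output was positive.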
template <typename T>
__global__ void ThresholdedReluKernel(const int N, const T* X, T* Y, T alpha_) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = X[i] > alpha_ ? X[i] : 0;
}
}
template <typename T>
__global__ void
ThresholdedReluGradientKernel(const int N, const T* Y, const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = Y[i] > 0 ? dY[i] : 0;
}
}
} // namespace
template <>
bool ThresholdedReluOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( ThresholdedReluKernel),
dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X.numel(), X.data<float>(), Y->template mutable_data<float>(), alpha_);
return true;
}
template <>
bool ThresholdedReluGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
CAFFE_ENFORCE_GT(Y.numel(), 0);
CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( ThresholdedReluGradientKernel),
dim3(CAFFE_GET_BLOCKS(Y.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
Y.numel(),
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(ThresholdedRelu, ThresholdedReluOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
ThresholdedReluGradient,
ThresholdedReluGradientOp<float, CUDAContext>);
} // namespace caffe2
|
321352f445d4c10383f393605508e9e9e42ad877.cu
|
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/thresholded_relu_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void ThresholdedReluKernel(const int N, const T* X, T* Y, T alpha_) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = X[i] > alpha_ ? X[i] : 0;
}
}
template <typename T>
__global__ void
ThresholdedReluGradientKernel(const int N, const T* Y, const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = Y[i] > 0 ? dY[i] : 0;
}
}
} // namespace
template <>
bool ThresholdedReluOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
ThresholdedReluKernel<<<
CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.numel(), X.data<float>(), Y->template mutable_data<float>(), alpha_);
return true;
}
template <>
bool ThresholdedReluGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
CAFFE_ENFORCE_GT(Y.numel(), 0);
CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
ThresholdedReluGradientKernel<<<
CAFFE_GET_BLOCKS(Y.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
Y.numel(),
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(ThresholdedRelu, ThresholdedReluOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
ThresholdedReluGradient,
ThresholdedReluGradientOp<float, CUDAContext>);
} // namespace caffe2
|
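For readers without a Caffe2 build, here is a minimal standalone CUDA sketch of the same forward/backward ThresholdedReLU math, using a plain grid-stride loop in place of CUDA_1D_KERNEL_LOOP; the launch shape and alpha value are illustrative, not Caffe2 defaults:

#include <cuda_runtime.h>
#include <cstdio>

__global__ void thresholded_relu(int n, const float* x, float* y, float alpha) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        y[i] = x[i] > alpha ? x[i] : 0.0f;            // same predicate as the operator above
}

__global__ void thresholded_relu_grad(int n, const float* y, const float* dy, float* dx) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        dx[i] = y[i] > 0.0f ? dy[i] : 0.0f;           // gradient flows only where the output survived
}

int main() {
    const int n = 8;
    float hx[n] = {-2.f, -0.5f, 0.5f, 1.f, 1.5f, 2.f, 3.f, 4.f};
    float hy[n];
    float *dx, *dy;
    cudaMalloc(&dx, n * sizeof(float));
    cudaMalloc(&dy, n * sizeof(float));
    cudaMemcpy(dx, hx, n * sizeof(float), cudaMemcpyHostToDevice);
    thresholded_relu<<<1, 128>>>(n, dx, dy, 1.0f);    // alpha = 1.0, chosen only for the demo
    cudaMemcpy(hy, dy, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i)
        std::printf("%g -> %g\n", hx[i], hy[i]);
    cudaFree(dx);
    cudaFree(dy);
    return 0;
}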
ddc0d6bfb775a56f08c23b984bdbf42f085e4007.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "zero/Zero.cuh"
extern "C"
__global__ void backwardExponentiationKernel (int batchSize, int numberEntriesPerInstance, int numberIterations, float *forward, float *chain, float *destination) {
// What's the first entry index within the instance that this thread should operate on?
int startIndexWithinInstance = blockIdx.y * (blockDim.x * numberIterations) + threadIdx.x * numberIterations;
// Continue if this index is smaller than the dimension of the instance.
if(startIndexWithinInstance < numberEntriesPerInstance) {
// What's the first entry index within the batch that this thread should operate on?
int startIndexWithinBatch = blockIdx.x * numberEntriesPerInstance + startIndexWithinInstance;
// Is this instance slot at or beyond the current batch size?
if(blockIdx.x >= batchSize) {
setToZero(destination, startIndexWithinBatch, numberIterations);
}
else {
for(int indexEntry = startIndexWithinBatch; indexEntry < startIndexWithinBatch + numberIterations; indexEntry++) {
destination[indexEntry] = chain[indexEntry] * forward[indexEntry];
}
}
}
}
|
ddc0d6bfb775a56f08c23b984bdbf42f085e4007.cu
|
#include "zero/Zero.cuh"
extern "C"
__global__ void backwardExponentiationKernel (int batchSize, int numberEntriesPerInstance, int numberIterations, float *forward, float *chain, float *destination) {
// What's the first entry index within the instance that this thread should operate on?
int startIndexWithinInstance = blockIdx.y * (blockDim.x * numberIterations) + threadIdx.x * numberIterations;
// Continue if this index is smaller than the dimension of the instance.
if(startIndexWithinInstance < numberEntriesPerInstance) {
// What's the first entry index within the batch that this thread should operate on?
int startIndexWithinBatch = blockIdx.x * numberEntriesPerInstance + startIndexWithinInstance;
// Is this instance slot at or beyond the current batch size?
if(blockIdx.x >= batchSize) {
setToZero(destination, startIndexWithinBatch, numberIterations);
}
else {
for(int indexEntry = startIndexWithinBatch; indexEntry < startIndexWithinBatch + numberIterations; indexEntry++) {
destination[indexEntry] = chain[indexEntry] * forward[indexEntry];
}
}
}
}
|
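The launcher for this kernel is not part of the file, but the indexing above implies a particular grid shape: blockIdx.x ranges over the maximum batch size (slots at or beyond batchSize are zeroed by the kernel), and blockIdx.y together with blockDim.x and numberIterations tiles the entries of one instance. A small host-side sketch of that geometry, with illustrative numbers:

#include <cstdio>

int main() {
    // Illustrative values; the real ones come from the calling framework.
    const int maximumBatchSize         = 32;
    const int numberEntriesPerInstance = 10000;
    const int threadsPerBlock          = 128;
    const int numberIterations         = 4;

    // Each block covers blockDim.x * numberIterations consecutive entries of one instance.
    const int entriesPerBlock   = threadsPerBlock * numberIterations;
    const int blocksPerInstance = (numberEntriesPerInstance + entriesPerBlock - 1) / entriesPerBlock;

    std::printf("grid = (%d, %d), block = %d\n",
                maximumBatchSize, blocksPerInstance, threadsPerBlock);
    return 0;
}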
03a63c0f2cc6b5c7fb32e4b8dc7711e8520a3a45.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// Created by joe on 4/6/16.
//
#include"prefix.h"
#include"testlib/vector_gen.h"
#include"CSR.h"
#include"FBCSR.h"
#include"FBCSR_krnl.h"
#include<hipsparse.h>
#define TOTALRUNS 1000
typedef void (*testFunc)(void);
int main(int argc, char **argv) {
if (argc < 2) {
fprintf(stderr, "USAGE: %s <matrix.csr> <opt>", argv[0]);
return -1;
}
int opt = 1;
if (argc > 2)
switch (argv[2][0]) {
case 'd':
opt = 0;
break;
case 'g':
opt = 2;
break;
default:
opt = 1;
}
csr c;
vector vec;
vector ref;
csr_readFile(argv[1], &c);
vector_gen(&vec, c.m, NULL);
vector_init(&ref, c.n);
// Make reference
{
vector cuv;
vector cur;
csr cum;
vector_init(&ref, c.n);
csr_memCpy(&c, &cum, cpyHostToDevice);
vector_memCpy(&vec, &cuv, cpyHostToDevice);
vector_memCpy(&ref, &cur, cpyHostToDevice);
hipsparseMatDescr_t descr = 0;
hipsparseHandle_t handle = 0;
hipsparseCreateMatDescr(&descr);
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
hipsparseCreate(&handle);
// hipify left the HYB calls untranslated; assuming hipSPARSE still ships the deprecated HYB API:
hipsparseHybMat_t hybMat;
hipsparseHybPartition_t hybPart = HIPSPARSE_HYB_PARTITION_AUTO;
hipsparseCreateHybMat(&hybMat);
hipsparseScsr2hyb(handle, cum.n, cum.m, descr, cum.val, cum.ptr, cum.indx, hybMat, 0, hybPart);
if (opt) {
hipEvent_t st, ed;
float eltime;
hipEventCreate(&st);
hipEventCreate(&ed);
hipEventRecord(st, 0);
float unit = 1;
for (int i = 0; i < TOTALRUNS; ++i) {
hipsparseShybmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &unit, descr, hybMat, cuv.val, &unit, cur.val);
}
hipEventRecord(ed, 0);
hipEventSynchronize(ed);
hipEventElapsedTime(&eltime, st, ed);
if (opt == 1)
printf("%f\n", eltime / TOTALRUNS);
else
printf("%f\n", 2 * c.nnz / (eltime * (1000000 / TOTALRUNS)));
hipEventDestroy(st);
hipEventDestroy(ed);
}
vector_destroy(&ref);
vector_memCpy(&cur, &ref, cpyDeviceToHost);
csr_CUDA_destroy(&cum);
vector_CUDA_destroy(&cuv);
vector_CUDA_destroy(&cur);
hipsparseDestroy(handle);
hipsparseDestroyMatDescr(descr);
hipsparseDestroyHybMat(hybMat);
}
csr_destroy(&c);
vector_destroy(&vec);
vector_destroy(&ref);
return 0;
}
|
03a63c0f2cc6b5c7fb32e4b8dc7711e8520a3a45.cu
|
//
// Created by joe on 4/6/16.
//
#include"prefix.h"
#include"testlib/vector_gen.h"
#include"CSR.h"
#include"FBCSR.h"
#include"FBCSR_krnl.h"
#include<cusparse.h>
#define TOTALRUNS 1000
typedef void (*testFunc)(void);
int main(int argc, char **argv) {
if (argc < 2) {
fprintf(stderr, "USAGE: %s <matrix.csr> <opt>", argv[0]);
return -1;
}
int opt = 1;
if (argc > 2)
switch (argv[2][0]) {
case 'd':
opt = 0;
break;
case 'g':
opt = 2;
break;
default:
opt = 1;
}
csr c;
vector vec;
vector ref;
csr_readFile(argv[1], &c);
vector_gen(&vec, c.m, NULL);
vector_init(&ref, c.n);
// Make reference
{
vector cuv;
vector cur;
csr cum;
vector_init(&ref, c.n);
csr_memCpy(&c, &cum, cpyHostToDevice);
vector_memCpy(&vec, &cuv, cpyHostToDevice);
vector_memCpy(&ref, &cur, cpyHostToDevice);
cusparseMatDescr_t descr = 0;
cusparseHandle_t handle = 0;
cusparseCreateMatDescr(&descr);
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
cusparseCreate(&handle);
cusparseHybMat_t hybMat;
cusparseHybPartition_t hybPart = CUSPARSE_HYB_PARTITION_AUTO;
cusparseCreateHybMat(&hybMat);
cusparseScsr2hyb(handle, cum.n, cum.m, descr, cum.val, cum.ptr, cum.indx, hybMat, 0, hybPart);
if (opt) {
cudaEvent_t st, ed;
float eltime;
cudaEventCreate(&st);
cudaEventCreate(&ed);
cudaEventRecord(st, 0);
float unit = 1;
for (int i = 0; i < TOTALRUNS; ++i) {
cusparseShybmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &unit, descr, hybMat, cuv.val, &unit, cur.val);
}
cudaEventRecord(ed, 0);
cudaEventSynchronize(ed);
cudaEventElapsedTime(&eltime, st, ed);
if (opt == 1)
printf("%f\n", eltime / TOTALRUNS);
else
printf("%f\n", 2 * c.nnz / (eltime * (1000000 / TOTALRUNS)));
cudaEventDestroy(st);
cudaEventDestroy(ed);
}
vector_destroy(&ref);
vector_memCpy(&cur, &ref, cpyDeviceToHost);
csr_CUDA_destroy(&cum);
vector_CUDA_destroy(&cuv);
vector_CUDA_destroy(&cur);
cusparseDestroy(handle);
cusparseDestroyMatDescr(descr);
cusparseDestroyHybMat(hybMat);
}
csr_destroy(&c);
vector_destroy(&vec);
vector_destroy(&ref);
return 0;
}
|
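About the number printed in the 'g' branch above: cudaEventElapsedTime reports milliseconds for all TOTALRUNS launches, and one SpMV costs roughly 2*nnz floating-point operations, so 2 * c.nnz / (eltime * (1000000 / TOTALRUNS)) comes out in GFLOP/s. A tiny sketch of the same arithmetic with made-up inputs:

#include <cstdio>

int main() {
    const double nnz       = 5.0e6;    // illustrative non-zero count
    const double eltime_ms = 120.0;    // elapsed milliseconds for all TOTALRUNS launches
    const int    TOTALRUNS = 1000;
    // per-launch seconds = eltime_ms / TOTALRUNS / 1000
    // GFLOP/s = 2*nnz / (per-launch seconds * 1e9) = 2*nnz / (eltime_ms * (1e6 / TOTALRUNS))
    std::printf("%f GFLOP/s\n", 2.0 * nnz / (eltime_ms * (1000000.0 / TOTALRUNS)));
    return 0;
}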
84c4023ca1bebb478258ce7f285244416f673696.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h> // atoi()
#include <hip/hip_runtime.h>
__device__ unsigned dfun(unsigned id) {
printf("%u\n", id); // id is unsigned, so use %u
if (id > 10 && id < 15) return dfun(id+1);
else return 0;
}
__global__ void dkernel(unsigned n) {
dfun(n);
}
#define BLOCKSIZE 256
int main(int nn, char *str[]) {
unsigned N = atoi(str[1]);
hipLaunchKernelGGL(( dkernel), dim3(1), dim3(BLOCKSIZE), 0, 0, N);
hipDeviceSynchronize();
return 0;
}
|
84c4023ca1bebb478258ce7f285244416f673696.cu
|
#include <stdio.h>
#include <stdlib.h> // atoi()
#include <cuda.h>
__device__ unsigned dfun(unsigned id) {
printf("%u\n", id); // id is unsigned, so use %u
if (id > 10 && id < 15) return dfun(id+1);
else return 0;
}
__global__ void dkernel(unsigned n) {
dfun(n);
}
#define BLOCKSIZE 256
int main(int nn, char *str[]) {
unsigned N = atoi(str[1]);
dkernel<<<1, BLOCKSIZE>>>(N);
cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated
return 0;
}
|
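A quick reading of the pair above: every one of the BLOCKSIZE threads calls dfun(N), and the recursion continues only while 10 < id < 15, so for N = 12 each thread prints 12, 13, 14, 15 before unwinding. A host-only mirror of that control flow, assuming nothing beyond the standard library:

#include <cstdio>

// Host-side mirror of dfun's control flow; not part of the benchmark itself.
static unsigned dfun_host(unsigned id) {
    std::printf("%u\n", id);
    if (id > 10 && id < 15) return dfun_host(id + 1);
    return 0;
}

int main() {
    dfun_host(12);   // prints 12, 13, 14, 15 -- what each GPU thread would print
    return 0;
}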
17d22b4885f34b93d229796e58d87283643f8022.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "MakeMerges.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
int *mergeWith = NULL;
hipMalloc(&mergeWith, XSIZE*YSIZE*sizeof(int)); // size argument is in bytes, not elements
int *offsets = NULL;
hipMalloc(&offsets, XSIZE*YSIZE*sizeof(int));
int *mis = NULL;
hipMalloc(&mis, XSIZE*YSIZE*sizeof(int));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(MakeMerges, dim3(gridBlock), dim3(threadBlock), 0, 0, size, mergeWith, offsets, mis);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(MakeMerges, dim3(gridBlock), dim3(threadBlock), 0, 0, size, mergeWith, offsets, mis);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(MakeMerges, dim3(gridBlock), dim3(threadBlock), 0, 0, size, mergeWith, offsets, mis);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
17d22b4885f34b93d229796e58d87283643f8022.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "MakeMerges.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
int *mergeWith = NULL;
cudaMalloc(&mergeWith, XSIZE*YSIZE*sizeof(int)); // size argument is in bytes, not elements
int *offsets = NULL;
cudaMalloc(&offsets, XSIZE*YSIZE*sizeof(int));
int *mis = NULL;
cudaMalloc(&mis, XSIZE*YSIZE*sizeof(int));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
MakeMerges<<<gridBlock,threadBlock>>>(size,mergeWith,offsets,mis);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
MakeMerges<<<gridBlock,threadBlock>>>(size,mergeWith,offsets,mis);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
MakeMerges<<<gridBlock,threadBlock>>>(size,mergeWith,offsets,mis);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
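The two while loops in the harness above simply round XSIZE and YSIZE up to the next multiple of the chosen block dimensions before deriving the grid. The same rounding can be written with a ceil division; this helper is only illustrative:

#include <cstdio>

// Round n up to the next multiple of b -- equivalent to the increment-until-divisible loops above.
static int round_up(int n, int b) { return ((n + b - 1) / b) * b; }

int main() {
    const int XSIZE = 240, YSIZE = 240, BLOCKX = 32, BLOCKY = 32;  // one matrix/block combination above
    const int iXSIZE = round_up(XSIZE, BLOCKX);
    const int iYSIZE = round_up(YSIZE, BLOCKY);
    std::printf("grid = (%d, %d)\n", iXSIZE / BLOCKX, iYSIZE / BLOCKY);  // (8, 8) for this combination
    return 0;
}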
b90444f63427d7d4255b4266c2a3c30be9618933.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// KERNEL FUNCTION
__global__ void kernel() {
// COMMON VARIABLES
fp* d_in;
int rot_row;
int rot_col;
int in2_rowlow;
int in2_collow;
int ic;
int jc;
int jp1;
int ja1, ja2;
int ip1;
int ia1, ia2;
int ja, jb;
int ia, ib;
float s;
int i;
int j;
int row;
int col;
int ori_row;
int ori_col;
int position;
float sum;
int pos_ori;
float temp;
float temp2;
int location;
int cent;
int tMask_row;
int tMask_col;
float largest_value_current = 0;
float largest_value = 0;
int largest_coordinate_current = 0;
int largest_coordinate = 0;
float fin_max_val = 0;
int fin_max_coo = 0;
int largest_row;
int largest_col;
int offset_row;
int offset_col;
__shared__
float in_partial_sum[51]; // WATCH THIS !!! HARDCODED VALUE
__shared__
float in_sqr_partial_sum[51]; // WATCH THIS !!! HARDCODED VALUE
__shared__
float in_final_sum;
__shared__
float in_sqr_final_sum;
float mean;
float mean_sqr;
float variance;
float deviation;
__shared__
float denomT;
__shared__
float par_max_val[131]; // WATCH THIS !!! HARDCODED VALUE
__shared__
int par_max_coo[131]; // WATCH THIS !!! HARDCODED VALUE
int pointer;
__shared__
float d_in_mod_temp[2601];
int ori_pointer;
int loc_pointer;
// THREAD PARAMETERS
int bx = blockIdx.x; // get current horizontal block index (0-n)
int tx = threadIdx.x; // get current horizontal thread index (0-n)
int ei_new;
// GENERATE TEMPLATE
// generate templates based on the first frame only
if (d_common_change.frame_no == 0) {
// GET POINTER TO TEMPLATE FOR THE POINT
// pointers to: current template for current point
d_in = &d_unique[bx].d_T[d_unique[bx].in_pointer];
// UPDATE ROW LOC AND COL LOC
// update temporary endo/epi row/col coordinates (in each block corresponding to point, narrow work to one thread)
ei_new = tx;
if (ei_new == 0) {
// update temporary row/col coordinates
pointer = d_unique[bx].point_no * d_common.no_frames + d_common_change.frame_no;
d_unique[bx].d_tRowLoc[pointer] = d_unique[bx].d_Row[d_unique[bx].point_no];
d_unique[bx].d_tColLoc[pointer] = d_unique[bx].d_Col[d_unique[bx].point_no];
}
// CREATE TEMPLATES
// work
ei_new = tx;
while (ei_new < d_common.in_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in_rows == 0) {
row = d_common.in_rows - 1;
col = col - 1;
}
// figure out row/col location in corresponding new template area in image and give to every thread (get top left corner and progress down and right)
ori_row = d_unique[bx].d_Row[d_unique[bx].point_no] - 25 + row - 1;
ori_col = d_unique[bx].d_Col[d_unique[bx].point_no] - 25 + col - 1;
ori_pointer = ori_col * d_common.frame_rows + ori_row;
// update template
d_in[col * d_common.in_rows + row] = d_common_change.d_frame[ori_pointer];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
}
// PROCESS POINTS
// process points in all frames except for the first one
if (d_common_change.frame_no != 0) {
// SELECTION
in2_rowlow = d_unique[bx].d_Row[d_unique[bx].point_no] - d_common.sSize; // (1 to n+1)
in2_collow = d_unique[bx].d_Col[d_unique[bx].point_no] - d_common.sSize;
// work
ei_new = tx;
while (ei_new < d_common.in2_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_rows == 0) {
row = d_common.in2_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + in2_rowlow - 1;
ori_col = col + in2_collow - 1;
d_unique[bx].d_in2[ei_new] = d_common_change.d_frame[ori_col * d_common.frame_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// CONVOLUTION
// ROTATION
// variables
d_in = &d_unique[bx].d_T[d_unique[bx].in_pointer];
// work
ei_new = tx;
while (ei_new < d_common.in_elem) {
// figure out row/col location in padded array
row = (ei_new + 1) % d_common.in_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in_rows == 0) {
row = d_common.in_rows - 1;
col = col - 1;
}
// execution
rot_row = (d_common.in_rows - 1) - row;
rot_col = (d_common.in_rows - 1) - col;
d_in_mod_temp[ei_new] = d_in[rot_col * d_common.in_rows + rot_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// ACTUAL CONVOLUTION
// work
ei_new = tx;
while (ei_new < d_common.conv_elem) {
// figure out row/col location in array
ic = (ei_new + 1) % d_common.conv_rows; // (1-n)
jc = (ei_new + 1) / d_common.conv_rows + 1; // (1-n)
if ((ei_new + 1) % d_common.conv_rows == 0) {
ic = d_common.conv_rows;
jc = jc - 1;
}
j = jc + d_common.joffset;
jp1 = j + 1;
if (d_common.in2_cols < jp1) {
ja1 = jp1 - d_common.in2_cols;
} else {
ja1 = 1;
}
if (d_common.in_cols < j) {
ja2 = d_common.in_cols;
} else {
ja2 = j;
}
i = ic + d_common.ioffset;
ip1 = i + 1;
if (d_common.in2_rows < ip1) {
ia1 = ip1 - d_common.in2_rows;
} else {
ia1 = 1;
}
if (d_common.in_rows < i) {
ia2 = d_common.in_rows;
} else {
ia2 = i;
}
s = 0;
for (ja = ja1; ja <= ja2; ja++) {
jb = jp1 - ja;
for (ia = ia1; ia <= ia2; ia++) {
ib = ip1 - ia;
s = s
+ d_in_mod_temp[d_common.in_rows * (ja - 1) + ia - 1]
* d_unique[bx].d_in2[d_common.in2_rows * (jb - 1) + ib - 1];
}
}
//d_unique[bx].d_conv[d_common.conv_rows*(jc-1)+ic-1] = s;
d_unique[bx].d_conv[ei_new] = s;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// CUMULATIVE SUM
// PAD ARRAY, VERTICAL CUMULATIVE SUM
// PAD ARRAY
// work
ei_new = tx;
while (ei_new < d_common.in2_pad_cumv_elem) {
// figure out row/col location in padded array
row = (ei_new + 1) % d_common.in2_pad_cumv_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_pad_cumv_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_pad_cumv_rows == 0) {
row = d_common.in2_pad_cumv_rows - 1;
col = col - 1;
}
// execution
if (row > (d_common.in2_pad_add_rows - 1)
&& // do if has numbers in original array
row < (d_common.in2_pad_add_rows + d_common.in2_rows)
&& col > (d_common.in2_pad_add_cols - 1)
&& col < (d_common.in2_pad_add_cols + d_common.in2_cols)) {
ori_row = row - d_common.in2_pad_add_rows;
ori_col = col - d_common.in2_pad_add_cols;
d_unique[bx].d_in2_pad_cumv[ei_new] = d_unique[bx].d_in2[ori_col * d_common.in2_rows
+ ori_row];
} else { // do if otherwise
d_unique[bx].d_in2_pad_cumv[ei_new] = 0;
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// VERTICAL CUMULATIVE SUM
//work
ei_new = tx;
while (ei_new < d_common.in2_pad_cumv_cols) {
// figure out column position
pos_ori = ei_new * d_common.in2_pad_cumv_rows;
// variables
sum = 0;
// loop through all rows
for (position = pos_ori; position < pos_ori + d_common.in2_pad_cumv_rows;
position = position + 1) {
d_unique[bx].d_in2_pad_cumv[position] = d_unique[bx].d_in2_pad_cumv[position] + sum;
sum = d_unique[bx].d_in2_pad_cumv[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION
// work
ei_new = tx;
while (ei_new < d_common.in2_pad_cumv_sel_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_pad_cumv_sel_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_pad_cumv_sel_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_pad_cumv_sel_rows == 0) {
row = d_common.in2_pad_cumv_sel_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel_collow - 1;
d_unique[bx].d_in2_pad_cumv_sel[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col
* d_common.in2_pad_cumv_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
// SELECTION 2
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_sub_cumh_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_sub_cumh_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_sub_cumh_rows == 0) {
row = d_common.in2_sub_cumh_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel2_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel2_collow - 1;
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col
* d_common.in2_pad_cumv_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SUBTRACTION
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_elem) {
// subtract
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv_sel[ei_new]
- d_unique[bx].d_in2_sub_cumh[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// HORIZONTAL CUMULATIVE SUM
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_rows) {
// figure out row position
pos_ori = ei_new;
// variables
sum = 0;
// loop through all rows
for (position = pos_ori; position < pos_ori + d_common.in2_sub_cumh_elem;
position = position + d_common.in2_sub_cumh_rows) {
d_unique[bx].d_in2_sub_cumh[position] = d_unique[bx].d_in2_sub_cumh[position] + sum;
sum = d_unique[bx].d_in2_sub_cumh[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_sel_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_sub_cumh_sel_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_sub_cumh_sel_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_sub_cumh_sel_rows == 0) {
row = d_common.in2_sub_cumh_sel_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel_collow - 1;
d_unique[bx].d_in2_sub_cumh_sel[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col
* d_common.in2_sub_cumh_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION 2, SUBTRACTION
// SELECTION 2
// work
ei_new = tx;
while (ei_new < d_common.in2_sub2_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_sub2_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_sub2_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_sub2_rows == 0) {
row = d_common.in2_sub2_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel2_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel2_collow - 1;
d_unique[bx].d_in2_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col
* d_common.in2_sub_cumh_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SUBTRACTION
// work
ei_new = tx;
while (ei_new < d_common.in2_sub2_elem) {
// subtract
d_unique[bx].d_in2_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh_sel[ei_new]
- d_unique[bx].d_in2_sub2[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// CUMULATIVE SUM 2
// MULTIPLICATION
// work
ei_new = tx;
while (ei_new < d_common.in2_sqr_elem) {
temp = d_unique[bx].d_in2[ei_new];
d_unique[bx].d_in2_sqr[ei_new] = temp * temp;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// PAD ARRAY, VERTICAL CUMULATIVE SUM
// PAD ARRAY
// work
ei_new = tx;
while (ei_new < d_common.in2_pad_cumv_elem) {
// figure out row/col location in padded array
row = (ei_new + 1) % d_common.in2_pad_cumv_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_pad_cumv_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_pad_cumv_rows == 0) {
row = d_common.in2_pad_cumv_rows - 1;
col = col - 1;
}
// execution
if (row > (d_common.in2_pad_add_rows - 1)
&& // do if has numbers in original array
row < (d_common.in2_pad_add_rows + d_common.in2_sqr_rows)
&& col > (d_common.in2_pad_add_cols - 1)
&& col < (d_common.in2_pad_add_cols + d_common.in2_sqr_cols)) {
ori_row = row - d_common.in2_pad_add_rows;
ori_col = col - d_common.in2_pad_add_cols;
d_unique[bx].d_in2_pad_cumv[ei_new] = d_unique[bx].d_in2_sqr[ori_col * d_common.in2_sqr_rows
+ ori_row];
} else { // do if otherwise
d_unique[bx].d_in2_pad_cumv[ei_new] = 0;
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// VERTICAL CUMULATIVE SUM
//work
ei_new = tx;
while (ei_new < d_common.in2_pad_cumv_cols) {
// figure out column position
pos_ori = ei_new * d_common.in2_pad_cumv_rows;
// variables
sum = 0;
// loop through all rows
for (position = pos_ori; position < pos_ori + d_common.in2_pad_cumv_rows;
position = position + 1) {
d_unique[bx].d_in2_pad_cumv[position] = d_unique[bx].d_in2_pad_cumv[position] + sum;
sum = d_unique[bx].d_in2_pad_cumv[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION
// work
ei_new = tx;
while (ei_new < d_common.in2_pad_cumv_sel_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_pad_cumv_sel_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_pad_cumv_sel_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_pad_cumv_sel_rows == 0) {
row = d_common.in2_pad_cumv_sel_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel_collow - 1;
d_unique[bx].d_in2_pad_cumv_sel[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col
* d_common.in2_pad_cumv_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
// SELECTION 2
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_sub_cumh_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_sub_cumh_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_sub_cumh_rows == 0) {
row = d_common.in2_sub_cumh_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel2_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel2_collow - 1;
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col
* d_common.in2_pad_cumv_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SUBTRACTION
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_elem) {
// subtract
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv_sel[ei_new]
- d_unique[bx].d_in2_sub_cumh[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// HORIZONTAL CUMULATIVE SUM
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_rows) {
// figure out row position
pos_ori = ei_new;
// variables
sum = 0;
// loop through all rows
for (position = pos_ori; position < pos_ori + d_common.in2_sub_cumh_elem;
position = position + d_common.in2_sub_cumh_rows) {
d_unique[bx].d_in2_sub_cumh[position] = d_unique[bx].d_in2_sub_cumh[position] + sum;
sum = d_unique[bx].d_in2_sub_cumh[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_sel_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_sub_cumh_sel_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_sub_cumh_sel_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_sub_cumh_sel_rows == 0) {
row = d_common.in2_sub_cumh_sel_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel_collow - 1;
d_unique[bx].d_in2_sub_cumh_sel[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col
* d_common.in2_sub_cumh_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION 2, SUBTRACTION
// SELECTION 2
// work
ei_new = tx;
while (ei_new < d_common.in2_sub2_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_sub2_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_sub2_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_sub2_rows == 0) {
row = d_common.in2_sub2_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel2_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel2_collow - 1;
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col
* d_common.in2_sub_cumh_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SUBTRACTION
// work
ei_new = tx;
while (ei_new < d_common.in2_sub2_elem) {
// subtract
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh_sel[ei_new]
- d_unique[bx].d_in2_sqr_sub2[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// FINAL
// DENOMINATOR A SAVE RESULT IN CUMULATIVE SUM A2
// work
ei_new = tx;
while (ei_new < d_common.in2_sub2_elem) {
temp = d_unique[bx].d_in2_sub2[ei_new];
temp2 = d_unique[bx].d_in2_sqr_sub2[ei_new] - (temp * temp / d_common.in_elem);
if (temp2 < 0) {
temp2 = 0;
}
d_unique[bx].d_in2_sqr_sub2[ei_new] = sqrt(temp2);
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// MULTIPLICATION
// work
ei_new = tx;
while (ei_new < d_common.in_sqr_elem) {
temp = d_in[ei_new];
d_unique[bx].d_in_sqr[ei_new] = temp * temp;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// IN SUM
// work
ei_new = tx;
while (ei_new < d_common.in_cols) {
sum = 0;
for (i = 0; i < d_common.in_rows; i++) {
sum = sum + d_in[ei_new * d_common.in_rows + i];
}
in_partial_sum[ei_new] = sum;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// IN_SQR SUM
ei_new = tx;
while (ei_new < d_common.in_sqr_rows) {
sum = 0;
for (i = 0; i < d_common.in_sqr_cols; i++) {
sum = sum + d_unique[bx].d_in_sqr[ei_new + d_common.in_sqr_rows * i];
}
in_sqr_partial_sum[ei_new] = sum;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// FINAL SUMMATION
if (tx == 0) {
in_final_sum = 0;
for (i = 0; i < d_common.in_cols; i++) {
in_final_sum = in_final_sum + in_partial_sum[i];
}
} else if (tx == 1) {
in_sqr_final_sum = 0;
for (i = 0; i < d_common.in_sqr_cols; i++) {
in_sqr_final_sum = in_sqr_final_sum + in_sqr_partial_sum[i];
}
}
// SYNCHRONIZE THREADS
__syncthreads();
// DENOMINATOR T
if (tx == 0) {
mean = in_final_sum / d_common.in_elem; // gets mean (average) value of element in ROI
mean_sqr = mean * mean;
variance = (in_sqr_final_sum / d_common.in_elem) - mean_sqr; // gets variance of ROI
deviation = sqrt(variance); // gets standard deviation of ROI
denomT = sqrt(float(d_common.in_elem - 1)) * deviation;
}
// SYNCHRONIZE THREADS
__syncthreads();
// DENOMINATOR SAVE RESULT IN CUMULATIVE SUM A2
// work
ei_new = tx;
while (ei_new < d_common.in2_sub2_elem) {
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sqr_sub2[ei_new] * denomT;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// NUMERATOR SAVE RESULT IN CONVOLUTION
// work
ei_new = tx;
while (ei_new < d_common.conv_elem) {
d_unique[bx].d_conv[ei_new] = d_unique[bx].d_conv[ei_new]
- d_unique[bx].d_in2_sub2[ei_new] * in_final_sum / d_common.in_elem;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// CORRELATION SAVE RESULT IN CUMULATIVE SUM A2
// work
ei_new = tx;
while (ei_new < d_common.in2_sub2_elem) {
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_conv[ei_new]
/ d_unique[bx].d_in2_sqr_sub2[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// TEMPLATE MASK CREATE
cent = d_common.sSize + d_common.tSize + 1;
if (d_common_change.frame_no == 0) {
tMask_row = cent + d_unique[bx].d_Row[d_unique[bx].point_no]
- d_unique[bx].d_Row[d_unique[bx].point_no] - 1;
tMask_col = cent + d_unique[bx].d_Col[d_unique[bx].point_no]
- d_unique[bx].d_Col[d_unique[bx].point_no] - 1;
} else {
pointer = d_common_change.frame_no - 1 + d_unique[bx].point_no * d_common.no_frames;
tMask_row = cent + d_unique[bx].d_tRowLoc[pointer] - d_unique[bx].d_Row[d_unique[bx].point_no]
- 1;
tMask_col = cent + d_unique[bx].d_tColLoc[pointer] - d_unique[bx].d_Col[d_unique[bx].point_no]
- 1;
}
//work
ei_new = tx;
while (ei_new < d_common.tMask_elem) {
location = tMask_col * d_common.tMask_rows + tMask_row;
if (ei_new == location) {
d_unique[bx].d_tMask[ei_new] = 1;
} else {
d_unique[bx].d_tMask[ei_new] = 0;
}
//go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// MASK CONVOLUTION
// work
ei_new = tx;
while (ei_new < d_common.mask_conv_elem) {
// figure out row/col location in array
ic = (ei_new + 1) % d_common.mask_conv_rows; // (1-n)
jc = (ei_new + 1) / d_common.mask_conv_rows + 1; // (1-n)
if ((ei_new + 1) % d_common.mask_conv_rows == 0) {
ic = d_common.mask_conv_rows;
jc = jc - 1;
}
j = jc + d_common.mask_conv_joffset;
jp1 = j + 1;
if (d_common.mask_cols < jp1) {
ja1 = jp1 - d_common.mask_cols;
} else {
ja1 = 1;
}
if (d_common.tMask_cols < j) {
ja2 = d_common.tMask_cols;
} else {
ja2 = j;
}
i = ic + d_common.mask_conv_ioffset;
ip1 = i + 1;
if (d_common.mask_rows < ip1) {
ia1 = ip1 - d_common.mask_rows;
} else {
ia1 = 1;
}
if (d_common.tMask_rows < i) {
ia2 = d_common.tMask_rows;
} else {
ia2 = i;
}
s = 0;
for (ja = ja1; ja <= ja2; ja++) {
jb = jp1 - ja;
for (ia = ia1; ia <= ia2; ia++) {
ib = ip1 - ia;
s = s + d_unique[bx].d_tMask[d_common.tMask_rows * (ja - 1) + ia - 1] * 1;
}
}
// //d_unique[bx].d_mask_conv[d_common.mask_conv_rows*(jc-1)+ic-1] = s;
d_unique[bx].d_mask_conv[ei_new] = d_unique[bx].d_in2_sqr_sub2[ei_new] * s;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// MAXIMUM VALUE
// INITIAL SEARCH
ei_new = tx;
while (ei_new < d_common.mask_conv_rows) {
for (i = 0; i < d_common.mask_conv_cols; i++) {
largest_coordinate_current = ei_new * d_common.mask_conv_rows + i;
largest_value_current = abs(d_unique[bx].d_mask_conv[largest_coordinate_current]);
if (largest_value_current > largest_value) {
largest_coordinate = largest_coordinate_current;
largest_value = largest_value_current;
}
}
par_max_coo[ei_new] = largest_coordinate;
par_max_val[ei_new] = largest_value;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// FINAL SEARCH
if (tx == 0) {
for (i = 0; i < d_common.mask_conv_rows; i++) {
if (par_max_val[i] > fin_max_val) {
fin_max_val = par_max_val[i];
fin_max_coo = par_max_coo[i];
}
}
// convert coordinate to row/col form
largest_row = (fin_max_coo + 1) % d_common.mask_conv_rows - 1; // (0-n) row
largest_col = (fin_max_coo + 1) / d_common.mask_conv_rows; // (0-n) column
if ((fin_max_coo + 1) % d_common.mask_conv_rows == 0) {
largest_row = d_common.mask_conv_rows - 1;
largest_col = largest_col - 1;
}
// calculate offset
largest_row = largest_row + 1; // compensate to match MATLAB format (1-n)
largest_col = largest_col + 1; // compensate to match MATLAB format (1-n)
offset_row = largest_row - d_common.in_rows - (d_common.sSize - d_common.tSize);
offset_col = largest_col - d_common.in_cols - (d_common.sSize - d_common.tSize);
pointer = d_common_change.frame_no + d_unique[bx].point_no * d_common.no_frames;
d_unique[bx].d_tRowLoc[pointer] = d_unique[bx].d_Row[d_unique[bx].point_no] + offset_row;
d_unique[bx].d_tColLoc[pointer] = d_unique[bx].d_Col[d_unique[bx].point_no] + offset_col;
}
// SYNCHRONIZE THREADS
__syncthreads();
}
// COORDINATE AND TEMPLATE UPDATE
// time19 = clock();
// if this is the last frame in the batch (every 10 frames), update the template
if (d_common_change.frame_no != 0 && (d_common_change.frame_no) % 10 == 0) {
// update coordinate
loc_pointer = d_unique[bx].point_no * d_common.no_frames + d_common_change.frame_no;
d_unique[bx].d_Row[d_unique[bx].point_no] = d_unique[bx].d_tRowLoc[loc_pointer];
d_unique[bx].d_Col[d_unique[bx].point_no] = d_unique[bx].d_tColLoc[loc_pointer];
// work
ei_new = tx;
while (ei_new < d_common.in_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in_rows == 0) {
row = d_common.in_rows - 1;
col = col - 1;
}
// figure out row/col location in corresponding new template area in image and give to every thread (get top left corner and progress down and right)
ori_row = d_unique[bx].d_Row[d_unique[bx].point_no] - 25 + row - 1;
ori_col = d_unique[bx].d_Col[d_unique[bx].point_no] - 25 + col - 1;
ori_pointer = ori_col * d_common.frame_rows + ori_row;
// update template
d_in[ei_new] = d_common.alpha * d_in[ei_new]
+ (1.00 - d_common.alpha) * d_common_change.d_frame[ori_pointer];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
}
}
// END OF FUNCTION
|
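A reading of the arithmetic in the kernel above (the .cu copy that follows has the same body): in_final_sum is the template sum over N = d_common.in_elem pixels, denomT is sqrt(N-1) times the template's standard deviation, d_in2_sub2 holds the windowed image sums, d_in2_sqr_sub2 the windowed sums of squares, and d_conv the template-image products, so the final division appears to compute a normalized cross-correlation score of the form

$$ \text{score} \;=\; \frac{\sum T I \;-\; \tfrac{1}{N}\bigl(\textstyle\sum T\bigr)\bigl(\textstyle\sum I\bigr)}{\sqrt{\sum I^{2} - \tfrac{1}{N}\bigl(\sum I\bigr)^{2}}\;\cdot\;\sqrt{N-1}\,\sigma_T} $$

i.e. the usual template-matching correlation up to a constant factor close to one.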
b90444f63427d7d4255b4266c2a3c30be9618933.cu
|
// KERNEL FUNCTION
__global__ void kernel() {
// COMMON VARIABLES
fp* d_in;
int rot_row;
int rot_col;
int in2_rowlow;
int in2_collow;
int ic;
int jc;
int jp1;
int ja1, ja2;
int ip1;
int ia1, ia2;
int ja, jb;
int ia, ib;
float s;
int i;
int j;
int row;
int col;
int ori_row;
int ori_col;
int position;
float sum;
int pos_ori;
float temp;
float temp2;
int location;
int cent;
int tMask_row;
int tMask_col;
float largest_value_current = 0;
float largest_value = 0;
int largest_coordinate_current = 0;
int largest_coordinate = 0;
float fin_max_val = 0;
int fin_max_coo = 0;
int largest_row;
int largest_col;
int offset_row;
int offset_col;
__shared__
float in_partial_sum[51]; // WATCH THIS !!! HARDCODED VALUE
__shared__
float in_sqr_partial_sum[51]; // WATCH THIS !!! HARDCODED VALUE
__shared__
float in_final_sum;
__shared__
float in_sqr_final_sum;
float mean;
float mean_sqr;
float variance;
float deviation;
__shared__
float denomT;
__shared__
float par_max_val[131]; // WATCH THIS !!! HARDCODED VALUE
__shared__
int par_max_coo[131]; // WATCH THIS !!! HARDCODED VALUE
int pointer;
__shared__
float d_in_mod_temp[2601];
int ori_pointer;
int loc_pointer;
// THREAD PARAMETERS
int bx = blockIdx.x; // get current horizontal block index (0-n)
int tx = threadIdx.x; // get current horizontal thread index (0-n)
int ei_new;
// GENERATE TEMPLATE
// generate templates based on the first frame only
if (d_common_change.frame_no == 0) {
// GET POINTER TO TEMPLATE FOR THE POINT
// pointers to: current template for current point
d_in = &d_unique[bx].d_T[d_unique[bx].in_pointer];
// UPDATE ROW LOC AND COL LOC
// update temporary endo/epi row/col coordinates (in each block corresponding to point, narrow work to one thread)
ei_new = tx;
if (ei_new == 0) {
// update temporary row/col coordinates
pointer = d_unique[bx].point_no * d_common.no_frames + d_common_change.frame_no;
d_unique[bx].d_tRowLoc[pointer] = d_unique[bx].d_Row[d_unique[bx].point_no];
d_unique[bx].d_tColLoc[pointer] = d_unique[bx].d_Col[d_unique[bx].point_no];
}
// CREATE TEMPLATES
// work
ei_new = tx;
while (ei_new < d_common.in_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in_rows == 0) {
row = d_common.in_rows - 1;
col = col - 1;
}
// figure out row/col location in corresponding new template area in image and give to every thread (get top left corner and progress down and right)
ori_row = d_unique[bx].d_Row[d_unique[bx].point_no] - 25 + row - 1;
ori_col = d_unique[bx].d_Col[d_unique[bx].point_no] - 25 + col - 1;
ori_pointer = ori_col * d_common.frame_rows + ori_row;
// update template
d_in[col * d_common.in_rows + row] = d_common_change.d_frame[ori_pointer];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
}
// PROCESS POINTS
// process points in all frames except for the first one
if (d_common_change.frame_no != 0) {
// SELECTION
in2_rowlow = d_unique[bx].d_Row[d_unique[bx].point_no] - d_common.sSize; // (1 to n+1)
in2_collow = d_unique[bx].d_Col[d_unique[bx].point_no] - d_common.sSize;
// work
ei_new = tx;
while (ei_new < d_common.in2_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_rows == 0) {
row = d_common.in2_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + in2_rowlow - 1;
ori_col = col + in2_collow - 1;
d_unique[bx].d_in2[ei_new] = d_common_change.d_frame[ori_col * d_common.frame_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// CONVOLUTION
// ROTATION
// variables
d_in = &d_unique[bx].d_T[d_unique[bx].in_pointer];
// work
ei_new = tx;
while (ei_new < d_common.in_elem) {
// figure out row/col location in padded array
row = (ei_new + 1) % d_common.in_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in_rows == 0) {
row = d_common.in_rows - 1;
col = col - 1;
}
// execution
rot_row = (d_common.in_rows - 1) - row;
rot_col = (d_common.in_rows - 1) - col;
d_in_mod_temp[ei_new] = d_in[rot_col * d_common.in_rows + rot_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// ACTUAL CONVOLUTION
// work
ei_new = tx;
while (ei_new < d_common.conv_elem) {
// figure out row/col location in array
ic = (ei_new + 1) % d_common.conv_rows; // (1-n)
jc = (ei_new + 1) / d_common.conv_rows + 1; // (1-n)
if ((ei_new + 1) % d_common.conv_rows == 0) {
ic = d_common.conv_rows;
jc = jc - 1;
}
j = jc + d_common.joffset;
jp1 = j + 1;
if (d_common.in2_cols < jp1) {
ja1 = jp1 - d_common.in2_cols;
} else {
ja1 = 1;
}
if (d_common.in_cols < j) {
ja2 = d_common.in_cols;
} else {
ja2 = j;
}
i = ic + d_common.ioffset;
ip1 = i + 1;
if (d_common.in2_rows < ip1) {
ia1 = ip1 - d_common.in2_rows;
} else {
ia1 = 1;
}
if (d_common.in_rows < i) {
ia2 = d_common.in_rows;
} else {
ia2 = i;
}
s = 0;
for (ja = ja1; ja <= ja2; ja++) {
jb = jp1 - ja;
for (ia = ia1; ia <= ia2; ia++) {
ib = ip1 - ia;
s = s
+ d_in_mod_temp[d_common.in_rows * (ja - 1) + ia - 1]
* d_unique[bx].d_in2[d_common.in2_rows * (jb - 1) + ib - 1];
}
}
//d_unique[bx].d_conv[d_common.conv_rows*(jc-1)+ic-1] = s;
d_unique[bx].d_conv[ei_new] = s;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// CUMULATIVE SUM
// PAD ARRAY, VERTICAL CUMULATIVE SUM
// PAD ARRAY
// work
ei_new = tx;
while (ei_new < d_common.in2_pad_cumv_elem) {
// figure out row/col location in padded array
row = (ei_new + 1) % d_common.in2_pad_cumv_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_pad_cumv_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_pad_cumv_rows == 0) {
row = d_common.in2_pad_cumv_rows - 1;
col = col - 1;
}
// execution
if (row > (d_common.in2_pad_add_rows - 1)
&& // do if has numbers in original array
row < (d_common.in2_pad_add_rows + d_common.in2_rows)
&& col > (d_common.in2_pad_add_cols - 1)
&& col < (d_common.in2_pad_add_cols + d_common.in2_cols)) {
ori_row = row - d_common.in2_pad_add_rows;
ori_col = col - d_common.in2_pad_add_cols;
d_unique[bx].d_in2_pad_cumv[ei_new] = d_unique[bx].d_in2[ori_col * d_common.in2_rows
+ ori_row];
} else { // do if otherwise
d_unique[bx].d_in2_pad_cumv[ei_new] = 0;
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// VERTICAL CUMULATIVE SUM
//work
ei_new = tx;
while (ei_new < d_common.in2_pad_cumv_cols) {
// figure out column position
pos_ori = ei_new * d_common.in2_pad_cumv_rows;
// variables
sum = 0;
// loop through all rows
for (position = pos_ori; position < pos_ori + d_common.in2_pad_cumv_rows;
position = position + 1) {
d_unique[bx].d_in2_pad_cumv[position] = d_unique[bx].d_in2_pad_cumv[position] + sum;
sum = d_unique[bx].d_in2_pad_cumv[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION
// work
ei_new = tx;
while (ei_new < d_common.in2_pad_cumv_sel_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_pad_cumv_sel_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_pad_cumv_sel_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_pad_cumv_sel_rows == 0) {
row = d_common.in2_pad_cumv_sel_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel_collow - 1;
d_unique[bx].d_in2_pad_cumv_sel[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col
* d_common.in2_pad_cumv_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
// SELECTION 2
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_sub_cumh_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_sub_cumh_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_sub_cumh_rows == 0) {
row = d_common.in2_sub_cumh_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel2_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel2_collow - 1;
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col
* d_common.in2_pad_cumv_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SUBTRACTION
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_elem) {
// subtract
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv_sel[ei_new]
- d_unique[bx].d_in2_sub_cumh[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// HORIZONTAL CUMULATIVE SUM
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_rows) {
// figure out row position
pos_ori = ei_new;
// variables
sum = 0;
// loop through all rows
for (position = pos_ori; position < pos_ori + d_common.in2_sub_cumh_elem;
position = position + d_common.in2_sub_cumh_rows) {
d_unique[bx].d_in2_sub_cumh[position] = d_unique[bx].d_in2_sub_cumh[position] + sum;
sum = d_unique[bx].d_in2_sub_cumh[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_sel_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_sub_cumh_sel_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_sub_cumh_sel_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_sub_cumh_sel_rows == 0) {
row = d_common.in2_sub_cumh_sel_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel_collow - 1;
d_unique[bx].d_in2_sub_cumh_sel[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col
* d_common.in2_sub_cumh_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION 2, SUBTRACTION
// SELECTION 2
// work
ei_new = tx;
while (ei_new < d_common.in2_sub2_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_sub2_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_sub2_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_sub2_rows == 0) {
row = d_common.in2_sub2_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel2_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel2_collow - 1;
d_unique[bx].d_in2_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col
* d_common.in2_sub_cumh_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SUBTRACTION
// work
ei_new = tx;
while (ei_new < d_common.in2_sub2_elem) {
// subtract
d_unique[bx].d_in2_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh_sel[ei_new]
- d_unique[bx].d_in2_sub2[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// CUMULATIVE SUM 2
// MULTIPLICATION
// work
ei_new = tx;
while (ei_new < d_common.in2_sqr_elem) {
temp = d_unique[bx].d_in2[ei_new];
d_unique[bx].d_in2_sqr[ei_new] = temp * temp;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// PAD ARRAY, VERTICAL CUMULATIVE SUM
// PAD ARRAY
// work
ei_new = tx;
while (ei_new < d_common.in2_pad_cumv_elem) {
// figure out row/col location in padded array
row = (ei_new + 1) % d_common.in2_pad_cumv_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_pad_cumv_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_pad_cumv_rows == 0) {
row = d_common.in2_pad_cumv_rows - 1;
col = col - 1;
}
// execution
if (row > (d_common.in2_pad_add_rows - 1)
&& // do if has numbers in original array
row < (d_common.in2_pad_add_rows + d_common.in2_sqr_rows)
&& col > (d_common.in2_pad_add_cols - 1)
&& col < (d_common.in2_pad_add_cols + d_common.in2_sqr_cols)) {
ori_row = row - d_common.in2_pad_add_rows;
ori_col = col - d_common.in2_pad_add_cols;
d_unique[bx].d_in2_pad_cumv[ei_new] = d_unique[bx].d_in2_sqr[ori_col * d_common.in2_sqr_rows
+ ori_row];
} else { // do if otherwise
d_unique[bx].d_in2_pad_cumv[ei_new] = 0;
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// VERTICAL CUMULATIVE SUM
//work
ei_new = tx;
while (ei_new < d_common.in2_pad_cumv_cols) {
// figure out column position
pos_ori = ei_new * d_common.in2_pad_cumv_rows;
// variables
sum = 0;
// loop through all rows
for (position = pos_ori; position < pos_ori + d_common.in2_pad_cumv_rows;
position = position + 1) {
d_unique[bx].d_in2_pad_cumv[position] = d_unique[bx].d_in2_pad_cumv[position] + sum;
sum = d_unique[bx].d_in2_pad_cumv[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION
// work
ei_new = tx;
while (ei_new < d_common.in2_pad_cumv_sel_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_pad_cumv_sel_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_pad_cumv_sel_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_pad_cumv_sel_rows == 0) {
row = d_common.in2_pad_cumv_sel_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel_collow - 1;
d_unique[bx].d_in2_pad_cumv_sel[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col
* d_common.in2_pad_cumv_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
// SELECTION 2
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_sub_cumh_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_sub_cumh_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_sub_cumh_rows == 0) {
row = d_common.in2_sub_cumh_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel2_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel2_collow - 1;
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col
* d_common.in2_pad_cumv_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SUBTRACTION
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_elem) {
// subtract
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv_sel[ei_new]
- d_unique[bx].d_in2_sub_cumh[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// HORIZONTAL CUMULATIVE SUM
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_rows) {
// figure out row position
pos_ori = ei_new;
// variables
sum = 0;
// loop through all rows
for (position = pos_ori; position < pos_ori + d_common.in2_sub_cumh_elem;
position = position + d_common.in2_sub_cumh_rows) {
d_unique[bx].d_in2_sub_cumh[position] = d_unique[bx].d_in2_sub_cumh[position] + sum;
sum = d_unique[bx].d_in2_sub_cumh[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION
// work
ei_new = tx;
while (ei_new < d_common.in2_sub_cumh_sel_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_sub_cumh_sel_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_sub_cumh_sel_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_sub_cumh_sel_rows == 0) {
row = d_common.in2_sub_cumh_sel_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel_collow - 1;
d_unique[bx].d_in2_sub_cumh_sel[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col
* d_common.in2_sub_cumh_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SELECTION 2, SUBTRACTION
// SELECTION 2
// work
ei_new = tx;
while (ei_new < d_common.in2_sub2_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in2_sub2_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in2_sub2_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in2_sub2_rows == 0) {
row = d_common.in2_sub2_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel2_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel2_collow - 1;
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col
* d_common.in2_sub_cumh_rows + ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// SUBTRACTION
// work
ei_new = tx;
while (ei_new < d_common.in2_sub2_elem) {
// subtract
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh_sel[ei_new]
- d_unique[bx].d_in2_sqr_sub2[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
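// The remaining steps assemble a normalized cross-correlation (NCC) surface:
// d_in2_sqr_sub2 is turned into the per-pixel denominator term of the search
// window, sqrt(sum(x^2) - sum(x)^2 / n), d_conv becomes the mean-adjusted
// numerator, and their ratio (computed below) is the correlation map whose
// maximum gives the new position of the tracked point.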
// FINAL
// DENOMINATOR A SAVE RESULT IN CUMULATIVE SUM A2
// work
ei_new = tx;
while (ei_new < d_common.in2_sub2_elem) {
temp = d_unique[bx].d_in2_sub2[ei_new];
temp2 = d_unique[bx].d_in2_sqr_sub2[ei_new] - (temp * temp / d_common.in_elem);
if (temp2 < 0) {
temp2 = 0;
}
d_unique[bx].d_in2_sqr_sub2[ei_new] = sqrt(temp2);
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// MULTIPLICATION
// work
ei_new = tx;
while (ei_new < d_common.in_sqr_elem) {
temp = d_in[ei_new];
d_unique[bx].d_in_sqr[ei_new] = temp * temp;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// IN SUM
// work
ei_new = tx;
while (ei_new < d_common.in_cols) {
sum = 0;
for (i = 0; i < d_common.in_rows; i++) {
sum = sum + d_in[ei_new * d_common.in_rows + i];
}
in_partial_sum[ei_new] = sum;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// IN_SQR SUM
ei_new = tx;
while (ei_new < d_common.in_sqr_rows) {
sum = 0;
for (i = 0; i < d_common.in_sqr_cols; i++) {
sum = sum + d_unique[bx].d_in_sqr[ei_new + d_common.in_sqr_rows * i];
}
in_sqr_partial_sum[ei_new] = sum;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// FINAL SUMMATION
if (tx == 0) {
in_final_sum = 0;
for (i = 0; i < d_common.in_cols; i++) {
in_final_sum = in_final_sum + in_partial_sum[i];
}
} else if (tx == 1) {
in_sqr_final_sum = 0;
for (i = 0; i < d_common.in_sqr_cols; i++) {
in_sqr_final_sum = in_sqr_final_sum + in_sqr_partial_sum[i];
}
}
// SYNCHRONIZE THREADS
__syncthreads();
// DENOMINATOR T
if (tx == 0) {
mean = in_final_sum / d_common.in_elem; // gets mean (average) value of element in ROI
mean_sqr = mean * mean;
variance = (in_sqr_final_sum / d_common.in_elem) - mean_sqr; // gets variance of ROI
deviation = sqrt(variance); // gets standard deviation of ROI
denomT = sqrt(float(d_common.in_elem - 1)) * deviation;
}
// SYNCHRONIZE THREADS
__syncthreads();
// DENOMINATOR SAVE RESULT IN CUMULATIVE SUM A2
// work
ei_new = tx;
while (ei_new < d_common.in2_sub2_elem) {
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sqr_sub2[ei_new] * denomT;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// NUMERATOR SAVE RESULT IN CONVOLUTION
// work
ei_new = tx;
while (ei_new < d_common.conv_elem) {
d_unique[bx].d_conv[ei_new] = d_unique[bx].d_conv[ei_new]
- d_unique[bx].d_in2_sub2[ei_new] * in_final_sum / d_common.in_elem;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// CORRELATION SAVE RESULT IN CUMULATIVE SUM A2
// work
ei_new = tx;
while (ei_new < d_common.in2_sub2_elem) {
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_conv[ei_new]
/ d_unique[bx].d_in2_sqr_sub2[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
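// Build a binary template mask: a single 1 at the tracked point (for frame 0
// this lands at the mask center; later frames offset it by the previously
// tracked displacement), 0 everywhere else. It is used below to confine the
// peak search to a neighborhood of the previous position.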
// TEMPLATE MASK CREATE
cent = d_common.sSize + d_common.tSize + 1;
if (d_common_change.frame_no == 0) {
tMask_row = cent + d_unique[bx].d_Row[d_unique[bx].point_no]
- d_unique[bx].d_Row[d_unique[bx].point_no] - 1;
tMask_col = cent + d_unique[bx].d_Col[d_unique[bx].point_no]
- d_unique[bx].d_Col[d_unique[bx].point_no] - 1;
} else {
pointer = d_common_change.frame_no - 1 + d_unique[bx].point_no * d_common.no_frames;
tMask_row = cent + d_unique[bx].d_tRowLoc[pointer] - d_unique[bx].d_Row[d_unique[bx].point_no]
- 1;
tMask_col = cent + d_unique[bx].d_tColLoc[pointer] - d_unique[bx].d_Col[d_unique[bx].point_no]
- 1;
}
//work
ei_new = tx;
while (ei_new < d_common.tMask_elem) {
location = tMask_col * d_common.tMask_rows + tMask_row;
if (ei_new == location) {
d_unique[bx].d_tMask[ei_new] = 1;
} else {
d_unique[bx].d_tMask[ei_new] = 0;
}
//go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
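// Convolve the binary mask with an implicit all-ones kernel (hence the "* 1"
// in the inner loop; ib/jb appear to be kept from generic convolution indexing
// even though the kernel value is constant), then gate the correlation surface
// with the result so the maximum search stays close to the previous location.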
// MASK CONVOLUTION
// work
ei_new = tx;
while (ei_new < d_common.mask_conv_elem) {
// figure out row/col location in array
ic = (ei_new + 1) % d_common.mask_conv_rows; // (1-n)
jc = (ei_new + 1) / d_common.mask_conv_rows + 1; // (1-n)
if ((ei_new + 1) % d_common.mask_conv_rows == 0) {
ic = d_common.mask_conv_rows;
jc = jc - 1;
}
j = jc + d_common.mask_conv_joffset;
jp1 = j + 1;
if (d_common.mask_cols < jp1) {
ja1 = jp1 - d_common.mask_cols;
} else {
ja1 = 1;
}
if (d_common.tMask_cols < j) {
ja2 = d_common.tMask_cols;
} else {
ja2 = j;
}
i = ic + d_common.mask_conv_ioffset;
ip1 = i + 1;
if (d_common.mask_rows < ip1) {
ia1 = ip1 - d_common.mask_rows;
} else {
ia1 = 1;
}
if (d_common.tMask_rows < i) {
ia2 = d_common.tMask_rows;
} else {
ia2 = i;
}
s = 0;
for (ja = ja1; ja <= ja2; ja++) {
jb = jp1 - ja;
for (ia = ia1; ia <= ia2; ia++) {
ib = ip1 - ia;
s = s + d_unique[bx].d_tMask[d_common.tMask_rows * (ja - 1) + ia - 1] * 1;
}
}
// //d_unique[bx].d_mask_conv[d_common.mask_conv_rows*(jc-1)+ic-1] = s;
d_unique[bx].d_mask_conv[ei_new] = d_unique[bx].d_in2_sqr_sub2[ei_new] * s;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
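// Two-stage argmax over the gated correlation surface: each thread scans a
// stripe and records its local best in par_max_val/par_max_coo, then thread 0
// reduces the partial results, converts the winning linear index to row/column
// form, and stores the new template location for this frame.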
// MAXIMUM VALUE
// INITIAL SEARCH
ei_new = tx;
while (ei_new < d_common.mask_conv_rows) {
for (i = 0; i < d_common.mask_conv_cols; i++) {
largest_coordinate_current = ei_new * d_common.mask_conv_rows + i;
largest_value_current = abs(d_unique[bx].d_mask_conv[largest_coordinate_current]);
if (largest_value_current > largest_value) {
largest_coordinate = largest_coordinate_current;
largest_value = largest_value_current;
}
}
par_max_coo[ei_new] = largest_coordinate;
par_max_val[ei_new] = largest_value;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
// SYNCHRONIZE THREADS
__syncthreads();
// FINAL SEARCH
if (tx == 0) {
for (i = 0; i < d_common.mask_conv_rows; i++) {
if (par_max_val[i] > fin_max_val) {
fin_max_val = par_max_val[i];
fin_max_coo = par_max_coo[i];
}
}
// convert coordinate to row/col form
largest_row = (fin_max_coo + 1) % d_common.mask_conv_rows - 1; // (0-n) row
largest_col = (fin_max_coo + 1) / d_common.mask_conv_rows; // (0-n) column
if ((fin_max_coo + 1) % d_common.mask_conv_rows == 0) {
largest_row = d_common.mask_conv_rows - 1;
largest_col = largest_col - 1;
}
// calculate offset
largest_row = largest_row + 1; // compensate to match MATLAB format (1-n)
largest_col = largest_col + 1; // compensate to match MATLAB format (1-n)
offset_row = largest_row - d_common.in_rows - (d_common.sSize - d_common.tSize);
offset_col = largest_col - d_common.in_cols - (d_common.sSize - d_common.tSize);
pointer = d_common_change.frame_no + d_unique[bx].point_no * d_common.no_frames;
d_unique[bx].d_tRowLoc[pointer] = d_unique[bx].d_Row[d_unique[bx].point_no] + offset_row;
d_unique[bx].d_tColLoc[pointer] = d_unique[bx].d_Col[d_unique[bx].point_no] + offset_col;
}
// SYNCHRONIZE THREADS
__syncthreads();
}
// COORDINATE AND TEMPLATE UPDATE
// time19 = clock();
// if the last frame in the batch, update template
if (d_common_change.frame_no != 0 && (d_common_change.frame_no) % 10 == 0) {
// update coordinate
loc_pointer = d_unique[bx].point_no * d_common.no_frames + d_common_change.frame_no;
d_unique[bx].d_Row[d_unique[bx].point_no] = d_unique[bx].d_tRowLoc[loc_pointer];
d_unique[bx].d_Col[d_unique[bx].point_no] = d_unique[bx].d_tColLoc[loc_pointer];
// work
ei_new = tx;
while (ei_new < d_common.in_elem) {
// figure out row/col location in new matrix
row = (ei_new + 1) % d_common.in_rows - 1; // (0-n) row
col = (ei_new + 1) / d_common.in_rows + 1 - 1; // (0-n) column
if ((ei_new + 1) % d_common.in_rows == 0) {
row = d_common.in_rows - 1;
col = col - 1;
}
// figure out row/col location in corresponding new template area in image and give to every thread (get top left corner and progress down and right)
ori_row = d_unique[bx].d_Row[d_unique[bx].point_no] - 25 + row - 1;
ori_col = d_unique[bx].d_Col[d_unique[bx].point_no] - 25 + col - 1;
ori_pointer = ori_col * d_common.frame_rows + ori_row;
// update template
d_in[ei_new] = d_common.alpha * d_in[ei_new]
+ (1.00 - d_common.alpha) * d_common_change.d_frame[ori_pointer];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
}
}
// END OF FUNCTION
|
476023944f0dd92d8c0e29f8d72b601cdc0258ce.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* dijkstras-test.cu
*
* Created on: Apr 20, 2015
* Author: luke
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <climits>
#include <stdint.h>
#include <ctime>
void CudaMallocErrorCheck(void** ptr, int size);
void DijkstrasSetupCuda(int *V, int *E, int *We, int *sigma, int *F, int *U, int num_v, int num_e);
void Extremas(int *V, int *E, int num_v, int num_e, int *extrema_vertex, int source_vertex);
void Initialize(int *V, int *E, int num_v, int num_e, int **dev_V, int **dev_E, int **dev_U, int **dev_F, int **dev_sigma, int source);
int Minimum(int *U, int *sigma, int *V, int *E, int num_v, int num_e, int *dev_dest, int *dev_src);
__global__ void InitializeGPU(int *V, int *E, int *U, int *F, int *sigma, int src, int size_v, int size_e);
__global__ void Relax(int *U, int *F, int *sigma, int *V, int *E, int num_v, int num_e);
__global__ void Update(int *U, int *F, int *sigma, int delta, int size);
__global__ void reduce(int *g_idata, int *g_odata, unsigned int n, int *U, int *sigma);
__global__ void reduce_fix(int *g_idata, int *g_odata, unsigned int n, unsigned int s_size, unsigned int loops, int *U, int *sigma);
uint32_t NearestPowerTwo(uint32_t N);
uint32_t NearestPowerBase(uint32_t N, uint32_t base, uint32_t &power);
// Generate V_a, E_a, Start_a, End_a, Weight_a
int main(int argc, char **argv) {
// Initialize graph
int V[] = {0, 1, 5, 7, 9};
int E[] = {1, 0, 2, 3, 4, 1, 4, 1, 4, 1, 2, 3};
int Sv[] = {0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 4};
int Ev[] = {1, 0, 2, 3, 4, 1, 4, 1, 4, 1, 2, 3};
int We[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
// Initialize Unsettled, Frontier, Sigma function
int sigma[]= {0,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}; // -1 = inf
int F[] = {1, 0, 0, 0, 0};
int U[] = {0, 1, 1, 1, 1};
DijkstrasSetupCuda(V, E, We, sigma, F, U, 5, 12);
}
void DijkstrasSetupCuda(int *V, int *E, int *We, int *sigma, int *F, int *U, int num_v, int num_e) {
int extrema_vertex;
Extremas(V, E, num_v, num_e, &extrema_vertex, 0);
}
void Extremas(int *V, int *E, int num_v, int num_e, int *extrema_vertex, int source_vertex) {
// Define Unsettled sigma and Frontier nodes
int *dev_U, *dev_sigma, *dev_F, *dev_V, *dev_E, *dev_src, *dev_dest;
int delta = 0;
float elapsedTime=0;
// Initialize reduce function mem
CudaMallocErrorCheck((void**)&dev_src, num_v*sizeof(int));
CudaMallocErrorCheck((void**)&dev_dest, num_v*sizeof(int));
Initialize(V, E, num_v, num_e, &dev_V, &dev_E, &dev_U, &dev_F, &dev_sigma, source_vertex);
// hipLaunchKernelGGL(( Relax), dim3(1), dim3(5), 0, 0, dev_U, dev_F, dev_sigma, dev_V, dev_E, num_v, num_e);
// int test = Minimum(dev_U, dev_sigma, dev_V, dev_E, num_v, num_e, dev_dest, dev_src);
// Update<<<1,5>>>(dev_U, dev_F, dev_sigma, test, num_v);
// printf("Test: %d\n", test);
//
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
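	// Main loop: relax the current frontier, use the min-reduction in Minimum()
	// to obtain the new threshold delta, then settle every unsettled vertex with
	// sigma <= delta and make it the next frontier. When nothing is left
	// unsettled the reduction returns INT_MAX and the loop terminates.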
while (delta != INT_MAX) {
hipLaunchKernelGGL(( Relax), dim3(1), dim3(5), 0, 0, dev_U, dev_F, dev_sigma, dev_V, dev_E, num_v, num_e);
delta = Minimum(dev_U, dev_sigma, dev_V, dev_E, num_v, num_e, dev_dest, dev_src);
hipLaunchKernelGGL(( Update), dim3(1), dim3(5), 0, 0, dev_U, dev_F, dev_sigma, delta, num_v);
}
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&elapsedTime, start, end);
printf("Elapsed Time: %f\n", elapsedTime);
hipEventDestroy(start);
hipEventDestroy(end);
int sigma[num_v];
// int V_t[num_v];
// int U_t[num_v];
hipMemcpy(sigma, dev_sigma, num_v*sizeof(int), hipMemcpyDeviceToHost);
// hipMemcpy(V_t, dev_F, num_v*sizeof(int), hipMemcpyDeviceToHost);
// hipMemcpy(U_t, dev_U, num_v*sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < num_v; ++i) {
printf("Sigma[%d] : %d\n", i, sigma[i]);
// printf("Frontier[%d] : %d\n", i, V_t[i]);
// printf("Unsettled[%d]: %d\n", i, U_t[i]);
}
}
void Initialize(int *V, int *E, int num_v, int num_e, int **dev_V, int **dev_E, int **dev_U, int **dev_F, int **dev_sigma, int source) {
// Allocate the device memory
CudaMallocErrorCheck((void**)dev_V, num_v*sizeof(int));
CudaMallocErrorCheck((void**)dev_E, num_e*sizeof(int));
CudaMallocErrorCheck((void**)dev_U, num_v*sizeof(int));
CudaMallocErrorCheck((void**)dev_F, num_v*sizeof(int));
CudaMallocErrorCheck((void**)dev_sigma, num_v*sizeof(int));
// copy graph to device
hipMemcpy(*dev_V, V, num_v*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(*dev_E, E, num_e*sizeof(int), hipMemcpyHostToDevice);
// initialize Frontier
// Initialize Unsettled
// Initialize Sigma distance function
int threads_per_block, blocks_per_dim;
blocks_per_dim = num_v / 1024 + 1;
threads_per_block = num_v / blocks_per_dim;
	// Note: InitializeGPU's parameters are (..., src, size_v, size_e), so the vertex
	// count must be passed before the edge count.
	hipLaunchKernelGGL(( InitializeGPU), dim3(blocks_per_dim), dim3(threads_per_block), 0, 0, *dev_V, *dev_E, *dev_U, *dev_F, *dev_sigma, source, num_v, num_e);
}
__global__ void InitializeGPU(int *V, int *E, int *U, int *F, int *sigma, int src, int size_v, int size_e) {
int offset = blockDim.x * blockIdx.x + threadIdx.x;
int U_t, F_t, sigma_t;
if (offset < size_v) {
U_t = 1;
F_t = 0;
sigma_t = INT_MAX - 1;
if (offset == src) {
U_t = 0;
F_t = 1;
sigma_t = 0;
}
U[offset] = U_t;
F[offset] = F_t;
sigma[offset] = sigma_t;
}
}
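// Relax: one thread per vertex. Frontier vertices walk their outgoing edges in
// CSR form (V holds row offsets, E holds neighbor indices) and atomically lower
// the tentative distance of still-unsettled neighbors; all edge weights are
// implicitly 1 here, so sigma[neighbor] is relaxed to sigma[vertex] + 1.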
__global__ void Relax(int *U, int *F, int *sigma, int *V, int *E, int num_v, int num_e) {
int offset = blockDim.x * blockIdx.x + threadIdx.x;
if (offset < num_v) {
if (F[offset] == 1) {
for (int i = V[offset]; i < V[offset+1] && i < num_e; ++i) {
if (U[E[i]] == 1) {
atomicMin(&sigma[E[i]], sigma[offset] + 1);
}
}
}
}
}
__global__ void Update(int *U, int *F, int *sigma, int delta, int size) {
int offset = blockDim.x * blockIdx.x + threadIdx.x;
if (offset < size){
F[offset] = 0;
if (U[offset] == 1 && sigma[offset] <= delta) {
U[offset] = 0;
F[offset] = 1;
}
}
}
int Minimum(int *U, int *sigma, int *V, int *E, int num_v, int num_e, int *dev_dest, int *dev_src) {
uint32_t blocks = (num_v+1) / 1024 + 1;
uint32_t threads = (num_v+1) / blocks / 2;
uint32_t loops;
uint32_t n_multiple = NearestPowerBase(num_v, threads * blocks * 2, loops);
uint32_t dev_dest_size = NearestPowerTwo(blocks*loops);
uint32_t share = NearestPowerTwo(threads);
// printf("Blocks: %d, Threads:%d\n", blocks, threads);
hipLaunchKernelGGL(( reduce_fix), dim3(blocks), dim3(threads), share*sizeof(int), 0, V, dev_dest, n_multiple,
share, loops, U, sigma);
// Recall GPU function: Assumption Destination is power of 2. calculate block
// and threads for each call.
// GPU Call loop until Threshold
if (dev_dest_size > 1024) {
threads = 512;
blocks = dev_dest_size / threads / 2;
} else {
threads = dev_dest_size / 2;
blocks = 1;
}
while (dev_dest_size > 1) {
int * temp = dev_dest;
dev_dest = dev_src;
dev_src = temp;
hipLaunchKernelGGL(( reduce), dim3(blocks), dim3(threads), threads*sizeof(int), 0, dev_src, dev_dest,
dev_dest_size, U, sigma);
dev_dest_size = blocks;
if (dev_dest_size > 1024) {
threads = 512;
blocks = dev_dest_size / threads / 2;
} else {
threads = dev_dest_size / 2;
blocks = 1;
}
}
int result;
hipMemcpy(&result, dev_dest, sizeof(int), hipMemcpyDeviceToHost);
return result;
}
void CudaMallocErrorCheck(void** ptr, int size) {
hipError_t err = hipMalloc(ptr, size);
if (err != hipSuccess) {
printf("Error: %s", hipGetErrorString(err));
exit(1);
}
}
uint32_t NearestPowerTwo(uint32_t N) {
uint32_t result = 1;
while (result < N) {
result <<= 1;
}
return result;
}
uint32_t NearestPowerBase(uint32_t N, uint32_t base, uint32_t &power) {
uint32_t result = base;
power = 1;
while (result < N) {
result += base;
power++;
}
return result;
}
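// reduce / reduce_fix: shared-memory tree min-reductions used by Minimum().
// Entries whose vertex is still unsettled (U[i] == 1) participate, everything
// else contributes INT_MAX. reduce_fix pads shared memory so non-power-of-two
// sizes work and loops over grid-sized chunks; the host then keeps calling
// reduce on the partial results until a single value remains.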
__global__ void reduce(int *g_idata, int *g_odata, unsigned int n, int *U, int *sigma) {
// Pointer to shared memory
extern __shared__ int share_mem[];
unsigned int thread_id = threadIdx.x;
unsigned int block_id = blockIdx.x;
unsigned int block_dim = blockDim.x;
unsigned int offset = block_id*block_dim*2 + thread_id;
// Temporary result (int)
int result = (offset < n && U[offset] == 1) ? g_idata[offset] : INT_MAX;
// Fold in the partner element (this is a min-reduction, not a summation)
if (offset + block_dim < n && U[offset + block_dim] == 1)
result = min(result, g_idata[offset+block_dim]);
share_mem[thread_id] = result;
// Sync Threads in a single Block
__syncthreads();
// store result to shared memory
for (unsigned int s=block_dim/2; s>0; s>>=1) {
if (thread_id < s) {
share_mem[thread_id] = result = min(result, share_mem[thread_id + s]);
}
__syncthreads();
}
// Store result to output data pointer
if (thread_id == 0) g_odata[block_id] = result;
}
__global__ void reduce_fix(int *g_idata, int *g_odata, unsigned int n, unsigned int s_size, unsigned int loops, int *U, int *sigma) {
// Pointer to shared memory
extern __shared__ int share_mem[];
unsigned int thread_id = threadIdx.x;
for (int i = 0; i < loops; ++i) {
unsigned int offset = blockIdx.x*blockDim.x*2 + threadIdx.x + blockDim.x * 2 * gridDim.x * i;
// Temporary result (int)
int result = (offset < n && U[offset] == 1) ? g_idata[offset] : INT_MAX;
// Fold in the partner element (this is a min-reduction, not a summation)
if (offset + blockDim.x < n && U[offset + blockDim.x] == 1)
result = min(result, g_idata[offset+blockDim.x]);
share_mem[thread_id] = result;
// printf("Result: %d\n", result);
// Sync Threads in a single Block
int delta = s_size - blockDim.x;
if (thread_id + delta > blockDim.x-1) {
share_mem[thread_id+delta] = INT_MAX;
}
__syncthreads();
// store result to shared memory
for (unsigned int s=s_size/2; s>0; s>>=1) {
if (thread_id < s) {
share_mem[thread_id] = result = min(result, share_mem[thread_id + s]);
}
__syncthreads();
}
// Store result to output data pointer
if (thread_id == 0) {
g_odata[blockIdx.x+ gridDim.x*i] = result;
}
}
}
|
476023944f0dd92d8c0e29f8d72b601cdc0258ce.cu
|
/*
* dijkstras-test.cu
*
* Created on: Apr 20, 2015
* Author: luke
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <climits>
#include <stdint.h>
#include <ctime>
void CudaMallocErrorCheck(void** ptr, int size);
void DijkstrasSetupCuda(int *V, int *E, int *We, int *sigma, int *F, int *U, int num_v, int num_e);
void Extremas(int *V, int *E, int num_v, int num_e, int *extrema_vertex, int source_vertex);
void Initialize(int *V, int *E, int num_v, int num_e, int **dev_V, int **dev_E, int **dev_U, int **dev_F, int **dev_sigma, int source);
int Minimum(int *U, int *sigma, int *V, int *E, int num_v, int num_e, int *dev_dest, int *dev_src);
__global__ void InitializeGPU(int *V, int *E, int *U, int *F, int *sigma, int src, int size_v, int size_e);
__global__ void Relax(int *U, int *F, int *sigma, int *V, int *E, int num_v, int num_e);
__global__ void Update(int *U, int *F, int *sigma, int delta, int size);
__global__ void reduce(int *g_idata, int *g_odata, unsigned int n, int *U, int *sigma);
__global__ void reduce_fix(int *g_idata, int *g_odata, unsigned int n, unsigned int s_size, unsigned int loops, int *U, int *sigma);
uint32_t NearestPowerTwo(uint32_t N);
uint32_t NearestPowerBase(uint32_t N, uint32_t base, uint32_t &power);
// Generate V_a, E_a, Start_a, End_a, Weight_a
int main(int argc, char **argv) {
// Initialize graph
int V[] = {0, 1, 5, 7, 9};
int E[] = {1, 0, 2, 3, 4, 1, 4, 1, 4, 1, 2, 3};
int Sv[] = {0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 4};
int Ev[] = {1, 0, 2, 3, 4, 1, 4, 1, 4, 1, 2, 3};
int We[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
// Initialize Unsettled, Frontier, Sigma function
int sigma[]= {0,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}; // -1 = inf
int F[] = {1, 0, 0, 0, 0};
int U[] = {0, 1, 1, 1, 1};
DijkstrasSetupCuda(V, E, We, sigma, F, U, 5, 12);
}
void DijkstrasSetupCuda(int *V, int *E, int *We, int *sigma, int *F, int *U, int num_v, int num_e) {
int extrema_vertex;
Extremas(V, E, num_v, num_e, &extrema_vertex, 0);
}
void Extremas(int *V, int *E, int num_v, int num_e, int *extrema_vertex, int source_vertex) {
// Define Unsettled sigma and Frontier nodes
int *dev_U, *dev_sigma, *dev_F, *dev_V, *dev_E, *dev_src, *dev_dest;
int delta = 0;
float elapsedTime=0;
// Initialize reduce function mem
CudaMallocErrorCheck((void**)&dev_src, num_v*sizeof(int));
CudaMallocErrorCheck((void**)&dev_dest, num_v*sizeof(int));
Initialize(V, E, num_v, num_e, &dev_V, &dev_E, &dev_U, &dev_F, &dev_sigma, source_vertex);
// Relax<<<1, 5>>>(dev_U, dev_F, dev_sigma, dev_V, dev_E, num_v, num_e);
// int test = Minimum(dev_U, dev_sigma, dev_V, dev_E, num_v, num_e, dev_dest, dev_src);
// Update<<<1,5>>>(dev_U, dev_F, dev_sigma, test, num_v);
// printf("Test: %d\n", test);
//
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
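	// Main loop: relax the current frontier, use the min-reduction in Minimum()
	// to obtain the new threshold delta, then settle every unsettled vertex with
	// sigma <= delta and make it the next frontier. When nothing is left
	// unsettled the reduction returns INT_MAX and the loop terminates.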
while (delta != INT_MAX) {
Relax<<<1, 5>>>(dev_U, dev_F, dev_sigma, dev_V, dev_E, num_v, num_e);
delta = Minimum(dev_U, dev_sigma, dev_V, dev_E, num_v, num_e, dev_dest, dev_src);
Update<<<1, 5>>>(dev_U, dev_F, dev_sigma, delta, num_v);
}
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&elapsedTime, start, end);
printf("Elapsed Time: %f\n", elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(end);
int sigma[num_v];
// int V_t[num_v];
// int U_t[num_v];
cudaMemcpy(sigma, dev_sigma, num_v*sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(V_t, dev_F, num_v*sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(U_t, dev_U, num_v*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < num_v; ++i) {
printf("Sigma[%d] : %d\n", i, sigma[i]);
// printf("Frontier[%d] : %d\n", i, V_t[i]);
// printf("Unsettled[%d]: %d\n", i, U_t[i]);
}
}
void Initialize(int *V, int *E, int num_v, int num_e, int **dev_V, int **dev_E, int **dev_U, int **dev_F, int **dev_sigma, int source) {
// Allocate the device memory
CudaMallocErrorCheck((void**)dev_V, num_v*sizeof(int));
CudaMallocErrorCheck((void**)dev_E, num_e*sizeof(int));
CudaMallocErrorCheck((void**)dev_U, num_v*sizeof(int));
CudaMallocErrorCheck((void**)dev_F, num_v*sizeof(int));
CudaMallocErrorCheck((void**)dev_sigma, num_v*sizeof(int));
// copy graph to device
cudaMemcpy(*dev_V, V, num_v*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(*dev_E, E, num_e*sizeof(int), cudaMemcpyHostToDevice);
// initialize Frontier
// Initialize Unsettled
// Initialize Sigma distance function
int threads_per_block, blocks_per_dim;
blocks_per_dim = num_v / 1024 + 1;
threads_per_block = num_v / blocks_per_dim;
	// Note: InitializeGPU's parameters are (..., src, size_v, size_e), so the vertex
	// count must be passed before the edge count.
	InitializeGPU<<<blocks_per_dim, threads_per_block>>>(*dev_V, *dev_E, *dev_U, *dev_F, *dev_sigma, source, num_v, num_e);
}
__global__ void InitializeGPU(int *V, int *E, int *U, int *F, int *sigma, int src, int size_v, int size_e) {
int offset = blockDim.x * blockIdx.x + threadIdx.x;
int U_t, F_t, sigma_t;
if (offset < size_v) {
U_t = 1;
F_t = 0;
sigma_t = INT_MAX - 1;
if (offset == src) {
U_t = 0;
F_t = 1;
sigma_t = 0;
}
U[offset] = U_t;
F[offset] = F_t;
sigma[offset] = sigma_t;
}
}
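// Relax: one thread per vertex. Frontier vertices walk their outgoing edges in
// CSR form (V holds row offsets, E holds neighbor indices) and atomically lower
// the tentative distance of still-unsettled neighbors; all edge weights are
// implicitly 1 here, so sigma[neighbor] is relaxed to sigma[vertex] + 1.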
__global__ void Relax(int *U, int *F, int *sigma, int *V, int *E, int num_v, int num_e) {
int offset = blockDim.x * blockIdx.x + threadIdx.x;
if (offset < num_v) {
if (F[offset] == 1) {
for (int i = V[offset]; i < V[offset+1] && i < num_e; ++i) {
if (U[E[i]] == 1) {
atomicMin(&sigma[E[i]], sigma[offset] + 1);
}
}
}
}
}
__global__ void Update(int *U, int *F, int *sigma, int delta, int size) {
int offset = blockDim.x * blockIdx.x + threadIdx.x;
if (offset < size){
F[offset] = 0;
if (U[offset] == 1 && sigma[offset] <= delta) {
U[offset] = 0;
F[offset] = 1;
}
}
}
int Minimum(int *U, int *sigma, int *V, int *E, int num_v, int num_e, int *dev_dest, int *dev_src) {
uint32_t blocks = (num_v+1) / 1024 + 1;
uint32_t threads = (num_v+1) / blocks / 2;
uint32_t loops;
uint32_t n_multiple = NearestPowerBase(num_v, threads * blocks * 2, loops);
uint32_t dev_dest_size = NearestPowerTwo(blocks*loops);
uint32_t share = NearestPowerTwo(threads);
// printf("Blocks: %d, Threads:%d\n", blocks, threads);
reduce_fix<<<blocks, threads, share*sizeof(int)>>>(V, dev_dest, n_multiple,
share, loops, U, sigma);
// Recall GPU function: Assumption Destination is power of 2. calculate block
// and threads for each call.
// GPU Call loop until Threshold
if (dev_dest_size > 1024) {
threads = 512;
blocks = dev_dest_size / threads / 2;
} else {
threads = dev_dest_size / 2;
blocks = 1;
}
while (dev_dest_size > 1) {
int * temp = dev_dest;
dev_dest = dev_src;
dev_src = temp;
reduce<<<blocks, threads, threads*sizeof(int)>>>(dev_src, dev_dest,
dev_dest_size, U, sigma);
dev_dest_size = blocks;
if (dev_dest_size > 1024) {
threads = 512;
blocks = dev_dest_size / threads / 2;
} else {
threads = dev_dest_size / 2;
blocks = 1;
}
}
int result;
cudaMemcpy(&result, dev_dest, sizeof(int), cudaMemcpyDeviceToHost);
return result;
}
void CudaMallocErrorCheck(void** ptr, int size) {
cudaError_t err = cudaMalloc(ptr, size);
if (err != cudaSuccess) {
printf("Error: %s", cudaGetErrorString(err));
exit(1);
}
}
uint32_t NearestPowerTwo(uint32_t N) {
uint32_t result = 1;
while (result < N) {
result <<= 1;
}
return result;
}
uint32_t NearestPowerBase(uint32_t N, uint32_t base, uint32_t &power) {
uint32_t result = base;
power = 1;
while (result < N) {
result += base;
power++;
}
return result;
}
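// reduce / reduce_fix: shared-memory tree min-reductions used by Minimum().
// Entries whose vertex is still unsettled (U[i] == 1) participate, everything
// else contributes INT_MAX. reduce_fix pads shared memory so non-power-of-two
// sizes work and loops over grid-sized chunks; the host then keeps calling
// reduce on the partial results until a single value remains.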
__global__ void reduce(int *g_idata, int *g_odata, unsigned int n, int *U, int *sigma) {
// Pointer to shared memory
extern __shared__ int share_mem[];
unsigned int thread_id = threadIdx.x;
unsigned int block_id = blockIdx.x;
unsigned int block_dim = blockDim.x;
unsigned int offset = block_id*block_dim*2 + thread_id;
// Temporary result (int)
int result = (offset < n && U[offset] == 1) ? g_idata[offset] : INT_MAX;
// Fold in the partner element (this is a min-reduction, not a summation)
if (offset + block_dim < n && U[offset + block_dim] == 1)
result = min(result, g_idata[offset+block_dim]);
share_mem[thread_id] = result;
// Sync Threads in a single Block
__syncthreads();
// store result to shared memory
for (unsigned int s=block_dim/2; s>0; s>>=1) {
if (thread_id < s) {
share_mem[thread_id] = result = min(result, share_mem[thread_id + s]);
}
__syncthreads();
}
// Store result to output data pointer
if (thread_id == 0) g_odata[block_id] = result;
}
__global__ void reduce_fix(int *g_idata, int *g_odata, unsigned int n, unsigned int s_size, unsigned int loops, int *U, int *sigma) {
// Pointer to shared memory
extern __shared__ int share_mem[];
unsigned int thread_id = threadIdx.x;
for (int i = 0; i < loops; ++i) {
unsigned int offset = blockIdx.x*blockDim.x*2 + threadIdx.x + blockDim.x * 2 * gridDim.x * i;
// Temporary result (int)
int result = (offset < n && U[offset] == 1) ? g_idata[offset] : INT_MAX;
// Fold in the partner element (this is a min-reduction, not a summation)
if (offset + blockDim.x < n && U[offset + blockDim.x] == 1)
result = min(result, g_idata[offset+blockDim.x]);
share_mem[thread_id] = result;
// printf("Result: %d\n", result);
// Sync Threads in a single Block
int delta = s_size - blockDim.x;
if (thread_id + delta > blockDim.x-1) {
share_mem[thread_id+delta] = INT_MAX;
}
__syncthreads();
// store result to shared memory
for (unsigned int s=s_size/2; s>0; s>>=1) {
if (thread_id < s) {
share_mem[thread_id] = result = min(result, share_mem[thread_id + s]);
}
__syncthreads();
}
// Store result to output data pointer
if (thread_id == 0) {
g_odata[blockIdx.x+ gridDim.x*i] = result;
}
}
}
|
0c8e88c17fecc1dcfb93b77216b5160c3aaad58c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"
__global__ void sgd_sparse_update(const float *grad_data, const float *indices_data, float *param_data, size_t size, size_t length, float lr) {
size_t thread_ind = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_ind >= size) return;
size_t index = thread_ind / length;
size_t offset = thread_ind % length;
int id = indices_data[index];
const float cur_grad = grad_data[thread_ind];
float *param_ptr = param_data + length * id + offset;
atomicAdd(param_ptr, -lr * cur_grad);
}
int SGDOptimizerSparseUpdate(DLArrayHandle param, const DLArrayHandle grad_indices, const DLArrayHandle grad_values, float lr, DLStreamHandle stream_handle = NULL) {
size_t size = 1;
size_t length = param->shape[1];
for(int i = 0; i < grad_values->ndim; i++){
size *= grad_values->shape[i];
}
dim3 blocks;
dim3 threads;
const float *grad_data = (const float *)grad_values->data;
float *param_data = (float *)param->data;
const float *indices_data = (const float*)grad_indices->data;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle)
hipLaunchKernelGGL(( sgd_sparse_update), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, grad_data, indices_data, param_data, size, length, lr);
else
hipLaunchKernelGGL(( sgd_sparse_update), dim3(blocks), dim3(threads), 0, 0, grad_data, indices_data, param_data, size, length, lr);
return 0;
}
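// Momentum/Nesterov sparse updates are split into two phases: the first phase
// is sparse (one thread per gradient element, scattered through the index array
// with atomicAdd), the second phase is dense over the full parameter/velocity
// buffers and applies the momentum decay (plus the look-ahead shift for Nesterov).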
__global__ void nesterov_sparse_first_phase(float *param_data, float *veloc_data, const float *grad_data, const float *indices_data, float lr, size_t size, size_t length) {
size_t thread_ind = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_ind >= size) return;
size_t ind = thread_ind / length;
size_t offset = thread_ind % length;
int grad_ind = indices_data[ind];
const float cur_grad = grad_data[thread_ind];
size_t total_offset = length * grad_ind + offset;
float *veloc_ptr = veloc_data + total_offset;
float *param_ptr = param_data + total_offset;
float temp = -lr * cur_grad;
atomicAdd(veloc_ptr, temp);
atomicAdd(param_ptr, temp);
}
__global__ void nesterov_sparse_second_phase(float *param_data, float *veloc_data, float momentum, size_t size) {
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size) return;
float temp_veloc = momentum * veloc_data[ind];
veloc_data[ind] = temp_veloc;
param_data[ind] = param_data[ind] + temp_veloc;
}
__global__ void momentum_sparse_first_phase(float *veloc_data, const float *grad_data, const float *indices_data, float lr, size_t size, size_t length) {
size_t thread_ind = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_ind >= size) return;
size_t ind = thread_ind / length;
size_t offset = thread_ind % length;
int grad_ind = indices_data[ind];
const float cur_grad = grad_data[thread_ind];
float *veloc_ptr = veloc_data + length * grad_ind + offset;
atomicAdd(veloc_ptr, -lr * cur_grad);
}
__global__ void momentum_sparse_second_phase(float *param_data, float *veloc_data, float momentum, size_t size) {
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size) return;
param_data[ind] = param_data[ind] + veloc_data[ind];
veloc_data[ind] = momentum * veloc_data[ind];
}
int MomentumOptimizerSparseUpdate(DLArrayHandle param, const DLArrayHandle grad_indices, const DLArrayHandle grad_values, DLArrayHandle velocity, float lr, float momentum, bool nesterov, DLStreamHandle stream_handle = NULL) {
size_t size = 1;
size_t total_size = 1;
size_t length = param->shape[1];
for (int i = 0; i < grad_values->ndim; ++i){
size *= grad_values->shape[i];
}
for (int i = 0; i < param->ndim; ++i) {
total_size *= param->shape[i];
}
float *param_data = (float *) param->data;
const float *grad_data = (const float *) grad_values->data;
const float *indices_data = (const float*)grad_indices->data;
float *velocity_data = (float *) velocity->data;
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
dim3 dense_blocks;
dim3 dense_threads;
if (total_size <= 1024) {
dense_threads.x = total_size;
dense_blocks.x = 1;
} else {
dense_threads.x = 1024;
dense_blocks.x = (total_size + 1023) / 1024;
}
if (nesterov) {
if (stream_handle) {
hipLaunchKernelGGL(( nesterov_sparse_first_phase), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, param_data, velocity_data, grad_data, indices_data, lr, size, length);
hipLaunchKernelGGL(( nesterov_sparse_second_phase), dim3(dense_blocks), dim3(dense_threads), 0, *(hipStream_t*)stream_handle->handle, param_data, velocity_data, momentum, total_size);
} else {
hipLaunchKernelGGL(( nesterov_sparse_first_phase), dim3(blocks), dim3(threads), 0, 0, param_data, velocity_data, grad_data, indices_data, lr, size, length);
hipLaunchKernelGGL(( nesterov_sparse_second_phase), dim3(dense_blocks), dim3(dense_threads), 0, 0, param_data, velocity_data, momentum, total_size);
}
} else {
if (stream_handle) {
hipLaunchKernelGGL(( momentum_sparse_first_phase), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, velocity_data, grad_data, indices_data, lr, size, length);
hipLaunchKernelGGL(( momentum_sparse_second_phase), dim3(dense_blocks), dim3(dense_threads), 0, *(hipStream_t*)stream_handle->handle, param_data, velocity_data, momentum, total_size);
} else {
hipLaunchKernelGGL(( momentum_sparse_first_phase), dim3(blocks), dim3(threads), 0, 0, velocity_data, grad_data, indices_data, lr, size, length);
hipLaunchKernelGGL(( momentum_sparse_second_phase), dim3(dense_blocks), dim3(dense_threads), 0, 0, param_data, velocity_data, momentum, total_size);
}
}
return 0;
}
__global__ void deduplicate_kernel(const float *origin_data, const float *inverse_data, float *compressed_data, size_t size, size_t length) {
size_t thread_ind = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_ind >= size) return;
size_t ind = thread_ind / length;
size_t offset = thread_ind % length;
int com_ind = inverse_data[ind];
const float cur_origin = origin_data[thread_ind];
float *compressed_ptr = compressed_data + length * com_ind + offset;
atomicAdd(compressed_ptr, cur_origin);
}
int DeduplicateIndexedSlices(const DLArrayHandle origin, const DLArrayHandle inverse, DLArrayHandle compressed, DLStreamHandle stream_handle = NULL) {
size_t size = 1;
size_t length = compressed->shape[compressed->ndim-1];
for (int i = 0; i < origin->ndim; ++i) {
size *= origin->shape[i];
}
const float *origin_data = (const float*)origin->data;
const float *inverse_data = (const float*)inverse->data;
float *compressed_data = (float*)compressed->data;
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle)
hipLaunchKernelGGL(( deduplicate_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, origin_data, inverse_data, compressed_data, size, length);
else
hipLaunchKernelGGL(( deduplicate_kernel), dim3(blocks), dim3(threads), 0, 0, origin_data, inverse_data, compressed_data, size, length);
return 0;
}
__global__ void adagrad_sparse_update(float *param_data, const float *grad_data, const float *indices_data, float *acc_data, float lr, float eps, size_t size, size_t length) {
size_t thread_ind = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_ind >= size) return;
size_t ind = thread_ind / length;
size_t offset = thread_ind % length;
int grad_ind = indices_data[ind];
const float cur_grad = grad_data[thread_ind];
size_t total_offset = length * grad_ind + offset;
float *acc_ptr = acc_data + total_offset;
float *param_ptr = param_data + total_offset;
float cur_acc = *acc_ptr + cur_grad * cur_grad;
*acc_ptr = cur_acc;
*param_ptr -= lr * cur_grad / (sqrtf(cur_acc) + eps);
}
int AdaGradOptimizerSparseUpdate(DLArrayHandle param, const DLArrayHandle grad_indices, const DLArrayHandle grad_values, DLArrayHandle acc, float lr, float eps, DLStreamHandle stream_handle = NULL) {
size_t size = 1;
size_t length = param->shape[1];
for (int i = 0; i < grad_values->ndim; ++i){
size *= grad_values->shape[i];
}
float *param_data = (float *) param->data;
const float *grad_data = (const float *) grad_values->data;
const float *indices_data = (const float*)grad_indices->data;
float *acc_data = (float *) acc->data;
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle) {
hipLaunchKernelGGL(( adagrad_sparse_update), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, param_data, grad_data, indices_data, acc_data, lr, eps, size, length);
} else {
hipLaunchKernelGGL(( adagrad_sparse_update), dim3(blocks), dim3(threads), 0, 0, param_data, grad_data, indices_data, acc_data, lr, eps, size, length);
}
return 0;
}
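// Sparse Adam: for each touched row, update the first/second moment estimates
// in place, then apply the bias-corrected step
//   param -= lr * m_hat / (sqrt(v_hat) + eps),
// with m_hat = m / (1 - beta1^t) and v_hat = v / (1 - beta2^t) (beta1t/beta2t
// hold the accumulated powers). The read-modify-writes here are not atomic, so
// indices are assumed to be deduplicated first (see DeduplicateIndexedSlices).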
__global__ void adam_sparse_update(float *param, const float *grad_data, const float *indices_data, float *m, float *v, float lr, float beta1, float beta2, float beta1t, float beta2t, float eps, size_t size, size_t length) {
size_t thread_ind = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_ind >= size) return;
size_t ind = thread_ind / length;
size_t offset = thread_ind % length;
int grad_ind = indices_data[ind];
const float cur_grad = grad_data[thread_ind];
size_t total_offset = length * grad_ind + offset;
float *m_ptr = m + total_offset;
float *v_ptr = v + total_offset;
float *param_ptr = param + total_offset;
float cur_m = beta1 * (*m_ptr) + (1 - beta1) * cur_grad;
float cur_v = beta2 * (*v_ptr) + (1 - beta2) * cur_grad * cur_grad;
*m_ptr = cur_m;
*v_ptr = cur_v;
cur_m /= (1 - beta1t);
cur_v /= (1 - beta2t);
*(param_ptr) -= lr * cur_m / (sqrtf(cur_v) + eps);
}
int AdamOptimizerSparseUpdate(
DLArrayHandle param, const DLArrayHandle grad_indices, const DLArrayHandle grad_values, DLArrayHandle expavg, DLArrayHandle expavgsq,
float lr, float beta1, float beta2, float beta1t, float beta2t, float eps,
DLStreamHandle stream_handle = NULL
) {
size_t size = 1;
size_t length = param->shape[1];
for (int i = 0; i < grad_values->ndim; ++i){
size *= grad_values->shape[i];
}
dim3 blocks;
dim3 threads;
float *param_data = (float *) param->data;
const float *grad_data = (const float *) grad_values->data;
const float *indices_data = (const float*)grad_indices->data;
float *m_data = (float *) expavg->data;
float *v_data = (float *) expavgsq->data;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle)
hipLaunchKernelGGL(( adam_sparse_update), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, param_data, grad_data, indices_data, m_data, v_data, lr, beta1, beta2, beta1t, beta2t, eps, size, length);
else
hipLaunchKernelGGL(( adam_sparse_update), dim3(blocks), dim3(threads), 0, 0, param_data, grad_data, indices_data, m_data, v_data, lr, beta1, beta2, beta1t, beta2t, eps, size, length);
return 0;
}
|
0c8e88c17fecc1dcfb93b77216b5160c3aaad58c.cu
|
#include "gpu_runtime.h"
__global__ void sgd_sparse_update(const float *grad_data, const float *indices_data, float *param_data, size_t size, size_t length, float lr) {
size_t thread_ind = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_ind >= size) return;
size_t index = thread_ind / length;
size_t offset = thread_ind % length;
int id = indices_data[index];
const float cur_grad = grad_data[thread_ind];
float *param_ptr = param_data + length * id + offset;
atomicAdd(param_ptr, -lr * cur_grad);
}
int SGDOptimizerSparseUpdate(DLArrayHandle param, const DLArrayHandle grad_indices, const DLArrayHandle grad_values, float lr, DLStreamHandle stream_handle = NULL) {
size_t size = 1;
size_t length = param->shape[1];
for(int i = 0; i < grad_values->ndim; i++){
size *= grad_values->shape[i];
}
dim3 blocks;
dim3 threads;
const float *grad_data = (const float *)grad_values->data;
float *param_data = (float *)param->data;
const float *indices_data = (const float*)grad_indices->data;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle)
sgd_sparse_update<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(grad_data, indices_data, param_data, size, length, lr);
else
sgd_sparse_update<<<blocks, threads>>>(grad_data, indices_data, param_data, size, length, lr);
return 0;
}
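// Momentum/Nesterov sparse updates are split into two phases: the first phase
// is sparse (one thread per gradient element, scattered through the index array
// with atomicAdd), the second phase is dense over the full parameter/velocity
// buffers and applies the momentum decay (plus the look-ahead shift for Nesterov).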
__global__ void nesterov_sparse_first_phase(float *param_data, float *veloc_data, const float *grad_data, const float *indices_data, float lr, size_t size, size_t length) {
size_t thread_ind = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_ind >= size) return;
size_t ind = thread_ind / length;
size_t offset = thread_ind % length;
int grad_ind = indices_data[ind];
const float cur_grad = grad_data[thread_ind];
size_t total_offset = length * grad_ind + offset;
float *veloc_ptr = veloc_data + total_offset;
float *param_ptr = param_data + total_offset;
float temp = -lr * cur_grad;
atomicAdd(veloc_ptr, temp);
atomicAdd(param_ptr, temp);
}
__global__ void nesterov_sparse_second_phase(float *param_data, float *veloc_data, float momentum, size_t size) {
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size) return;
float temp_veloc = momentum * veloc_data[ind];
veloc_data[ind] = temp_veloc;
param_data[ind] = param_data[ind] + temp_veloc;
}
__global__ void momentum_sparse_first_phase(float *veloc_data, const float *grad_data, const float *indices_data, float lr, size_t size, size_t length) {
size_t thread_ind = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_ind >= size) return;
size_t ind = thread_ind / length;
size_t offset = thread_ind % length;
int grad_ind = indices_data[ind];
const float cur_grad = grad_data[thread_ind];
float *veloc_ptr = veloc_data + length * grad_ind + offset;
atomicAdd(veloc_ptr, -lr * cur_grad);
}
__global__ void momentum_sparse_second_phase(float *param_data, float *veloc_data, float momentum, size_t size) {
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size) return;
param_data[ind] = param_data[ind] + veloc_data[ind];
veloc_data[ind] = momentum * veloc_data[ind];
}
int MomentumOptimizerSparseUpdate(DLArrayHandle param, const DLArrayHandle grad_indices, const DLArrayHandle grad_values, DLArrayHandle velocity, float lr, float momentum, bool nesterov, DLStreamHandle stream_handle = NULL) {
size_t size = 1;
size_t total_size = 1;
size_t length = param->shape[1];
for (int i = 0; i < grad_values->ndim; ++i){
size *= grad_values->shape[i];
}
for (int i = 0; i < param->ndim; ++i) {
total_size *= param->shape[i];
}
float *param_data = (float *) param->data;
const float *grad_data = (const float *) grad_values->data;
const float *indices_data = (const float*)grad_indices->data;
float *velocity_data = (float *) velocity->data;
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
dim3 dense_blocks;
dim3 dense_threads;
if (total_size <= 1024) {
dense_threads.x = total_size;
dense_blocks.x = 1;
} else {
dense_threads.x = 1024;
dense_blocks.x = (total_size + 1023) / 1024;
}
if (nesterov) {
if (stream_handle) {
nesterov_sparse_first_phase<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(param_data, velocity_data, grad_data, indices_data, lr, size, length);
nesterov_sparse_second_phase<<<dense_blocks, dense_threads, 0, *(cudaStream_t*)stream_handle->handle>>>(param_data, velocity_data, momentum, total_size);
} else {
nesterov_sparse_first_phase<<<blocks, threads>>>(param_data, velocity_data, grad_data, indices_data, lr, size, length);
nesterov_sparse_second_phase<<<dense_blocks, dense_threads>>>(param_data, velocity_data, momentum, total_size);
}
} else {
if (stream_handle) {
momentum_sparse_first_phase<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(velocity_data, grad_data, indices_data, lr, size, length);
momentum_sparse_second_phase<<<dense_blocks, dense_threads, 0, *(cudaStream_t*)stream_handle->handle>>>(param_data, velocity_data, momentum, total_size);
} else {
momentum_sparse_first_phase<<<blocks, threads>>>(velocity_data, grad_data, indices_data, lr, size, length);
momentum_sparse_second_phase<<<dense_blocks, dense_threads>>>(param_data, velocity_data, momentum, total_size);
}
}
return 0;
}
__global__ void deduplicate_kernel(const float *origin_data, const float *inverse_data, float *compressed_data, size_t size, size_t length) {
size_t thread_ind = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_ind >= size) return;
size_t ind = thread_ind / length;
size_t offset = thread_ind % length;
int com_ind = inverse_data[ind];
const float cur_origin = origin_data[thread_ind];
float *compressed_ptr = compressed_data + length * com_ind + offset;
atomicAdd(compressed_ptr, cur_origin);
}
int DeduplicateIndexedSlices(const DLArrayHandle origin, const DLArrayHandle inverse, DLArrayHandle compressed, DLStreamHandle stream_handle = NULL) {
size_t size = 1;
size_t length = compressed->shape[compressed->ndim-1];
for (int i = 0; i < origin->ndim; ++i) {
size *= origin->shape[i];
}
const float *origin_data = (const float*)origin->data;
const float *inverse_data = (const float*)inverse->data;
float *compressed_data = (float*)compressed->data;
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle)
deduplicate_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(origin_data, inverse_data, compressed_data, size, length);
else
deduplicate_kernel<<<blocks, threads>>>(origin_data, inverse_data, compressed_data, size, length);
return 0;
}
__global__ void adagrad_sparse_update(float *param_data, const float *grad_data, const float *indices_data, float *acc_data, float lr, float eps, size_t size, size_t length) {
size_t thread_ind = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_ind >= size) return;
size_t ind = thread_ind / length;
size_t offset = thread_ind % length;
int grad_ind = indices_data[ind];
const float cur_grad = grad_data[thread_ind];
size_t total_offset = length * grad_ind + offset;
float *acc_ptr = acc_data + total_offset;
float *param_ptr = param_data + total_offset;
float cur_acc = *acc_ptr + cur_grad * cur_grad;
*acc_ptr = cur_acc;
*param_ptr -= lr * cur_grad / (sqrtf(cur_acc) + eps);
}
int AdaGradOptimizerSparseUpdate(DLArrayHandle param, const DLArrayHandle grad_indices, const DLArrayHandle grad_values, DLArrayHandle acc, float lr, float eps, DLStreamHandle stream_handle = NULL) {
size_t size = 1;
size_t length = param->shape[1];
for (int i = 0; i < grad_values->ndim; ++i){
size *= grad_values->shape[i];
}
float *param_data = (float *) param->data;
const float *grad_data = (const float *) grad_values->data;
const float *indices_data = (const float*)grad_indices->data;
float *acc_data = (float *) acc->data;
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle) {
adagrad_sparse_update<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(param_data, grad_data, indices_data, acc_data, lr, eps, size, length);
} else {
adagrad_sparse_update<<<blocks, threads>>>(param_data, grad_data, indices_data, acc_data, lr, eps, size, length);
}
return 0;
}
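// Sparse Adam: for each touched row, update the first/second moment estimates
// in place, then apply the bias-corrected step
//   param -= lr * m_hat / (sqrt(v_hat) + eps),
// with m_hat = m / (1 - beta1^t) and v_hat = v / (1 - beta2^t) (beta1t/beta2t
// hold the accumulated powers). The read-modify-writes here are not atomic, so
// indices are assumed to be deduplicated first (see DeduplicateIndexedSlices).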
__global__ void adam_sparse_update(float *param, const float *grad_data, const float *indices_data, float *m, float *v, float lr, float beta1, float beta2, float beta1t, float beta2t, float eps, size_t size, size_t length) {
size_t thread_ind = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_ind >= size) return;
size_t ind = thread_ind / length;
size_t offset = thread_ind % length;
int grad_ind = indices_data[ind];
const float cur_grad = grad_data[thread_ind];
size_t total_offset = length * grad_ind + offset;
float *m_ptr = m + total_offset;
float *v_ptr = v + total_offset;
float *param_ptr = param + total_offset;
float cur_m = beta1 * (*m_ptr) + (1 - beta1) * cur_grad;
float cur_v = beta2 * (*v_ptr) + (1 - beta2) * cur_grad * cur_grad;
*m_ptr = cur_m;
*v_ptr = cur_v;
cur_m /= (1 - beta1t);
cur_v /= (1 - beta2t);
*(param_ptr) -= lr * cur_m / (sqrtf(cur_v) + eps);
}
int AdamOptimizerSparseUpdate(
DLArrayHandle param, const DLArrayHandle grad_indices, const DLArrayHandle grad_values, DLArrayHandle expavg, DLArrayHandle expavgsq,
float lr, float beta1, float beta2, float beta1t, float beta2t, float eps,
DLStreamHandle stream_handle = NULL
) {
size_t size = 1;
size_t length = param->shape[1];
for (int i = 0; i < grad_values->ndim; ++i){
size *= grad_values->shape[i];
}
dim3 blocks;
dim3 threads;
float *param_data = (float *) param->data;
const float *grad_data = (const float *) grad_values->data;
const float *indices_data = (const float*)grad_indices->data;
float *m_data = (float *) expavg->data;
float *v_data = (float *) expavgsq->data;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle)
adam_sparse_update<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(param_data, grad_data, indices_data, m_data, v_data, lr, beta1, beta2, beta1t, beta2t, eps, size, length);
else
adam_sparse_update<<<blocks, threads>>>(param_data, grad_data, indices_data, m_data, v_data, lr, beta1, beta2, beta1t, beta2t, eps, size, length);
return 0;
}
|
7ac36c7653445872d58270a861f4d90b13bea784.hip
|
// !!! This is a file automatically generated by hipify!!!
//#include <boost>
#include "types.h"
//#include <chrono>
#include <fstream>
#include <iostream>
//#include <cmath>
#include "functionCPU.h"
#include "functionGPU.cuh"
#include "helper_kernels.cuh"
#include "CustomTimer.h"
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
//using namespace NIP;
//#define FREEGLUT_STATIC
//#define _LIB
//#define FREEGLUT_LIB_PRAGMAS 0
#include <helper_gl.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <cuda_gl_interop.h>
//#include <hip/hip_vector_types.h>
//typedef float Real;
dim3 windowSize(512, 512);
dim3 windowBlockSize(32, 32, 1);
dim3 windowGridSize(windowSize.x / windowBlockSize.x, windowSize.y / windowBlockSize.y);
Real* d_Field = NULL;
Real* d_IR = NULL;
Real* d_F = NULL;
Real* d_FieldAlt = NULL;
int hz = 44100;              // sample rate, Hz
Real timestep = 1.0 / hz;    // time step, s (1/44100)
Real v = 331;                // speed of sound, m/s
Real d_x = v * timestep * 2; // spatial step, m (v*timestep ~ 0.0075 m = 7.5 mm, doubled)
Real X_size = 10;//25;       // room width, m
Real Y_size = 10;//25;       // room height, m
int N = int(Y_size / d_x);   // number of grid rows
int M = int(X_size / d_x);   // number of grid columns
// int N = 1000;
// int M = 1000;
int x_ir = -1;// int(M * 0.85);  // receiver (impulse response) location; -1 = use default
int y_ir = -1;// int(N * 0.52);
int x_s = -1;//int(M * 0.5);     // source location; -1 = use default
int y_s = -1;//int(N * 0.5);
int T = 100;
Real b = 0.5;
char filename[256] = "";
char F_filename[256] = "";
//Array3D Field(4, N, M);
//Array1D IR(2*T);
//Array1D F(2*T);
Array3D Field;
Array4D FieldAlt;
Array1D IR;
Array1D F;
int time_ind = 0;
const int NFrames = 3;
CustomTimer timer = CustomTimer();
int CalculationMode = 4;
int intensitySetting = 0;
Array3D WriteResults;
int JustRunSim = 0;
int RunSimMode = -1;
int writeresults = 0;
int Threads = 5;
const char* Methods[] = { "WaveIteration\t\t\t","WaveIterationOMP\t\t","WaveIterationOMPMultipleFrames\t","WaveIterationKernel\t\t", "MultipleIterationsKernel\t",
"WaveIterationKernelSM\t\t","WaveIterationAltOMP\t\t","WaveIterationKernelAlt\t\t" };
void saveIR();
void read_parameters(std::istream& in) {
std::string name;
if (in)
while (std::getline(in, name, '=')) {
if (name.size() > 0 && name[0] == '#') in.ignore(1024 * 1024, '\n');
else if (name == "hz") in >> hz;
else if (name == "v") in >> v;
else if (name == "X_size") in >> X_size;
else if (name == "Y_size") in >> Y_size;
else if (name == "x_ir") in >> x_ir;
else if (name == "y_ir") in >> y_ir;
else if (name == "x_s") in >> x_s;
else if (name == "y_s") in >> y_s;
else if (name == "T") in >> T;
else if (name == "b") in >> b;
else if (name == "room") in >> filename;
else if (name == "JustRunSim") in >> JustRunSim;
else if (name == "RunSimMode") in >> RunSimMode;
else if (name == "F") in >> F_filename;
else if (name == "WriteSimResults") in >> writeresults;
else if (name == "Threads") in >> Threads;
else {
in.ignore(1024 * 1024, '\n');
std::stringstream str;
str << "Unknown parameter: " << name << '.';
throw std::runtime_error(str.str().c_str());
}
in >> std::ws;
}
timestep = 1.0 / hz;
d_x = v * timestep * 2;
N = int(Y_size / d_x);
M = int(X_size / d_x);
if (x_ir < 0 || x_ir >= M) x_ir = int(M * 0.85);
if (y_ir < 0 || y_ir >= N) y_ir = int(N * 0.52);
if (x_s < 0 || x_s >= M) x_s = int(M * 0.5);
if (y_s < 0 || y_s >= N) y_s = int(N * 0.5);
//std::cout << filename << std::endl;
}
void saveIR() {
if (d_IR != NULL && CalculationMode>=3) hipMemcpy(IR.data(), d_IR, T * sizeof(Real), hipMemcpyDeviceToHost);
std::ofstream out("out.data");
out << IR(blitz::Range(0,T-1));
out.close();
}
bool loadF(char* name="in.data") {
std::ifstream in(name);
	if (!in) return false; // bail out if the file could not be opened
in >> F;
in.close();
if (d_F == NULL) return true;
hipMemcpy(d_F, F.data(), T * sizeof(Real), hipMemcpyHostToDevice);
return true;
}
#define PI 3.14159265359
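// Default source term: a Gaussian pulse whose width is set by the dominant
// frequency f0 = hz / 10 and which is centered at t = 4 / (PI * f0); a Ricker
// wavelet (second derivative of a Gaussian) is left commented out, which is
// what t0 and dt2dx2 are kept around for.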
void loadDefaultF() {
// Source
float f0 = hz / 10; /* source dominant frequency, Hz */
float t0 = 1.2 / f0; /* source padding to move wavelet from left of zero */
// Fill source waveform vector
float a = PI * PI * f0 * f0; /* const for wavelet */
float dt2dx2 = (timestep * timestep) / (d_x * d_x); /* const for fd stencil */
for (int it = 0; it < T; it++)
{
// Ricker wavelet (Mexican hat), second derivative of Gaussian
//F(it) = 1e10 * (1.0 - 2.0 * a * pow(it * timestep - t0, 2)) * exp(-a * pow(it * timestep - t0, 2)) * dt2dx2;
F(it) = 1e2 * exp(-a * .25 * pow(it * timestep - 4 / (PI * f0), 2));
}
if (d_F == NULL) return;
hipMemcpy(d_F, F.data(), T * sizeof(Real), hipMemcpyHostToDevice);
return;
}
void saveField() {
std::ofstream out("outField.data");
out << WriteResults;
out.close();
}
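// benchmark(): sweeps the grid size from begN to endN and, for each size, times
// every implementation listed in Methods[] (CPU, OpenMP, and the CUDA/HIP kernel
// variants) over T time steps, reporting mean/sigma/min/max per iteration.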
void benchmark(int begN=1000,int endN=10000, int stepN=1000) {
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
N = endN;
M = endN;
x_ir = int(M * 0.85);
y_ir = int(N * 0.52);
x_s = int(M * 0.5);
	y_s = int(N * 0.5);
T = 200;
b = 0.5;
const unsigned int iter = 10;
CustomTimer timer0 = CustomTimer();
IR.resize(2 * T);
F.resize(2 * T);
IR = 0.0;
F = 0.0;
F(0) = 100.0;
loadDefaultF();
hipMalloc((void**)&d_FieldAlt, 3 * 3 * M * N * sizeof(Real));
hipMalloc((void**)&d_Field, 4 * M * N * sizeof(Real));
hipMalloc((void**)&d_IR, T * sizeof(Real));
hipMalloc((void**)&d_F, T * sizeof(Real));
hipMemcpy(d_IR, IR.data(), T * sizeof(Real), hipMemcpyHostToDevice);
hipMemcpy(d_F, F.data(), T * sizeof(Real), hipMemcpyHostToDevice);
for (int sizes = begN; sizes <= endN; sizes += stepN) {
N = sizes; M = sizes;
x_ir = int(M * 0.85); y_ir = int(N * 0.52);
x_s = int(M * 0.5); y_s = int(N * 0.5);
Field.resize(4, N, M); /*Field = 0; */Field(3, blitz::Range::all(), blitz::Range::all()) = 1.0;
FieldAlt.resize(3, N, M, 3);
FieldAlt(2, blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 1.0;
windowSize.x = M;
windowSize.y = N;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x == 0) ? 0 : 1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
for (CalculationMode=0; CalculationMode <= 7 ;++CalculationMode)
{
Field(blitz::Range(0,2), blitz::Range::all(), blitz::Range::all()) = 0.0;
hipMemcpy(d_Field, Field.data(), 4 * M * N * sizeof(Real), hipMemcpyHostToDevice);
//Field = 0; Field(3, blitz::Range::all(), blitz::Range::all()) = 1.0;
if (CalculationMode == 6 || CalculationMode == 7) {
FieldAlt(blitz::Range(0, 1), blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 0.0;
hipMemcpy(d_FieldAlt, FieldAlt.data(), 3 * 2 * M * N * sizeof(Real), hipMemcpyHostToDevice);
}
for (time_ind = 0; time_ind < T;) {
switch (CalculationMode) {
case 0:
timer0.tic();
WaveIteration(Field, IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F, b);
timer0.toc(); break;
case 1:
timer0.tic();
WaveIterationOMP(Field, IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F, b,Threads);
timer0.toc(); break;
case 2:
timer0.tic();
WaveIterationOMPMultipleFrames(Field, IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F, b, iter, Threads);
timer0.toc(iter);
time_ind += iter - 1;
break;
case 3:
timer0.tic();
WaveIterationKernel << < windowGridSize, windowBlockSize >> > (d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b);
hipDeviceSynchronize();
timer0.toc(); break;
case 4:
timer0.tic();
MultipleIterationsKernel << < 1, 1 >> > (d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b, iter);
hipDeviceSynchronize();
timer0.toc(iter);
time_ind += iter - 1; break;
case 5:
timer0.tic();
WaveIterationKernelSM << < windowGridSize, windowBlockSize, ((windowBlockSize.x + 2) * (windowBlockSize.y + 2) * 3) * sizeof(Real) >> > (d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b);
hipDeviceSynchronize();
timer0.toc(); break;
case 6:
timer0.tic();
WaveIterationAltOMP(FieldAlt.data(), IR.data(), N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F.data(), b, 64, Threads);
timer0.toc();
//hipMemcpy(d_FieldAlt, FieldAlt.data(), 6 * M * N * sizeof(Real), hipMemcpyHostToDevice);
//checkCudaErrors(hipGetLastError());
break;
case 7:
timer0.tic();
WaveIterationKernelAlt << < 1, 1 >> > (d_FieldAlt, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, 32 ,b);
hipDeviceSynchronize();
timer0.toc();
//checkCudaErrors(hipGetLastError());
break;
default:
break;
}
++time_ind;
}
if (CalculationMode >=3) checkCudaErrors(hipGetLastError());
TimerInfo timerinfo;
if (CalculationMode == 4 || CalculationMode == 2)
timerinfo = timer0.getInfo(min(timer0.N, T) / iter);
else
timerinfo = timer0.getInfo();
//std::cout <<"size = "<< sizes << ", calculation mode = " << CalculationMode << ", mean = " << timerinfo.mean << " microseconds, Sigma = " << sqrt(timerinfo.dispersion)
// <<" min,max= " << timerinfo.min << " " << timerinfo.max << std::endl;
std::cout << "size = " << sizes << " \tMethod: " << Methods[CalculationMode] << " mean = " << timerinfo.mean << " microseconds, Sigma = " << sqrt(timerinfo.dispersion)
<< " min,max= " << timerinfo.min << " " << timerinfo.max << std::endl;
}
}
hipFree(d_Field);
hipFree(d_FieldAlt);
hipFree(d_IR);
hipFree(d_F);
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::rep diff = std::chrono::duration_cast<std::chrono::seconds>(end - start).count();
std::cout << "benchmark ended in " << diff << " seconds" << std::endl;
}
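// Benchmark sweep for WaveIterationOMP: for each grid size begN..endN runs T = 200 steps
// with 1..16 OpenMP threads and prints per-iteration timing statistics.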
void benchmarkOMP(int begN = 1000, int endN = 10000, int stepN = 1000) {
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
N = endN;
M = endN;
x_ir = int(M * 0.85);
y_ir = int(N * 0.52);
x_s = int(M * 0.5);
y_s = -int(N * 0.5);
T = 200;
b = 0.5;
CustomTimer timer0 = CustomTimer();
IR.resize(2 * T);
F.resize(2 * T);
IR = 0.0;
F = 0.0;
F(0) = 100.0;
loadDefaultF();
for (int sizes = begN; sizes <= endN; sizes += stepN) {
N = sizes; M = sizes;
x_ir = int(M * 0.85); y_ir = int(N * 0.52);
x_s = int(M * 0.5); y_s = int(N * 0.5);
Field.resize(4, N, M); /*Field = 0; */Field(3, blitz::Range::all(), blitz::Range::all()) = 1.0;
windowSize.x = M;
windowSize.y = N;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x == 0) ? 0 : 1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
for (int threads = 1; threads <= 16; ++threads)
{
Field(blitz::Range(0, 2), blitz::Range::all(), blitz::Range::all()) = 0.0;
for (time_ind = 0; time_ind < T;) {
timer0.tic();
WaveIterationOMP(Field, IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F, b, threads);
timer0.toc();
++time_ind;
//std::cout << time_ind << " ";
}
TimerInfo timerinfo;
timerinfo = timer0.getInfo();
std::cout << "size = " << sizes << " NumThreads = " << threads << " mean = " << timerinfo.mean << " microseconds, Sigma = " << sqrt(timerinfo.dispersion)
<<" min,max= " << timerinfo.min << " " << timerinfo.max << std::endl;
}
}
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::rep diff = std::chrono::duration_cast<std::chrono::seconds>(end - start).count();
std::cout << "benchmark ended in " << diff << " seconds" << std::endl;
}
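// Same sweep as benchmarkOMP, but for WaveIterationAltOMP, which works on the
// alternate FieldAlt (3, N, M, 3) data layout.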
void benchmarkOMPAlt(int begN = 1000, int endN = 10000, int stepN = 1000) {
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
N = endN;
M = endN;
x_ir = int(M * 0.85);
y_ir = int(N * 0.52);
x_s = int(M * 0.5);
y_s = -int(N * 0.5);
T = 200;
b = 0.5;
CustomTimer timer0 = CustomTimer();
IR.resize(2 * T);
F.resize(2 * T);
IR = 0.0;
F = 0.0;
F(0) = 100.0;
loadDefaultF();
for (int sizes = begN; sizes <= endN; sizes += stepN) {
N = sizes; M = sizes;
x_ir = int(M * 0.85); y_ir = int(N * 0.52);
x_s = int(M * 0.5); y_s = int(N * 0.5);
FieldAlt.resize(3, N, M, 3);
FieldAlt(2, blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 1.0;
windowSize.x = M;
windowSize.y = N;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x == 0) ? 0 : 1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
for (int threads = 1; threads <= 16; ++threads)
{
FieldAlt(blitz::Range(0, 1), blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 0.0;
for (time_ind = 0; time_ind < T;) {
timer0.tic();
WaveIterationAltOMP(FieldAlt.data(), IR.data(), N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F.data(), b, 64, threads);
timer0.toc();
++time_ind;
//std::cout << time_ind << " ";
}
TimerInfo timerinfo;
timerinfo = timer0.getInfo();
std::cout << "size = " << sizes << " NumThreads = " << threads << " mean = " << timerinfo.mean << " microseconds, Sigma = " << sqrt(timerinfo.dispersion)
<< " min,max= " << timerinfo.min << " " << timerinfo.max << std::endl;
}
}
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::rep diff = std::chrono::duration_cast<std::chrono::seconds>(end - start).count();
std::cout << "benchmark ended in " << diff << " seconds" << std::endl;
}
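// Benchmark sweep for WaveIterationKernel: for each grid size begN..endN runs T = 500 steps
// with square block sizes 1x1 .. 32x32 and prints per-iteration timing statistics.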
void benchmarkKernel(int begN = 1000, int endN = 10000, int stepN = 1000) {
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
N = endN;
M = endN;
x_ir = int(M * 0.85);
y_ir = int(N * 0.52);
x_s = int(M * 0.5);
y_s = -int(N * 0.5);
T = 500;
b = 0.5;
const unsigned int iter = 10;
CustomTimer timer0 = CustomTimer(400);
IR.resize(2 * T);
F.resize(2 * T);
IR = 0.0;
F = 0.0;
F(0) = 100.0;
loadDefaultF();
hipMalloc((void**)&d_Field, 4 * M * N * sizeof(Real));
hipMalloc((void**)&d_IR, T * sizeof(Real));
hipMalloc((void**)&d_F, T * sizeof(Real));
hipMemcpy(d_IR, IR.data(), T * sizeof(Real), hipMemcpyHostToDevice);
hipMemcpy(d_F, F.data(), T * sizeof(Real), hipMemcpyHostToDevice);
for (int sizes = begN; sizes <= endN; sizes += stepN) {
N = sizes; M = sizes;
x_ir = int(M * 0.85); y_ir = int(N * 0.52);
x_s = int(M * 0.5); y_s = int(N * 0.5);
Field.resize(4, N, M); /*Field = 0; */Field(3, blitz::Range::all(), blitz::Range::all()) = 1.0;
windowSize.x = M;
windowSize.y = N;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x == 0) ? 0 : 1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
for (int ksize = 1; ksize <= 32; ++ksize)
{
windowBlockSize.x = ksize;
windowBlockSize.y = ksize;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x == 0) ? 0 : 1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
Field(blitz::Range(0, 2), blitz::Range::all(), blitz::Range::all()) = 0.0;
//Field = 0; Field(3, blitz::Range::all(), blitz::Range::all()) = 1.0;
hipMemcpy(d_Field, Field.data(), 4 * M * N * sizeof(Real), hipMemcpyHostToDevice);
for (time_ind = 0; time_ind < T;) {
timer0.tic();
WaveIterationKernel << < windowGridSize, windowBlockSize >> > (d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b);
hipDeviceSynchronize();
timer0.toc();
++time_ind;
}
checkCudaErrors(hipGetLastError());
TimerInfo timerinfo;
timerinfo = timer0.getInfo();
std::cout << "size = " << sizes << " blocksize = " << ksize << " mean = " << timerinfo.mean << " microseconds, Sigma = " << sqrt(timerinfo.dispersion)
<< " min,max= " << timerinfo.min << " " << timerinfo.max << std::endl;
}
}
hipFree(d_Field);
hipFree(d_IR);
hipFree(d_F);
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::rep diff = std::chrono::duration_cast<std::chrono::seconds>(end - start).count();
std::cout << "benchmark ended in " << diff << " seconds" << std::endl;
}
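// Same block-size sweep for WaveIterationKernelAlt; note the kernel is launched as <<<1,1>>>
// and the tile size ksize is passed as a kernel argument instead of a launch parameter.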
void benchmarkKernelAlt(int begN = 1000, int endN = 10000, int stepN = 1000) {
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
N = endN;
M = endN;
x_ir = int(M * 0.85);
y_ir = int(N * 0.52);
x_s = int(M * 0.5);
y_s = -int(N * 0.5);
T = 500;
b = 0.5;
const unsigned int iter = 10;
CustomTimer timer0 = CustomTimer(400);
IR.resize(2 * T);
F.resize(2 * T);
IR = 0.0;
F = 0.0;
F(0) = 100.0;
loadDefaultF();
hipMalloc((void**)&d_FieldAlt, 3 * 3 * M * N * sizeof(Real));
hipMalloc((void**)&d_IR, T * sizeof(Real));
hipMalloc((void**)&d_F, T * sizeof(Real));
hipMemcpy(d_IR, IR.data(), T * sizeof(Real), hipMemcpyHostToDevice);
hipMemcpy(d_F, F.data(), T * sizeof(Real), hipMemcpyHostToDevice);
for (int sizes = begN; sizes <= endN; sizes += stepN) {
N = sizes; M = sizes;
x_ir = int(M * 0.85); y_ir = int(N * 0.52);
x_s = int(M * 0.5); y_s = int(N * 0.5);
FieldAlt.resize(3, N, M, 3);
FieldAlt(2, blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 1.0;
windowSize.x = M;
windowSize.y = N;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x == 0) ? 0 : 1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
for (int ksize = 1; ksize <= 32; ++ksize)
{
windowBlockSize.x = ksize;
windowBlockSize.y = ksize;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x == 0) ? 0 : 1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
FieldAlt(blitz::Range(0, 1), blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 0.0;
hipMemcpy(d_FieldAlt, FieldAlt.data(), 3 * 2 * M * N * sizeof(Real), hipMemcpyHostToDevice);
for (time_ind = 0; time_ind < T;) {
timer0.tic();
WaveIterationKernelAlt << < 1, 1 >> > (d_FieldAlt, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, ksize, b);
hipDeviceSynchronize();
timer0.toc();
++time_ind;
}
checkCudaErrors(hipGetLastError());
TimerInfo timerinfo;
timerinfo = timer0.getInfo();
std::cout << "size = " << sizes << " blocksize = " << ksize << " mean = " << timerinfo.mean << " microseconds, Sigma = " << sqrt(timerinfo.dispersion)
<< " min,max= " << timerinfo.min << " " << timerinfo.max << std::endl;
}
}
hipFree(d_FieldAlt);
hipFree(d_IR);
hipFree(d_F);
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::rep diff = std::chrono::duration_cast<std::chrono::seconds>(end - start).count();
std::cout << "benchmark ended in " << diff << " seconds" << std::endl;
}
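// Runs the simulation with the selected method for T steps, optionally recording every frame into
// WriteResults, copies the impulse response back from the device for modes >= 2, prints timing
// statistics, then writes the IR (out.data) and, if requested, the recorded field (outField.data)
// before freeing the device buffers.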
void RunAndSaveResults(int mode = 0, int writeresults=0) {
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
CustomTimer timer0 = CustomTimer();
if (writeresults) {
try { WriteResults.resize(T, N, M); }
catch (...) { std::cout << "memory error, array too big"; return; }
}
//if (mode == 1) CalculationMode = 1;
//if (mode == 1) CalculationMode = 2;
//if (mode == 2) CalculationMode = 3;
//if (mode == 3) CalculationMode = 4;
//if (mode == 4) CalculationMode = 5;
//if (mode == 5) CalculationMode = 6;
int iter = T;
for (time_ind = 0; time_ind < T;) {
switch (mode) {
case 0:
timer0.tic();
WaveIteration(Field, IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F, b);
timer0.toc();
if (writeresults) WriteResults(time_ind % T, blitz::Range::all(), blitz::Range::all()) = Field((time_ind + 2) % 3, blitz::Range::all(), blitz::Range::all());
break;
case 1:
timer0.tic();
WaveIterationOMP(Field, IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F, b, Threads);
timer0.toc();
if (writeresults) WriteResults(time_ind % T, blitz::Range::all(), blitz::Range::all()) = Field((time_ind + 2) % 3, blitz::Range::all(), blitz::Range::all());
break;
case 2:
timer0.tic();
WaveIterationOMPMultipleFrames(Field, IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F, b, iter, Threads);
timer0.toc(iter);
time_ind += iter - 1;
break;
case 3:
timer0.tic();
hipLaunchKernelGGL(( WaveIterationKernel) , dim3(windowGridSize), dim3(windowBlockSize) , 0, 0, d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b);
hipDeviceSynchronize();
timer0.toc();
if (writeresults) hipMemcpy(WriteResults.data() + M * N * (time_ind % T), d_Field + ((time_ind + 2) % 3) * M * N, M * N * sizeof(Real), hipMemcpyDeviceToHost);
break;
case 4:
timer0.tic();
hipLaunchKernelGGL(( MultipleIterationsKernel) , dim3(1), dim3(1) , 0, 0, d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b, iter);
hipDeviceSynchronize();
time_ind += iter - 1;
timer0.toc(iter);
checkCudaErrors(hipGetLastError());
break;
case 5:
timer0.tic();
WaveIterationKernelSM << < windowGridSize, windowBlockSize, ((windowBlockSize.x + 2) * (windowBlockSize.y + 2) * 3) * sizeof(Real) >> > (d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b);
//WaveIterationKernelSM << < windowGridSize, windowBlockSize >> > (d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b);
hipDeviceSynchronize();
timer0.toc();
if (writeresults) hipMemcpy(WriteResults.data() + M * N * (time_ind % T), d_Field + ((time_ind + 2) % 3) * M * N, M * N * sizeof(Real), hipMemcpyDeviceToHost);
break;
case 6:
timer0.tic();
WaveIterationAltOMP(FieldAlt.data(), IR.data(), N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F.data(), b, 64, Threads);
timer0.toc();
//hipMemcpy(d_FieldAlt, FieldAlt.data(), 6 * M * N * sizeof(Real), hipMemcpyHostToDevice);
//if (writeresults) hipMemcpy(WriteResults.data() + M * N * (time_ind % T), d_Field + ((time_ind + 2) % 3) * M * N, M * N * sizeof(Real), hipMemcpyDeviceToHost);
break;
case 7:
timer0.tic();
WaveIterationKernelAlt << < 1, 1 >> > (d_FieldAlt, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, 32, b);
hipDeviceSynchronize();
timer0.toc();
//checkCudaErrors(hipGetLastError());
break;
default:
break;
}
++time_ind;
}
if (mode >= 2) {
checkCudaErrors(hipGetLastError());
hipMemcpy(IR.data(), d_IR, T * sizeof(Real), hipMemcpyDeviceToHost);
checkCudaErrors(hipGetLastError());
}
TimerInfo timerinfo;
//timerinfo = timer0.getInfo();
if (mode == 4 || mode == 2) timerinfo = timer0.getInfo(1);
else timerinfo = timer0.getInfo();
std::cout <<"runSim method = " << Methods[mode] << ", mean = " << timerinfo.mean << " microseconds, Sigma = " << sqrt(timerinfo.dispersion)
<<" min,max= " << timerinfo.min << " " << timerinfo.max << std::endl;
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::rep diff = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
std::cout << "running ended in " << diff << " milliseconds" << std::endl;
start = std::chrono::high_resolution_clock::now();
saveIR();
if (writeresults) {
saveField();
end = std::chrono::high_resolution_clock::now();
diff = std::chrono::duration_cast<std::chrono::seconds>(end - start).count();
std::cout << "writing ended in " << diff << " seconds" << std::endl;
}
hipFree(d_Field);
hipFree(d_IR);
hipFree(d_F);
hipFree(d_FieldAlt);
}
int main(int argc, char** argv) {
using namespace NIP;
int myNr = 0;
if (argc > 1)
myNr = std::stoi(argv[1]);
CalculationMode = myNr;
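	// Negative modes select a benchmark sweep instead of a simulation run.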
if (CalculationMode == -1) { benchmark(500,5000, 500); return 0; }
if (CalculationMode == -2) { benchmarkOMP(500, 5000, 500); return 0; }
if (CalculationMode == -3) { benchmarkOMPAlt(500, 5000, 500); return 0; }
if (CalculationMode == -4) { benchmarkKernel(500, 5000, 500); return 0; }
if (CalculationMode == -5) { benchmarkKernelAlt(500, 5000, 500); return 0; }
const char* input_filename = (argc > 2) ? argv[2] : "config.model";
std::ifstream cfg(input_filename);
read_parameters(cfg);
cfg.close();
//if (CalculationMode == 5 || JustRunSim && RunSimMode == 4) NFrames = 3; else NFrames = 3;
if (filename[0] != '\0') {
std::ifstream room(filename,std::ios_base::binary);
int x, y, n;
unsigned char *data = stbi_load(filename, &x, &y, &n, 1);
if (data != nullptr) {
N = y;
M = x;
X_size = M * d_x;
Y_size = N * d_x;
if (x_ir < 0 || x_ir >= M) x_ir = int(M * 0.85);
if (y_ir < 0 || y_ir >= N) y_ir = int(N * 0.52);
if (x_s < 0 || x_s >= M) x_s = int(M * 0.5);
if (y_s < 0 || y_s >= N) y_s = int(N * 0.5);
Field.resize(NFrames+1, N, M);
Field = 0;
blitz::Array<unsigned char, 2> tmp(data, blitz::shape(N, M), blitz::duplicateData);
FieldAlt.resize(3, N, M, 3);
FieldAlt = 0.0;
for (int y0 = 0; y0 < N; ++y0) {
for (int x0 = 0; x0 < M; ++x0) {
Field(NFrames, y0, x0) = ((Real)tmp(y0, x0)) / 255.0;
*(FieldAlt.data() + 3*2*M*N +x0+M*y0) = ((Real)tmp(y0, x0)) / 255.0;
}
}
//tmp.free();
//std::cout << Field(3,0,0) << " " <<Field(3, 100, 100);
stbi_image_free(data);
}
else { Field.resize(NFrames+1, N, M); Field = 0; Field(NFrames, blitz::Range::all(), blitz::Range::all()) = 1.0;
FieldAlt.resize(3,N,M,3);
FieldAlt = 0.0;
FieldAlt(2, blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 1.0;}
room.close();
}
else { Field.resize(NFrames+1, N, M); Field = 0; Field(NFrames, blitz::Range::all(), blitz::Range::all()) = 1.0;
FieldAlt.resize(3, N, M, 3);
FieldAlt = 0.0;
FieldAlt(2, blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 1.0;
}
IR.resize(2 * T);
F.resize(2 * T);
//Field = 0;
//Field(3, blitz::Range::all(), blitz::Range::all()) = 1.0;
IR = 0;
if (F_filename[0] != '\0') {
bool b = loadF(F_filename);
if (b == false) {
F = 0.0;
F(0) = 100.0;
loadDefaultF();
}
}
else {
F = 0.0;
F(0) = 100.0;
loadDefaultF();
}
//FieldAlt.resize(3,N,M,3);
//FieldAlt = 0.0;
//FieldAlt(2, blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 1.0;
hipMalloc((void**)&d_FieldAlt, 3 * 3 * M * N * sizeof(Real));
hipMemcpy(d_FieldAlt, FieldAlt.data(), 3 * 3 * M * N * sizeof(Real), hipMemcpyHostToDevice);
hipMalloc((void**)&d_Field, (NFrames + 1) * M * N * sizeof(Real));
hipMemcpy(d_Field, Field.data(), (NFrames + 1) * M * N * sizeof(Real), hipMemcpyHostToDevice);
hipMalloc((void**)&d_IR, T * sizeof(Real));
hipMemcpy(d_IR, IR.data(), T * sizeof(Real), hipMemcpyHostToDevice);
hipMalloc((void**)&d_F, T * sizeof(Real));
hipMemcpy(d_F, F.data(), T * sizeof(Real), hipMemcpyHostToDevice);
windowSize.x = M;
windowSize.y = N;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x==0)? 0:1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
//windowGridSize = dim3(windowSize.x / windowBlockSize.x, windowSize.y / windowBlockSize.y);
if (JustRunSim) {
RunSimMode = (RunSimMode >= 0)? RunSimMode : CalculationMode;
RunAndSaveResults(RunSimMode,writeresults);
return 0;
}
std::cout << "Method: " << Methods[CalculationMode] << std::endl;
hipFree(d_Field);
hipFree(d_IR);
hipFree(d_F);
return 0;
}
|
7ac36c7653445872d58270a861f4d90b13bea784.cu
|
//#include <boost>
#include "types.h"
//#include <chrono>
#include <fstream>
#include <iostream>
//#include <cmath>
#include "functionCPU.h"
#include "functionGPU.cuh"
#include "helper_kernels.cuh"
#include "CustomTimer.h"
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//using namespace NIP;
//#define FREEGLUT_STATIC
//#define _LIB
//#define FREEGLUT_LIB_PRAGMAS 0 /////?????????????
#include <helper_gl.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <cuda_gl_interop.h>
//#include <vector_types.h>
//typedef float Real;
dim3 windowSize(512, 512);
dim3 windowBlockSize(32, 32, 1);
dim3 windowGridSize(windowSize.x / windowBlockSize.x, windowSize.y / windowBlockSize.y);
Real* d_Field = NULL;
Real* d_IR = NULL;
Real* d_F = NULL;
Real* d_FieldAlt = NULL;
int hz = 44100;// 44100 Hz, 16-bit audio
Real timestep = 1.0 / hz;// simulation time step for 44100 Hz
Real v = 331;// speed of sound, m/s
Real d_x = v * timestep * 2;// spatial grid step, 0.0075 m = 7.5 mm, but for the simulation it has to be taken larger (x2)
Real X_size = 10;//25;// room dimensions
Real Y_size = 10;//25;
int N = int(Y_size / d_x);// dimensions in pixels
int M = int(X_size / d_x);
// int N = 1000;
// int M = 1000;
int x_ir = -1;// int(M * 0.85);// listener position
int y_ir = -1;// int(N * 0.52);
int x_s = -1;//int(M * 0.5);// source position
int y_s = -1;//int(N * 0.5);
int T = 100;
Real b = 0.5;
char filename[256] = "";
char F_filename[256] = "";
//Array3D Field(4, N, M);
//Array1D IR(2*T);
//Array1D F(2*T);
Array3D Field;
Array4D FieldAlt;
Array1D IR;
Array1D F;
int time_ind = 0;
const int NFrames = 3;
CustomTimer timer = CustomTimer();
int CalculationMode = 4;
int intensitySetting = 0;
Array3D WriteResults;
int JustRunSim = 0;
int RunSimMode = -1;
int writeresults = 0;
int Threads = 5;
const char* Methods[] = { "WaveIteration\t\t\t","WaveIterationOMP\t\t","WaveIterationOMPMultipleFrames\t","WaveIterationKernel\t\t", "MultipleIterationsKernel\t",
"WaveIterationKernelSM\t\t","WaveIterationAltOMP\t\t","WaveIterationKernelAlt\t\t" };
void saveIR();
void read_parameters(std::istream& in) {
std::string name;
	if (in)
while (std::getline(in, name, '=')) {
if (name.size() > 0 && name[0] == '#') in.ignore(1024 * 1024, '\n');
else if (name == "hz") in >> hz;
else if (name == "v") in >> v;
else if (name == "X_size") in >> X_size;
else if (name == "Y_size") in >> Y_size;
else if (name == "x_ir") in >> x_ir;
else if (name == "y_ir") in >> y_ir;
else if (name == "x_s") in >> x_s;
else if (name == "y_s") in >> y_s;
else if (name == "T") in >> T;
else if (name == "b") in >> b;
else if (name == "room") in >> filename;
else if (name == "JustRunSim") in >> JustRunSim;
else if (name == "RunSimMode") in >> RunSimMode;
else if (name == "F") in >> F_filename;
else if (name == "WriteSimResults") in >> writeresults;
else if (name == "Threads") in >> Threads;
else {
in.ignore(1024 * 1024, '\n');
std::stringstream str;
str << "Unknown parameter: " << name << '.';
throw std::runtime_error(str.str().c_str());
}
in >> std::ws;
}
timestep = 1.0 / hz;
d_x = v * timestep * 2;
N = int(Y_size / d_x);
M = int(X_size / d_x);
if (x_ir < 0 || x_ir >= M) x_ir = int(M * 0.85);
if (y_ir < 0 || y_ir >= N) y_ir = int(N * 0.52);
if (x_s < 0 || x_s >= M) x_s = int(M * 0.5);
if (y_s < 0 || y_s >= N) y_s = int(N * 0.5);
//std::cout << filename << std::endl;
}
void saveIR() {
if (d_IR != NULL && CalculationMode>=3) cudaMemcpy(IR.data(), d_IR, T * sizeof(Real), cudaMemcpyDeviceToHost);
std::ofstream out("out.data");
out << IR(blitz::Range(0,T-1));
out.close();
}
bool loadF(char* name="in.data") {
std::ifstream in(name);
	if (!in) return false; // file could not be opened
in >> F;
in.close();
if (d_F == NULL) return true;
cudaMemcpy(d_F, F.data(), T * sizeof(Real), cudaMemcpyHostToDevice);
return true;
}
#define PI 3.14159265359
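// Fills the source waveform F with a Gaussian pulse derived from the dominant frequency
// f0 = hz/10 (the Ricker-wavelet variant is left commented out) and, if the device buffer
// d_F is already allocated, uploads it to the GPU.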
void loadDefaultF() {
// Source
float f0 = hz / 10; /* source dominant frequency, Hz */
float t0 = 1.2 / f0; /* source padding to move wavelet from left of zero */
	// Fill source waveform vector
float a = PI * PI * f0 * f0; /* const for wavelet */
float dt2dx2 = (timestep * timestep) / (d_x * d_x); /* const for fd stencil */
for (int it = 0; it < T; it++)
{
// Ricker wavelet (Mexican hat), second derivative of Gaussian
//F(it) = 1e10 * (1.0 - 2.0 * a * pow(it * timestep - t0, 2)) * exp(-a * pow(it * timestep - t0, 2)) * dt2dx2;
F(it) = 1e2 * exp(-a * .25 * pow(it * timestep - 4 / (PI * f0), 2));
}
if (d_F == NULL) return;
cudaMemcpy(d_F, F.data(), T * sizeof(Real), cudaMemcpyHostToDevice);
return;
}
void saveField() {
std::ofstream out("outField.data");
out << WriteResults;
out.close();
}
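// Benchmark sweep: for grid sizes begN..endN (step stepN) runs every calculation mode 0..7
// (plain CPU, OpenMP, multi-frame OpenMP, GPU kernel, multi-iteration kernel, shared-memory
// kernel and the Alt-layout variants) for T = 200 steps each and prints mean/sigma/min/max
// per-iteration timings.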
void benchmark(int begN=1000,int endN=10000, int stepN=1000) {
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
N = endN;
M = endN;
x_ir = int(M * 0.85);
y_ir = int(N * 0.52);
x_s = int(M * 0.5);
y_s = -int(N * 0.5);
T = 200;
b = 0.5;
const unsigned int iter = 10;
CustomTimer timer0 = CustomTimer();
IR.resize(2 * T);
F.resize(2 * T);
IR = 0.0;
F = 0.0;
F(0) = 100.0;
loadDefaultF();
cudaMalloc((void**)&d_FieldAlt, 3 * 3 * M * N * sizeof(Real));
cudaMalloc((void**)&d_Field, 4 * M * N * sizeof(Real));
cudaMalloc((void**)&d_IR, T * sizeof(Real));
cudaMalloc((void**)&d_F, T * sizeof(Real));
cudaMemcpy(d_IR, IR.data(), T * sizeof(Real), cudaMemcpyHostToDevice);
cudaMemcpy(d_F, F.data(), T * sizeof(Real), cudaMemcpyHostToDevice);
for (int sizes = begN; sizes <= endN; sizes += stepN) {
N = sizes; M = sizes;
x_ir = int(M * 0.85); y_ir = int(N * 0.52);
x_s = int(M * 0.5); y_s = int(N * 0.5);
Field.resize(4, N, M); /*Field = 0; */Field(3, blitz::Range::all(), blitz::Range::all()) = 1.0;
FieldAlt.resize(3, N, M, 3);
FieldAlt(2, blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 1.0;
windowSize.x = M;
windowSize.y = N;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x == 0) ? 0 : 1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
for (CalculationMode=0; CalculationMode <= 7 ;++CalculationMode)
{
Field(blitz::Range(0,2), blitz::Range::all(), blitz::Range::all()) = 0.0;
cudaMemcpy(d_Field, Field.data(), 4 * M * N * sizeof(Real), cudaMemcpyHostToDevice);
//Field = 0; Field(3, blitz::Range::all(), blitz::Range::all()) = 1.0;
if (CalculationMode == 6 || CalculationMode == 7) {
FieldAlt(blitz::Range(0, 1), blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 0.0;
cudaMemcpy(d_FieldAlt, FieldAlt.data(), 3 * 2 * M * N * sizeof(Real), cudaMemcpyHostToDevice);
}
for (time_ind = 0; time_ind < T;) {
switch (CalculationMode) {
case 0:
timer0.tic();
WaveIteration(Field, IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F, b);
timer0.toc(); break;
case 1:
timer0.tic();
WaveIterationOMP(Field, IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F, b,Threads);
timer0.toc(); break;
case 2:
timer0.tic();
WaveIterationOMPMultipleFrames(Field, IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F, b, iter, Threads);
timer0.toc(iter);
time_ind += iter - 1;
break;
case 3:
timer0.tic();
WaveIterationKernel << < windowGridSize, windowBlockSize >> > (d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b);
cudaDeviceSynchronize();
timer0.toc(); break;
case 4:
timer0.tic();
MultipleIterationsKernel << < 1, 1 >> > (d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b, iter);
cudaDeviceSynchronize();
timer0.toc(iter);
time_ind += iter - 1; break;
case 5:
timer0.tic();
WaveIterationKernelSM << < windowGridSize, windowBlockSize, ((windowBlockSize.x + 2) * (windowBlockSize.y + 2) * 3) * sizeof(Real) >> > (d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b);
cudaDeviceSynchronize();
timer0.toc(); break;
case 6:
timer0.tic();
WaveIterationAltOMP(FieldAlt.data(), IR.data(), N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F.data(), b, 64, Threads);
timer0.toc();
//cudaMemcpy(d_FieldAlt, FieldAlt.data(), 6 * M * N * sizeof(Real), cudaMemcpyHostToDevice);
//checkCudaErrors(cudaGetLastError());
break;
case 7:
timer0.tic();
WaveIterationKernelAlt << < 1, 1 >> > (d_FieldAlt, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, 32 ,b);
cudaDeviceSynchronize();
timer0.toc();
//checkCudaErrors(cudaGetLastError());
break;
default:
break;
}
++time_ind;
}
if (CalculationMode >=3) checkCudaErrors(cudaGetLastError());
TimerInfo timerinfo;
if (CalculationMode == 4 || CalculationMode == 2)
timerinfo = timer0.getInfo(min(timer0.N, T) / iter);
else
timerinfo = timer0.getInfo();
//std::cout <<"size = "<< sizes << ", calculation mode = " << CalculationMode << ", mean = " << timerinfo.mean << " microseconds, Sigma = " << sqrt(timerinfo.dispersion)
// <<" min,max= " << timerinfo.min << " " << timerinfo.max << std::endl;
std::cout << "size = " << sizes << " \tMethod: " << Methods[CalculationMode] << " mean = " << timerinfo.mean << " microseconds, Sigma = " << sqrt(timerinfo.dispersion)
<< " min,max= " << timerinfo.min << " " << timerinfo.max << std::endl;
}
}
cudaFree(d_Field);
cudaFree(d_FieldAlt);
cudaFree(d_IR);
cudaFree(d_F);
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::rep diff = std::chrono::duration_cast<std::chrono::seconds>(end - start).count();
std::cout << "benchmark ended in " << diff << " seconds" << std::endl;
}
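// Benchmark sweep for WaveIterationOMP: for each grid size begN..endN runs T = 200 steps
// with 1..16 OpenMP threads and prints per-iteration timing statistics.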
void benchmarkOMP(int begN = 1000, int endN = 10000, int stepN = 1000) {
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
N = endN;
M = endN;
x_ir = int(M * 0.85);
y_ir = int(N * 0.52);
x_s = int(M * 0.5);
y_s = -int(N * 0.5);
T = 200;
b = 0.5;
CustomTimer timer0 = CustomTimer();
IR.resize(2 * T);
F.resize(2 * T);
IR = 0.0;
F = 0.0;
F(0) = 100.0;
loadDefaultF();
for (int sizes = begN; sizes <= endN; sizes += stepN) {
N = sizes; M = sizes;
x_ir = int(M * 0.85); y_ir = int(N * 0.52);
x_s = int(M * 0.5); y_s = int(N * 0.5);
Field.resize(4, N, M); /*Field = 0; */Field(3, blitz::Range::all(), blitz::Range::all()) = 1.0;
windowSize.x = M;
windowSize.y = N;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x == 0) ? 0 : 1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
for (int threads = 1; threads <= 16; ++threads)
{
Field(blitz::Range(0, 2), blitz::Range::all(), blitz::Range::all()) = 0.0;
for (time_ind = 0; time_ind < T;) {
timer0.tic();
WaveIterationOMP(Field, IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F, b, threads);
timer0.toc();
++time_ind;
//std::cout << time_ind << " ";
}
TimerInfo timerinfo;
timerinfo = timer0.getInfo();
std::cout << "size = " << sizes << " NumThreads = " << threads << " mean = " << timerinfo.mean << " microseconds, Sigma = " << sqrt(timerinfo.dispersion)
<<" min,max= " << timerinfo.min << " " << timerinfo.max << std::endl;
}
}
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::rep diff = std::chrono::duration_cast<std::chrono::seconds>(end - start).count();
std::cout << "benchmark ended in " << diff << " seconds" << std::endl;
}
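// Same sweep as benchmarkOMP, but for WaveIterationAltOMP, which works on the
// alternate FieldAlt (3, N, M, 3) data layout.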
void benchmarkOMPAlt(int begN = 1000, int endN = 10000, int stepN = 1000) {
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
N = endN;
M = endN;
x_ir = int(M * 0.85);
y_ir = int(N * 0.52);
x_s = int(M * 0.5);
y_s = -int(N * 0.5);
T = 200;
b = 0.5;
CustomTimer timer0 = CustomTimer();
IR.resize(2 * T);
F.resize(2 * T);
IR = 0.0;
F = 0.0;
F(0) = 100.0;
loadDefaultF();
for (int sizes = begN; sizes <= endN; sizes += stepN) {
N = sizes; M = sizes;
x_ir = int(M * 0.85); y_ir = int(N * 0.52);
x_s = int(M * 0.5); y_s = int(N * 0.5);
FieldAlt.resize(3, N, M, 3);
FieldAlt(2, blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 1.0;
windowSize.x = M;
windowSize.y = N;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x == 0) ? 0 : 1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
for (int threads = 1; threads <= 16; ++threads)
{
FieldAlt(blitz::Range(0, 1), blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 0.0;
for (time_ind = 0; time_ind < T;) {
timer0.tic();
WaveIterationAltOMP(FieldAlt.data(), IR.data(), N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F.data(), b, 64, threads);
timer0.toc();
++time_ind;
//std::cout << time_ind << " ";
}
TimerInfo timerinfo;
timerinfo = timer0.getInfo();
std::cout << "size = " << sizes << " NumThreads = " << threads << " mean = " << timerinfo.mean << " microseconds, Sigma = " << sqrt(timerinfo.dispersion)
<< " min,max= " << timerinfo.min << " " << timerinfo.max << std::endl;
}
}
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::rep diff = std::chrono::duration_cast<std::chrono::seconds>(end - start).count();
std::cout << "benchmark ended in " << diff << " seconds" << std::endl;
}
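// Benchmark sweep for WaveIterationKernel: for each grid size begN..endN runs T = 500 steps
// with square block sizes 1x1 .. 32x32 and prints per-iteration timing statistics.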
void benchmarkKernel(int begN = 1000, int endN = 10000, int stepN = 1000) {
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
N = endN;
M = endN;
x_ir = int(M * 0.85);
y_ir = int(N * 0.52);
x_s = int(M * 0.5);
y_s = -int(N * 0.5);
T = 500;
b = 0.5;
const unsigned int iter = 10;
CustomTimer timer0 = CustomTimer(400);
IR.resize(2 * T);
F.resize(2 * T);
IR = 0.0;
F = 0.0;
F(0) = 100.0;
loadDefaultF();
cudaMalloc((void**)&d_Field, 4 * M * N * sizeof(Real));
cudaMalloc((void**)&d_IR, T * sizeof(Real));
cudaMalloc((void**)&d_F, T * sizeof(Real));
cudaMemcpy(d_IR, IR.data(), T * sizeof(Real), cudaMemcpyHostToDevice);
cudaMemcpy(d_F, F.data(), T * sizeof(Real), cudaMemcpyHostToDevice);
for (int sizes = begN; sizes <= endN; sizes += stepN) {
N = sizes; M = sizes;
x_ir = int(M * 0.85); y_ir = int(N * 0.52);
x_s = int(M * 0.5); y_s = int(N * 0.5);
Field.resize(4, N, M); /*Field = 0; */Field(3, blitz::Range::all(), blitz::Range::all()) = 1.0;
windowSize.x = M;
windowSize.y = N;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x == 0) ? 0 : 1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
for (int ksize = 1; ksize <= 32; ++ksize)
{
windowBlockSize.x = ksize;
windowBlockSize.y = ksize;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x == 0) ? 0 : 1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
Field(blitz::Range(0, 2), blitz::Range::all(), blitz::Range::all()) = 0.0;
//Field = 0; Field(3, blitz::Range::all(), blitz::Range::all()) = 1.0;
cudaMemcpy(d_Field, Field.data(), 4 * M * N * sizeof(Real), cudaMemcpyHostToDevice);
for (time_ind = 0; time_ind < T;) {
timer0.tic();
WaveIterationKernel << < windowGridSize, windowBlockSize >> > (d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b);
cudaDeviceSynchronize();
timer0.toc();
++time_ind;
}
checkCudaErrors(cudaGetLastError());
TimerInfo timerinfo;
timerinfo = timer0.getInfo();
std::cout << "size = " << sizes << " blocksize = " << ksize << " mean = " << timerinfo.mean << " microseconds, Sigma = " << sqrt(timerinfo.dispersion)
<< " min,max= " << timerinfo.min << " " << timerinfo.max << std::endl;
}
}
cudaFree(d_Field);
cudaFree(d_IR);
cudaFree(d_F);
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::rep diff = std::chrono::duration_cast<std::chrono::seconds>(end - start).count();
std::cout << "benchmark ended in " << diff << " seconds" << std::endl;
}
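// Same block-size sweep for WaveIterationKernelAlt; note the kernel is launched as <<<1,1>>>
// and the tile size ksize is passed as a kernel argument instead of a launch parameter.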
void benchmarkKernelAlt(int begN = 1000, int endN = 10000, int stepN = 1000) {
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
N = endN;
M = endN;
x_ir = int(M * 0.85);
y_ir = int(N * 0.52);
x_s = int(M * 0.5);
y_s = -int(N * 0.5);
T = 500;
b = 0.5;
const unsigned int iter = 10;
CustomTimer timer0 = CustomTimer(400);
IR.resize(2 * T);
F.resize(2 * T);
IR = 0.0;
F = 0.0;
F(0) = 100.0;
loadDefaultF();
cudaMalloc((void**)&d_FieldAlt, 3 * 3 * M * N * sizeof(Real));
cudaMalloc((void**)&d_IR, T * sizeof(Real));
cudaMalloc((void**)&d_F, T * sizeof(Real));
cudaMemcpy(d_IR, IR.data(), T * sizeof(Real), cudaMemcpyHostToDevice);
cudaMemcpy(d_F, F.data(), T * sizeof(Real), cudaMemcpyHostToDevice);
for (int sizes = begN; sizes <= endN; sizes += stepN) {
N = sizes; M = sizes;
x_ir = int(M * 0.85); y_ir = int(N * 0.52);
x_s = int(M * 0.5); y_s = int(N * 0.5);
FieldAlt.resize(3, N, M, 3);
FieldAlt(2, blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 1.0;
windowSize.x = M;
windowSize.y = N;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x == 0) ? 0 : 1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
for (int ksize = 1; ksize <= 32; ++ksize)
{
windowBlockSize.x = ksize;
windowBlockSize.y = ksize;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x == 0) ? 0 : 1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
FieldAlt(blitz::Range(0, 1), blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 0.0;
cudaMemcpy(d_FieldAlt, FieldAlt.data(), 3 * 2 * M * N * sizeof(Real), cudaMemcpyHostToDevice);
for (time_ind = 0; time_ind < T;) {
timer0.tic();
WaveIterationKernelAlt << < 1, 1 >> > (d_FieldAlt, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, ksize, b);
cudaDeviceSynchronize();
timer0.toc();
++time_ind;
}
checkCudaErrors(cudaGetLastError());
TimerInfo timerinfo;
timerinfo = timer0.getInfo();
std::cout << "size = " << sizes << " blocksize = " << ksize << " mean = " << timerinfo.mean << " microseconds, Sigma = " << sqrt(timerinfo.dispersion)
<< " min,max= " << timerinfo.min << " " << timerinfo.max << std::endl;
}
}
cudaFree(d_FieldAlt);
cudaFree(d_IR);
cudaFree(d_F);
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::rep diff = std::chrono::duration_cast<std::chrono::seconds>(end - start).count();
std::cout << "benchmark ended in " << diff << " seconds" << std::endl;
}
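// Runs the simulation with the selected method for T steps, optionally recording every frame into
// WriteResults, copies the impulse response back from the device for modes >= 2, prints timing
// statistics, then writes the IR (out.data) and, if requested, the recorded field (outField.data)
// before freeing the device buffers.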
void RunAndSaveResults(int mode = 0, int writeresults=0) {
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
CustomTimer timer0 = CustomTimer();
if (writeresults) {
try { WriteResults.resize(T, N, M); }
catch (...) { std::cout << "memory error, array too big"; return; }
}
//if (mode == 1) CalculationMode = 1;
//if (mode == 1) CalculationMode = 2;
//if (mode == 2) CalculationMode = 3;
//if (mode == 3) CalculationMode = 4;
//if (mode == 4) CalculationMode = 5;
//if (mode == 5) CalculationMode = 6;
int iter = T;
for (time_ind = 0; time_ind < T;) {
switch (mode) {
case 0:
timer0.tic();
WaveIteration(Field, IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F, b);
timer0.toc();
if (writeresults) WriteResults(time_ind % T, blitz::Range::all(), blitz::Range::all()) = Field((time_ind + 2) % 3, blitz::Range::all(), blitz::Range::all());
break;
case 1:
timer0.tic();
WaveIterationOMP(Field, IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F, b, Threads);
timer0.toc();
if (writeresults) WriteResults(time_ind % T, blitz::Range::all(), blitz::Range::all()) = Field((time_ind + 2) % 3, blitz::Range::all(), blitz::Range::all());
break;
case 2:
timer0.tic();
WaveIterationOMPMultipleFrames(Field, IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F, b, iter, Threads);
timer0.toc(iter);
time_ind += iter - 1;
break;
case 3:
timer0.tic();
WaveIterationKernel <<< windowGridSize, windowBlockSize >>> (d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b);
cudaDeviceSynchronize();
timer0.toc();
if (writeresults) cudaMemcpy(WriteResults.data() + M * N * (time_ind % T), d_Field + ((time_ind + 2) % 3) * M * N, M * N * sizeof(Real), cudaMemcpyDeviceToHost);
break;
case 4:
timer0.tic();
MultipleIterationsKernel <<< 1, 1 >>> (d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b, iter);
cudaDeviceSynchronize();
time_ind += iter - 1;
timer0.toc(iter);
checkCudaErrors(cudaGetLastError());
break;
case 5:
timer0.tic();
WaveIterationKernelSM << < windowGridSize, windowBlockSize, ((windowBlockSize.x + 2) * (windowBlockSize.y + 2) * 3) * sizeof(Real) >> > (d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b);
//WaveIterationKernelSM << < windowGridSize, windowBlockSize >> > (d_Field, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, b);
cudaDeviceSynchronize();
timer0.toc();
if (writeresults) cudaMemcpy(WriteResults.data() + M * N * (time_ind % T), d_Field + ((time_ind + 2) % 3) * M * N, M * N * sizeof(Real), cudaMemcpyDeviceToHost);
break;
case 6:
timer0.tic();
WaveIterationAltOMP(FieldAlt.data(), IR.data(), N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, F.data(), b, 64, Threads);
timer0.toc();
//cudaMemcpy(d_FieldAlt, FieldAlt.data(), 6 * M * N * sizeof(Real), cudaMemcpyHostToDevice);
//if (writeresults) cudaMemcpy(WriteResults.data() + M * N * (time_ind % T), d_Field + ((time_ind + 2) % 3) * M * N, M * N * sizeof(Real), cudaMemcpyDeviceToHost);
break;
case 7:
timer0.tic();
WaveIterationKernelAlt << < 1, 1 >> > (d_FieldAlt, d_IR, N, M, time_ind % T + 1, v, d_x, timestep, x_ir, y_ir, x_s, y_s, d_F, 32, b);
cudaDeviceSynchronize();
timer0.toc();
//checkCudaErrors(cudaGetLastError());
break;
default:
break;
}
++time_ind;
}
if (mode >= 2) {
checkCudaErrors(cudaGetLastError());
cudaMemcpy(IR.data(), d_IR, T * sizeof(Real), cudaMemcpyDeviceToHost);
checkCudaErrors(cudaGetLastError());
}
TimerInfo timerinfo;
//timerinfo = timer0.getInfo();
if (mode == 4 || mode == 2) timerinfo = timer0.getInfo(1);
else timerinfo = timer0.getInfo();
std::cout <<"runSim method = " << Methods[mode] << ", mean = " << timerinfo.mean << " microseconds, Sigma = " << sqrt(timerinfo.dispersion)
<<" min,max= " << timerinfo.min << " " << timerinfo.max << std::endl;
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::rep diff = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
std::cout << "running ended in " << diff << " milliseconds" << std::endl;
start = std::chrono::high_resolution_clock::now();
saveIR();
if (writeresults) {
saveField();
end = std::chrono::high_resolution_clock::now();
diff = std::chrono::duration_cast<std::chrono::seconds>(end - start).count();
std::cout << "writing ended in " << diff << " seconds" << std::endl;
}
cudaFree(d_Field);
cudaFree(d_IR);
cudaFree(d_F);
cudaFree(d_FieldAlt);
}
int main(int argc, char** argv) {
using namespace NIP;
int myNr = 0;
if (argc > 1)
myNr = std::stoi(argv[1]);
CalculationMode = myNr;
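	// Negative modes select a benchmark sweep instead of a simulation run.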
if (CalculationMode == -1) { benchmark(500,5000, 500); return 0; }
if (CalculationMode == -2) { benchmarkOMP(500, 5000, 500); return 0; }
if (CalculationMode == -3) { benchmarkOMPAlt(500, 5000, 500); return 0; }
if (CalculationMode == -4) { benchmarkKernel(500, 5000, 500); return 0; }
if (CalculationMode == -5) { benchmarkKernelAlt(500, 5000, 500); return 0; }
const char* input_filename = (argc > 2) ? argv[2] : "config.model";
std::ifstream cfg(input_filename);
read_parameters(cfg);
cfg.close();
//if (CalculationMode == 5 || JustRunSim && RunSimMode == 4) NFrames = 3; else NFrames = 3;
if (filename[0] != '\0') {
std::ifstream room(filename,std::ios_base::binary);
int x, y, n;
unsigned char *data = stbi_load(filename, &x, &y, &n, 1);
if (data != nullptr) {
N = y;
M = x;
X_size = M * d_x;
Y_size = N * d_x;
if (x_ir < 0 || x_ir >= M) x_ir = int(M * 0.85);
if (y_ir < 0 || y_ir >= N) y_ir = int(N * 0.52);
if (x_s < 0 || x_s >= M) x_s = int(M * 0.5);
if (y_s < 0 || y_s >= N) y_s = int(N * 0.5);
Field.resize(NFrames+1, N, M);
Field = 0;
blitz::Array<unsigned char, 2> tmp(data, blitz::shape(N, M), blitz::duplicateData);
FieldAlt.resize(3, N, M, 3);
FieldAlt = 0.0;
for (int y0 = 0; y0 < N; ++y0) {
for (int x0 = 0; x0 < M; ++x0) {
Field(NFrames, y0, x0) = ((Real)tmp(y0, x0)) / 255.0;
*(FieldAlt.data() + 3*2*M*N +x0+M*y0) = ((Real)tmp(y0, x0)) / 255.0;
}
}
//tmp.free();
//std::cout << Field(3,0,0) << " " <<Field(3, 100, 100);
stbi_image_free(data);
}
else { Field.resize(NFrames+1, N, M); Field = 0; Field(NFrames, blitz::Range::all(), blitz::Range::all()) = 1.0;
FieldAlt.resize(3,N,M,3);
FieldAlt = 0.0;
FieldAlt(2, blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 1.0;}
room.close();
}
else { Field.resize(NFrames+1, N, M); Field = 0; Field(NFrames, blitz::Range::all(), blitz::Range::all()) = 1.0;
FieldAlt.resize(3, N, M, 3);
FieldAlt = 0.0;
FieldAlt(2, blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 1.0;
}
IR.resize(2 * T);
F.resize(2 * T);
//Field = 0;
//Field(3, blitz::Range::all(), blitz::Range::all()) = 1.0;
IR = 0;
if (F_filename[0] != '\0') {
bool b = loadF(F_filename);
if (b == false) {
F = 0.0;
F(0) = 100.0;
loadDefaultF();
}
}
else {
F = 0.0;
F(0) = 100.0;
loadDefaultF();
}
//FieldAlt.resize(3,N,M,3);
//FieldAlt = 0.0;
//FieldAlt(2, blitz::Range::all(), blitz::Range::all(), blitz::Range::all()) = 1.0;
cudaMalloc((void**)&d_FieldAlt, 3 * 3 * M * N * sizeof(Real));
cudaMemcpy(d_FieldAlt, FieldAlt.data(), 3 * 3 * M * N * sizeof(Real), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_Field, (NFrames + 1) * M * N * sizeof(Real));
cudaMemcpy(d_Field, Field.data(), (NFrames + 1) * M * N * sizeof(Real), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_IR, T * sizeof(Real));
cudaMemcpy(d_IR, IR.data(), T * sizeof(Real), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_F, T * sizeof(Real));
cudaMemcpy(d_F, F.data(), T * sizeof(Real), cudaMemcpyHostToDevice);
windowSize.x = M;
windowSize.y = N;
windowGridSize = dim3(windowSize.x / windowBlockSize.x + ((windowSize.x % windowBlockSize.x==0)? 0:1), windowSize.y / windowBlockSize.y + ((windowSize.y % windowBlockSize.y == 0) ? 0 : 1));
//windowGridSize = dim3(windowSize.x / windowBlockSize.x, windowSize.y / windowBlockSize.y);
if (JustRunSim) {
RunSimMode = (RunSimMode >= 0)? RunSimMode : CalculationMode;
RunAndSaveResults(RunSimMode,writeresults);
return 0;
}
std::cout << "Method: " << Methods[CalculationMode] << std::endl;
cudaFree(d_Field);
cudaFree(d_IR);
cudaFree(d_F);
return 0;
}
|
41818ddd6e007c85ee958c06c6ddd2d04771db9b.hip
|
// !!! This is a file automatically generated by hipify!!!
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include "sceneStructs.h"
#include <cutil_math.h>
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//TODO: IMPLEMENT THIS FUNCTION
//Function that does the initial raycast from the camera
__host__ __device__ ray raycastFromCameraKernel(const glm::vec2 &resolution, float time, const int x, const int y, const glm::vec3 &eye, const glm::vec3 &View,
const glm::vec3 &Up, const glm::vec2 &fov)
{
ray r;
r.origin = eye;
// Distance of View Plane from eye
float tanfovy = tan(fov.y);
float dist = (resolution.y / 2.0f) / tanfovy;
glm::vec3 view = glm::normalize(View);
glm::vec3 up = glm::normalize(Up);
glm::vec3 c = dist * view;
glm::vec3 a = glm::cross(view, up);
glm::vec3 b = glm::cross(a, view);
//Center of screen
glm::vec3 m = c + eye;
//Using same vector a instead of a separate vector h
a = (resolution.x / 2.0f) * a;
//Using same vector b instead of a separate vector v
b = (resolution.y / 2.0f) * b;
//Point in space towards which ray has to be shot
glm::vec3 p = m + (2.0f * x / (resolution.x - 1.0f) - 1.0f) * a + (2.0f * y / (resolution.y - 1.0f) - 1.0f) * b;
r.direction = p - eye;
r.direction = glm::normalize(r.direction);
return r;
}
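//Kernel that fills the per-pixel ray buffer with primary rays cast from the camera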
__global__ void initialRaysGenerator(cameraData cam, ray *rays)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * cam.resolution.x);
if(x <= cam.resolution.x && y <= cam.resolution.y)
{
rays[index] = raycastFromCameraKernel(cam.resolution, 0.0f, x, y, cam.position, cam.view, cam.up, cam.fov);
}
__syncthreads();
}
//Kernel that blacks out a given image buffer
__global__ void clearImage(const glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, const glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = image[index].x * 255.0f;
color.y = image[index].y * 255.0f;
color.z = image[index].z * 255.0f;
if(color.x>255.0f){
color.x = 255.0f;
}
if(color.y>255.0f){
color.y = 255.0f;
}
if(color.z>255.0f){
color.z = 255.0f;
}
// Each thread writes one pixel location in the texture (textel)
PBOpos[index].w = (unsigned char)0;
PBOpos[index].x = (unsigned char)color.x;
PBOpos[index].y = (unsigned char)color.y;
PBOpos[index].z = (unsigned char)color.z;
}
}
__host__ __device__ int findNearestPrimitiveInRay(const staticGeom *geoms, int numberOfGeoms, const ray &rt, glm::vec3 &intersectionPoint, glm::vec3 &normal) //Return -1 if no intersection, else index of geom
{
float dist = 10000000.0f;
int geomIndex = -1;
for(unsigned int i = 0; i < numberOfGeoms; ++i)
{
glm::vec3 currentIntersectionPoint, currentNormal;
float currentDist = 0.0f;
if(geoms[i].type == SPHERE)
{
currentDist = sphereIntersectionTest(geoms[i], rt, currentIntersectionPoint, currentNormal);
if(currentDist != -1.0f && currentDist < dist)
{
intersectionPoint = currentIntersectionPoint;
normal = currentNormal;
dist = currentDist;
geomIndex = i;
}
}
else if(geoms[i].type == CUBE)
{
currentDist = boxIntersectionTest(geoms[i], rt, currentIntersectionPoint, currentNormal);
if(currentDist != -1.0f && currentDist < dist)
{
intersectionPoint = currentIntersectionPoint;
normal = currentNormal;
dist = currentDist;
geomIndex = i;
}
}
else if(geoms[i].type == MESH)
{
}
}
return geomIndex;
}
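//Kernel that performs one trace depth per launch: each thread intersects its ray with the scene,
//accumulates Lambert, Phong and ambient shading with shadow tests against every light, and
//rewrites its ray for the next bounce on reflective materials (or marks it dead otherwise)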
__global__ void raytraceRay(ray *rays, float time, cameraData cam, int rayDepth, glm::vec3* colors,
staticGeom* geoms, unsigned int numberOfGeoms, material *materials, unsigned int numberOfMaterials, light *lights, unsigned int numberOfLights){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int rayIndex = x + (y * cam.resolution.x);
int index = (cam.resolution.x - x) + ((cam.resolution.y - y) * cam.resolution.x);
if(((float)x <= cam.resolution.x && (float)y <= cam.resolution.y))
{
//ray rt = raycastFromCameraKernel(cam.resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov);
ray rt = rays[rayIndex];
if(rayDepth >= 10 || (rays[rayIndex].origin == glm::vec3(-10000, -10000, -10000)))
{
return;
}
glm::vec3 intersectionPoint, normal;
int geomIndex = findNearestPrimitiveInRay(geoms, numberOfGeoms, rt, intersectionPoint, normal);
if(geomIndex != -1)
{
//Flat Shading
//colors[index] = materials[geoms[geomIndex].materialid].color;
//Setting initial value
if(rayDepth == 0)
{
colors[index] = glm::vec3(0, 0, 0);
}
for(unsigned int i = 0; i < numberOfLights; ++i)
{
ray lightRay;
lightRay.origin = intersectionPoint;
lightRay.direction = lights[i].position - intersectionPoint;
if(glm::length(lightRay.direction) > 0.001f)
{
lightRay.direction = glm::normalize(lightRay.direction);
}
else
{
continue;
}
lightRay.origin += lightRay.direction * 0.01f;
int obstructionIndex = -1;
glm::vec3 obstructionIntersectionPoint, obstructionNormal;
obstructionIndex = findNearestPrimitiveInRay(geoms, numberOfGeoms, lightRay, obstructionIntersectionPoint, obstructionNormal);
if(obstructionIndex == -1 || (glm::distance(intersectionPoint, obstructionIntersectionPoint) > glm::distance(intersectionPoint, lights[i].position)))
{
//Lambert Shading
float KD = 0.8f;
colors[index] += KD * lights[i].color * materials[geoms[geomIndex].materialid].color * glm::dot(lightRay.direction, normal);
//Phong Shading
float KS = 0.10f;
glm::vec3 reflectedRay = calculateReflectionDirection(normal, rt.direction);
glm::vec3 V = glm::normalize((cam.position - intersectionPoint));
colors[index] += (KS * materials[geoms[geomIndex].materialid].specularColor * lights[i].color * pow((float)glm::dot(reflectedRay, V),
materials[geoms[geomIndex].materialid].specularExponent));
//Reflection
if(materials[geoms[geomIndex].materialid].hasReflective == 1.0f)
{
rays[rayIndex].origin = intersectionPoint + reflectedRay * 0.01f;
rays[rayIndex].direction = reflectedRay;
}
//Refraction
else if(materials[geoms[geomIndex].materialid].hasRefractive == 1.0f)
{
}
else
{
rays[rayIndex].origin = glm::vec3(-10000, -10000, -10000);
}
}
//Coloring due to reflection
else if(materials[geoms[geomIndex].materialid].hasReflective == 1.0f)
{
glm::vec3 reflectedRay = calculateReflectionDirection(normal, rt.direction);
rays[rayIndex].origin = intersectionPoint + reflectedRay * 0.01f;
rays[rayIndex].direction = reflectedRay;
}
//Coloring due to refraction
else if(materials[geoms[geomIndex].materialid].hasRefractive == 1.0f)
{
}
//Ambient Lighting
float KA = 0.1f;
glm::vec3 ambientLight(0.2f, 0.2f, 0.2f);
colors[index] += KA * materials[geoms[geomIndex].materialid].color * ambientLight;
}
}
//Background
else
{
glm::vec3 backRGB(0, 0, 0);
colors[index] = backRGB;
}
}
//__syncthreads();
}
//TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms,
light* lights, int numberOfLights){
unsigned int traceDepth = 10; //determines how many bounces the raytracer traces
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
//send image to GPU
glm::vec3* cudaimage = NULL;
hipMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
hipMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyHostToDevice);
//package geometry and sent to GPU
staticGeom* geomList = new staticGeom[numberOfGeoms];
for(int i=0; i<numberOfGeoms; i++){
staticGeom newStaticGeom;
newStaticGeom.type = geoms[i].type;
newStaticGeom.materialid = geoms[i].materialid;
newStaticGeom.translation = geoms[i].translations[frame];
newStaticGeom.rotation = geoms[i].rotations[frame];
newStaticGeom.scale = geoms[i].scales[frame];
newStaticGeom.transform = geoms[i].transforms[frame];
newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
geomList[i] = newStaticGeom;
}
staticGeom* cudageoms = NULL;
hipMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
hipMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), hipMemcpyHostToDevice);
//Packaging materials and sending to GPU
material *cudaMaterials = NULL;
hipMalloc((void**)&cudaMaterials, numberOfMaterials * sizeof(material));
hipMemcpy(cudaMaterials, materials, numberOfMaterials * sizeof(material), hipMemcpyHostToDevice);
//Packaging lights and sending to GPU
light *cudaLights = NULL;
hipMalloc((void**)&cudaLights, numberOfLights * sizeof(light));
hipMemcpy(cudaLights, lights, numberOfLights * sizeof(light), hipMemcpyHostToDevice);
//package camera
cameraData cam;
cam.resolution = renderCam->resolution;
cam.position = renderCam->positions[frame];
cam.view = renderCam->views[frame];
cam.up = renderCam->ups[frame];
cam.fov = renderCam->fov;
//Packaging rays
int numberOfRays = (int)cam.resolution.x * (int)cam.resolution.y;
ray *rays = new ray[numberOfRays];
ray *cudaRays = NULL;
hipMalloc((void**)&cudaRays, numberOfRays * sizeof(ray));
hipMemcpy(cudaRays, rays, numberOfRays * sizeof(ray), hipMemcpyHostToDevice);
//kernel launches
hipLaunchKernelGGL(( initialRaysGenerator), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, cam, cudaRays);
//trace rays for up to traceDepth bounces
for(int i = 0; i < traceDepth; ++i)
{
hipLaunchKernelGGL(( raytraceRay), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, cudaRays, (float)iterations, cam, i, cudaimage, cudageoms, (unsigned int)numberOfGeoms, cudaMaterials,
(unsigned int)numberOfMaterials, cudaLights, (unsigned int)numberOfLights);
}
hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, renderCam->resolution, cudaimage);
//retrieve image from GPU
hipMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyDeviceToHost);
//free up stuff, or else we'll leak memory like a madman
hipFree( cudaimage );
hipFree( cudageoms );
//delete geomList;
delete [] geomList;
//Freeing memory from materials, lights and rays
hipFree(cudaMaterials);
hipFree(cudaLights);
hipFree(cudaRays);
delete [] rays;
// make certain the kernel has completed
hipDeviceSynchronize();
checkCUDAError("Kernel failed!");
}
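// Hedged add-on sketch (not part of the original renderer): timing one call to
// cudaRaytraceCore with HIP events; the wrapper name and its placement here are assumptions.
float timeRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations,
                       material* materials, int numberOfMaterials,
                       geom* geoms, int numberOfGeoms, light* lights, int numberOfLights){
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
cudaRaytraceCore(PBOpos, renderCam, frame, iterations, materials, numberOfMaterials,
                 geoms, numberOfGeoms, lights, numberOfLights);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float ms = 0.0f;
hipEventElapsedTime(&ms, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
return ms; // milliseconds for one full trace, including all bounces and copies
}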
|
41818ddd6e007c85ee958c06c6ddd2d04771db9b.cu
|
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include "sceneStructs.h"
#include <cutil_math.h>
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
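// Hedged companion sketch (not in the original file): the same reporting applied to
// an API return code instead of cudaGetLastError(); the helper name is an assumption.
static void checkCUDAResult(cudaError_t result, const char *msg) {
if (result != cudaSuccess) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(result));
exit(EXIT_FAILURE);
}
}
// Example (hypothetical): checkCUDAResult(cudaMalloc((void**)&buf, bytes), "cudaMalloc buf");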
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//TODO: IMPLEMENT THIS FUNCTION
//Function that does the initial raycast from the camera
__host__ __device__ ray raycastFromCameraKernel(const glm::vec2 &resolution, float time, const int x, const int y, const glm::vec3 &eye, const glm::vec3 &View,
const glm::vec3 &Up, const glm::vec2 &fov)
{
ray r;
r.origin = eye;
// Distance of View Plane from eye
float tanfovy = tan(fov.y);
float dist = (resolution.y / 2.0f) / tanfovy;
glm::vec3 view = glm::normalize(View);
glm::vec3 up = glm::normalize(Up);
glm::vec3 c = dist * view;
glm::vec3 a = glm::cross(view, up);
glm::vec3 b = glm::cross(a, view);
//Center of screen
glm::vec3 m = c + eye;
//Using same vector a instead of a separate vector h
a = (resolution.x / 2.0f) * a;
//Using same vector b instead of a separate vector v
b = (resolution.y / 2.0f) * b;
//Point in space towards which ray has to be shot
glm::vec3 p = m + (2.0f * x / (resolution.x - 1.0f) - 1.0f) * a + (2.0f * y / (resolution.y - 1.0f) - 1.0f) * b;
r.direction = p - eye;
r.direction = glm::normalize(r.direction);
return r;
}
__global__ void initialRaysGenerator(cameraData cam, ray *rays)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * cam.resolution.x);
if(x <= cam.resolution.x && y <= cam.resolution.y)
{
rays[index] = raycastFromCameraKernel(cam.resolution, 0.0f, x, y, cam.position, cam.view, cam.up, cam.fov);
}
__syncthreads();
}
//Kernel that blacks out a given image buffer
__global__ void clearImage(const glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, const glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = image[index].x * 255.0f;
color.y = image[index].y * 255.0f;
color.z = image[index].z * 255.0f;
if(color.x>255.0f){
color.x = 255.0f;
}
if(color.y>255.0f){
color.y = 255.0f;
}
if(color.z>255.0f){
color.z = 255.0f;
}
// Each thread writes one pixel location in the texture (textel)
PBOpos[index].w = (unsigned char)0;
PBOpos[index].x = (unsigned char)color.x;
PBOpos[index].y = (unsigned char)color.y;
PBOpos[index].z = (unsigned char)color.z;
}
}
__host__ __device__ int findNearestPrimitiveInRay(const staticGeom *geoms, int numberOfGeoms, const ray &rt, glm::vec3 &intersectionPoint, glm::vec3 &normal) //Return -1 if no intersection, else index of geom
{
float dist = 10000000.0f;
int geomIndex = -1;
for(unsigned int i = 0; i < numberOfGeoms; ++i)
{
glm::vec3 currentIntersectionPoint, currentNormal;
float currentDist = 0.0f;
if(geoms[i].type == SPHERE)
{
currentDist = sphereIntersectionTest(geoms[i], rt, currentIntersectionPoint, currentNormal);
if(currentDist != -1.0f && currentDist < dist)
{
intersectionPoint = currentIntersectionPoint;
normal = currentNormal;
dist = currentDist;
geomIndex = i;
}
}
else if(geoms[i].type == CUBE)
{
currentDist = boxIntersectionTest(geoms[i], rt, currentIntersectionPoint, currentNormal);
if(currentDist != -1.0f && currentDist < dist)
{
intersectionPoint = currentIntersectionPoint;
normal = currentNormal;
dist = currentDist;
geomIndex = i;
}
}
else if(geoms[i].type == MESH)
{
}
}
return geomIndex;
}
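// Hedged sketch (not called by the kernels below): a shadow-feeler test built on
// findNearestPrimitiveInRay, showing how the occlusion check inside raytraceRay
// could be factored out; the name, the reuse of the 0.01f offset and the parameters are assumptions.
__host__ __device__ bool isOccluded(const staticGeom *geoms, int numberOfGeoms,
                                    const glm::vec3 &surfacePoint, const glm::vec3 &lightPosition){
ray shadowRay;
shadowRay.direction = glm::normalize(lightPosition - surfacePoint);
shadowRay.origin = surfacePoint + shadowRay.direction * 0.01f; // nudge off the surface to avoid self-hits
glm::vec3 hitPoint, hitNormal;
int hit = findNearestPrimitiveInRay(geoms, numberOfGeoms, shadowRay, hitPoint, hitNormal);
// Occluded only if something is hit closer to the surface than the light itself
return hit != -1 && glm::distance(surfacePoint, hitPoint) < glm::distance(surfacePoint, lightPosition);
}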
__global__ void raytraceRay(ray *rays, float time, cameraData cam, int rayDepth, glm::vec3* colors,
staticGeom* geoms, unsigned int numberOfGeoms, material *materials, unsigned int numberOfMaterials, light *lights, unsigned int numberOfLights){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int rayIndex = x + (y * cam.resolution.x);
int index = (cam.resolution.x - x) + ((cam.resolution.y - y) * cam.resolution.x);
if(((float)x <= cam.resolution.x && (float)y <= cam.resolution.y))
{
//ray rt = raycastFromCameraKernel(cam.resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov);
ray rt = rays[rayIndex];
if(rayDepth >= 10 || (rays[rayIndex].origin == glm::vec3(-10000, -10000, -10000)))
{
return;
}
glm::vec3 intersectionPoint, normal;
int geomIndex = findNearestPrimitiveInRay(geoms, numberOfGeoms, rt, intersectionPoint, normal);
if(geomIndex != -1)
{
//Flat Shading
//colors[index] = materials[geoms[geomIndex].materialid].color;
//Setting initial value
if(rayDepth == 0)
{
colors[index] = glm::vec3(0, 0, 0);
}
for(unsigned int i = 0; i < numberOfLights; ++i)
{
ray lightRay;
lightRay.origin = intersectionPoint;
lightRay.direction = lights[i].position - intersectionPoint;
if(glm::length(lightRay.direction) > 0.001f)
{
lightRay.direction = glm::normalize(lightRay.direction);
}
else
{
continue;
}
lightRay.origin += lightRay.direction * 0.01f;
int obstructionIndex = -1;
glm::vec3 obstructionIntersectionPoint, obstructionNormal;
obstructionIndex = findNearestPrimitiveInRay(geoms, numberOfGeoms, lightRay, obstructionIntersectionPoint, obstructionNormal);
if(obstructionIndex == -1 || (glm::distance(intersectionPoint, obstructionIntersectionPoint) > glm::distance(intersectionPoint, lights[i].position)))
{
//Lambert Shading
float KD = 0.8f;
colors[index] += KD * lights[i].color * materials[geoms[geomIndex].materialid].color * glm::dot(lightRay.direction, normal);
//Phong Shading
float KS = 0.10f;
glm::vec3 reflectedRay = calculateReflectionDirection(normal, rt.direction);
glm::vec3 V = glm::normalize((cam.position - intersectionPoint));
colors[index] += (KS * materials[geoms[geomIndex].materialid].specularColor * lights[i].color * pow((float)glm::dot(reflectedRay, V),
materials[geoms[geomIndex].materialid].specularExponent));
//Reflection
if(materials[geoms[geomIndex].materialid].hasReflective == 1.0f)
{
rays[rayIndex].origin = intersectionPoint + reflectedRay * 0.01f;
rays[rayIndex].direction = reflectedRay;
}
//Refraction
else if(materials[geoms[geomIndex].materialid].hasRefractive == 1.0f)
{
}
else
{
rays[rayIndex].origin = glm::vec3(-10000, -10000, -10000);
}
}
//Coloring due to reflection
else if(materials[geoms[geomIndex].materialid].hasReflective == 1.0f)
{
glm::vec3 reflectedRay = calculateReflectionDirection(normal, rt.direction);
rays[rayIndex].origin = intersectionPoint + reflectedRay * 0.01f;
rays[rayIndex].direction = reflectedRay;
}
//Coloring due to refraction
else if(materials[geoms[geomIndex].materialid].hasRefractive == 1.0f)
{
}
//Ambient Lighting
float KA = 0.1f;
glm::vec3 ambientLight(0.2f, 0.2f, 0.2f);
colors[index] += KA * materials[geoms[geomIndex].materialid].color * ambientLight;
}
}
//Background
else
{
glm::vec3 backRGB(0, 0, 0);
colors[index] = backRGB;
}
}
//__syncthreads();
}
//TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms,
light* lights, int numberOfLights){
unsigned int traceDepth = 10; //determines how many bounces the raytracer traces
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
//send image to GPU
glm::vec3* cudaimage = NULL;
cudaMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
cudaMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyHostToDevice);
//package geometry and send to GPU
staticGeom* geomList = new staticGeom[numberOfGeoms];
for(int i=0; i<numberOfGeoms; i++){
staticGeom newStaticGeom;
newStaticGeom.type = geoms[i].type;
newStaticGeom.materialid = geoms[i].materialid;
newStaticGeom.translation = geoms[i].translations[frame];
newStaticGeom.rotation = geoms[i].rotations[frame];
newStaticGeom.scale = geoms[i].scales[frame];
newStaticGeom.transform = geoms[i].transforms[frame];
newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
geomList[i] = newStaticGeom;
}
staticGeom* cudageoms = NULL;
cudaMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
cudaMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice);
//Packaging materials and sending to GPU
material *cudaMaterials = NULL;
cudaMalloc((void**)&cudaMaterials, numberOfMaterials * sizeof(material));
cudaMemcpy(cudaMaterials, materials, numberOfMaterials * sizeof(material), cudaMemcpyHostToDevice);
//Packaging lights and sending to GPU
light *cudaLights = NULL;
cudaMalloc((void**)&cudaLights, numberOfLights * sizeof(light));
cudaMemcpy(cudaLights, lights, numberOfLights * sizeof(light), cudaMemcpyHostToDevice);
//package camera
cameraData cam;
cam.resolution = renderCam->resolution;
cam.position = renderCam->positions[frame];
cam.view = renderCam->views[frame];
cam.up = renderCam->ups[frame];
cam.fov = renderCam->fov;
//Packaging rays
int numberOfRays = (int)cam.resolution.x * (int)cam.resolution.y;
ray *rays = new ray[numberOfRays];
ray *cudaRays = NULL;
cudaMalloc((void**)&cudaRays, numberOfRays * sizeof(ray));
cudaMemcpy(cudaRays, rays, numberOfRays * sizeof(ray), cudaMemcpyHostToDevice);
//kernel launches
initialRaysGenerator<<<fullBlocksPerGrid, threadsPerBlock>>>(cam, cudaRays);
//trace rays for up to traceDepth bounces
for(int i = 0; i < traceDepth; ++i)
{
raytraceRay<<<fullBlocksPerGrid, threadsPerBlock>>>(cudaRays, (float)iterations, cam, i, cudaimage, cudageoms, (unsigned int)numberOfGeoms, cudaMaterials,
(unsigned int)numberOfMaterials, cudaLights, (unsigned int)numberOfLights);
}
sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, renderCam->resolution, cudaimage);
//retrieve image from GPU
cudaMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyDeviceToHost);
//free up stuff, or else we'll leak memory like a madman
cudaFree( cudaimage );
cudaFree( cudageoms );
//delete geomList;
delete [] geomList;
//Freeing memory from materials, lights and rays
cudaFree(cudaMaterials);
cudaFree(cudaLights);
cudaFree(cudaRays);
delete [] rays;
// make certain the kernel has completed
cudaDeviceSynchronize();
checkCUDAError("Kernel failed!");
}
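// Hedged add-on sketch (not part of the original renderer): timing one call to
// cudaRaytraceCore with CUDA events; the wrapper name and its placement here are assumptions.
float timeRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations,
                       material* materials, int numberOfMaterials,
                       geom* geoms, int numberOfGeoms, light* lights, int numberOfLights){
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaRaytraceCore(PBOpos, renderCam, frame, iterations, materials, numberOfMaterials,
                 geoms, numberOfGeoms, lights, numberOfLights);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return ms; // milliseconds for one full trace, including all bounces and copies
}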
|
f06cbe1730709bbda2b15770dc75dda35b3e1935.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
static bool check(const float* A, const float* B, const float* C, int m, int n, int k) {
for (int i = 0; i < m; ++i) {
for (int j = 0; j < n; ++j) {
float sum = 0.f;
for (int p = 0; p < k; ++p) {
sum += A[i * k + p] * B[j + p * n];
}
if (::fabs(sum - C[i * n + j]) / ::fabs(sum) > 1e-5f) {
printf("C[%d][%d] not match, %f vs %f\n", i, j, sum, C[i * n + j]);
return false;
}
}
}
return true;
}
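// Hedged harness helper (assumed, not part of the original benchmark): fill a host
// buffer with small random values before copying it to the device and running check().
static void fill_random(float* buf, size_t count) {
for (size_t i = 0; i < count; ++i) {
buf[i] = static_cast<float>(std::rand() % 9 - 4); // small integer inputs keep fp error well below check()'s 1e-5 tolerance
}
}
// Added descriptive note: the helpers below wrap raw PTX so the kernels can use
// Ampere cp.async (LDGSTS) global->shared copies, predicated global stores, and
// 128-bit vectorized shared-memory loads/stores on 32-bit shared addresses.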
__device__ __forceinline__ uint32_t smem_u32addr(const void* smem_ptr) {
uint32_t addr;
asm("{.reg .u64 u64addr;\n"
" cvta.to.shared.u64 u64addr, %1;\n"
" cvt.u32.u64 %0, u64addr;}\n"
: "=r"(addr)
: "l"(smem_ptr));
return addr;
}
__device__ __forceinline__ void ldgsts32(const uint32_t& smem_addr, const void* gmem_ptr, bool guard) {
asm volatile("{.reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
#if __CUDACC_VER_MAJOR__ >= 11 && __CUDACC_VER_MINOR__ >= 4
" @p cp.async.ca.shared.global.L2::128B [%0], [%1], 4;}\n"
#else
" @p cp.async.ca.shared.global [%0], [%1], 4;}\n"
#endif
:
: "r"(smem_addr), "l"(gmem_ptr), "r"((int)guard));
}
__device__ __forceinline__ void ldgsts32(
const uint32_t& smem_addr, const void* gmem_ptr, const uint32_t& src_size, bool guard) {
asm volatile("{.reg .pred p;\n"
" setp.ne.b32 p, %3, 0;\n"
#if __CUDACC_VER_MAJOR__ >= 11 && __CUDACC_VER_MINOR__ >= 4
" @p cp.async.ca.shared.global.L2::128B [%0], [%1], 4, %2;}\n"
#else
" @p cp.async.ca.shared.global [%0], [%1], 4, %2;}\n"
#endif
:
: "r"(smem_addr), "l"(gmem_ptr), "r"(src_size), "r"((int)guard));
}
__device__ __forceinline__ void ldgsts_commit() { asm volatile("cp.async.wait_all;\n" ::); }
__device__ __forceinline__ void stg32(const float& reg, void* ptr, bool guard) {
asm volatile("{.reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" @p st.global.f32 [%0], %1;}\n"
:
: "l"(ptr), "f"(reg), "r"((int)guard));
}
__device__ __forceinline__ void lds128(float& reg0, float& reg1, float& reg2, float& reg3, const uint32_t& addr) {
asm volatile("ld.shared.v4.f32 {%0, %1, %2, %3}, [%4];\n"
: "=f"(reg0), "=f"(reg1), "=f"(reg2), "=f"(reg3)
: "r"(addr));
}
__device__ __forceinline__ void sts32(const float& reg, const uint32_t& addr) {
asm volatile("st.shared.f32 [%0], %1;\n" : : "r"(addr), "f"(reg));
}
__device__ __forceinline__ void sts128(
const float& reg0, const float& reg1, const float& reg2, const float& reg3, const uint32_t& addr) {
asm volatile("st.shared.v4.f32 [%0], {%1, %2, %3, %4};\n"
:
: "r"(addr), "f"(reg0), "f"(reg1), "f"(reg2), "f"(reg3));
}
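// Added descriptive note: StgFrag gathers one 4x4 sub-tile of the accumulator
// registers; C_tile_wb below stages that fragment through shared memory and then
// streams it to global memory with per-row/per-column guards, which is the slow
// path used for thread block tiles that only partially cover the edge of C.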
struct StgFrag {
float data[4][4];
__device__ __forceinline__ StgFrag(const float (&C_frag)[16][8], int tile_x, int tile_y) {
#pragma unroll
for (int i = 0; i < 4; ++i) {
#pragma unroll
for (int j = 0; j < 4; ++j) {
data[i][j] = C_frag[tile_y * 4 + i][tile_x * 4 + j];
}
}
}
};
static __device__ __noinline__ void C_tile_wb(StgFrag C_frag, float* C_stg_ptr, const float* C_lds_ptr,
uint32_t C_sts_addr, uint32_t m, uint32_t n, uint32_t m_idx, uint32_t n_idx) {
__syncthreads();
#pragma unroll
for (int i = 0; i < 4; ++i) {
sts128(C_frag.data[i][0], C_frag.data[i][1], C_frag.data[i][2], C_frag.data[i][3],
C_sts_addr + i * 9 * sizeof(float4));
}
__syncthreads();
uint32_t m_guard = m < m_idx ? 0 : m - m_idx;
#pragma unroll
for (int i = 0; i < 16; ++i) {
stg32(C_lds_ptr[i * 36], C_stg_ptr + i * n, i < m_guard && n_idx < n);
}
}
/*
* matrix A, B and C: row-major
*
* mma block:
* thread block tile: m128n256k8
* warp tile: m64n64k8
* thread tile: m16n8k8
* thread fragment:
* matrixA: 16x1 FP32
* matrixB: 1x8 FP32
*
* ----------------------------------------------------------------
* thread block tile map:
*
* 256
* --|---------------------------------------|
* B_tile 8| |
* --|---------------------------------------|
*
* A_tile | 8 | | 64 |
* --|---| --|---------|---------|---------|---------|
* | | | | | | |
* | | 64| warp0 | warp1 | warp2 | warp3 |
* | | | | | | |
* 128| | --|---------|---------|---------|---------|
* | | | | | | |
* | | | warp4 | warp5 | warp6 | warp7 |
* | | | | | | |
* --|---| |---------|---------|---------|---------|
*
* ----------------------------------------------------------------
* warp tile map:
*
* 'z' thread map to avoid LDS.128 shared memory broadcast limitation.
*
* | 32 ||
* B_frag --|---|---|---|---|---|---|---|---||---|---|---|---|---|---|---|---|
* 1|///| | | | | | | ||///| | | | | | | |
* --|---|---|---|---|---|---|---|---||---|---|---|---|---|---|---|---|
* A_frag | 4 | ||
* | 1 | ||
* --|---|-- |---|---|---|---|---|---|---|---||---|---------------------------|
* |///|4 |t0 |t2 |t4 |t6 |t8 |t10|t12|t14||t0 | |
* |---|-- |---|---|---|---|---|---|---|---||---| |
* | | |t1 |t3 |t5 |t7 |t9 |t11|t13|t15|| |
* 16|---| |---|---|---|---|---|---|---|---|| |
* | | |t16|t18|t20|t22|t24|t26|t28|t30|| |
* |---| |---|---|---|---|---|---|---|---|| |
* | | |t17|t19|t21|t23|t25|t27|t29|t31|| |
* ==|===|=====|===|===|===|===|===|===|===|===||===|============================
* |///| |t0 | ||t0 | |
* |---| |---| ||---| |
* | | | || |
* |---| | || |
* | | | || |
* |---| | || |
* | | | || |
* |---| |-------------------------------||-------------------------------|
* |///| |t0 | ||t0 | |
* |---| |---| ||---| |
* | | | || |
* |---| | || |
* | | | || |
* |---| | || |
* | | | || |
* |---| |-------------------------------||-------------------------------|
* |///| |t0 | ||t0 | |
* |---| |---| ||---| |
* | | | || |
* |---| | || |
* | | | || |
* |---| | || |
* | | | || |
* |---| |-------------------------------||-------------------------------|
*
*/
__global__ __launch_bounds__(256) void ampere_sgemm_my_opt_128x256x8_kernel_no_pingpong(const float* A, const float* B,
float* C, size_t m, size_t n, size_t k, size_t B_ldg_step,
int cycle_count) { // n * sizeof(float) * 8
/*
* matrix A & B thread block tile shared memory (double buffer)
* matrix A: 132 * 8 * 4Byte/item * double buffer = 4.125KB * 2
* matrix B: 256 * 8 * 4Byte/item * double buffer = 16KB
*
* for double buffer faster switch, A_smem requires 8KB * 2 shared memory
* and 16KB aligned, B_smem should be 16KB aligned, then the double buffer
* can be switched by only 1 xor instruction:
* (uint32_t &)A_smem ^= 0x2000;
* (uint32_t &)B_smem ^= 0x2000;
*/
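// Worked sizes for the note above (added comment, not in the original):
//   one A buffer: 132 * 8 * 4 B = 4224 B (4.125 KB), padded up to 8 KB
//   one B buffer: 256 * 8 * 4 B = 8192 B (8 KB)
//   smem total  : 16 KB for the A region + 16 KB for the B region = 32 KB
// 0x2000 == 8192, so XOR-ing a shared address with 0x2000 toggles between the two
// 8 KB halves of a region, i.e. the one-instruction buffer switch described above.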
// for (int i = 0; i < cycle_count; ++i) {
__shared__ __align__(16 * 1024) char smem[32 * 1024];
float* A_smem = reinterpret_cast<float*>(smem);
float* B_smem = reinterpret_cast<float*>(smem + 16 * 1024);
// A, B and C register fragment
float A_frag[1][16];
float B_frag[1][8];
float C_frag[16][8];
#pragma unroll
for (int i = 0; i < 16; ++i) {
#pragma unroll
for (int j = 0; j < 8; ++j) {
C_frag[i][j] = 0;
}
}
const uint32_t lane_id = threadIdx.x % 32;
const uint32_t warp_id = threadIdx.x / 32;
// 4x8 threads each warp for FFMA
const uint32_t mma_tid_x = (lane_id / 2) % 8;
const uint32_t mma_tid_y = (lane_id / 16) * 2 + (lane_id % 2);
// A_tile & B_tile ldg pointer
const char* A_ldg_ptr = (const char*)(A + (blockIdx.y * 128 + threadIdx.x / 8) * k + threadIdx.x % 8);
const char* B_ldg_ptr = (const char*)(B + (threadIdx.x / 128) * n + blockIdx.x * 256 + threadIdx.x % 128);
// A_ldg_offset
uint32_t A_ldg_offset[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
A_ldg_offset[i] = i * 32 * k * sizeof(float);
}
// B_ldg_offset
uint32_t B_ldg_offset[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
B_ldg_offset[i] = i * 2 * n * sizeof(float);
}
// A_tile & B_tile sts/lds pointer
// using uint32_t pointer for faster double buffer switch
uint32_t A_sts_addr = smem_u32addr(A_smem + (threadIdx.x % 8) * 132 + (threadIdx.x / 8));
uint32_t B_sts_addr = smem_u32addr(B_smem + (threadIdx.x / 128) * 256 + (threadIdx.x % 128));
uint32_t A_lds_addr = smem_u32addr(A_smem + (warp_id / 4) * 64 + mma_tid_y * 4);
uint32_t B_lds_addr = smem_u32addr(B_smem + (warp_id % 4) * 64 + mma_tid_x * 4);
// ldg_guard to avoid LDG out of bound
uint32_t A_ldg_guard = 0;
#pragma unroll
for (int i = 0; i < 4; ++i) {
int m_idx = blockIdx.y * 128 + threadIdx.x / 8 + i * 32;
if (m_idx < m) {
A_ldg_guard |= (1u << i);
}
}
uint32_t B_ldg_guard = 0;
#pragma unroll
for (int i = 0; i < 2; ++i) {
int n_idx = blockIdx.x * 256 + threadIdx.x % 128 + i * 128;
if (n_idx < n) {
B_ldg_guard |= (1u << i);
}
}
// 1'st A&B tile loaded before the k_tile loop
uint32_t k_tiles = (k + 7) / 8 - 1;
// load 1'st tile to shared memory
uint32_t first_k_tile = k - k_tiles * 8;
#pragma unroll
for (int i = 0; i < 4; ++i) {
uint32_t src_size = threadIdx.x % 8 < first_k_tile ? 4 : 0;
ldgsts32(A_sts_addr + i * 32 * sizeof(float), A_ldg_ptr + A_ldg_offset[i], src_size,
(A_ldg_guard & (1u << i)) != 0);
}
A_ldg_ptr += first_k_tile * sizeof(float);
#pragma unroll
for (int i = 0; i < 4; ++i) {
uint32_t src_size = i * 2 + threadIdx.x / 128 < first_k_tile ? 4 : 0;
ldgsts32(B_sts_addr + i * 2 * 256 * sizeof(float), B_ldg_ptr + B_ldg_offset[i], src_size,
(B_ldg_guard & (1u << 0)) != 0);
ldgsts32(B_sts_addr + (i * 2 * 256 + 128) * sizeof(float), B_ldg_ptr + B_ldg_offset[i] + 128 * sizeof(float),
src_size, (B_ldg_guard & (1u << 1)) != 0);
}
B_ldg_ptr += n * first_k_tile * sizeof(float);
ldgsts_commit();
__syncthreads();
k_tiles += 1;
// k_tiles loop
for (; k_tiles > 0; --k_tiles) {
#pragma unroll
for (int k_frag = 0; k_frag < 8; ++k_frag) {
lds128(A_frag[0][0], A_frag[0][1], A_frag[0][2], A_frag[0][3], A_lds_addr + k_frag * 132 * sizeof(float));
lds128(A_frag[0][4], A_frag[0][5], A_frag[0][6], A_frag[0][7],
A_lds_addr + (k_frag * 132 + 16) * sizeof(float));
lds128(A_frag[0][8], A_frag[0][9], A_frag[0][10], A_frag[0][11],
A_lds_addr + (k_frag * 132 + 32) * sizeof(float));
lds128(A_frag[0][12], A_frag[0][13], A_frag[0][14], A_frag[0][15],
A_lds_addr + (k_frag * 132 + 48) * sizeof(float));
lds128(B_frag[0][0], B_frag[0][1], B_frag[0][2], B_frag[0][3], B_lds_addr + k_frag * 256 * sizeof(float));
lds128(B_frag[0][4], B_frag[0][5], B_frag[0][6], B_frag[0][7],
B_lds_addr + (k_frag * 256 + 32) * sizeof(float));
// FFMA loop
#pragma unroll
for (int i = 0; i < 16; ++i) {
#pragma unroll
for (int j = 0; j < 8; ++j) {
C_frag[i][j] += A_frag[0][i] * B_frag[0][j];
}
}
}
if (k_tiles > 1) {
// load next A&B tile
#pragma unroll
for (int k_frag = 0; k_frag < 4; ++k_frag) {
ldgsts32(A_sts_addr + k_frag * 32 * sizeof(float), A_ldg_ptr + A_ldg_offset[k_frag],
(A_ldg_guard & (1u << k_frag)) != 0);
ldgsts32(B_sts_addr + k_frag * 2 * 256 * sizeof(float), B_ldg_ptr + B_ldg_offset[k_frag],
(B_ldg_guard & (1u << 0)) != 0);
ldgsts32(B_sts_addr + (k_frag * 2 * 256 + 128) * sizeof(float),
B_ldg_ptr + B_ldg_offset[k_frag] + 128 * sizeof(float), (B_ldg_guard & (1u << 1)) != 0);
}
// ldg pointer for next tile
A_ldg_ptr += 8 * sizeof(float);
B_ldg_ptr += B_ldg_step;
ldgsts_commit();
__syncthreads();
}
}
// C_tile write back, reuse A&B tile shared memory buffer
uint32_t C_sts_addr = smem_u32addr((float4*)(smem + warp_id * 4096) + mma_tid_y * 4 * 9 + mma_tid_x);
const float* C_lds_ptr = (float*)(smem + warp_id * 4096) + lane_id;
uint32_t m_idx = blockIdx.y * 128 + warp_id / 4 * 64;
uint32_t n_idx = blockIdx.x * 256 + warp_id % 4 * 64 + lane_id;
float* C_stg_ptr = C + m_idx * n + n_idx;
if (m_idx >= m) {
return;
} else if (m_idx + 64 <= m) {
uint32_t n_guard = n < n_idx ? 0 : n - n_idx;
#pragma unroll
for (int i = 0; i < 4; ++i) {
#pragma unroll
for (int j = 0; j < 2; ++j) {
__syncthreads();
#pragma unroll
for (int p = 0; p < 4; ++p) {
sts128(C_frag[i * 4 + p][j * 4], C_frag[i * 4 + p][j * 4 + 1], C_frag[i * 4 + p][j * 4 + 2],
C_frag[i * 4 + p][j * 4 + 3], C_sts_addr + p * 9 * sizeof(float4));
}
__syncthreads();
#pragma unroll
for (int p = 0; p < 16; ++p) {
stg32(C_lds_ptr[p * 36], C_stg_ptr + (i * 16 + p) * n + j * 32, j * 32 < n_guard);
}
}
}
} else {
#pragma unroll
for (int i = 0; i < 4; ++i) {
#pragma unroll
for (int j = 0; j < 2; ++j) {
StgFrag stg_frag(C_frag, j, i);
C_tile_wb(stg_frag, C_stg_ptr + i * 16 * n + j * 32, C_lds_ptr, C_sts_addr, m, n, m_idx + i * 16,
n_idx + j * 32);
}
}
}
// }
}
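// Hedged host-side launch sketch (no launcher exists in this file): grid dimensions
// follow the m128n256k8 thread block tile documented above, with 256 threads per
// block; the function name, stream handling and cycle_count value are assumptions.
static void launch_sgemm_no_pingpong(const float* A, const float* B, float* C,
                                     size_t m, size_t n, size_t k, hipStream_t stream) {
dim3 block(256);
dim3 grid((unsigned)((n + 255) / 256), (unsigned)((m + 127) / 128));
size_t B_ldg_step = n * sizeof(float) * 8; // matches the "n * sizeof(float) * 8" note on the kernel parameter
hipLaunchKernelGGL(ampere_sgemm_my_opt_128x256x8_kernel_no_pingpong,
                   grid, block, 0, stream,
                   A, B, C, m, n, k, B_ldg_step, /*cycle_count=*/1);
}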
__global__ __launch_bounds__(256) void ampere_sgemm_my_opt_128x256x8_kernel_sm_pingpong(const float* A, const float* B,
float* C, size_t m, size_t n, size_t k, size_t B_ldg_step,
int cycle_count) { // n * sizeof(float) * 8
// for (int i = 0; i < cycle_count; ++i) {
__shared__ __align__(16 * 1024) char smem[32 * 1024];
float* A_smem = reinterpret_cast<float*>(smem);
float* B_smem = reinterpret_cast<float*>(smem + 16 * 1024);
// A, B and C register fragment
float A_frag[1][16];
float B_frag[1][8];
float C_frag[16][8];
#pragma unroll
for (int i = 0; i < 16; ++i) {
#pragma unroll
for (int j = 0; j < 8; ++j) {
C_frag[i][j] = 0;
}
}
const uint32_t lane_id = threadIdx.x % 32;
const uint32_t warp_id = threadIdx.x / 32;
// 4x8 threads each warp for FFMA
const uint32_t mma_tid_x = (lane_id / 2) % 8;
const uint32_t mma_tid_y = (lane_id / 16) * 2 + (lane_id % 2);
// A_tile & B_tile ldg pointer
const char* A_ldg_ptr = (const char*)(A + (blockIdx.y * 128 + threadIdx.x / 8) * k + threadIdx.x % 8);
const char* B_ldg_ptr = (const char*)(B + (threadIdx.x / 128) * n + blockIdx.x * 256 + threadIdx.x % 128);
// A_ldg_offset
uint32_t A_ldg_offset[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
A_ldg_offset[i] = i * 32 * k * sizeof(float);
}
// B_ldg_offset
uint32_t B_ldg_offset[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
B_ldg_offset[i] = i * 2 * n * sizeof(float);
}
// A_tile & B_tile sts/lds pointer
// using uint32_t pointer for faster double buffer switch
uint32_t A_sts_addr = smem_u32addr(A_smem + (threadIdx.x % 8) * 132 + (threadIdx.x / 8));
uint32_t B_sts_addr = smem_u32addr(B_smem + (threadIdx.x / 128) * 256 + (threadIdx.x % 128));
uint32_t A_lds_addr = smem_u32addr(A_smem + (warp_id / 4) * 64 + mma_tid_y * 4);
uint32_t B_lds_addr = smem_u32addr(B_smem + (warp_id % 4) * 64 + mma_tid_x * 4);
// ldg_guard to avoid LDG out of bound
uint32_t A_ldg_guard = 0;
#pragma unroll
for (int i = 0; i < 4; ++i) {
int m_idx = blockIdx.y * 128 + threadIdx.x / 8 + i * 32;
if (m_idx < m) {
A_ldg_guard |= (1u << i);
}
}
uint32_t B_ldg_guard = 0;
#pragma unroll
for (int i = 0; i < 2; ++i) {
int n_idx = blockIdx.x * 256 + threadIdx.x % 128 + i * 128;
if (n_idx < n) {
B_ldg_guard |= (1u << i);
}
}
// 1'st A&B tile loaded before the k_tile loop
uint32_t k_tiles = (k + 7) / 8 - 1;
// load 1'st tile to shared memory
uint32_t first_k_tile = k - k_tiles * 8;
#pragma unroll
for (int i = 0; i < 4; ++i) {
uint32_t src_size = threadIdx.x % 8 < first_k_tile ? 4 : 0;
ldgsts32(A_sts_addr + i * 32 * sizeof(float), A_ldg_ptr + A_ldg_offset[i], src_size,
(A_ldg_guard & (1u << i)) != 0);
}
A_ldg_ptr += first_k_tile * sizeof(float);
#pragma unroll
for (int i = 0; i < 4; ++i) {
uint32_t src_size = i * 2 + threadIdx.x / 128 < first_k_tile ? 4 : 0;
ldgsts32(B_sts_addr + i * 2 * 256 * sizeof(float), B_ldg_ptr + B_ldg_offset[i], src_size,
(B_ldg_guard & (1u << 0)) != 0);
ldgsts32(B_sts_addr + (i * 2 * 256 + 128) * sizeof(float), B_ldg_ptr + B_ldg_offset[i] + 128 * sizeof(float),
src_size, (B_ldg_guard & (1u << 1)) != 0);
}
B_ldg_ptr += n * first_k_tile * sizeof(float);
ldgsts_commit();
__syncthreads();
// switch double buffer
A_sts_addr ^= 0x2000;
B_sts_addr ^= 0x2000;
k_tiles += 1;
// k_tiles loop
for (; k_tiles > 0; --k_tiles) {
#pragma unroll
for (int k_frag = 0; k_frag < 8; ++k_frag) {
lds128(A_frag[0][0], A_frag[0][1], A_frag[0][2], A_frag[0][3], A_lds_addr + k_frag * 132 * sizeof(float));
lds128(A_frag[0][4], A_frag[0][5], A_frag[0][6], A_frag[0][7],
A_lds_addr + (k_frag * 132 + 16) * sizeof(float));
lds128(A_frag[0][8], A_frag[0][9], A_frag[0][10], A_frag[0][11],
A_lds_addr + (k_frag * 132 + 32) * sizeof(float));
lds128(A_frag[0][12], A_frag[0][13], A_frag[0][14], A_frag[0][15],
A_lds_addr + (k_frag * 132 + 48) * sizeof(float));
lds128(B_frag[0][0], B_frag[0][1], B_frag[0][2], B_frag[0][3], B_lds_addr + k_frag * 256 * sizeof(float));
lds128(B_frag[0][4], B_frag[0][5], B_frag[0][6], B_frag[0][7],
B_lds_addr + (k_frag * 256 + 32) * sizeof(float));
// load next A&B tile
if (k_tiles > 1 && k_frag < 4) {
ldgsts32(A_sts_addr + k_frag * 32 * sizeof(float), A_ldg_ptr + A_ldg_offset[k_frag],
(A_ldg_guard & (1u << k_frag)) != 0);
ldgsts32(B_sts_addr + k_frag * 2 * 256 * sizeof(float), B_ldg_ptr + B_ldg_offset[k_frag],
(B_ldg_guard & (1u << 0)) != 0);
ldgsts32(B_sts_addr + (k_frag * 2 * 256 + 128) * sizeof(float),
B_ldg_ptr + B_ldg_offset[k_frag] + 128 * sizeof(float), (B_ldg_guard & (1u << 1)) != 0);
}
// FFMA loop
#pragma unroll
for (int i = 0; i < 16; ++i) {
#pragma unroll
for (int j = 0; j < 8; ++j) {
C_frag[i][j] += A_frag[0][i] * B_frag[0][j];
}
}
if (k_frag == 7) {
// switch double buffer
A_lds_addr ^= 0x2000;
B_lds_addr ^= 0x2000;
A_sts_addr ^= 0x2000;
B_sts_addr ^= 0x2000;
// ldg pointer for next tile
A_ldg_ptr += 8 * sizeof(float);
B_ldg_ptr += B_ldg_step;
ldgsts_commit();
__syncthreads();
}
}
}
// C_tile write back, reuse A&B tile shared memory buffer
uint32_t C_sts_addr = smem_u32addr((float4*)(smem + warp_id * 4096) + mma_tid_y * 4 * 9 + mma_tid_x);
const float* C_lds_ptr = (float*)(smem + warp_id * 4096) + lane_id;
uint32_t m_idx = blockIdx.y * 128 + warp_id / 4 * 64;
uint32_t n_idx = blockIdx.x * 256 + warp_id % 4 * 64 + lane_id;
float* C_stg_ptr = C + m_idx * n + n_idx;
if (m_idx >= m) {
return;
} else if (m_idx + 64 <= m) {
uint32_t n_guard = n < n_idx ? 0 : n - n_idx;
#pragma unroll
for (int i = 0; i < 4; ++i) {
#pragma unroll
for (int j = 0; j < 2; ++j) {
__syncthreads();
#pragma unroll
for (int p = 0; p < 4; ++p) {
sts128(C_frag[i * 4 + p][j * 4], C_frag[i * 4 + p][j * 4 + 1], C_frag[i * 4 + p][j * 4 + 2],
C_frag[i * 4 + p][j * 4 + 3], C_sts_addr + p * 9 * sizeof(float4));
}
__syncthreads();
#pragma unroll
for (int p = 0; p < 16; ++p) {
stg32(C_lds_ptr[p * 36], C_stg_ptr + (i * 16 + p) * n + j * 32, j * 32 < n_guard);
}
}
}
} else {
#pragma unroll
for (int i = 0; i < 4; ++i) {
#pragma unroll
for (int j = 0; j < 2; ++j) {
StgFrag stg_frag(C_frag, j, i);
C_tile_wb(stg_frag, C_stg_ptr + i * 16 * n + j * 32, C_lds_ptr, C_sts_addr, m, n, m_idx + i * 16,
n_idx + j * 32);
}
}
}
// }
}
__global__ __launch_bounds__(256) void ampere_sgemm_my_opt_128x256x8_kernel_sm_reg_pingpong(const float* A,
const float* B, float* C, size_t m, size_t n, size_t k, size_t B_ldg_step,
int cycle_count) { // n * sizeof(float) * 8
// for (int i = 0; i < cycle_count; ++i) {
__shared__ __align__(16 * 1024) char smem[32 * 1024];
float* A_smem = reinterpret_cast<float*>(smem);
float* B_smem = reinterpret_cast<float*>(smem + 16 * 1024);
// A, B and C register fragment
float A_frag[2][16];
float B_frag[2][8];
float C_frag[16][8];
#pragma unroll
for (int i = 0; i < 16; ++i) {
#pragma unroll
for (int j = 0; j < 8; ++j) {
C_frag[i][j] = 0;
}
}
const uint32_t lane_id = threadIdx.x % 32;
const uint32_t warp_id = threadIdx.x / 32;
// 4x8 threads each warp for FFMA
const uint32_t mma_tid_x = (lane_id / 2) % 8;
const uint32_t mma_tid_y = (lane_id / 16) * 2 + (lane_id % 2);
// A_tile & B_tile ldg pointer
const char* A_ldg_ptr = (const char*)(A + (blockIdx.y * 128 + threadIdx.x / 8) * k + threadIdx.x % 8);
const char* B_ldg_ptr = (const char*)(B + (threadIdx.x / 128) * n + blockIdx.x * 256 + threadIdx.x % 128);
// A_ldg_offset
uint32_t A_ldg_offset[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
A_ldg_offset[i] = i * 32 * k * sizeof(float);
}
// B_ldg_offset
uint32_t B_ldg_offset[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
B_ldg_offset[i] = i * 2 * n * sizeof(float);
}
// A_tile & B_tile sts/lds pointer
// using uint32_t pointer for faster double buffer switch
uint32_t A_sts_addr = smem_u32addr(A_smem + (threadIdx.x % 8) * 132 + (threadIdx.x / 8));
uint32_t B_sts_addr = smem_u32addr(B_smem + (threadIdx.x / 128) * 256 + (threadIdx.x % 128));
uint32_t A_lds_addr = smem_u32addr(A_smem + (warp_id / 4) * 64 + mma_tid_y * 4);
uint32_t B_lds_addr = smem_u32addr(B_smem + (warp_id % 4) * 64 + mma_tid_x * 4);
// ldg_guard to avoid LDG out of bound
uint32_t A_ldg_guard = 0;
#pragma unroll
for (int i = 0; i < 4; ++i) {
int m_idx = blockIdx.y * 128 + threadIdx.x / 8 + i * 32;
if (m_idx < m) {
A_ldg_guard |= (1u << i);
}
}
uint32_t B_ldg_guard = 0;
#pragma unroll
for (int i = 0; i < 2; ++i) {
int n_idx = blockIdx.x * 256 + threadIdx.x % 128 + i * 128;
if (n_idx < n) {
B_ldg_guard |= (1u << i);
}
}
// 1'st A&B tile loaded before the k_tile loop
uint32_t k_tiles = (k + 7) / 8 - 1;
// load 1'st tile to shared memory
uint32_t first_k_tile = k - k_tiles * 8;
#pragma unroll
for (int i = 0; i < 4; ++i) {
uint32_t src_size = threadIdx.x % 8 < first_k_tile ? 4 : 0;
ldgsts32(A_sts_addr + i * 32 * sizeof(float), A_ldg_ptr + A_ldg_offset[i], src_size,
(A_ldg_guard & (1u << i)) != 0);
}
A_ldg_ptr += first_k_tile * sizeof(float);
#pragma unroll
for (int i = 0; i < 4; ++i) {
uint32_t src_size = i * 2 + threadIdx.x / 128 < first_k_tile ? 4 : 0;
ldgsts32(B_sts_addr + i * 2 * 256 * sizeof(float), B_ldg_ptr + B_ldg_offset[i], src_size,
(B_ldg_guard & (1u << 0)) != 0);
ldgsts32(B_sts_addr + (i * 2 * 256 + 128) * sizeof(float), B_ldg_ptr + B_ldg_offset[i] + 128 * sizeof(float),
src_size, (B_ldg_guard & (1u << 1)) != 0);
}
B_ldg_ptr += n * first_k_tile * sizeof(float);
ldgsts_commit();
__syncthreads();
// switch double buffer
A_sts_addr ^= 0x2000;
B_sts_addr ^= 0x2000;
// load 1'st fragment
lds128(A_frag[0][0], A_frag[0][1], A_frag[0][2], A_frag[0][3], A_lds_addr);
lds128(A_frag[0][4], A_frag[0][5], A_frag[0][6], A_frag[0][7], A_lds_addr + 16 * sizeof(float));
lds128(A_frag[0][8], A_frag[0][9], A_frag[0][10], A_frag[0][11], A_lds_addr + 32 * sizeof(float));
lds128(A_frag[0][12], A_frag[0][13], A_frag[0][14], A_frag[0][15], A_lds_addr + 48 * sizeof(float));
lds128(B_frag[0][0], B_frag[0][1], B_frag[0][2], B_frag[0][3], B_lds_addr);
lds128(B_frag[0][4], B_frag[0][5], B_frag[0][6], B_frag[0][7], B_lds_addr + 32 * sizeof(float));
// k_tiles += 1;
// k_tiles loop
for (; k_tiles > 0; --k_tiles) {
#pragma unroll
for (int k_frag = 0; k_frag < 8; ++k_frag) {
if (k_frag == 7) {
// switch double buffer
A_lds_addr ^= 0x2000;
B_lds_addr ^= 0x2000;
A_sts_addr ^= 0x2000;
B_sts_addr ^= 0x2000;
// ldg pointer for next tile
A_ldg_ptr += 8 * sizeof(float);
B_ldg_ptr += B_ldg_step;
ldgsts_commit();
__syncthreads();
}
// load next A&B fragment from shared memory to register
lds128(A_frag[(k_frag + 1) % 2][0], A_frag[(k_frag + 1) % 2][1], A_frag[(k_frag + 1) % 2][2],
A_frag[(k_frag + 1) % 2][3], A_lds_addr + (k_frag + 1) % 8 * 132 * sizeof(float));
lds128(A_frag[(k_frag + 1) % 2][4], A_frag[(k_frag + 1) % 2][5], A_frag[(k_frag + 1) % 2][6],
A_frag[(k_frag + 1) % 2][7], A_lds_addr + ((k_frag + 1) % 8 * 132 + 16) * sizeof(float));
lds128(A_frag[(k_frag + 1) % 2][8], A_frag[(k_frag + 1) % 2][9], A_frag[(k_frag + 1) % 2][10],
A_frag[(k_frag + 1) % 2][11], A_lds_addr + ((k_frag + 1) % 8 * 132 + 32) * sizeof(float));
lds128(A_frag[(k_frag + 1) % 2][12], A_frag[(k_frag + 1) % 2][13], A_frag[(k_frag + 1) % 2][14],
A_frag[(k_frag + 1) % 2][15], A_lds_addr + ((k_frag + 1) % 8 * 132 + 48) * sizeof(float));
lds128(B_frag[(k_frag + 1) % 2][0], B_frag[(k_frag + 1) % 2][1], B_frag[(k_frag + 1) % 2][2],
B_frag[(k_frag + 1) % 2][3], B_lds_addr + (k_frag + 1) % 8 * 256 * sizeof(float));
lds128(B_frag[(k_frag + 1) % 2][4], B_frag[(k_frag + 1) % 2][5], B_frag[(k_frag + 1) % 2][6],
B_frag[(k_frag + 1) % 2][7], B_lds_addr + ((k_frag + 1) % 8 * 256 + 32) * sizeof(float));
// load next A&B tile
// if (k_tiles > 1 && k_frag < 4) {
if (k_frag < 4) {
ldgsts32(A_sts_addr + k_frag * 32 * sizeof(float), A_ldg_ptr + A_ldg_offset[k_frag],
(A_ldg_guard & (1u << k_frag)) != 0);
ldgsts32(B_sts_addr + k_frag * 2 * 256 * sizeof(float), B_ldg_ptr + B_ldg_offset[k_frag],
(B_ldg_guard & (1u << 0)) != 0);
ldgsts32(B_sts_addr + (k_frag * 2 * 256 + 128) * sizeof(float),
B_ldg_ptr + B_ldg_offset[k_frag] + 128 * sizeof(float), (B_ldg_guard & (1u << 1)) != 0);
}
// FFMA loop
#pragma unroll
for (int i = 0; i < 16; ++i) {
#pragma unroll
for (int j = 0; j < 8; ++j) {
C_frag[i][j] += A_frag[k_frag % 2][i] * B_frag[k_frag % 2][j];
}
}
}
}
// FFMA for the last tile
#pragma unroll
for (int k_frag = 0; k_frag < 8; ++k_frag) {
if (k_frag < 7) {
// load next A&B fragment from shared memory to register
lds128(A_frag[(k_frag + 1) % 2][0], A_frag[(k_frag + 1) % 2][1], A_frag[(k_frag + 1) % 2][2],
A_frag[(k_frag + 1) % 2][3], A_lds_addr + (k_frag + 1) % 8 * 132 * sizeof(float));
lds128(A_frag[(k_frag + 1) % 2][4], A_frag[(k_frag + 1) % 2][5], A_frag[(k_frag + 1) % 2][6],
A_frag[(k_frag + 1) % 2][7], A_lds_addr + ((k_frag + 1) % 8 * 132 + 16) * sizeof(float));
lds128(A_frag[(k_frag + 1) % 2][8], A_frag[(k_frag + 1) % 2][9], A_frag[(k_frag + 1) % 2][10],
A_frag[(k_frag + 1) % 2][11], A_lds_addr + ((k_frag + 1) % 8 * 132 + 32) * sizeof(float));
lds128(A_frag[(k_frag + 1) % 2][12], A_frag[(k_frag + 1) % 2][13], A_frag[(k_frag + 1) % 2][14],
A_frag[(k_frag + 1) % 2][15], A_lds_addr + ((k_frag + 1) % 8 * 132 + 48) * sizeof(float));
lds128(B_frag[(k_frag + 1) % 2][0], B_frag[(k_frag + 1) % 2][1], B_frag[(k_frag + 1) % 2][2],
B_frag[(k_frag + 1) % 2][3], B_lds_addr + (k_frag + 1) % 8 * 256 * sizeof(float));
lds128(B_frag[(k_frag + 1) % 2][4], B_frag[(k_frag + 1) % 2][5], B_frag[(k_frag + 1) % 2][6],
B_frag[(k_frag + 1) % 2][7], B_lds_addr + ((k_frag + 1) % 8 * 256 + 32) * sizeof(float));
}
// FFMA loop
#pragma unroll
for (int i = 0; i < 16; ++i) {
#pragma unroll
for (int j = 0; j < 8; ++j) {
C_frag[i][j] += A_frag[k_frag % 2][i] * B_frag[k_frag % 2][j];
}
}
}
// C_tile write back, reuse A&B tile shared memory buffer
uint32_t C_sts_addr = smem_u32addr((float4*)(smem + warp_id * 4096) + mma_tid_y * 4 * 9 + mma_tid_x);
const float* C_lds_ptr = (float*)(smem + warp_id * 4096) + lane_id;
uint32_t m_idx = blockIdx.y * 128 + warp_id / 4 * 64;
uint32_t n_idx = blockIdx.x * 256 + warp_id % 4 * 64 + lane_id;
float* C_stg_ptr = C + m_idx * n + n_idx;
if (m_idx >= m) {
return;
} else if (m_idx + 64 <= m) {
uint32_t n_guard = n < n_idx ? 0 : n - n_idx;
#pragma unroll
for (int i = 0; i < 4; ++i) {
#pragma unroll
for (int j = 0; j < 2; ++j) {
__syncthreads();
#pragma unroll
for (int p = 0; p < 4; ++p) {
sts128(C_frag[i * 4 + p][j * 4], C_frag[i * 4 + p][j * 4 + 1], C_frag[i * 4 + p][j * 4 + 2],
C_frag[i * 4 + p][j * 4 + 3], C_sts_addr + p * 9 * sizeof(float4));
}
__syncthreads();
#pragma unroll
for (int p = 0; p < 16; ++p) {
stg32(C_lds_ptr[p * 36], C_stg_ptr + (i * 16 + p) * n + j * 32, j * 32 < n_guard);
}
}
}
} else {
#pragma unroll
for (int i = 0; i < 4; ++i) {
#pragma unroll
for (int j = 0; j < 2; ++j) {
StgFrag stg_frag(C_frag, j, i);
C_tile_wb(stg_frag, C_stg_ptr + i * 16 * n + j * 32, C_lds_ptr, C_sts_addr, m, n, m_idx + i * 16,
n_idx + j * 32);
}
}
}
// }
}
|
f06cbe1730709bbda2b15770dc75dda35b3e1935.cu
|
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
static bool check(const float* A, const float* B, const float* C, int m, int n, int k) {
for (int i = 0; i < m; ++i) {
for (int j = 0; j < n; ++j) {
float sum = 0.f;
for (int p = 0; p < k; ++p) {
sum += A[i * k + p] * B[j + p * n];
}
if (std::fabs(sum - C[i * n + j]) / std::fabs(sum) > 1e-5f) {
printf("C[%d][%d] not match, %f vs %f\n", i, j, sum, C[i * n + j]);
return false;
}
}
}
return true;
}
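// Added descriptive note: the helpers below wrap raw PTX so the kernels can use
// Ampere cp.async (LDGSTS) global->shared copies, predicated global stores, and
// 128-bit vectorized shared-memory loads/stores on 32-bit shared addresses.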
__device__ __forceinline__ uint32_t smem_u32addr(const void* smem_ptr) {
uint32_t addr;
asm("{.reg .u64 u64addr;\n"
" cvta.to.shared.u64 u64addr, %1;\n"
" cvt.u32.u64 %0, u64addr;}\n"
: "=r"(addr)
: "l"(smem_ptr));
return addr;
}
__device__ __forceinline__ void ldgsts32(const uint32_t& smem_addr, const void* gmem_ptr, bool guard) {
asm volatile("{.reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
#if __CUDACC_VER_MAJOR__ >= 11 && __CUDACC_VER_MINOR__ >= 4
" @p cp.async.ca.shared.global.L2::128B [%0], [%1], 4;}\n"
#else
" @p cp.async.ca.shared.global [%0], [%1], 4;}\n"
#endif
:
: "r"(smem_addr), "l"(gmem_ptr), "r"((int)guard));
}
__device__ __forceinline__ void ldgsts32(
const uint32_t& smem_addr, const void* gmem_ptr, const uint32_t& src_size, bool guard) {
asm volatile("{.reg .pred p;\n"
" setp.ne.b32 p, %3, 0;\n"
#if __CUDACC_VER_MAJOR__ >= 11 && __CUDACC_VER_MINOR__ >= 4
" @p cp.async.ca.shared.global.L2::128B [%0], [%1], 4, %2;}\n"
#else
" @p cp.async.ca.shared.global [%0], [%1], 4, %2;}\n"
#endif
:
: "r"(smem_addr), "l"(gmem_ptr), "r"(src_size), "r"((int)guard));
}
__device__ __forceinline__ void ldgsts_commit() { asm volatile("cp.async.wait_all;\n" ::); }
__device__ __forceinline__ void stg32(const float& reg, void* ptr, bool guard) {
asm volatile("{.reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" @p st.global.f32 [%0], %1;}\n"
:
: "l"(ptr), "f"(reg), "r"((int)guard));
}
__device__ __forceinline__ void lds128(float& reg0, float& reg1, float& reg2, float& reg3, const uint32_t& addr) {
asm volatile("ld.shared.v4.f32 {%0, %1, %2, %3}, [%4];\n"
: "=f"(reg0), "=f"(reg1), "=f"(reg2), "=f"(reg3)
: "r"(addr));
}
__device__ __forceinline__ void sts32(const float& reg, const uint32_t& addr) {
asm volatile("st.shared.f32 [%0], %1;\n" : : "r"(addr), "f"(reg));
}
__device__ __forceinline__ void sts128(
const float& reg0, const float& reg1, const float& reg2, const float& reg3, const uint32_t& addr) {
asm volatile("st.shared.v4.f32 [%0], {%1, %2, %3, %4};\n"
:
: "r"(addr), "f"(reg0), "f"(reg1), "f"(reg2), "f"(reg3));
}
struct StgFrag {
float data[4][4];
__device__ __forceinline__ StgFrag(const float (&C_frag)[16][8], int tile_x, int tile_y) {
#pragma unroll
for (int i = 0; i < 4; ++i) {
#pragma unroll
for (int j = 0; j < 4; ++j) {
data[i][j] = C_frag[tile_y * 4 + i][tile_x * 4 + j];
}
}
}
};
static __device__ __noinline__ void C_tile_wb(StgFrag C_frag, float* C_stg_ptr, const float* C_lds_ptr,
uint32_t C_sts_addr, uint32_t m, uint32_t n, uint32_t m_idx, uint32_t n_idx) {
__syncthreads();
#pragma unroll
for (int i = 0; i < 4; ++i) {
sts128(C_frag.data[i][0], C_frag.data[i][1], C_frag.data[i][2], C_frag.data[i][3],
C_sts_addr + i * 9 * sizeof(float4));
}
__syncthreads();
uint32_t m_guard = m < m_idx ? 0 : m - m_idx;
#pragma unroll
for (int i = 0; i < 16; ++i) {
stg32(C_lds_ptr[i * 36], C_stg_ptr + i * n, i < m_guard && n_idx < n);
}
}
/*
* matrix A, B and C: row-major
*
* mma block:
* thread block tile: m128n256k8
* warp tile: m64n64k8
* thread tile: m16n8k8
* thread fragment:
* matrixA: 16x1 FP32
* matrixB: 1x8 FP32
*
* ----------------------------------------------------------------
* thread block tile map:
*
* 256
* --|---------------------------------------|
* B_tile 8| |
* --|---------------------------------------|
*
* A_tile | 8 | | 64 |
* --|---| --|---------|---------|---------|---------|
* | | | | | | |
* | | 64| warp0 | warp1 | warp2 | warp3 |
* | | | | | | |
* 128| | --|---------|---------|---------|---------|
* | | | | | | |
* | | | warp4 | warp5 | warp6 | warp7 |
* | | | | | | |
* --|---| |---------|---------|---------|---------|
*
* ----------------------------------------------------------------
* warp tile map:
*
* 'z' thread map to avoid LDS.128 shared memory broadcast limitation.
*
* | 32 ||
* B_frag --|---|---|---|---|---|---|---|---||---|---|---|---|---|---|---|---|
* 1|///| | | | | | | ||///| | | | | | | |
* --|---|---|---|---|---|---|---|---||---|---|---|---|---|---|---|---|
* A_frag | 4 | ||
* | 1 | ||
* --|---|-- |---|---|---|---|---|---|---|---||---|---------------------------|
* |///|4 |t0 |t2 |t4 |t6 |t8 |t10|t12|t14||t0 | |
* |---|-- |---|---|---|---|---|---|---|---||---| |
* | | |t1 |t3 |t5 |t7 |t9 |t11|t13|t15|| |
* 16|---| |---|---|---|---|---|---|---|---|| |
* | | |t16|t18|t20|t22|t24|t26|t28|t30|| |
* |---| |---|---|---|---|---|---|---|---|| |
* | | |t17|t19|t21|t23|t25|t27|t29|t31|| |
* ==|===|=====|===|===|===|===|===|===|===|===||===|============================
* |///| |t0 | ||t0 | |
* |---| |---| ||---| |
* | | | || |
* |---| | || |
* | | | || |
* |---| | || |
* | | | || |
* |---| |-------------------------------||-------------------------------|
* |///| |t0 | ||t0 | |
* |---| |---| ||---| |
* | | | || |
* |---| | || |
* | | | || |
* |---| | || |
* | | | || |
* |---| |-------------------------------||-------------------------------|
* |///| |t0 | ||t0 | |
* |---| |---| ||---| |
* | | | || |
* |---| | || |
* | | | || |
* |---| | || |
* | | | || |
* |---| |-------------------------------||-------------------------------|
*
*/
__global__ __launch_bounds__(256) void ampere_sgemm_my_opt_128x256x8_kernel_no_pingpong(const float* A, const float* B,
float* C, size_t m, size_t n, size_t k, size_t B_ldg_step,
int cycle_count) { // n * sizeof(float) * 8
/*
* matrix A & B thread block tile shared memory (double buffer)
* matrix A: 132 * 8 * 4Byte/item * double buffer = 4.125KB * 2
* matrix B: 256 * 8 * 4Byte/item * double buffer = 16KB
*
* for double buffer faster switch, A_smem requires 8KB * 2 shared memory
* and 16KB aligned, B_smem should be 16KB aligned, then the double buffer
* can be switched by only 1 xor instruction:
* (uint32_t &)A_smem ^= 0x2000;
* (uint32_t &)B_smem ^= 0x2000;
*/
// for (int i = 0; i < cycle_count; ++i) {
__shared__ __align__(16 * 1024) char smem[32 * 1024];
float* A_smem = reinterpret_cast<float*>(smem);
float* B_smem = reinterpret_cast<float*>(smem + 16 * 1024);
// A, B and C register fragment
float A_frag[1][16];
float B_frag[1][8];
float C_frag[16][8];
#pragma unroll
for (int i = 0; i < 16; ++i) {
#pragma unroll
for (int j = 0; j < 8; ++j) {
C_frag[i][j] = 0;
}
}
const uint32_t lane_id = threadIdx.x % 32;
const uint32_t warp_id = threadIdx.x / 32;
// 4x8 threads each warp for FFMA
const uint32_t mma_tid_x = (lane_id / 2) % 8;
const uint32_t mma_tid_y = (lane_id / 16) * 2 + (lane_id % 2);
// A_tile & B_tile ldg pointer
const char* A_ldg_ptr = (const char*)(A + (blockIdx.y * 128 + threadIdx.x / 8) * k + threadIdx.x % 8);
const char* B_ldg_ptr = (const char*)(B + (threadIdx.x / 128) * n + blockIdx.x * 256 + threadIdx.x % 128);
// A_ldg_offset
uint32_t A_ldg_offset[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
A_ldg_offset[i] = i * 32 * k * sizeof(float);
}
// B_ldg_offset
uint32_t B_ldg_offset[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
B_ldg_offset[i] = i * 2 * n * sizeof(float);
}
// A_tile & B_tile sts/lds pointer
// using uint32_t pointer for faster double buffer switch
uint32_t A_sts_addr = smem_u32addr(A_smem + (threadIdx.x % 8) * 132 + (threadIdx.x / 8));
uint32_t B_sts_addr = smem_u32addr(B_smem + (threadIdx.x / 128) * 256 + (threadIdx.x % 128));
uint32_t A_lds_addr = smem_u32addr(A_smem + (warp_id / 4) * 64 + mma_tid_y * 4);
uint32_t B_lds_addr = smem_u32addr(B_smem + (warp_id % 4) * 64 + mma_tid_x * 4);
// ldg_guard to avoid LDG out of bound
uint32_t A_ldg_guard = 0;
#pragma unroll
for (int i = 0; i < 4; ++i) {
int m_idx = blockIdx.y * 128 + threadIdx.x / 8 + i * 32;
if (m_idx < m) {
A_ldg_guard |= (1u << i);
}
}
uint32_t B_ldg_guard = 0;
#pragma unroll
for (int i = 0; i < 2; ++i) {
int n_idx = blockIdx.x * 256 + threadIdx.x % 128 + i * 128;
if (n_idx < n) {
B_ldg_guard |= (1u << i);
}
}
// 1'st A&B tile loaded before the k_tile loop
uint32_t k_tiles = (k + 7) / 8 - 1;
// load 1'st tile to shared memory
uint32_t first_k_tile = k - k_tiles * 8;
#pragma unroll
for (int i = 0; i < 4; ++i) {
uint32_t src_size = threadIdx.x % 8 < first_k_tile ? 4 : 0;
ldgsts32(A_sts_addr + i * 32 * sizeof(float), A_ldg_ptr + A_ldg_offset[i], src_size,
(A_ldg_guard & (1u << i)) != 0);
}
A_ldg_ptr += first_k_tile * sizeof(float);
#pragma unroll
for (int i = 0; i < 4; ++i) {
uint32_t src_size = i * 2 + threadIdx.x / 128 < first_k_tile ? 4 : 0;
ldgsts32(B_sts_addr + i * 2 * 256 * sizeof(float), B_ldg_ptr + B_ldg_offset[i], src_size,
(B_ldg_guard & (1u << 0)) != 0);
ldgsts32(B_sts_addr + (i * 2 * 256 + 128) * sizeof(float), B_ldg_ptr + B_ldg_offset[i] + 128 * sizeof(float),
src_size, (B_ldg_guard & (1u << 1)) != 0);
}
B_ldg_ptr += n * first_k_tile * sizeof(float);
ldgsts_commit();
__syncthreads();
k_tiles += 1;
// k_tiles loop
for (; k_tiles > 0; --k_tiles) {
#pragma unroll
for (int k_frag = 0; k_frag < 8; ++k_frag) {
lds128(A_frag[0][0], A_frag[0][1], A_frag[0][2], A_frag[0][3], A_lds_addr + k_frag * 132 * sizeof(float));
lds128(A_frag[0][4], A_frag[0][5], A_frag[0][6], A_frag[0][7],
A_lds_addr + (k_frag * 132 + 16) * sizeof(float));
lds128(A_frag[0][8], A_frag[0][9], A_frag[0][10], A_frag[0][11],
A_lds_addr + (k_frag * 132 + 32) * sizeof(float));
lds128(A_frag[0][12], A_frag[0][13], A_frag[0][14], A_frag[0][15],
A_lds_addr + (k_frag * 132 + 48) * sizeof(float));
lds128(B_frag[0][0], B_frag[0][1], B_frag[0][2], B_frag[0][3], B_lds_addr + k_frag * 256 * sizeof(float));
lds128(B_frag[0][4], B_frag[0][5], B_frag[0][6], B_frag[0][7],
B_lds_addr + (k_frag * 256 + 32) * sizeof(float));
// FFMA loop
#pragma unroll
for (int i = 0; i < 16; ++i) {
#pragma unroll
for (int j = 0; j < 8; ++j) {
C_frag[i][j] += A_frag[0][i] * B_frag[0][j];
}
}
}
if (k_tiles > 1) {
// load next A&B tile
#pragma unroll
for (int k_frag = 0; k_frag < 4; ++k_frag) {
ldgsts32(A_sts_addr + k_frag * 32 * sizeof(float), A_ldg_ptr + A_ldg_offset[k_frag],
(A_ldg_guard & (1u << k_frag)) != 0);
ldgsts32(B_sts_addr + k_frag * 2 * 256 * sizeof(float), B_ldg_ptr + B_ldg_offset[k_frag],
(B_ldg_guard & (1u << 0)) != 0);
ldgsts32(B_sts_addr + (k_frag * 2 * 256 + 128) * sizeof(float),
B_ldg_ptr + B_ldg_offset[k_frag] + 128 * sizeof(float), (B_ldg_guard & (1u << 1)) != 0);
}
// ldg pointer for next tile
A_ldg_ptr += 8 * sizeof(float);
B_ldg_ptr += B_ldg_step;
ldgsts_commit();
__syncthreads();
}
}
// C_tile write back, reuse A&B tile shared memory buffer
uint32_t C_sts_addr = smem_u32addr((float4*)(smem + warp_id * 4096) + mma_tid_y * 4 * 9 + mma_tid_x);
const float* C_lds_ptr = (float*)(smem + warp_id * 4096) + lane_id;
uint32_t m_idx = blockIdx.y * 128 + warp_id / 4 * 64;
uint32_t n_idx = blockIdx.x * 256 + warp_id % 4 * 64 + lane_id;
float* C_stg_ptr = C + m_idx * n + n_idx;
if (m_idx >= m) {
return;
} else if (m_idx + 64 <= m) {
uint32_t n_guard = n < n_idx ? 0 : n - n_idx;
#pragma unroll
for (int i = 0; i < 4; ++i) {
#pragma unroll
for (int j = 0; j < 2; ++j) {
__syncthreads();
#pragma unroll
for (int p = 0; p < 4; ++p) {
sts128(C_frag[i * 4 + p][j * 4], C_frag[i * 4 + p][j * 4 + 1], C_frag[i * 4 + p][j * 4 + 2],
C_frag[i * 4 + p][j * 4 + 3], C_sts_addr + p * 9 * sizeof(float4));
}
__syncthreads();
#pragma unroll
for (int p = 0; p < 16; ++p) {
stg32(C_lds_ptr[p * 36], C_stg_ptr + (i * 16 + p) * n + j * 32, j * 32 < n_guard);
}
}
}
} else {
#pragma unroll
for (int i = 0; i < 4; ++i) {
#pragma unroll
for (int j = 0; j < 2; ++j) {
StgFrag stg_frag(C_frag, j, i);
C_tile_wb(stg_frag, C_stg_ptr + i * 16 * n + j * 32, C_lds_ptr, C_sts_addr, m, n, m_idx + i * 16,
n_idx + j * 32);
}
}
}
// }
}
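// Hedged host-side launch sketch (no launcher exists in this file): grid dimensions
// follow the m128n256k8 thread block tile documented above, with 256 threads per
// block; the function name, stream handling and cycle_count value are assumptions.
static void launch_sgemm_no_pingpong(const float* A, const float* B, float* C,
                                     size_t m, size_t n, size_t k, cudaStream_t stream) {
dim3 block(256);
dim3 grid((unsigned)((n + 255) / 256), (unsigned)((m + 127) / 128));
size_t B_ldg_step = n * sizeof(float) * 8; // matches the "n * sizeof(float) * 8" note on the kernel parameter
ampere_sgemm_my_opt_128x256x8_kernel_no_pingpong<<<grid, block, 0, stream>>>(
    A, B, C, m, n, k, B_ldg_step, /*cycle_count=*/1);
}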
__global__ __launch_bounds__(256) void ampere_sgemm_my_opt_128x256x8_kernel_sm_pingpong(const float* A, const float* B,
float* C, size_t m, size_t n, size_t k, size_t B_ldg_step,
int cycle_count) { // n * sizeof(float) * 8
// for (int i = 0; i < cycle_count; ++i) {
__shared__ __align__(16 * 1024) char smem[32 * 1024];
float* A_smem = reinterpret_cast<float*>(smem);
float* B_smem = reinterpret_cast<float*>(smem + 16 * 1024);
// A, B and C register fragment
float A_frag[1][16];
float B_frag[1][8];
float C_frag[16][8];
#pragma unroll
for (int i = 0; i < 16; ++i) {
#pragma unroll
for (int j = 0; j < 8; ++j) {
C_frag[i][j] = 0;
}
}
const uint32_t lane_id = threadIdx.x % 32;
const uint32_t warp_id = threadIdx.x / 32;
// 4x8 threads each warp for FFMA
const uint32_t mma_tid_x = (lane_id / 2) % 8;
const uint32_t mma_tid_y = (lane_id / 16) * 2 + (lane_id % 2);
// A_tile & B_tile ldg pointer
const char* A_ldg_ptr = (const char*)(A + (blockIdx.y * 128 + threadIdx.x / 8) * k + threadIdx.x % 8);
const char* B_ldg_ptr = (const char*)(B + (threadIdx.x / 128) * n + blockIdx.x * 256 + threadIdx.x % 128);
// A_ldg_offset
uint32_t A_ldg_offset[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
A_ldg_offset[i] = i * 32 * k * sizeof(float);
}
// B_ldg_offset
uint32_t B_ldg_offset[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
B_ldg_offset[i] = i * 2 * n * sizeof(float);
}
// A_tile & B_tile sts/lds pointer
// using uint32_t pointer for faster double buffer switch
uint32_t A_sts_addr = smem_u32addr(A_smem + (threadIdx.x % 8) * 132 + (threadIdx.x / 8));
uint32_t B_sts_addr = smem_u32addr(B_smem + (threadIdx.x / 128) * 256 + (threadIdx.x % 128));
uint32_t A_lds_addr = smem_u32addr(A_smem + (warp_id / 4) * 64 + mma_tid_y * 4);
uint32_t B_lds_addr = smem_u32addr(B_smem + (warp_id % 4) * 64 + mma_tid_x * 4);
// ldg_guard to avoid LDG out of bound
uint32_t A_ldg_guard = 0;
#pragma unroll
for (int i = 0; i < 4; ++i) {
int m_idx = blockIdx.y * 128 + threadIdx.x / 8 + i * 32;
if (m_idx < m) {
A_ldg_guard |= (1u << i);
}
}
uint32_t B_ldg_guard = 0;
#pragma unroll
for (int i = 0; i < 2; ++i) {
int n_idx = blockIdx.x * 256 + threadIdx.x % 128 + i * 128;
if (n_idx < n) {
B_ldg_guard |= (1u << i);
}
}
// 1'st A&B tile loaded before the k_tile loop
uint32_t k_tiles = (k + 7) / 8 - 1;
// load 1'st tile to shared memory
uint32_t first_k_tile = k - k_tiles * 8;
#pragma unroll
for (int i = 0; i < 4; ++i) {
uint32_t src_size = threadIdx.x % 8 < first_k_tile ? 4 : 0;
ldgsts32(A_sts_addr + i * 32 * sizeof(float), A_ldg_ptr + A_ldg_offset[i], src_size,
(A_ldg_guard & (1u << i)) != 0);
}
A_ldg_ptr += first_k_tile * sizeof(float);
#pragma unroll
for (int i = 0; i < 4; ++i) {
uint32_t src_size = i * 2 + threadIdx.x / 128 < first_k_tile ? 4 : 0;
ldgsts32(B_sts_addr + i * 2 * 256 * sizeof(float), B_ldg_ptr + B_ldg_offset[i], src_size,
(B_ldg_guard & (1u << 0)) != 0);
ldgsts32(B_sts_addr + (i * 2 * 256 + 128) * sizeof(float), B_ldg_ptr + B_ldg_offset[i] + 128 * sizeof(float),
src_size, (B_ldg_guard & (1u << 1)) != 0);
}
B_ldg_ptr += n * first_k_tile * sizeof(float);
ldgsts_commit();
__syncthreads();
// switch double buffer
A_sts_addr ^= 0x2000;
B_sts_addr ^= 0x2000;
k_tiles += 1;
// k_tiles loop
for (; k_tiles > 0; --k_tiles) {
#pragma unroll
for (int k_frag = 0; k_frag < 8; ++k_frag) {
lds128(A_frag[0][0], A_frag[0][1], A_frag[0][2], A_frag[0][3], A_lds_addr + k_frag * 132 * sizeof(float));
lds128(A_frag[0][4], A_frag[0][5], A_frag[0][6], A_frag[0][7],
A_lds_addr + (k_frag * 132 + 16) * sizeof(float));
lds128(A_frag[0][8], A_frag[0][9], A_frag[0][10], A_frag[0][11],
A_lds_addr + (k_frag * 132 + 32) * sizeof(float));
lds128(A_frag[0][12], A_frag[0][13], A_frag[0][14], A_frag[0][15],
A_lds_addr + (k_frag * 132 + 48) * sizeof(float));
lds128(B_frag[0][0], B_frag[0][1], B_frag[0][2], B_frag[0][3], B_lds_addr + k_frag * 256 * sizeof(float));
lds128(B_frag[0][4], B_frag[0][5], B_frag[0][6], B_frag[0][7],
B_lds_addr + (k_frag * 256 + 32) * sizeof(float));
// load next A&B tile
if (k_tiles > 1 && k_frag < 4) {
ldgsts32(A_sts_addr + k_frag * 32 * sizeof(float), A_ldg_ptr + A_ldg_offset[k_frag],
(A_ldg_guard & (1u << k_frag)) != 0);
ldgsts32(B_sts_addr + k_frag * 2 * 256 * sizeof(float), B_ldg_ptr + B_ldg_offset[k_frag],
(B_ldg_guard & (1u << 0)) != 0);
ldgsts32(B_sts_addr + (k_frag * 2 * 256 + 128) * sizeof(float),
B_ldg_ptr + B_ldg_offset[k_frag] + 128 * sizeof(float), (B_ldg_guard & (1u << 1)) != 0);
}
// FFMA loop
#pragma unroll
for (int i = 0; i < 16; ++i) {
#pragma unroll
for (int j = 0; j < 8; ++j) {
C_frag[i][j] += A_frag[0][i] * B_frag[0][j];
}
}
if (k_frag == 7) {
// switch double buffer
A_lds_addr ^= 0x2000;
B_lds_addr ^= 0x2000;
A_sts_addr ^= 0x2000;
B_sts_addr ^= 0x2000;
// ldg pointer for next tile
A_ldg_ptr += 8 * sizeof(float);
B_ldg_ptr += B_ldg_step;
ldgsts_commit();
__syncthreads();
}
}
}
// C_tile write back, reuse A&B tile shared memory buffer
uint32_t C_sts_addr = smem_u32addr((float4*)(smem + warp_id * 4096) + mma_tid_y * 4 * 9 + mma_tid_x);
const float* C_lds_ptr = (float*)(smem + warp_id * 4096) + lane_id;
uint32_t m_idx = blockIdx.y * 128 + warp_id / 4 * 64;
uint32_t n_idx = blockIdx.x * 256 + warp_id % 4 * 64 + lane_id;
float* C_stg_ptr = C + m_idx * n + n_idx;
if (m_idx >= m) {
return;
} else if (m_idx + 64 <= m) {
uint32_t n_guard = n < n_idx ? 0 : n - n_idx;
#pragma unroll
for (int i = 0; i < 4; ++i) {
#pragma unroll
for (int j = 0; j < 2; ++j) {
__syncthreads();
#pragma unroll
for (int p = 0; p < 4; ++p) {
sts128(C_frag[i * 4 + p][j * 4], C_frag[i * 4 + p][j * 4 + 1], C_frag[i * 4 + p][j * 4 + 2],
C_frag[i * 4 + p][j * 4 + 3], C_sts_addr + p * 9 * sizeof(float4));
}
__syncthreads();
#pragma unroll
for (int p = 0; p < 16; ++p) {
stg32(C_lds_ptr[p * 36], C_stg_ptr + (i * 16 + p) * n + j * 32, j * 32 < n_guard);
}
}
}
} else {
#pragma unroll
for (int i = 0; i < 4; ++i) {
#pragma unroll
for (int j = 0; j < 2; ++j) {
StgFrag stg_frag(C_frag, j, i);
C_tile_wb(stg_frag, C_stg_ptr + i * 16 * n + j * 32, C_lds_ptr, C_sts_addr, m, n, m_idx + i * 16,
n_idx + j * 32);
}
}
}
// }
}
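// The variant below keeps the same 128x256x8 tiling but additionally
// double-buffers the A/B fragments in registers (ping-pong): while the FFMAs
// for step k_frag execute out of A_frag[k_frag % 2] / B_frag[k_frag % 2], the
// fragments for step k_frag + 1 are loaded from shared memory into the other
// half, and the final k-tile is drained by a separate epilogue loop after the
// main k_tiles loop.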
__global__ __launch_bounds__(256) void ampere_sgemm_my_opt_128x256x8_kernel_sm_reg_pingpong(const float* A,
const float* B, float* C, size_t m, size_t n, size_t k,
size_t B_ldg_step, // n * sizeof(float) * 8
int cycle_count) {
// for (int i = 0; i < cycle_count; ++i) {
__shared__ __align__(16 * 1024) char smem[32 * 1024];
float* A_smem = reinterpret_cast<float*>(smem);
float* B_smem = reinterpret_cast<float*>(smem + 16 * 1024);
// A, B and C register fragment
float A_frag[2][16];
float B_frag[2][8];
float C_frag[16][8];
#pragma unroll
for (int i = 0; i < 16; ++i) {
#pragma unroll
for (int j = 0; j < 8; ++j) {
C_frag[i][j] = 0;
}
}
const uint32_t lane_id = threadIdx.x % 32;
const uint32_t warp_id = threadIdx.x / 32;
// 4x8 threads each warp for FFMA
const uint32_t mma_tid_x = (lane_id / 2) % 8;
const uint32_t mma_tid_y = (lane_id / 16) * 2 + (lane_id % 2);
// A_tile & B_tile ldg pointer
const char* A_ldg_ptr = (const char*)(A + (blockIdx.y * 128 + threadIdx.x / 8) * k + threadIdx.x % 8);
const char* B_ldg_ptr = (const char*)(B + (threadIdx.x / 128) * n + blockIdx.x * 256 + threadIdx.x % 128);
// A_ldg_offset
uint32_t A_ldg_offset[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
A_ldg_offset[i] = i * 32 * k * sizeof(float);
}
// B_ldg_offset
uint32_t B_ldg_offset[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
B_ldg_offset[i] = i * 2 * n * sizeof(float);
}
// A_tile & B_tile sts/lds pointer
// using uint32_t pointer for faster double buffer switch
uint32_t A_sts_addr = smem_u32addr(A_smem + (threadIdx.x % 8) * 132 + (threadIdx.x / 8));
uint32_t B_sts_addr = smem_u32addr(B_smem + (threadIdx.x / 128) * 256 + (threadIdx.x % 128));
uint32_t A_lds_addr = smem_u32addr(A_smem + (warp_id / 4) * 64 + mma_tid_y * 4);
uint32_t B_lds_addr = smem_u32addr(B_smem + (warp_id % 4) * 64 + mma_tid_x * 4);
// ldg_guard to avoid LDG out of bound
uint32_t A_ldg_guard = 0;
#pragma unroll
for (int i = 0; i < 4; ++i) {
int m_idx = blockIdx.y * 128 + threadIdx.x / 8 + i * 32;
if (m_idx < m) {
A_ldg_guard |= (1u << i);
}
}
uint32_t B_ldg_guard = 0;
#pragma unroll
for (int i = 0; i < 2; ++i) {
int n_idx = blockIdx.x * 256 + threadIdx.x % 128 + i * 128;
if (n_idx < n) {
B_ldg_guard |= (1u << i);
}
}
// 1'st A&B tile loaded before the k_tile loop
uint32_t k_tiles = (k + 7) / 8 - 1;
// load 1'st tile to shared memory
uint32_t first_k_tile = k - k_tiles * 8;
#pragma unroll
for (int i = 0; i < 4; ++i) {
uint32_t src_size = threadIdx.x % 8 < first_k_tile ? 4 : 0;
ldgsts32(A_sts_addr + i * 32 * sizeof(float), A_ldg_ptr + A_ldg_offset[i], src_size,
(A_ldg_guard & (1u << i)) != 0);
}
A_ldg_ptr += first_k_tile * sizeof(float);
#pragma unroll
for (int i = 0; i < 4; ++i) {
uint32_t src_size = i * 2 + threadIdx.x / 128 < first_k_tile ? 4 : 0;
ldgsts32(B_sts_addr + i * 2 * 256 * sizeof(float), B_ldg_ptr + B_ldg_offset[i], src_size,
(B_ldg_guard & (1u << 0)) != 0);
ldgsts32(B_sts_addr + (i * 2 * 256 + 128) * sizeof(float), B_ldg_ptr + B_ldg_offset[i] + 128 * sizeof(float),
src_size, (B_ldg_guard & (1u << 1)) != 0);
}
B_ldg_ptr += n * first_k_tile * sizeof(float);
ldgsts_commit();
__syncthreads();
// switch double buffer
A_sts_addr ^= 0x2000;
B_sts_addr ^= 0x2000;
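// 0x2000 bytes = 8 KiB: A_smem and B_smem each occupy a 16 KiB half of the
// 32 KiB shared-memory block declared above, split into two 8 KiB ping-pong
// buffers (one 8x132-float A tile uses 4224 B, one 8x256-float B tile uses
// exactly 8192 B), so XOR-ing the byte address with 0x2000 flips buffers.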
// load 1'st fragment
lds128(A_frag[0][0], A_frag[0][1], A_frag[0][2], A_frag[0][3], A_lds_addr);
lds128(A_frag[0][4], A_frag[0][5], A_frag[0][6], A_frag[0][7], A_lds_addr + 16 * sizeof(float));
lds128(A_frag[0][8], A_frag[0][9], A_frag[0][10], A_frag[0][11], A_lds_addr + 32 * sizeof(float));
lds128(A_frag[0][12], A_frag[0][13], A_frag[0][14], A_frag[0][15], A_lds_addr + 48 * sizeof(float));
lds128(B_frag[0][0], B_frag[0][1], B_frag[0][2], B_frag[0][3], B_lds_addr);
lds128(B_frag[0][4], B_frag[0][5], B_frag[0][6], B_frag[0][7], B_lds_addr + 32 * sizeof(float));
// k_tiles += 1;
// k_tiles loop
for (; k_tiles > 0; --k_tiles) {
#pragma unroll
for (int k_frag = 0; k_frag < 8; ++k_frag) {
if (k_frag == 7) {
// switch double buffer
A_lds_addr ^= 0x2000;
B_lds_addr ^= 0x2000;
A_sts_addr ^= 0x2000;
B_sts_addr ^= 0x2000;
// ldg pointer for next tile
A_ldg_ptr += 8 * sizeof(float);
B_ldg_ptr += B_ldg_step;
ldgsts_commit();
__syncthreads();
}
// load next A&B fragment from shared memory to register
lds128(A_frag[(k_frag + 1) % 2][0], A_frag[(k_frag + 1) % 2][1], A_frag[(k_frag + 1) % 2][2],
A_frag[(k_frag + 1) % 2][3], A_lds_addr + (k_frag + 1) % 8 * 132 * sizeof(float));
lds128(A_frag[(k_frag + 1) % 2][4], A_frag[(k_frag + 1) % 2][5], A_frag[(k_frag + 1) % 2][6],
A_frag[(k_frag + 1) % 2][7], A_lds_addr + ((k_frag + 1) % 8 * 132 + 16) * sizeof(float));
lds128(A_frag[(k_frag + 1) % 2][8], A_frag[(k_frag + 1) % 2][9], A_frag[(k_frag + 1) % 2][10],
A_frag[(k_frag + 1) % 2][11], A_lds_addr + ((k_frag + 1) % 8 * 132 + 32) * sizeof(float));
lds128(A_frag[(k_frag + 1) % 2][12], A_frag[(k_frag + 1) % 2][13], A_frag[(k_frag + 1) % 2][14],
A_frag[(k_frag + 1) % 2][15], A_lds_addr + ((k_frag + 1) % 8 * 132 + 48) * sizeof(float));
lds128(B_frag[(k_frag + 1) % 2][0], B_frag[(k_frag + 1) % 2][1], B_frag[(k_frag + 1) % 2][2],
B_frag[(k_frag + 1) % 2][3], B_lds_addr + (k_frag + 1) % 8 * 256 * sizeof(float));
lds128(B_frag[(k_frag + 1) % 2][4], B_frag[(k_frag + 1) % 2][5], B_frag[(k_frag + 1) % 2][6],
B_frag[(k_frag + 1) % 2][7], B_lds_addr + ((k_frag + 1) % 8 * 256 + 32) * sizeof(float));
// load next A&B tile
// if (k_tiles > 1 && k_frag < 4) {
if (k_frag < 4) {
ldgsts32(A_sts_addr + k_frag * 32 * sizeof(float), A_ldg_ptr + A_ldg_offset[k_frag],
(A_ldg_guard & (1u << k_frag)) != 0);
ldgsts32(B_sts_addr + k_frag * 2 * 256 * sizeof(float), B_ldg_ptr + B_ldg_offset[k_frag],
(B_ldg_guard & (1u << 0)) != 0);
ldgsts32(B_sts_addr + (k_frag * 2 * 256 + 128) * sizeof(float),
B_ldg_ptr + B_ldg_offset[k_frag] + 128 * sizeof(float), (B_ldg_guard & (1u << 1)) != 0);
}
// FFMA loop
#pragma unroll
for (int i = 0; i < 16; ++i) {
#pragma unroll
for (int j = 0; j < 8; ++j) {
C_frag[i][j] += A_frag[k_frag % 2][i] * B_frag[k_frag % 2][j];
}
}
}
}
// FFMA for the last tile
#pragma unroll
for (int k_frag = 0; k_frag < 8; ++k_frag) {
if (k_frag < 7) {
// load next A&B fragment from shared memory to register
lds128(A_frag[(k_frag + 1) % 2][0], A_frag[(k_frag + 1) % 2][1], A_frag[(k_frag + 1) % 2][2],
A_frag[(k_frag + 1) % 2][3], A_lds_addr + (k_frag + 1) % 8 * 132 * sizeof(float));
lds128(A_frag[(k_frag + 1) % 2][4], A_frag[(k_frag + 1) % 2][5], A_frag[(k_frag + 1) % 2][6],
A_frag[(k_frag + 1) % 2][7], A_lds_addr + ((k_frag + 1) % 8 * 132 + 16) * sizeof(float));
lds128(A_frag[(k_frag + 1) % 2][8], A_frag[(k_frag + 1) % 2][9], A_frag[(k_frag + 1) % 2][10],
A_frag[(k_frag + 1) % 2][11], A_lds_addr + ((k_frag + 1) % 8 * 132 + 32) * sizeof(float));
lds128(A_frag[(k_frag + 1) % 2][12], A_frag[(k_frag + 1) % 2][13], A_frag[(k_frag + 1) % 2][14],
A_frag[(k_frag + 1) % 2][15], A_lds_addr + ((k_frag + 1) % 8 * 132 + 48) * sizeof(float));
lds128(B_frag[(k_frag + 1) % 2][0], B_frag[(k_frag + 1) % 2][1], B_frag[(k_frag + 1) % 2][2],
B_frag[(k_frag + 1) % 2][3], B_lds_addr + (k_frag + 1) % 8 * 256 * sizeof(float));
lds128(B_frag[(k_frag + 1) % 2][4], B_frag[(k_frag + 1) % 2][5], B_frag[(k_frag + 1) % 2][6],
B_frag[(k_frag + 1) % 2][7], B_lds_addr + ((k_frag + 1) % 8 * 256 + 32) * sizeof(float));
}
// FFMA loop
#pragma unroll
for (int i = 0; i < 16; ++i) {
#pragma unroll
for (int j = 0; j < 8; ++j) {
C_frag[i][j] += A_frag[k_frag % 2][i] * B_frag[k_frag % 2][j];
}
}
}
// C_tile write back, reuse A&B tile shared memory buffer
uint32_t C_sts_addr = smem_u32addr((float4*)(smem + warp_id * 4096) + mma_tid_y * 4 * 9 + mma_tid_x);
const float* C_lds_ptr = (float*)(smem + warp_id * 4096) + lane_id;
uint32_t m_idx = blockIdx.y * 128 + warp_id / 4 * 64;
uint32_t n_idx = blockIdx.x * 256 + warp_id % 4 * 64 + lane_id;
float* C_stg_ptr = C + m_idx * n + n_idx;
if (m_idx >= m) {
return;
} else if (m_idx + 64 <= m) {
uint32_t n_guard = n < n_idx ? 0 : n - n_idx;
#pragma unroll
for (int i = 0; i < 4; ++i) {
#pragma unroll
for (int j = 0; j < 2; ++j) {
__syncthreads();
#pragma unroll
for (int p = 0; p < 4; ++p) {
sts128(C_frag[i * 4 + p][j * 4], C_frag[i * 4 + p][j * 4 + 1], C_frag[i * 4 + p][j * 4 + 2],
C_frag[i * 4 + p][j * 4 + 3], C_sts_addr + p * 9 * sizeof(float4));
}
__syncthreads();
#pragma unroll
for (int p = 0; p < 16; ++p) {
stg32(C_lds_ptr[p * 36], C_stg_ptr + (i * 16 + p) * n + j * 32, j * 32 < n_guard);
}
}
}
} else {
#pragma unroll
for (int i = 0; i < 4; ++i) {
#pragma unroll
for (int j = 0; j < 2; ++j) {
StgFrag stg_frag(C_frag, j, i);
C_tile_wb(stg_frag, C_stg_ptr + i * 16 * n + j * 32, C_lds_ptr, C_sts_addr, m, n, m_idx + i * 16,
n_idx + j * 32);
}
}
}
// }
}
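// ---------------------------------------------------------------------------
// Illustrative host-side launch sketch (added example, not part of the original
// kernel source). Assumptions: each 256-thread block computes a 128 (m) x 256 (n)
// tile of C, as implied by the blockIdx.y * 128 / blockIdx.x * 256 indexing above;
// B_ldg_step is the byte stride that advances B by one 8-row k-tile, i.e.
// n * sizeof(float) * 8 (see the parameter comment); cycle_count is unused in the
// kernel body shown here, so 1 is passed as a placeholder.
// ---------------------------------------------------------------------------
static void launch_sgemm_128x256x8_pingpong(const float* dA, const float* dB, float* dC,
size_t m, size_t n, size_t k) {
dim3 block(256);
dim3 grid((unsigned)((n + 255) / 256), (unsigned)((m + 127) / 128));
size_t B_ldg_step = n * sizeof(float) * 8;
ampere_sgemm_my_opt_128x256x8_kernel_sm_reg_pingpong<<<grid, block>>>(
dA, dB, dC, m, n, k, B_ldg_step, /*cycle_count=*/1);
}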
|
31cb54d07b51892210e32ea0804864f950f8d53c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "fill_cols.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
int M = 2;
float *X = NULL;
hipMalloc(&X, XSIZE*YSIZE*sizeof(float)); // XSIZE*YSIZE floats, not bytes
float *V = NULL;
hipMalloc(&V, XSIZE*YSIZE*sizeof(float)); // XSIZE*YSIZE floats, not bytes
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(fill_cols, dim3(gridBlock), dim3(threadBlock), 0, 0, N, M, X, V);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(fill_cols, dim3(gridBlock), dim3(threadBlock), 0, 0, N, M, X, V);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(fill_cols, dim3(gridBlock), dim3(threadBlock), 0, 0, N, M, X, V);
}
auto end = steady_clock::now();
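// Note on what is measured: there is no hipDeviceSynchronize() between the timed
// launch loop and this timestamp, so the interval mostly captures kernel
// launch/enqueue overhead (plus whatever execution the driver's queue depth
// forces), not the full device execution time of 1000 fill_cols launches.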
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
31cb54d07b51892210e32ea0804864f950f8d53c.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "fill_cols.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
int M = 2;
float *X = NULL;
cudaMalloc(&X, XSIZE*YSIZE*sizeof(float)); // XSIZE*YSIZE floats, not bytes
float *V = NULL;
cudaMalloc(&V, XSIZE*YSIZE*sizeof(float)); // XSIZE*YSIZE floats, not bytes
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
fill_cols<<<gridBlock,threadBlock>>>(N,M,X,V);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
fill_cols<<<gridBlock,threadBlock>>>(N,M,X,V);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
fill_cols<<<gridBlock,threadBlock>>>(N,M,X,V);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
f2c3d0cc32367bd84711288cce8713539375a504.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <helper_cuda.h>
#include <cutil_math.h>
#include "geometry.h"
#include "linalg.h"
#define MAXTHREADS 512
#define L 4*0.015625f // cell length = 2*particle radius
#define L2 L*L // cell length squared
#define cells 33 // number of cells in one dimension
#define cells3 cells*cells*cells // number of cells in three dimensions
struct Particle{
float3 pos;
float3 vel;
float3 acc;
};
__device__
int cellIndex(float3 pos){
int i=(int)((pos.x+1.f)/L);
int j=(int)((pos.y+1.f)/L);
int k=(int)((pos.z+1.f)/L);
return (k*cells+j)*cells+i;
}
__device__
void boundaries(Particle &p, float3 pos, float3 vel){
// 2x2x2 box fixed boundaries
if(abs(pos.x)>1.f){
vel.x=-vel.x;
}
if(abs(pos.y)>1.f){
vel.y=-vel.y;
}
if(abs(pos.z)>1.f){
vel.z=-vel.z;
}
pos=clamp(pos, -1.f, 1.f);
p.pos=pos;
p.vel=vel;
}
__device__
void collision(Particle *d_gas, int a, int b){
float3 s=d_gas[a].pos-d_gas[b].pos;
float3 u=d_gas[a].vel-d_gas[b].vel;
float uu=dot(u, u);
float su=dot(s, u);
float ss=dot(s, s);
float t0=-(su+sqrtf(su*su-uu*(ss-L2)))/uu;
float3 r=s+u*t0; // |r|=L always
float3 du=r*(dot(u, r)/L2);
float3 ds=du*t0;
boundaries(d_gas[a], d_gas[a].pos+ds, d_gas[a].vel+du);
boundaries(d_gas[b], d_gas[b].pos-ds, d_gas[b].vel-du);
}
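// Time-of-impact derivation for the collision above (descriptive comment): with
// relative position s and relative velocity u, the spheres were at contact when
// |s + u*t|^2 = L^2, i.e. uu*t^2 + 2*su*t + (ss - L2) = 0, whose earlier root is
// t0 = -(su + sqrt(su*su - uu*(ss - L2))) / uu. r = s + u*t0 is then the contact
// vector with |r| = L, du is the component of u along the contact normal r/L that
// is exchanged between the equal-mass particles, and ds = du*t0 is the matching
// positional correction applied through boundaries().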
__global__
void indices(uint4* d_index, dim3 mesh){
int i=blockIdx.x*blockDim.x+threadIdx.x;
int j=blockIdx.y*blockDim.y+threadIdx.y;
int k=blockIdx.z*blockDim.z+threadIdx.z;
if(i<mesh.x && j<mesh.y && k<mesh.z){
int gid=(k*mesh.y+j)*mesh.x+i;
int ii=(i+1)%mesh.x;
int jj=(j+1)%mesh.y;
int a=(k*mesh.y+j)*mesh.x+i;
int b=(k*mesh.y+j)*mesh.x+ii;
int c=(k*mesh.y+jj)*mesh.x+ii;
int d=(k*mesh.y+jj)*mesh.x+i;
d_index[gid]=make_uint4(a, b, c, d);
}
}
__global__
void initialState(Particle* d_gas, dim3 mesh){
int i=blockIdx.x*blockDim.x+threadIdx.x;
int j=blockIdx.y*blockDim.y+threadIdx.y;
int k=blockIdx.z*blockDim.z+threadIdx.z;
if(i<mesh.x && j<mesh.y && k<mesh.z){
int gid=(k*mesh.y+j)*mesh.x+i;
float x=(float(2*i))/mesh.x-1.f;
float y=(float(2*j))/mesh.y-1.f;
float z=(float(2*k))/mesh.z-1.f;
d_gas[gid].pos={x, y, z};
d_gas[gid].vel={-x, -y, -z};
d_gas[gid].acc={0.f, 0.f, 0.f};
}
}
__global__
void updateGrid(Particle* d_gas, uint* d_gridCounters, uint* d_gridCells, int n){
int gid=blockIdx.x*blockDim.x+threadIdx.x;
if(gid<n){
// Update grid
int cid=cellIndex(d_gas[gid].pos);
int s=atomicInc(&(d_gridCounters[cid]), 1u);
if(s<4u){
d_gridCells[4*cid+s]=gid;
}
}
}
__global__
void neighbors(Particle* d_gas, uint* d_gridCounters, uint* d_gridCells){
int hx=blockIdx.x*blockDim.x+threadIdx.x;
int hy=blockIdx.y*blockDim.y+threadIdx.y;
int hz=blockIdx.z*blockDim.z+threadIdx.z;
if(hx<cells && hy<cells && hz<cells){
int hid=(hz*cells+hy)*cells+hx;
int hcount=d_gridCounters[hid];
if(hcount==0){
return;
}
int ncount=0;
int neighbors[128];
int nx, ny, nz;
for(int i=-1; i<=1; i++){
nx=hx+i;
for(int j=-1; j<=1; j++){
ny=hy+j;
for(int k=-1; k<=1; k++){
nz=hz+k;
if( (nx>=0 && nx<cells) &&
(ny>=0 && ny<cells) &&
(nz>=0 && nz<cells)){
int nid=(nz*cells+ny)*cells+nx;
int acount=d_gridCounters[nid];
for(int m=0; m<acount; m++){
neighbors[ncount++]=d_gridCells[4*nid+m];
}
}
}
}
}
int home, away;
for(int h=0; h<hcount; h++){
home=d_gridCells[4*hid+h];
float3 posh=d_gas[home].pos;
for(int a=0; a<ncount; a++){
away=neighbors[a];
if(home!=away){
// Check if particles are close enough
float3 posn=d_gas[away].pos;
float3 r=posh-posn;
float r2=dot(r,r);
if(r2<L2){
// Check if the barycenter belongs to home
float3 b=(posh+posn)/2.f;
if(cellIndex(b)==hid){
collision(d_gas, home, away);
}
}
}
}
}
}
}
__global__
void integrate(Particle* d_gas, float step, int n){
int gid=blockIdx.x*blockDim.x+threadIdx.x;
if(gid<n){
float3 vel=d_gas[gid].vel+d_gas[gid].acc*step;
float3 pos=d_gas[gid].pos+vel*step;
boundaries(d_gas[gid], pos, vel);
}
}
__global__
void updatePoints(float4 *d_pos, float4 *d_norm, uchar4 *d_color, Particle *d_gas, int n){
int gid=blockIdx.x*blockDim.x+threadIdx.x;
if(gid<n){
float3 pos=d_gas[gid].pos;
float3 vel=d_gas[gid].vel;
float3 N=normalize(vel);
d_pos[gid]=make_float4(pos.x, pos.y, pos.z, 1.f);
d_norm[gid]=make_float4(N.x, N.y, N.z, 0.f);
float x=N.x, y=N.y, z=N.z;
float x2=x*x, y2=y*y, z2=z*z;
float r=(x<0?0:x2)+(y<0?y2:0)+(z<0?z2:0);
float g=(x<0?x2:0)+(y<0?0:y2)+(z<0?z2:0);
float b=(x<0?x2:0)+(y<0?y2:0)+(z<0?0:z2);
d_color[gid].x=(unsigned char)(255.f*r)&0xff;
d_color[gid].y=(unsigned char)(255.f*g)&0xff;
d_color[gid].z=(unsigned char)(255.f*b)&0xff;
d_color[gid].w=255u;
}
}
inline uint ceil(uint num, uint den){
return (num+den-1u)/den;
}
void launch_kernel(float4 *d_pos, float4 *d_norm, uchar4 *d_color, uint4 *d_index, dim3 mesh, float time){
static const int n=mesh.x*mesh.y*mesh.z;
static const int bpg=ceil(cells, 8);
static const dim3 block1D(MAXTHREADS);
static const dim3 grid1D(ceil(n, MAXTHREADS));
static const dim3 block3D(8, 8, 8);
static const dim3 grid3D(bpg, bpg, bpg);
static Particle *d_gas=NULL;
static uint *d_gridCounters=NULL, *d_gridCells=NULL;
if(d_gas==NULL){
checkCudaErrors(hipMalloc((void**)&d_gas, n*sizeof(Particle)));
checkCudaErrors(hipMalloc((void**)&d_gridCounters, cells3*sizeof(uint)));
checkCudaErrors(hipMalloc((void**)&d_gridCells, 4*cells3*sizeof(uint)));
dim3 tgrid(ceil(mesh.x, block3D.x), ceil(mesh.y, block3D.y), ceil(mesh.z, block3D.z));
hipLaunchKernelGGL(( initialState), dim3(tgrid), dim3(block3D), 0, 0, d_gas, mesh);
hipLaunchKernelGGL(( indices), dim3(tgrid), dim3(block3D), 0, 0, d_index, mesh);
}
checkCudaErrors(hipMemset(d_gridCounters, 0u, cells3*sizeof(uint)));
checkCudaErrors(hipMemset(d_gridCells, 0u, 4*cells3*sizeof(uint)));
static float step=0.001f;
hipLaunchKernelGGL(( integrate) , dim3(grid1D), dim3(block1D), 0, 0, d_gas, step, n);
hipLaunchKernelGGL(( updateGrid) , dim3(grid1D), dim3(block1D), 0, 0, d_gas, d_gridCounters, d_gridCells, n);
hipLaunchKernelGGL(( neighbors) , dim3(grid3D), dim3(block3D), 0, 0, d_gas, d_gridCounters, d_gridCells);
hipLaunchKernelGGL(( updatePoints), dim3(grid1D), dim3(block1D), 0, 0, d_pos, d_norm, d_color, d_gas, n);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
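// Per-frame pipeline summary (descriptive comment): the first call allocates the
// particle array and the uniform grid (one counter plus up to four particle slots
// per cell) and builds the initial state and index buffer; every subsequent call
// clears the grid, integrates velocities and positions, re-bins particles into
// cells, resolves collisions against the 3x3x3 cell neighbourhood, and writes the
// caller-supplied d_pos/d_norm/d_color buffers for rendering.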
|
f2c3d0cc32367bd84711288cce8713539375a504.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <helper_cuda.h>
#include <cutil_math.h>
#include "geometry.h"
#include "linalg.h"
#define MAXTHREADS 512
#define L 4*0.015625f // cell length = 2*particle radius
#define L2 L*L // cell length squared
#define cells 33 // number of cells in one dimension
#define cells3 cells*cells*cells // number of cells in three dimensions
struct Particle{
float3 pos;
float3 vel;
float3 acc;
};
__device__
int cellIndex(float3 pos){
int i=(int)((pos.x+1.f)/L);
int j=(int)((pos.y+1.f)/L);
int k=(int)((pos.z+1.f)/L);
return (k*cells+j)*cells+i;
}
__device__
void boundaries(Particle &p, float3 pos, float3 vel){
// 2x2x2 box fixed boundaries
if(abs(pos.x)>1.f){
vel.x=-vel.x;
}
if(abs(pos.y)>1.f){
vel.y=-vel.y;
}
if(abs(pos.z)>1.f){
vel.z=-vel.z;
}
pos=clamp(pos, -1.f, 1.f);
p.pos=pos;
p.vel=vel;
}
__device__
void collision(Particle *d_gas, int a, int b){
float3 s=d_gas[a].pos-d_gas[b].pos;
float3 u=d_gas[a].vel-d_gas[b].vel;
float uu=dot(u, u);
float su=dot(s, u);
float ss=dot(s, s);
float t0=-(su+sqrtf(su*su-uu*(ss-L2)))/uu;
float3 r=s+u*t0; // |r|=L always
float3 du=r*(dot(u, r)/L2);
float3 ds=du*t0;
boundaries(d_gas[a], d_gas[a].pos+ds, d_gas[a].vel+du);
boundaries(d_gas[b], d_gas[b].pos-ds, d_gas[b].vel-du);
}
__global__
void indices(uint4* d_index, dim3 mesh){
int i=blockIdx.x*blockDim.x+threadIdx.x;
int j=blockIdx.y*blockDim.y+threadIdx.y;
int k=blockIdx.z*blockDim.z+threadIdx.z;
if(i<mesh.x && j<mesh.y && k<mesh.z){
int gid=(k*mesh.y+j)*mesh.x+i;
int ii=(i+1)%mesh.x;
int jj=(j+1)%mesh.y;
int a=(k*mesh.y+j)*mesh.x+i;
int b=(k*mesh.y+j)*mesh.x+ii;
int c=(k*mesh.y+jj)*mesh.x+ii;
int d=(k*mesh.y+jj)*mesh.x+i;
d_index[gid]=make_uint4(a, b, c, d);
}
}
__global__
void initialState(Particle* d_gas, dim3 mesh){
int i=blockIdx.x*blockDim.x+threadIdx.x;
int j=blockIdx.y*blockDim.y+threadIdx.y;
int k=blockIdx.z*blockDim.z+threadIdx.z;
if(i<mesh.x && j<mesh.y && k<mesh.z){
int gid=(k*mesh.y+j)*mesh.x+i;
float x=(float(2*i))/mesh.x-1.f;
float y=(float(2*j))/mesh.y-1.f;
float z=(float(2*k))/mesh.z-1.f;
d_gas[gid].pos={x, y, z};
d_gas[gid].vel={-x, -y, -z};
d_gas[gid].acc={0.f, 0.f, 0.f};
}
}
__global__
void updateGrid(Particle* d_gas, uint* d_gridCounters, uint* d_gridCells, int n){
int gid=blockIdx.x*blockDim.x+threadIdx.x;
if(gid<n){
// Update grid
int cid=cellIndex(d_gas[gid].pos);
int s=atomicInc(&(d_gridCounters[cid]), 1u);
if(s<4u){
d_gridCells[4*cid+s]=gid;
}
}
}
__global__
void neighbors(Particle* d_gas, uint* d_gridCounters, uint* d_gridCells){
int hx=blockIdx.x*blockDim.x+threadIdx.x;
int hy=blockIdx.y*blockDim.y+threadIdx.y;
int hz=blockIdx.z*blockDim.z+threadIdx.z;
if(hx<cells && hy<cells && hz<cells){
int hid=(hz*cells+hy)*cells+hx;
int hcount=d_gridCounters[hid];
if(hcount==0){
return;
}
int ncount=0;
int neighbors[128];
int nx, ny, nz;
for(int i=-1; i<=1; i++){
nx=hx+i;
for(int j=-1; j<=1; j++){
ny=hy+j;
for(int k=-1; k<=1; k++){
nz=hz+k;
if( (nx>=0 && nx<cells) &&
(ny>=0 && ny<cells) &&
(nz>=0 && nz<cells)){
int nid=(nz*cells+ny)*cells+nx;
int acount=d_gridCounters[nid];
for(int m=0; m<acount; m++){
neighbors[ncount++]=d_gridCells[4*nid+m];
}
}
}
}
}
int home, away;
for(int h=0; h<hcount; h++){
home=d_gridCells[4*hid+h];
float3 posh=d_gas[home].pos;
for(int a=0; a<ncount; a++){
away=neighbors[a];
if(home!=away){
// Check if particles are close enough
float3 posn=d_gas[away].pos;
float3 r=posh-posn;
float r2=dot(r,r);
if(r2<L2){
// Check if the barycenter belongs to home
float3 b=(posh+posn)/2.f;
if(cellIndex(b)==hid){
collision(d_gas, home, away);
}
}
}
}
}
}
}
__global__
void integrate(Particle* d_gas, float step, int n){
int gid=blockIdx.x*blockDim.x+threadIdx.x;
if(gid<n){
float3 vel=d_gas[gid].vel+d_gas[gid].acc*step;
float3 pos=d_gas[gid].pos+vel*step;
boundaries(d_gas[gid], pos, vel);
}
}
__global__
void updatePoints(float4 *d_pos, float4 *d_norm, uchar4 *d_color, Particle *d_gas, int n){
int gid=blockIdx.x*blockDim.x+threadIdx.x;
if(gid<n){
float3 pos=d_gas[gid].pos;
float3 vel=d_gas[gid].vel;
float3 N=normalize(vel);
d_pos[gid]=make_float4(pos.x, pos.y, pos.z, 1.f);
d_norm[gid]=make_float4(N.x, N.y, N.z, 0.f);
float x=N.x, y=N.y, z=N.z;
float x2=x*x, y2=y*y, z2=z*z;
float r=(x<0?0:x2)+(y<0?y2:0)+(z<0?z2:0);
float g=(x<0?x2:0)+(y<0?0:y2)+(z<0?z2:0);
float b=(x<0?x2:0)+(y<0?y2:0)+(z<0?0:z2);
d_color[gid].x=(unsigned char)(255.f*r)&0xff;
d_color[gid].y=(unsigned char)(255.f*g)&0xff;
d_color[gid].z=(unsigned char)(255.f*b)&0xff;
d_color[gid].w=255u;
}
}
inline uint ceil(uint num, uint den){
return (num+den-1u)/den;
}
void launch_kernel(float4 *d_pos, float4 *d_norm, uchar4 *d_color, uint4 *d_index, dim3 mesh, float time){
static const int n=mesh.x*mesh.y*mesh.z;
static const int bpg=ceil(cells, 8);
static const dim3 block1D(MAXTHREADS);
static const dim3 grid1D(ceil(n, MAXTHREADS));
static const dim3 block3D(8, 8, 8);
static const dim3 grid3D(bpg, bpg, bpg);
static Particle *d_gas=NULL;
static uint *d_gridCounters=NULL, *d_gridCells=NULL;
if(d_gas==NULL){
checkCudaErrors(cudaMalloc((void**)&d_gas, n*sizeof(Particle)));
checkCudaErrors(cudaMalloc((void**)&d_gridCounters, cells3*sizeof(uint)));
checkCudaErrors(cudaMalloc((void**)&d_gridCells, 4*cells3*sizeof(uint)));
dim3 tgrid(ceil(mesh.x, block3D.x), ceil(mesh.y, block3D.y), ceil(mesh.z, block3D.z));
initialState<<<tgrid, block3D>>>(d_gas, mesh);
indices<<<tgrid, block3D>>>(d_index, mesh);
}
checkCudaErrors(cudaMemset(d_gridCounters, 0u, cells3*sizeof(uint)));
checkCudaErrors(cudaMemset(d_gridCells, 0u, 4*cells3*sizeof(uint)));
static float step=0.001f;
integrate <<<grid1D, block1D>>>(d_gas, step, n);
updateGrid <<<grid1D, block1D>>>(d_gas, d_gridCounters, d_gridCells, n);
neighbors <<<grid3D, block3D>>>(d_gas, d_gridCounters, d_gridCells);
updatePoints<<<grid1D, block1D>>>(d_pos, d_norm, d_color, d_gas, n);
cudaThreadSynchronize();
checkCudaErrors(cudaGetLastError());
}
|
490181932119e12a4715b10e354cabb2841cfe89.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// REQUIRES: x86-registered-target
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -emit-llvm -x hip %s -o - | FileCheck --check-prefixes=COMMON,CHECK %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -emit-llvm -x hip %s -disable-O0-optnone -o - | opt -S -O2 | FileCheck %s --check-prefixes=COMMON,OPT
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -x hip %s -o - | FileCheck -check-prefix=HOST %s
#include "Inputs/cuda.h"
// Coerced struct from `struct S` without all generic pointers lowered into
// global ones.
// On the host-side compilation, generic pointer won't be coerced.
// HOST-NOT: %struct.S.coerce
// HOST-NOT: %struct.T.coerce
// HOST: define{{.*}} void @_Z22__device_stub__kernel1Pi(i32* noundef %x)
// COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel1Pi(i32 addrspace(1)*{{.*}} %x.coerce)
// CHECK: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// CHECK-NOT: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// OPT: [[VAL:%.*]] = load i32, i32 addrspace(1)* %x.coerce, align 4, !amdgpu.noclobber !2
// OPT: [[INC:%.*]] = add nsw i32 [[VAL]], 1
// OPT: store i32 [[INC]], i32 addrspace(1)* %x.coerce, align 4
// OPT: ret void
__global__ void kernel1(int *x) {
x[0]++;
}
// HOST: define{{.*}} void @_Z22__device_stub__kernel2Ri(i32* noundef nonnull align 4 dereferenceable(4) %x)
// COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel2Ri(i32 addrspace(1)*{{.*}} nonnull align 4 dereferenceable(4) %x.coerce)
// CHECK: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// CHECK-NOT: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// OPT: [[VAL:%.*]] = load i32, i32 addrspace(1)* %x.coerce, align 4, !amdgpu.noclobber !2
// OPT: [[INC:%.*]] = add nsw i32 [[VAL]], 1
// OPT: store i32 [[INC]], i32 addrspace(1)* %x.coerce, align 4
// OPT: ret void
__global__ void kernel2(int &x) {
x++;
}
// HOST: define{{.*}} void @_Z22__device_stub__kernel3PU3AS2iPU3AS1i(i32 addrspace(2)* noundef %x, i32 addrspace(1)* noundef %y)
// CHECK-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel3PU3AS2iPU3AS1i(i32 addrspace(2)*{{.*}} %x, i32 addrspace(1)*{{.*}} %y)
// CHECK-NOT: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
__global__ void kernel3(__attribute__((address_space(2))) int *x,
__attribute__((address_space(1))) int *y) {
y[0] = x[0];
}
// COMMON-LABEL: define{{.*}} void @_Z4funcPi(i32*{{.*}} %x)
// CHECK-NOT: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
__device__ void func(int *x) {
x[0]++;
}
struct S {
int *x;
float *y;
};
// `by-val` struct is passed by-indirect-alias (a mix of by-ref and indirect
// by-val). However, the enhanced address inferring pass should be able to
// assume they are global pointers.
//
// HOST: define{{.*}} void @_Z22__device_stub__kernel41S(i32* %s.coerce0, float* %s.coerce1)
// COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel41S(%struct.S addrspace(4)*{{.*}} byref(%struct.S) align 8 %0)
// OPT: [[R0:%.*]] = getelementptr inbounds %struct.S, %struct.S addrspace(4)* %0, i64 0, i32 0
// OPT: [[P0:%.*]] = load i32*, i32* addrspace(4)* [[R0]], align 8
// OPT: [[G0:%.*]] ={{.*}} addrspacecast i32* [[P0]] to i32 addrspace(1)*
// OPT: [[R1:%.*]] = getelementptr inbounds %struct.S, %struct.S addrspace(4)* %0, i64 0, i32 1
// OPT: [[P1:%.*]] = load float*, float* addrspace(4)* [[R1]], align 8
// OPT: [[G1:%.*]] ={{.*}} addrspacecast float* [[P1]] to float addrspace(1)*
// OPT: [[V0:%.*]] = load i32, i32 addrspace(1)* [[G0]], align 4, !amdgpu.noclobber !2
// OPT: [[INC:%.*]] = add nsw i32 [[V0]], 1
// OPT: store i32 [[INC]], i32 addrspace(1)* [[G0]], align 4
// OPT: [[V1:%.*]] = load float, float addrspace(1)* [[G1]], align 4
// OPT: [[ADD:%.*]] = fadd contract float [[V1]], 1.000000e+00
// OPT: store float [[ADD]], float addrspace(1)* [[G1]], align 4
// OPT: ret void
__global__ void kernel4(struct S s) {
s.x[0]++;
s.y[0] += 1.f;
}
// If a pointer to struct is passed, only the pointer itself is coerced into the global one.
// HOST: define{{.*}} void @_Z22__device_stub__kernel5P1S(%struct.S* noundef %s)
// COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel5P1S(%struct.S addrspace(1)*{{.*}} %s.coerce)
__global__ void kernel5(struct S *s) {
s->x[0]++;
s->y[0] += 1.f;
}
struct T {
float *x[2];
};
// `by-val` array is passed by-indirect-alias (a mix of by-ref and indirect
// by-val). However, the enhanced address inferring pass should be able to
// assume they are global pointers.
//
// HOST: define{{.*}} void @_Z22__device_stub__kernel61T(float* %t.coerce0, float* %t.coerce1)
// COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel61T(%struct.T addrspace(4)*{{.*}} byref(%struct.T) align 8 %0)
// OPT: [[R0:%.*]] = getelementptr inbounds %struct.T, %struct.T addrspace(4)* %0, i64 0, i32 0, i64 0
// OPT: [[P0:%.*]] = load float*, float* addrspace(4)* [[R0]], align 8
// OPT: [[G0:%.*]] ={{.*}} addrspacecast float* [[P0]] to float addrspace(1)*
// OPT: [[R1:%.*]] = getelementptr inbounds %struct.T, %struct.T addrspace(4)* %0, i64 0, i32 0, i64 1
// OPT: [[P1:%.*]] = load float*, float* addrspace(4)* [[R1]], align 8
// OPT: [[G1:%.*]] ={{.*}} addrspacecast float* [[P1]] to float addrspace(1)*
// OPT: [[V0:%.*]] = load float, float addrspace(1)* [[G0]], align 4, !amdgpu.noclobber !2
// OPT: [[ADD0:%.*]] = fadd contract float [[V0]], 1.000000e+00
// OPT: store float [[ADD0]], float addrspace(1)* [[G0]], align 4
// OPT: [[V1:%.*]] = load float, float addrspace(1)* [[G1]], align 4
// OPT: [[ADD1:%.*]] = fadd contract float [[V1]], 2.000000e+00
// OPT: store float [[ADD1]], float addrspace(1)* [[G1]], align 4
// OPT: ret void
__global__ void kernel6(struct T t) {
t.x[0][0] += 1.f;
t.x[1][0] += 2.f;
}
// Check that coerced pointers retain the noalias attribute when qualified with __restrict.
// HOST: define{{.*}} void @_Z22__device_stub__kernel7Pi(i32* noalias noundef %x)
// COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel7Pi(i32 addrspace(1)* noalias{{.*}} %x.coerce)
__global__ void kernel7(int *__restrict x) {
x[0]++;
}
// Single element struct.
struct SS {
float *x;
};
// HOST: define{{.*}} void @_Z22__device_stub__kernel82SS(float* %a.coerce)
// COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel82SS(float addrspace(1)*{{.*}} %a.coerce)
// CHECK: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// CHECK-NOT: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// OPT: [[VAL:%.*]] = load float, float addrspace(1)* %a.coerce, align 4, !amdgpu.noclobber !2
// OPT: [[INC:%.*]] = fadd contract float [[VAL]], 3.000000e+00
// OPT: store float [[INC]], float addrspace(1)* %a.coerce, align 4
// OPT: ret void
__global__ void kernel8(struct SS a) {
*a.x += 3.f;
}
|
490181932119e12a4715b10e354cabb2841cfe89.cu
|
// REQUIRES: x86-registered-target
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -emit-llvm -x hip %s -o - | FileCheck --check-prefixes=COMMON,CHECK %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -emit-llvm -x hip %s -disable-O0-optnone -o - | opt -S -O2 | FileCheck %s --check-prefixes=COMMON,OPT
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -x hip %s -o - | FileCheck -check-prefix=HOST %s
#include "Inputs/cuda.h"
// Coerced struct from `struct S` without all generic pointers lowered into
// global ones.
// On the host-side compilation, generic pointer won't be coerced.
// HOST-NOT: %struct.S.coerce
// HOST-NOT: %struct.T.coerce
// HOST: define{{.*}} void @_Z22__device_stub__kernel1Pi(i32* noundef %x)
// COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel1Pi(i32 addrspace(1)*{{.*}} %x.coerce)
// CHECK: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// CHECK-NOT: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// OPT: [[VAL:%.*]] = load i32, i32 addrspace(1)* %x.coerce, align 4, !amdgpu.noclobber !2
// OPT: [[INC:%.*]] = add nsw i32 [[VAL]], 1
// OPT: store i32 [[INC]], i32 addrspace(1)* %x.coerce, align 4
// OPT: ret void
__global__ void kernel1(int *x) {
x[0]++;
}
// HOST: define{{.*}} void @_Z22__device_stub__kernel2Ri(i32* noundef nonnull align 4 dereferenceable(4) %x)
// COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel2Ri(i32 addrspace(1)*{{.*}} nonnull align 4 dereferenceable(4) %x.coerce)
// CHECK: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// CHECK-NOT: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// OPT: [[VAL:%.*]] = load i32, i32 addrspace(1)* %x.coerce, align 4, !amdgpu.noclobber !2
// OPT: [[INC:%.*]] = add nsw i32 [[VAL]], 1
// OPT: store i32 [[INC]], i32 addrspace(1)* %x.coerce, align 4
// OPT: ret void
__global__ void kernel2(int &x) {
x++;
}
// HOST: define{{.*}} void @_Z22__device_stub__kernel3PU3AS2iPU3AS1i(i32 addrspace(2)* noundef %x, i32 addrspace(1)* noundef %y)
// CHECK-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel3PU3AS2iPU3AS1i(i32 addrspace(2)*{{.*}} %x, i32 addrspace(1)*{{.*}} %y)
// CHECK-NOT: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
__global__ void kernel3(__attribute__((address_space(2))) int *x,
__attribute__((address_space(1))) int *y) {
y[0] = x[0];
}
// COMMON-LABEL: define{{.*}} void @_Z4funcPi(i32*{{.*}} %x)
// CHECK-NOT: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
__device__ void func(int *x) {
x[0]++;
}
struct S {
int *x;
float *y;
};
// `by-val` struct is passed by-indirect-alias (a mix of by-ref and indirect
// by-val). However, the enhanced address inferring pass should be able to
// assume they are global pointers.
//
// HOST: define{{.*}} void @_Z22__device_stub__kernel41S(i32* %s.coerce0, float* %s.coerce1)
// COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel41S(%struct.S addrspace(4)*{{.*}} byref(%struct.S) align 8 %0)
// OPT: [[R0:%.*]] = getelementptr inbounds %struct.S, %struct.S addrspace(4)* %0, i64 0, i32 0
// OPT: [[P0:%.*]] = load i32*, i32* addrspace(4)* [[R0]], align 8
// OPT: [[G0:%.*]] ={{.*}} addrspacecast i32* [[P0]] to i32 addrspace(1)*
// OPT: [[R1:%.*]] = getelementptr inbounds %struct.S, %struct.S addrspace(4)* %0, i64 0, i32 1
// OPT: [[P1:%.*]] = load float*, float* addrspace(4)* [[R1]], align 8
// OPT: [[G1:%.*]] ={{.*}} addrspacecast float* [[P1]] to float addrspace(1)*
// OPT: [[V0:%.*]] = load i32, i32 addrspace(1)* [[G0]], align 4, !amdgpu.noclobber !2
// OPT: [[INC:%.*]] = add nsw i32 [[V0]], 1
// OPT: store i32 [[INC]], i32 addrspace(1)* [[G0]], align 4
// OPT: [[V1:%.*]] = load float, float addrspace(1)* [[G1]], align 4
// OPT: [[ADD:%.*]] = fadd contract float [[V1]], 1.000000e+00
// OPT: store float [[ADD]], float addrspace(1)* [[G1]], align 4
// OPT: ret void
__global__ void kernel4(struct S s) {
s.x[0]++;
s.y[0] += 1.f;
}
// If a pointer to struct is passed, only the pointer itself is coerced into the global one.
// HOST: define{{.*}} void @_Z22__device_stub__kernel5P1S(%struct.S* noundef %s)
// COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel5P1S(%struct.S addrspace(1)*{{.*}} %s.coerce)
__global__ void kernel5(struct S *s) {
s->x[0]++;
s->y[0] += 1.f;
}
struct T {
float *x[2];
};
// `by-val` array is passed by-indirect-alias (a mix of by-ref and indirect
// by-val). However, the enhanced address inferring pass should be able to
// assume they are global pointers.
//
// HOST: define{{.*}} void @_Z22__device_stub__kernel61T(float* %t.coerce0, float* %t.coerce1)
// COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel61T(%struct.T addrspace(4)*{{.*}} byref(%struct.T) align 8 %0)
// OPT: [[R0:%.*]] = getelementptr inbounds %struct.T, %struct.T addrspace(4)* %0, i64 0, i32 0, i64 0
// OPT: [[P0:%.*]] = load float*, float* addrspace(4)* [[R0]], align 8
// OPT: [[G0:%.*]] ={{.*}} addrspacecast float* [[P0]] to float addrspace(1)*
// OPT: [[R1:%.*]] = getelementptr inbounds %struct.T, %struct.T addrspace(4)* %0, i64 0, i32 0, i64 1
// OPT: [[P1:%.*]] = load float*, float* addrspace(4)* [[R1]], align 8
// OPT: [[G1:%.*]] ={{.*}} addrspacecast float* [[P1]] to float addrspace(1)*
// OPT: [[V0:%.*]] = load float, float addrspace(1)* [[G0]], align 4, !amdgpu.noclobber !2
// OPT: [[ADD0:%.*]] = fadd contract float [[V0]], 1.000000e+00
// OPT: store float [[ADD0]], float addrspace(1)* [[G0]], align 4
// OPT: [[V1:%.*]] = load float, float addrspace(1)* [[G1]], align 4
// OPT: [[ADD1:%.*]] = fadd contract float [[V1]], 2.000000e+00
// OPT: store float [[ADD1]], float addrspace(1)* [[G1]], align 4
// OPT: ret void
__global__ void kernel6(struct T t) {
t.x[0][0] += 1.f;
t.x[1][0] += 2.f;
}
// Check that coerced pointers retain the noalias attribute when qualified with __restrict.
// HOST: define{{.*}} void @_Z22__device_stub__kernel7Pi(i32* noalias noundef %x)
// COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel7Pi(i32 addrspace(1)* noalias{{.*}} %x.coerce)
__global__ void kernel7(int *__restrict x) {
x[0]++;
}
// Single element struct.
struct SS {
float *x;
};
// HOST: define{{.*}} void @_Z22__device_stub__kernel82SS(float* %a.coerce)
// COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel82SS(float addrspace(1)*{{.*}} %a.coerce)
// CHECK: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// CHECK-NOT: ={{.*}} addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// OPT: [[VAL:%.*]] = load float, float addrspace(1)* %a.coerce, align 4, !amdgpu.noclobber !2
// OPT: [[INC:%.*]] = fadd contract float [[VAL]], 3.000000e+00
// OPT: store float [[INC]], float addrspace(1)* %a.coerce, align 4
// OPT: ret void
__global__ void kernel8(struct SS a) {
*a.x += 3.f;
}
|
4407b25f0a31989e88aa9ec7cb3ed61c35f566e4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Based on timingFunctions.cu */
#include <stdlib.h>
#ifndef GPUNUMBER
#define GPUNUMBER 0
#endif
#define MAX_THREADS_PER_BLOCK 1024
#define CUDA_CALL(x) do { if((x) != hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
template <typename T>
struct results_t {
float time;
T * vals;
};
template <typename T>
void setupForTiming(hipEvent_t &start, hipEvent_t &stop, T * h_vec, T ** d_vec, results_t<T> ** result, uint numElements, uint kCount) {
hipEventCreate(&start);
hipEventCreate(&stop);
hipMalloc(d_vec, numElements * sizeof(T));
hipMemcpy(*d_vec, h_vec, numElements * sizeof(T), hipMemcpyHostToDevice);
*result = (results_t<T> *) malloc (sizeof (results_t<T>));
(*result)->vals = (T *) malloc (kCount * sizeof (T));
}
template <typename T>
void wrapupForTiming(hipEvent_t &start, hipEvent_t &stop, float time, results_t<T> * result) {
result->time = time;
hipEventDestroy(start);
hipEventDestroy(stop);
// hipDeviceSynchronize();
}
/////////////////////////////////////////////////////////////////
// THE SORT AND CHOOSE TIMING FUNCTION
/////////////////////////////////////////////////////////////////
template <typename T>
__global__ void copyInChunk(T * outputVector, T * inputVector, uint * kList, uint kListCount, uint numElements) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < kListCount)
outputVector[idx] = inputVector[numElements - kList[idx]];
}
template <typename T>
inline void bestSort(T * d_vec, const uint numElements) {
cubDeviceSort<T>(d_vec, numElements);
}
template <>
inline void bestSort<double>(double * d_vec, const uint numElements) {
mgpuDeviceSort<double>(d_vec, numElements);
}
template<typename T>
results_t<T>* timeSortAndChooseMultiselect(T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
hipEvent_t start, stop;
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
hipEventRecord(start, 0);
bestSort<T>(d_vec, numElements);
T * d_output;
uint * d_kList;
hipMalloc (&d_output, kCount * sizeof (T));
hipMalloc (&d_kList, kCount * sizeof(uint));
hipMemcpy (d_kList, kVals, kCount * sizeof (uint), hipMemcpyHostToDevice);
int threads = MAX_THREADS_PER_BLOCK;
if (kCount < threads)
threads = kCount;
int blocks = (int) ceil (kCount / (float) threads);
hipLaunchKernelGGL(( copyInChunk<T>), dim3(blocks), dim3(threads), 0, 0, d_output, d_vec, d_kList, kCount, numElements);
hipMemcpy (result->vals, d_output, kCount * sizeof (T), hipMemcpyDeviceToHost);
//printf("first result: %u \n", result->vals);
hipFree(d_output);
hipFree(d_kList);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
hipFree(d_vec);
return result;
}
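// Illustrative usage sketch (added example; kVals semantics inferred from
// copyInChunk, which reads inputVector[numElements - kList[idx]] after an
// ascending sort, so k = 1 selects the maximum). Assumes numElements >= 100 and
// that the caller owns h_vec.
static inline float exampleTimeSortAndChoose(float * h_vec, uint numElements) {
uint ks[3] = {1, 10, 100}; // 1st, 10th and 100th largest
results_t<float> * r = timeSortAndChooseMultiselect<float>(h_vec, numElements, ks, 3);
float elapsed = r->time; // milliseconds, from hipEventElapsedTime
free(r->vals);
free(r);
return elapsed;
}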
/////////////////////////////////////////////////////////////////
// BUCKETMULTISELECT TIMING FUNCTION
/////////////////////////////////////////////////////////////////
template<typename T>
results_t<T>* timeBucketMultiselect (T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
hipEvent_t start, stop;
hipDeviceProp_t dp;
hipGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
hipEventRecord(start, 0);
// bucketMultiselectWrapper (T * d_vector, int length, uint * kVals_ori, uint kCount, T * outputs, int blocks, int threads)
BucketMultiselect::bucketMultiselectWrapper(d_vec, numElements, kVals, kCount, result->vals, dp.multiProcessorCount, dp.maxThreadsPerBlock);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
hipFree(d_vec);
return result;
}
template<typename T>
results_t<T>* timeBucketMultiselectNew2 (T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
hipEvent_t start, stop;
hipDeviceProp_t dp;
hipGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
hipEventRecord(start, 0);
// bucketMultiselectWrapper (T * d_vector, int length, uint * kVals_ori, uint kCount, T * outputs, int blocks, int threads)
BucketMultiselectNew2::bucketMultiselectWrapper(d_vec, numElements, kVals, kCount, result->vals, dp.multiProcessorCount, dp.maxThreadsPerBlock);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
hipFree(d_vec);
return result;
}
/////////////////////////////////////////////////////////////////
// cpu QUICKMULTISELECT TIMING FUNCTION
/////////////////////////////////////////////////////////////////
// FUNCTION TO TIME CPU Based QUICKMULTISELECT
template<typename T>
results_t<T>* timeQuickMultiselect (T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
T * h_vec_copy;
h_vec_copy = (T *) malloc(sizeof(T)*numElements);
results_t<T> * result;
float time;
hipEvent_t start, stop;
hipDeviceProp_t dp;
hipGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
hipMemcpy(h_vec_copy, h_vec, numElements*sizeof(T), hipMemcpyHostToHost);
hipEventRecord(start, 0);
quickMultiselectWrapper(h_vec_copy, numElements, kVals, kCount, result->vals, dp.multiProcessorCount, dp.maxThreadsPerBlock);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
hipFree(d_vec);
return result;
}
/////////////////////////////////////////////////////////////////
// Library Specific SORT AND CHOOSE TIMING FUNCTIONS
/////////////////////////////////////////////////////////////////
// FUNCTION TO TIME SORT&CHOOSE WITH CUB RADIX SORT
template<typename T>
results_t<T>* timeCUBSortAndChooseMultiselect(T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
hipEvent_t start, stop;
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
hipEventRecord(start, 0);
cubDeviceSort(d_vec, numElements);
T * d_output;
uint * d_kList;
hipMalloc (&d_output, kCount * sizeof (T));
hipMalloc (&d_kList, kCount * sizeof(uint));
hipMemcpy (d_kList, kVals, kCount * sizeof (uint), hipMemcpyHostToDevice);
int threads = MAX_THREADS_PER_BLOCK;
if (kCount < threads)
threads = kCount;
int blocks = (int) ceil (kCount / (float) threads);
hipLaunchKernelGGL(( copyInChunk<T>), dim3(blocks), dim3(threads), 0, 0, d_output, d_vec, d_kList, kCount, numElements);
hipMemcpy (result->vals, d_output, kCount * sizeof (T), hipMemcpyDeviceToHost);
hipFree(d_output);
hipFree(d_kList);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
hipFree(d_vec);
return result;
}
//FUNCTION TO TIME ModernGPU Sort and Choose
template<typename T>
results_t<T>* timeMGPUSortAndChooseMultiselect(T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
hipEvent_t start, stop;
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
hipEventRecord(start, 0);
mgpuDeviceSort(d_vec, numElements);
T * d_output;
uint * d_kList;
hipMalloc (&d_output, kCount * sizeof (T));
hipMalloc (&d_kList, kCount * sizeof(uint));
hipMemcpy (d_kList, kVals, kCount * sizeof (uint), hipMemcpyHostToDevice);
int threads = MAX_THREADS_PER_BLOCK;
if (kCount < threads)
threads = kCount;
int blocks = (int) ceil (kCount / (float) threads);
hipLaunchKernelGGL(( copyInChunk<T>), dim3(blocks), dim3(threads), 0, 0, d_output, d_vec, d_kList, kCount, numElements);
hipMemcpy (result->vals, d_output, kCount * sizeof (T), hipMemcpyDeviceToHost);
hipFree(d_output);
hipFree(d_kList);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
hipFree(d_vec);
return result;
}
/////////////////////////////////////////////////////////////////
// Library Specific BUCKETMULTISELECT TIMING FUNCTIONS
/////////////////////////////////////////////////////////////////
// FUNCTION TO TIME CUB BUCKET MULTISELECT
template<typename T>
results_t<T>* timeBucketMultiselect_cub (T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
hipEvent_t start, stop;
hipDeviceProp_t dp;
hipGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
hipEventRecord(start, 0);
// bucketMultiselectWrapper (T * d_vector, int length, uint * kVals_ori, uint kCount, T * outputs, int blocks, int threads)
BucketMultiselect_cub::bucketMultiselectWrapper_cub(d_vec, numElements, kVals, kCount, result->vals, dp.multiProcessorCount, dp.maxThreadsPerBlock);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
hipFree(d_vec);
return result;
}
// FUNCTION TO TIME MGPU BUCKET MULTISELECT
template<typename T>
results_t<T>* timeBucketMultiselect_mgpu (T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
hipEvent_t start, stop;
hipDeviceProp_t dp;
hipGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
hipEventRecord(start, 0);
// bucketMultiselectWrapper (T * d_vector, int length, uint * kVals_ori, uint kCount, T * outputs, int blocks, int threads)
BucketMultiselect_mgpu::bucketMultiselectWrapper_mgpu(d_vec, numElements, kVals, kCount, result->vals, dp.multiProcessorCount, dp.maxThreadsPerBlock);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
hipFree(d_vec);
return result;
}
// FUNCTION TO TIME THRUST BUCKET MULTISELECT
// This is the original function; it does not use binary search trees.
template<typename T>
results_t<T>* timeBucketMultiselect_thrust (T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
hipEvent_t start, stop;
hipDeviceProp_t dp;
hipGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
hipEventRecord(start, 0);
// bucketMultiselectWrapper (T * d_vector, int length, uint * kVals_ori, uint kCount, T * outputs, int blocks, int threads)
BucketMultiselect_thrust::bucketMultiselectWrapper_thrust(d_vec, numElements, kVals, kCount, result->vals, dp.multiProcessorCount, dp.maxThreadsPerBlock);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
hipFree(d_vec);
return result;
}
// FUNCTION TO TIME NAIVE BUCKET MULTISELECT (Does not use kernel density estimator nor binary search trees; not recommended)
template<typename T>
results_t<T>* timeNaiveBucketMultiselect (T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
hipEvent_t start, stop;
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
hipEventRecord(start, 0);
thrust::device_ptr<T> dev_ptr(d_vec);
thrust::sort(dev_ptr, dev_ptr + numElements);
for (int i = 0; i < kCount; i++)
hipMemcpy(result->vals + i, d_vec + (numElements - kVals[i]), sizeof (T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
hipFree(d_vec);
return result;
}
/***************************************
********* TOP K SELECT ALGORITHMS
****************************************/
template<typename T>
results_t<T>* timeSortAndChooseTopkselect(T * h_vec, uint numElements, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
hipEvent_t start, stop;
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
hipEventRecord(start, 0);
bestSort<T>(d_vec, numElements);
hipMemcpy(result->vals, d_vec, kCount * sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
hipFree(d_vec);
return result;
}
// FUNCTION TO TIME RANDOMIZED TOP K SELECT
template<typename T>
results_t<T>* timeRandomizedTopkselect (T * h_vec, uint numElements, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
hipEvent_t start, stop;
hipDeviceProp_t dp;
hipGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
hipEventRecord(start, 0);
result->vals = randomizedTopkSelectWrapper(d_vec, numElements, kCount);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
hipFree(d_vec);
return result;
}
// FUNCTION TO TIME BUCKET TOP K SELECT
template<typename T>
results_t<T>* timeBucketTopkselect (T * h_vec, uint numElements, uint kCount) {
// initialize ks
uint * kVals = (uint *) malloc(kCount*sizeof(uint)); // kVals holds uints, not T
for (uint i = 0; i < kCount; i++)
kVals[i] = i+1;
T * d_vec;
results_t<T> * result;
float time;
hipEvent_t start, stop;
hipDeviceProp_t dp;
hipGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
hipEventRecord(start, 0);
BucketMultiselect::bucketMultiselectWrapper(d_vec, numElements, kVals, kCount, result->vals, dp.multiProcessorCount, dp.maxThreadsPerBlock);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
hipFree(d_vec);
return result;
}
|
4407b25f0a31989e88aa9ec7cb3ed61c35f566e4.cu
|
/* Based on timingFunctions.cu */
#include <stdlib.h>
#ifndef GPUNUMBER
#define GPUNUMBER 0
#endif
#define MAX_THREADS_PER_BLOCK 1024
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
template <typename T>
struct results_t {
float time;
T * vals;
};
template <typename T>
void setupForTiming(cudaEvent_t &start, cudaEvent_t &stop, T * h_vec, T ** d_vec, results_t<T> ** result, uint numElements, uint kCount) {
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMalloc(d_vec, numElements * sizeof(T));
cudaMemcpy(*d_vec, h_vec, numElements * sizeof(T), cudaMemcpyHostToDevice);
*result = (results_t<T> *) malloc (sizeof (results_t<T>));
(*result)->vals = (T *) malloc (kCount * sizeof (T));
}
template <typename T>
void wrapupForTiming(cudaEvent_t &start, cudaEvent_t &stop, float time, results_t<T> * result) {
result->time = time;
cudaEventDestroy(start);
cudaEventDestroy(stop);
// cudaDeviceSynchronize();
}
/////////////////////////////////////////////////////////////////
// THE SORT AND CHOOSE TIMING FUNCTION
/////////////////////////////////////////////////////////////////
template <typename T>
__global__ void copyInChunk(T * outputVector, T * inputVector, uint * kList, uint kListCount, uint numElements) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < kListCount)
outputVector[idx] = inputVector[numElements - kList[idx]];
}
template <typename T>
inline void bestSort(T * d_vec, const uint numElements) {
cubDeviceSort<T>(d_vec, numElements);
}
template <>
inline void bestSort<double>(double * d_vec, const uint numElements) {
mgpuDeviceSort<double>(d_vec, numElements);
}
template<typename T>
results_t<T>* timeSortAndChooseMultiselect(T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
cudaEvent_t start, stop;
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
cudaEventRecord(start, 0);
bestSort<T>(d_vec, numElements);
T * d_output;
uint * d_kList;
cudaMalloc (&d_output, kCount * sizeof (T));
cudaMalloc (&d_kList, kCount * sizeof(uint));
cudaMemcpy (d_kList, kVals, kCount * sizeof (uint), cudaMemcpyHostToDevice);
int threads = MAX_THREADS_PER_BLOCK;
if (kCount < threads)
threads = kCount;
int blocks = (int) ceil (kCount / (float) threads);
copyInChunk<T><<<blocks, threads>>>(d_output, d_vec, d_kList, kCount, numElements);
cudaMemcpy (result->vals, d_output, kCount * sizeof (T), cudaMemcpyDeviceToHost);
//printf("first result: %u \n", result->vals);
cudaFree(d_output);
cudaFree(d_kList);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
cudaFree(d_vec);
return result;
}
/////////////////////////////////////////////////////////////////
// BUCKETMULTISELECT TIMING FUNCTION
/////////////////////////////////////////////////////////////////
template<typename T>
results_t<T>* timeBucketMultiselect (T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
cudaEvent_t start, stop;
cudaDeviceProp dp;
cudaGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
cudaEventRecord(start, 0);
// bucketMultiselectWrapper (T * d_vector, int length, uint * kVals_ori, uint kCount, T * outputs, int blocks, int threads)
BucketMultiselect::bucketMultiselectWrapper(d_vec, numElements, kVals, kCount, result->vals, dp.multiProcessorCount, dp.maxThreadsPerBlock);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
cudaFree(d_vec);
return result;
}
template<typename T>
results_t<T>* timeBucketMultiselectNew2 (T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
cudaEvent_t start, stop;
cudaDeviceProp dp;
cudaGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
cudaEventRecord(start, 0);
// bucketMultiselectWrapper (T * d_vector, int length, uint * kVals_ori, uint kCount, T * outputs, int blocks, int threads)
BucketMultiselectNew2::bucketMultiselectWrapper(d_vec, numElements, kVals, kCount, result->vals, dp.multiProcessorCount, dp.maxThreadsPerBlock);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
cudaFree(d_vec);
return result;
}
/////////////////////////////////////////////////////////////////
// cpu QUICKMULTISELECT TIMING FUNCTION
/////////////////////////////////////////////////////////////////
// FUNCTION TO TIME CPU Based QUICKMULTISELECT
template<typename T>
results_t<T>* timeQuickMultiselect (T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
T * h_vec_copy;
h_vec_copy = (T *) malloc(sizeof(T)*numElements);
results_t<T> * result;
float time;
cudaEvent_t start, stop;
cudaDeviceProp dp;
cudaGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
cudaMemcpy(h_vec_copy, h_vec, numElements*sizeof(T), cudaMemcpyHostToHost);
cudaEventRecord(start, 0);
quickMultiselectWrapper(h_vec_copy, numElements, kVals, kCount, result->vals, dp.multiProcessorCount, dp.maxThreadsPerBlock);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
  cudaFree(d_vec);
  free(h_vec_copy);
return result;
}
/////////////////////////////////////////////////////////////////
// Library Specific SORT AND CHOOSE TIMING FUNCTIONS
/////////////////////////////////////////////////////////////////
// FUNCTION TO TIME SORT&CHOOSE WITH CUB RADIX SORT
template<typename T>
results_t<T>* timeCUBSortAndChooseMultiselect(T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
cudaEvent_t start, stop;
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
cudaEventRecord(start, 0);
cubDeviceSort(d_vec, numElements);
T * d_output;
uint * d_kList;
cudaMalloc (&d_output, kCount * sizeof (T));
cudaMalloc (&d_kList, kCount * sizeof(uint));
cudaMemcpy (d_kList, kVals, kCount * sizeof (uint), cudaMemcpyHostToDevice);
int threads = MAX_THREADS_PER_BLOCK;
if (kCount < threads)
threads = kCount;
int blocks = (int) ceil (kCount / (float) threads);
copyInChunk<T><<<blocks, threads>>>(d_output, d_vec, d_kList, kCount, numElements);
cudaMemcpy (result->vals, d_output, kCount * sizeof (T), cudaMemcpyDeviceToHost);
cudaFree(d_output);
cudaFree(d_kList);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
cudaFree(d_vec);
return result;
}
//FUNCTION TO TIME ModernGPU Sort and Choose
template<typename T>
results_t<T>* timeMGPUSortAndChooseMultiselect(T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
cudaEvent_t start, stop;
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
cudaEventRecord(start, 0);
mgpuDeviceSort(d_vec, numElements);
T * d_output;
uint * d_kList;
cudaMalloc (&d_output, kCount * sizeof (T));
cudaMalloc (&d_kList, kCount * sizeof(uint));
cudaMemcpy (d_kList, kVals, kCount * sizeof (uint), cudaMemcpyHostToDevice);
int threads = MAX_THREADS_PER_BLOCK;
if (kCount < threads)
threads = kCount;
int blocks = (int) ceil (kCount / (float) threads);
copyInChunk<T><<<blocks, threads>>>(d_output, d_vec, d_kList, kCount, numElements);
cudaMemcpy (result->vals, d_output, kCount * sizeof (T), cudaMemcpyDeviceToHost);
cudaFree(d_output);
cudaFree(d_kList);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
cudaFree(d_vec);
return result;
}
/////////////////////////////////////////////////////////////////
// Library Specific BUCKETMULTISELECT TIMING FUNCTIONS
/////////////////////////////////////////////////////////////////
// FUNCTION TO TIME CUB BUCKET MULTISELECT
template<typename T>
results_t<T>* timeBucketMultiselect_cub (T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
cudaEvent_t start, stop;
cudaDeviceProp dp;
cudaGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
cudaEventRecord(start, 0);
// bucketMultiselectWrapper (T * d_vector, int length, uint * kVals_ori, uint kCount, T * outputs, int blocks, int threads)
BucketMultiselect_cub::bucketMultiselectWrapper_cub(d_vec, numElements, kVals, kCount, result->vals, dp.multiProcessorCount, dp.maxThreadsPerBlock);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
cudaFree(d_vec);
return result;
}
// FUNCTION TO TIME MGPU BUCKET MULTISELECT
template<typename T>
results_t<T>* timeBucketMultiselect_mgpu (T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
cudaEvent_t start, stop;
cudaDeviceProp dp;
cudaGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
cudaEventRecord(start, 0);
// bucketMultiselectWrapper (T * d_vector, int length, uint * kVals_ori, uint kCount, T * outputs, int blocks, int threads)
BucketMultiselect_mgpu::bucketMultiselectWrapper_mgpu(d_vec, numElements, kVals, kCount, result->vals, dp.multiProcessorCount, dp.maxThreadsPerBlock);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
cudaFree(d_vec);
return result;
}
// FUNCTION TO TIME THRUST BUCKET MULTISELECT
// This is the original function; it does not use binary search trees.
template<typename T>
results_t<T>* timeBucketMultiselect_thrust (T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
cudaEvent_t start, stop;
cudaDeviceProp dp;
cudaGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
cudaEventRecord(start, 0);
// bucketMultiselectWrapper (T * d_vector, int length, uint * kVals_ori, uint kCount, T * outputs, int blocks, int threads)
BucketMultiselect_thrust::bucketMultiselectWrapper_thrust(d_vec, numElements, kVals, kCount, result->vals, dp.multiProcessorCount, dp.maxThreadsPerBlock);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
cudaFree(d_vec);
return result;
}
// FUNCTION TO TIME NAIVE BUCKET MULTISELECT (Does not use kernel density estimator nor binary search trees; not recommended)
template<typename T>
results_t<T>* timeNaiveBucketMultiselect (T * h_vec, uint numElements, uint * kVals, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
cudaEvent_t start, stop;
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
cudaEventRecord(start, 0);
thrust::device_ptr<T> dev_ptr(d_vec);
thrust::sort(dev_ptr, dev_ptr + numElements);
for (int i = 0; i < kCount; i++)
cudaMemcpy(result->vals + i, d_vec + (numElements - kVals[i]), sizeof (T), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
cudaFree(d_vec);
return result;
}
/***************************************
********* TOP K SELECT ALGORITHMS
****************************************/
template<typename T>
results_t<T>* timeSortAndChooseTopkselect(T * h_vec, uint numElements, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
cudaEvent_t start, stop;
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
cudaEventRecord(start, 0);
bestSort<T>(d_vec, numElements);
cudaMemcpy(result->vals, d_vec, kCount * sizeof(T), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
cudaFree(d_vec);
return result;
}
// FUNCTION TO TIME RANDOMIZED TOP K SELECT
template<typename T>
results_t<T>* timeRandomizedTopkselect (T * h_vec, uint numElements, uint kCount) {
T * d_vec;
results_t<T> * result;
float time;
cudaEvent_t start, stop;
cudaDeviceProp dp;
cudaGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
cudaEventRecord(start, 0);
result->vals = randomizedTopkSelectWrapper(d_vec, numElements, kCount);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
cudaFree(d_vec);
return result;
}
// FUNCTION TO TIME BUCKET TOP K SELECT
template<typename T>
results_t<T>* timeBucketTopkselect (T * h_vec, uint numElements, uint kCount) {
// initialize ks
  uint * kVals = (uint *) malloc(kCount * sizeof(uint));
for (uint i = 0; i < kCount; i++)
kVals[i] = i+1;
T * d_vec;
results_t<T> * result;
float time;
cudaEvent_t start, stop;
cudaDeviceProp dp;
cudaGetDeviceProperties(&dp, GPUNUMBER);
setupForTiming(start, stop, h_vec, &d_vec, &result, numElements, kCount);
cudaEventRecord(start, 0);
BucketMultiselect::bucketMultiselectWrapper(d_vec, numElements, kVals, kCount, result->vals, dp.multiProcessorCount, dp.maxThreadsPerBlock);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
wrapupForTiming(start, stop, time, result);
  cudaFree(d_vec);
  free(kVals);
return result;
}
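/* Hedged addition (not in the original source): every timing routine above returns a
   results_t<T> whose struct and vals array are heap allocated in setupForTiming(),
   and no matching release helper appears in this file, so callers presumably free
   them.  A minimal sketch of such a helper: */
template <typename T>
void freeResults(results_t<T> * result) {
  if (result == NULL) return;
  free(result->vals);   // allocated in setupForTiming
  free(result);         // allocated in setupForTiming
}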
|
10844453030a35e30dcc848a5fb05d6a04552121.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define NUM_THREADS 1000000
#define ARRAY_SIZE 16
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
}
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(&g[i], 1);
}
int main(int argc,char **argv)
{
// Informative printout
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
hipMalloc((void **) &d_array, ARRAY_BYTES);
// Fill the device memory area with zeros .
hipMemset((void *) d_array, 0, ARRAY_BYTES);
// Call kernel with atomic operation
hipLaunchKernelGGL(( increment_atomic), dim3(NUM_THREADS/BLOCK_WIDTH), dim3(BLOCK_WIDTH), 0, 0, d_array);
// Call kernel without atomic operation
// increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
// copy back the array of sums from GPU and print
hipMemcpy(h_array, d_array, ARRAY_BYTES, hipMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
// free GPU memory allocation and exit
hipFree(d_array);
return 0;
}
|
10844453030a35e30dcc848a5fb05d6a04552121.cu
|
#include <stdio.h>
#define NUM_THREADS 1000000
#define ARRAY_SIZE 16
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
}
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(&g[i], 1);
}
int main(int argc,char **argv)
{
// Informative printout
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
cudaMalloc((void **) &d_array, ARRAY_BYTES);
// Fill the device memory area with zeros .
cudaMemset((void *) d_array, 0, ARRAY_BYTES);
// Call kernel with atomic operation
increment_atomic<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
// Call kernel without atomic operation
// increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
// copy back the array of sums from GPU and print
cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
// free GPU memory allocation and exit
cudaFree(d_array);
return 0;
}
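/* Hedged verification sketch (not part of the original exercise): with increment_atomic
   every one of the ARRAY_SIZE slots should receive exactly NUM_THREADS / ARRAY_SIZE =
   1000000 / 16 = 62500 increments, while increment_naive loses updates to the data
   race.  check_counts is an illustrative helper the original program never calls. */
static void check_counts(const int *h_array)
{
    const int expected = NUM_THREADS / ARRAY_SIZE;   // 62500 increments per element
    for (int i = 0; i < ARRAY_SIZE; i++) {
        if (h_array[i] != expected) {
            printf("element %d: got %d, expected %d (lost updates)\n", i, h_array[i], expected);
        }
    }
}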
|
3de1d563c246ffb742ecf1e66e131f28480a72e6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include "parallelSpmv.h"
#define FATAL(msg) \
do {\
fprintf(stderr, "[%s:%d] %s\n", __FILE__, __LINE__, msg);\
exit(-1);\
} while(0)
#define MAXTHREADS 256
#define REP 1000
void meanAndSd(real *mean, real *sd,real *data, int n)
{
real sum = (real) 0.0;
real standardDeviation = (real) 0.0;
for(int i=0; i<n; ++i) {
sum += data[i];
} // end for //
*mean = sum/n;
for(int i=0; i<n; ++i) {
standardDeviation += pow(data[i] - *mean, 2);
} // end for //
*sd=sqrt(standardDeviation/n);
} // end of meanAndSd //
int main(int argc, char *argv[])
{
if (MAXTHREADS > 512) {
printf("need to adjust the spmv() function to acomodate more than 512 threads per block\nQuitting ....\n");
exit(-1);
} // end if //
#include "parallelSpmvData.h"
// verifying number of input parameters //
char exists='t';
char checkSol='f';
if (argc < 3 ) {
printf("Use: %s Matrix_filename InputVector_filename [SolutionVector_filename [# of streams] ] \n", argv[0]);
exists='f';
} // endif //
FILE *fh=NULL;
// testing if matrix file exists
if((fh = fopen(argv[1], "rb") ) == NULL) {
printf("No matrix file found.\n");
exists='f';
} // end if //
// testing if input file exists
if((fh = fopen(argv[2], "rb") ) == NULL) {
printf("No input vector file found.\n");
exists='f';
} // end if //
// testing if output file exists
if (argc >3 ) {
if((fh = fopen(argv[3], "rb") ) == NULL) {
printf("No output vector file found.\n");
exists='f';
} else {
checkSol='t';
} // end if //
} // end if //
if (exists == 'f') {
printf("Quitting.....\n");
exit(0);
} // end if //
if (argc > 4 && atoi(argv[4]) > 0) {
nStreams = atoi(argv[4]);
} else {
// opening matrix file to read mean and sd of number of nonzeros per row
double tmpMean, tmpSD;
fh = fopen(argv[1], "rb");
// reading last two values in file: mean and sd //
fseek(fh, 0L, SEEK_END);
long int offset = ftell(fh)-2*sizeof(double);
fseek(fh, offset, SEEK_SET);
if ( !fread(&tmpMean, sizeof(double), (size_t) 1, fh)) exit(0);
if ( !fread(&tmpSD, sizeof(double), (size_t) 1, fh)) exit(0);
// determining number of streams based on mean and sd
real ratio = tmpSD/tmpMean;
if (ratio < 0.5 && tmpSD < 60.0) {
nStreams = 1;
printf("nStreams: %d\n", nStreams);
} else if (tmpSD > 85.0) {
nStreams = 5;
printf("nStreams: %d\n", nStreams);
} else {
nStreams = 4;
} // end if //
} // end if //
if (fh) fclose(fh);
printf("%s Precision. Solving using %d %s\n", (sizeof(real) == sizeof(double)) ? "Double": "Single", nStreams, (nStreams > 1) ? "streams": "stream" );
stream= (hipStream_t *) malloc(sizeof(hipStream_t) * nStreams);
starRow = (int *) malloc(sizeof(int) * (nStreams + 1));
starRow[0]=0;
reader(&n_global,&nnz_global, starRow,
&row_ptr,&col_idx,&val,
argv[1], nStreams);
// ready to start //
hipError_t cuda_ret;
real *w=NULL;
real *v=NULL; // <-- input vector to be shared later
//real *v_off=NULL; // <-- input vector to be shared later
v = (real *) malloc(n_global*sizeof(real));
w = (real *) malloc(n_global*sizeof(real));
// reading input vector
vectorReader(v, &n_global, argv[2]);
//////////////////////////////////////
// cuda stuff start here
// Allocating device memory for input matrices
cuda_ret = hipMalloc((void **) &rows_d, (n_global+1)*sizeof(int));
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory for rows_d");
cuda_ret = hipMalloc((void **) &cols_d, (nnz_global)*sizeof(int));
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory for cols_d");
cuda_ret = hipMalloc((void **) &vals_d, (nnz_global)*sizeof(real));
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory for vals_d");
cuda_ret = hipMalloc((void **) &v_d, (n_global)*sizeof(real));
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory for x_d");
cuda_ret = hipMalloc((void **) &w_d, (n_global)*sizeof(real));
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory for y_d");
// Copy the input matrices from the host memory to the device memory
cuda_ret = hipMemcpy(rows_d, row_ptr, (n_global+1)*sizeof(int),hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device matrix rows_d");
cuda_ret = hipMemcpy(cols_d, col_idx, (nnz_global)*sizeof(int),hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device matrix cols_d");
cuda_ret = hipMemcpy(vals_d, val, (nnz_global)*sizeof(real),hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device matrix vals_d");
cuda_ret = hipMemcpy(v_d, v, (n_global)*sizeof(real),hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device matrix x_d");
#ifdef USE_TEXTURE
hipTextureDesc td;
memset(&td, 0, sizeof(td));
td.normalizedCoords = 0;
td.addressMode[0] = hipAddressModeClamp;
td.readMode = hipReadModeElementType;
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = v_d;
resDesc.res.linear.sizeInBytes = n_global*sizeof(real);
#ifdef DOUBLE
resDesc.res.linear.desc.f = hipChannelFormatKindUnsigned;
resDesc.res.linear.desc.y = 32;
#else
resDesc.res.linear.desc.f = hipChannelFormatKindFloat;
#endif
resDesc.res.linear.desc.x = 32;
hipTextureObject_t v_t;
cuda_ret = hipCreateTextureObject(&v_t, &resDesc, &td, NULL);
if(cuda_ret != hipSuccess) FATAL("Unable to create text memory v_t");
/*
cuda_ret = hipBindTexture(NULL, v_t, v_d, n_global*sizeof(real));
//cuda_ret = hipBindTexture(NULL, valTex, vals_d, nnz_global*sizeof(real));
*/
#endif
//hipDeviceSetCacheConfig(hipFuncCachePreferL1);
meanNnzPerRow = (real*) malloc(nStreams*sizeof(real));
sd = (real*) malloc(nStreams*sizeof(real ));
block = (dim3 *) malloc(nStreams*sizeof(dim3 ));
grid = (dim3 *) malloc(nStreams*sizeof(dim3 ));
sharedMemorySize = (size_t *) calloc(nStreams, sizeof(size_t));
for (int s=0; s<nStreams; ++s) {
block[s].x = 1;
block[s].y = 1;
block[s].z = 1;
grid[s].x = 1;
grid[s].y = 1;
grid[s].z = 1;
} // end for //
for (int s=0; s<nStreams; ++s) {
int nrows = starRow[s+1]-starRow[s];
/////////////////////////////////////////////////////
// determining the standard deviation of the nnz per row
real *temp=(real *) calloc(nrows,sizeof(real));
for (int row=starRow[s], i=0; row<starRow[s]+nrows; ++row, ++i) {
temp[i] = row_ptr[row+1] - row_ptr[row];
} // end for //
meanAndSd(&meanNnzPerRow[s],&sd[s],temp, nrows);
//printf("file: %s, line: %d, gpu on-prcoc: %d, mean: %7.3f, sd: %7.3f using: %s\n", __FILE__, __LINE__, s , meanNnzPerRow[s], sd[s], (meanNnzPerRow[s] + 0.5*sd[s] < 32) ? "spmv0": "spmv1" );
free(temp);
/////////////////////////////////////////////////////
//cuda_ret = hipStreamCreateWithFlags(&stream0[gpu], hipStreamDefault);
cuda_ret = hipStreamCreateWithFlags(&stream[s], hipStreamNonBlocking ) ;
if(cuda_ret != hipSuccess) FATAL("Unable to create stream0 ");
printf("In Stream: %d\n",s);
/*
if (meanNnzPerRow[s] < 10 && parameter2Adjust*sd[s] < warpSize) {
// these mean use scalar spmv
if (meanNnzPerRow[s] < (real) 4.5) {
block[s].x=128;
} else if (meanNnzPerRow[s] < (real) 14.4) {
block[s].x=64;
} else {
block[s].x=32;
} // end if //
grid[s].x = ( ( nrows + block[s].x -1) /block[s].x );
printf("using scalar spmv for on matrix, blockSize: [%d, %d] %f, %f\n",block[s].x,block[s].y, meanNnzPerRow[s], sd[s]) ;
} else {
// these mean use vector spmv
if (meanNnzPerRow[s] > 10.0*warpSize) {
block[s].x=2*warpSize;
} else if (meanNnzPerRow[s] > 4.0*warpSize) {
block[s].x=warpSize/2;
} else {
block[s].x=warpSize/4;
} // end if //
block[s].y=MAXTHREADS/block[s].x;
grid[s].x = ( (nrows + block[s].y - 1) / block[s].y ) ;
sharedMemorySize[s]=block[s].x*block[s].y*sizeof(real);
printf("using vector spmv for on matrix, blockSize: [%d, %d] %f, %f\n",block[s].x,block[s].y, meanNnzPerRow[s], sd[s]) ;
} // end if //
*/
// these mean use vector spmv
real limit=meanNnzPerRow[s] + parameter2Adjust*sd[s];
if ( limit < 4.5 ) {
block[s].x=warpSize/32;
} else if (limit < 6.95 ) {
block[s].x=warpSize/16;
} else if (limit < 15.5 ) {
block[s].x=warpSize/8;
} else if (limit < 74.0 ) {
block[s].x=warpSize/4;
} else if (limit < 300.0 ) {
block[s].x=warpSize/2;
} else if (limit < 350.0 ) {
block[s].x=warpSize;
} else if (limit < 1000.0 ) {
block[s].x=warpSize*2;
} else if (limit < 2000.0 ) {
block[s].x=warpSize*4;
} else if (limit < 3000.0 ) {
block[s].x=warpSize*8;
} else {
block[s].x=warpSize*16;
} // end if //
if (block[s].x > MAXTHREADS) {
block[s].x=512;
block[s].y=1;
} else {
block[s].y=MAXTHREADS/block[s].x;
} // end if //
grid[s].x = ( (nrows + block[s].y - 1) / block[s].y ) ;
sharedMemorySize[s]=block[s].x*block[s].y*sizeof(real);
printf("using vector spmv for on matrix, blockSize: [%d, %d] %f, %f\n",block[s].x,block[s].y, meanNnzPerRow[s], sd[s]) ;
} // end for //
// Timing should begin here//
struct timeval tp; // timer
double elapsed_time;
gettimeofday(&tp,NULL); // Unix timer
elapsed_time = -(tp.tv_sec*1.0e6 + tp.tv_usec);
for (int t=0; t<REP; ++t) {
//cuda_ret = hipMemset(w_d, 0, (size_t) n_global*sizeof(real) );
//if(cuda_ret != hipSuccess) FATAL("Unable to set device for matrix w_d");
for (int s=0; s<nStreams; ++s) {
const int sRow = starRow[s];
const int nrows = starRow[s+1]-starRow[s];
#ifdef USE_TEXTURE
hipLaunchKernelGGL(( spmv), dim3(grid[s]), dim3(block[s]), sharedMemorySize[s], stream[s] , (w_d+sRow), v_t, vals_d, (rows_d+sRow), (cols_d), nrows, 1.0,0.0);
#else
hipLaunchKernelGGL(( spmv), dim3(grid[s]), dim3(block[s]), sharedMemorySize[s], stream[s] , (w_d+sRow), v_d, vals_d, (rows_d+sRow), (cols_d), nrows, 1.0,0.0);
#endif
} // end for //
for (int s=0; s<nStreams; ++s) {
//hipStreamSynchronize(NULL);
hipStreamSynchronize(stream[s]);
} // end for //
} // end for //
gettimeofday(&tp,NULL);
elapsed_time += (tp.tv_sec*1.0e6 + tp.tv_usec);
printf ("Total time was %f seconds, GFLOPS: %f, GBytes/s: %f\n", elapsed_time*1.0e-6,
(2.0*nnz_global+ 3.0*n_global)*REP*1.0e-3/elapsed_time,
(nnz_global*(2*sizeof(real) + sizeof(int))+n_global*(sizeof(real)+sizeof(int)))*REP*1.0e-3/elapsed_time );
cuda_ret = hipMemcpy(w, w_d, (n_global)*sizeof(real),hipMemcpyDeviceToHost);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device matrix y_d back to host");
// cuda stuff ends here
//////////////////////////////////////
if (checkSol=='t') {
real *sol=NULL;
sol = (real *) malloc((n_global)*sizeof(real));
// reading input vector
vectorReader(sol, &n_global, argv[3]);
int row=0;
real tolerance = 1.0e-08;
if (sizeof(real) != sizeof(double) ) {
tolerance = 1.0e-02;
} // end if //
real error;
do {
error = fabs(sol[row] - w[row]) /fabs(sol[row]);
if ( error > tolerance ) break;
++row;
} while (row < n_global); // end do-while //
if (row == n_global) {
printf("Solution match in GPU\n");
} else {
printf("For Matrix %s, solution does not match at element %d in GPU %20.13e --> %20.13e error -> %20.13e, tolerance: %20.13e \n",
argv[1], (row+1), sol[row], w[row], error , tolerance );
} // end if //
free(sol);
} // end if //
free(w);
free(v);
#ifdef USE_TEXTURE
hipDestroyTextureObject(v_t);
#endif
#include "parallelSpmvCleanData.h"
return 0;
} // end main() //
|
3de1d563c246ffb742ecf1e66e131f28480a72e6.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include "parallelSpmv.h"
#define FATAL(msg) \
do {\
fprintf(stderr, "[%s:%d] %s\n", __FILE__, __LINE__, msg);\
exit(-1);\
} while(0)
#define MAXTHREADS 256
#define REP 1000
void meanAndSd(real *mean, real *sd,real *data, int n)
{
real sum = (real) 0.0;
real standardDeviation = (real) 0.0;
for(int i=0; i<n; ++i) {
sum += data[i];
} // end for //
*mean = sum/n;
for(int i=0; i<n; ++i) {
standardDeviation += pow(data[i] - *mean, 2);
} // end for //
*sd=sqrt(standardDeviation/n);
} // end of meanAndSd //
int main(int argc, char *argv[])
{
if (MAXTHREADS > 512) {
printf("need to adjust the spmv() function to acomodate more than 512 threads per block\nQuitting ....\n");
exit(-1);
} // end if //
#include "parallelSpmvData.h"
// verifying number of input parameters //
char exists='t';
char checkSol='f';
if (argc < 3 ) {
printf("Use: %s Matrix_filename InputVector_filename [SolutionVector_filename [# of streams] ] \n", argv[0]);
exists='f';
} // endif //
FILE *fh=NULL;
// testing if matrix file exists
if((fh = fopen(argv[1], "rb") ) == NULL) {
printf("No matrix file found.\n");
exists='f';
} // end if //
// testing if input file exists
if((fh = fopen(argv[2], "rb") ) == NULL) {
printf("No input vector file found.\n");
exists='f';
} // end if //
// testing if output file exists
if (argc >3 ) {
if((fh = fopen(argv[3], "rb") ) == NULL) {
printf("No output vector file found.\n");
exists='f';
} else {
checkSol='t';
} // end if //
} // end if //
if (exists == 'f') {
printf("Quitting.....\n");
exit(0);
} // end if //
if (argc > 4 && atoi(argv[4]) > 0) {
nStreams = atoi(argv[4]);
} else {
// opening matrix file to read mean and sd of number of nonzeros per row
double tmpMean, tmpSD;
fh = fopen(argv[1], "rb");
// reading last two values in file: mean and sd //
fseek(fh, 0L, SEEK_END);
long int offset = ftell(fh)-2*sizeof(double);
fseek(fh, offset, SEEK_SET);
if ( !fread(&tmpMean, sizeof(double), (size_t) 1, fh)) exit(0);
if ( !fread(&tmpSD, sizeof(double), (size_t) 1, fh)) exit(0);
// determining number of streams based on mean and sd
real ratio = tmpSD/tmpMean;
if (ratio < 0.5 && tmpSD < 60.0) {
nStreams = 1;
printf("nStreams: %d\n", nStreams);
} else if (tmpSD > 85.0) {
nStreams = 5;
printf("nStreams: %d\n", nStreams);
} else {
nStreams = 4;
} // end if //
} // end if //
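/* Hedged worked example (the statistics are assumptions for illustration): a matrix
   whose nonzeros-per-row have mean 100.0 and sd 90.0 gives ratio = 0.9, so the
   sd > 85.0 branch fires and nStreams = 5; mean 50.0 with sd 20.0 gives ratio = 0.4
   and sd < 60.0, so a single stream is used; anything else falls back to 4 streams. */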
if (fh) fclose(fh);
printf("%s Precision. Solving using %d %s\n", (sizeof(real) == sizeof(double)) ? "Double": "Single", nStreams, (nStreams > 1) ? "streams": "stream" );
stream= (cudaStream_t *) malloc(sizeof(cudaStream_t) * nStreams);
starRow = (int *) malloc(sizeof(int) * (nStreams + 1));
starRow[0]=0;
reader(&n_global,&nnz_global, starRow,
&row_ptr,&col_idx,&val,
argv[1], nStreams);
// ready to start //
cudaError_t cuda_ret;
real *w=NULL;
real *v=NULL; // <-- input vector to be shared later
//real *v_off=NULL; // <-- input vector to be shared later
v = (real *) malloc(n_global*sizeof(real));
w = (real *) malloc(n_global*sizeof(real));
// reading input vector
vectorReader(v, &n_global, argv[2]);
//////////////////////////////////////
// cuda stuff start here
// Allocating device memory for input matrices
cuda_ret = cudaMalloc((void **) &rows_d, (n_global+1)*sizeof(int));
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory for rows_d");
cuda_ret = cudaMalloc((void **) &cols_d, (nnz_global)*sizeof(int));
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory for cols_d");
cuda_ret = cudaMalloc((void **) &vals_d, (nnz_global)*sizeof(real));
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory for vals_d");
cuda_ret = cudaMalloc((void **) &v_d, (n_global)*sizeof(real));
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory for x_d");
cuda_ret = cudaMalloc((void **) &w_d, (n_global)*sizeof(real));
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory for y_d");
// Copy the input matrices from the host memory to the device memory
cuda_ret = cudaMemcpy(rows_d, row_ptr, (n_global+1)*sizeof(int),cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device matrix rows_d");
cuda_ret = cudaMemcpy(cols_d, col_idx, (nnz_global)*sizeof(int),cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device matrix cols_d");
cuda_ret = cudaMemcpy(vals_d, val, (nnz_global)*sizeof(real),cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device matrix vals_d");
cuda_ret = cudaMemcpy(v_d, v, (n_global)*sizeof(real),cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device matrix x_d");
#ifdef USE_TEXTURE
cudaTextureDesc td;
memset(&td, 0, sizeof(td));
td.normalizedCoords = 0;
td.addressMode[0] = cudaAddressModeClamp;
td.readMode = cudaReadModeElementType;
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = v_d;
resDesc.res.linear.sizeInBytes = n_global*sizeof(real);
#ifdef DOUBLE
resDesc.res.linear.desc.f = cudaChannelFormatKindUnsigned;
resDesc.res.linear.desc.y = 32;
#else
resDesc.res.linear.desc.f = cudaChannelFormatKindFloat;
#endif
resDesc.res.linear.desc.x = 32;
cudaTextureObject_t v_t;
cuda_ret = cudaCreateTextureObject(&v_t, &resDesc, &td, NULL);
if(cuda_ret != cudaSuccess) FATAL("Unable to create text memory v_t");
/*
cuda_ret = cudaBindTexture(NULL, v_t, v_d, n_global*sizeof(real));
//cuda_ret = cudaBindTexture(NULL, valTex, vals_d, nnz_global*sizeof(real));
*/
#endif
//cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
meanNnzPerRow = (real*) malloc(nStreams*sizeof(real));
sd = (real*) malloc(nStreams*sizeof(real ));
block = (dim3 *) malloc(nStreams*sizeof(dim3 ));
grid = (dim3 *) malloc(nStreams*sizeof(dim3 ));
sharedMemorySize = (size_t *) calloc(nStreams, sizeof(size_t));
for (int s=0; s<nStreams; ++s) {
block[s].x = 1;
block[s].y = 1;
block[s].z = 1;
grid[s].x = 1;
grid[s].y = 1;
grid[s].z = 1;
} // end for //
for (int s=0; s<nStreams; ++s) {
int nrows = starRow[s+1]-starRow[s];
/////////////////////////////////////////////////////
// determining the standard deviation of the nnz per row
real *temp=(real *) calloc(nrows,sizeof(real));
for (int row=starRow[s], i=0; row<starRow[s]+nrows; ++row, ++i) {
temp[i] = row_ptr[row+1] - row_ptr[row];
} // end for //
meanAndSd(&meanNnzPerRow[s],&sd[s],temp, nrows);
//printf("file: %s, line: %d, gpu on-prcoc: %d, mean: %7.3f, sd: %7.3f using: %s\n", __FILE__, __LINE__, s , meanNnzPerRow[s], sd[s], (meanNnzPerRow[s] + 0.5*sd[s] < 32) ? "spmv0": "spmv1" );
free(temp);
/////////////////////////////////////////////////////
//cuda_ret = cudaStreamCreateWithFlags(&stream0[gpu], cudaStreamDefault);
cuda_ret = cudaStreamCreateWithFlags(&stream[s], cudaStreamNonBlocking ) ;
if(cuda_ret != cudaSuccess) FATAL("Unable to create stream0 ");
printf("In Stream: %d\n",s);
/*
if (meanNnzPerRow[s] < 10 && parameter2Adjust*sd[s] < warpSize) {
// these mean use scalar spmv
if (meanNnzPerRow[s] < (real) 4.5) {
block[s].x=128;
} else if (meanNnzPerRow[s] < (real) 14.4) {
block[s].x=64;
} else {
block[s].x=32;
} // end if //
grid[s].x = ( ( nrows + block[s].x -1) /block[s].x );
printf("using scalar spmv for on matrix, blockSize: [%d, %d] %f, %f\n",block[s].x,block[s].y, meanNnzPerRow[s], sd[s]) ;
} else {
// these mean use vector spmv
if (meanNnzPerRow[s] > 10.0*warpSize) {
block[s].x=2*warpSize;
} else if (meanNnzPerRow[s] > 4.0*warpSize) {
block[s].x=warpSize/2;
} else {
block[s].x=warpSize/4;
} // end if //
block[s].y=MAXTHREADS/block[s].x;
grid[s].x = ( (nrows + block[s].y - 1) / block[s].y ) ;
sharedMemorySize[s]=block[s].x*block[s].y*sizeof(real);
printf("using vector spmv for on matrix, blockSize: [%d, %d] %f, %f\n",block[s].x,block[s].y, meanNnzPerRow[s], sd[s]) ;
} // end if //
*/
// these mean use vector spmv
real limit=meanNnzPerRow[s] + parameter2Adjust*sd[s];
if ( limit < 4.5 ) {
block[s].x=warpSize/32;
} else if (limit < 6.95 ) {
block[s].x=warpSize/16;
} else if (limit < 15.5 ) {
block[s].x=warpSize/8;
} else if (limit < 74.0 ) {
block[s].x=warpSize/4;
} else if (limit < 300.0 ) {
block[s].x=warpSize/2;
} else if (limit < 350.0 ) {
block[s].x=warpSize;
} else if (limit < 1000.0 ) {
block[s].x=warpSize*2;
} else if (limit < 2000.0 ) {
block[s].x=warpSize*4;
} else if (limit < 3000.0 ) {
block[s].x=warpSize*8;
} else {
block[s].x=warpSize*16;
} // end if //
if (block[s].x > MAXTHREADS) {
block[s].x=512;
block[s].y=1;
} else {
block[s].y=MAXTHREADS/block[s].x;
} // end if //
grid[s].x = ( (nrows + block[s].y - 1) / block[s].y ) ;
sharedMemorySize[s]=block[s].x*block[s].y*sizeof(real);
printf("using vector spmv for on matrix, blockSize: [%d, %d] %f, %f\n",block[s].x,block[s].y, meanNnzPerRow[s], sd[s]) ;
} // end for //
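/* Hedged worked example (warpSize and parameter2Adjust come from parallelSpmvData.h;
   the values below are assumptions for illustration): with warpSize = 32 and
   parameter2Adjust = 0.5, a stream whose rows have meanNnzPerRow = 20.0 and sd = 10.0
   gets limit = 20 + 0.5*10 = 25, which lands in the (15.5, 74.0) band, so
   block.x = warpSize/4 = 8, block.y = MAXTHREADS/8 = 32, and with nrows = 10000 the
   grid is ceil(10000/32) = 313 blocks, each using 8*32*sizeof(real) bytes of shared memory. */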
// Timing should begin here//
struct timeval tp; // timer
double elapsed_time;
gettimeofday(&tp,NULL); // Unix timer
elapsed_time = -(tp.tv_sec*1.0e6 + tp.tv_usec);
for (int t=0; t<REP; ++t) {
//cuda_ret = cudaMemset(w_d, 0, (size_t) n_global*sizeof(real) );
//if(cuda_ret != cudaSuccess) FATAL("Unable to set device for matrix w_d");
for (int s=0; s<nStreams; ++s) {
const int sRow = starRow[s];
const int nrows = starRow[s+1]-starRow[s];
#ifdef USE_TEXTURE
spmv<<<grid[s], block[s], sharedMemorySize[s], stream[s] >>>((w_d+sRow), v_t, vals_d, (rows_d+sRow), (cols_d), nrows, 1.0,0.0);
#else
spmv<<<grid[s], block[s], sharedMemorySize[s], stream[s] >>>((w_d+sRow), v_d, vals_d, (rows_d+sRow), (cols_d), nrows, 1.0,0.0);
#endif
} // end for //
for (int s=0; s<nStreams; ++s) {
//cudaStreamSynchronize(NULL);
cudaStreamSynchronize(stream[s]);
} // end for //
} // end for //
gettimeofday(&tp,NULL);
elapsed_time += (tp.tv_sec*1.0e6 + tp.tv_usec);
printf ("Total time was %f seconds, GFLOPS: %f, GBytes/s: %f\n", elapsed_time*1.0e-6,
(2.0*nnz_global+ 3.0*n_global)*REP*1.0e-3/elapsed_time,
(nnz_global*(2*sizeof(real) + sizeof(int))+n_global*(sizeof(real)+sizeof(int)))*REP*1.0e-3/elapsed_time );
cuda_ret = cudaMemcpy(w, w_d, (n_global)*sizeof(real),cudaMemcpyDeviceToHost);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device matrix y_d back to host");
// cuda stuff ends here
//////////////////////////////////////
if (checkSol=='t') {
real *sol=NULL;
sol = (real *) malloc((n_global)*sizeof(real));
// reading input vector
vectorReader(sol, &n_global, argv[3]);
int row=0;
real tolerance = 1.0e-08;
if (sizeof(real) != sizeof(double) ) {
tolerance = 1.0e-02;
} // end if //
real error;
do {
error = fabs(sol[row] - w[row]) /fabs(sol[row]);
if ( error > tolerance ) break;
++row;
} while (row < n_global); // end do-while //
if (row == n_global) {
printf("Solution match in GPU\n");
} else {
printf("For Matrix %s, solution does not match at element %d in GPU %20.13e --> %20.13e error -> %20.13e, tolerance: %20.13e \n",
argv[1], (row+1), sol[row], w[row], error , tolerance );
} // end if //
free(sol);
} // end if //
free(w);
free(v);
#ifdef USE_TEXTURE
cudaDestroyTextureObject(v_t);
#endif
#include "parallelSpmvCleanData.h"
return 0;
} // end main() //
|
ca99d9ad9fd4087f130abcaaed00cc53235e7656.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "../ginkgo/GOrder.h"
#include "../include/lglist.h"
#include <thrust/device_vector.h>
#define def_dvec(t) thrust::device_vector<t>
using namespace std;
const int MAX_LENGTH = 20;
typedef gpu_ginkgo::Order gorder;
__device__ void printList(gpu_linearized_stl::list<gpu_ginkgo::Order, MAX_LENGTH> &ol){
if(ol.empty()){
printf("\n===== This is an empty order list =====\n\n");
return;
}
for(auto p=ol.begin(); p!=ol.end(); ol.increment(p)) ol.at(p).showOrderInfo();
printf("\n");
}
__global__ void test(){
gpu_linearized_stl::list<gpu_ginkgo::Order, MAX_LENGTH> ol;
printList(ol);
//
printf("Adding New Order\n");
ol.push_back(gorder(1024, 15, 100, 0));
printf("ol.push_back(gorder(1024, 15, 100, 0));\n");
printList(ol);
//
printf("Adding New Order\n");
ol.push_back(gorder(1024, 15, 101, 15));
printf("ol.push_back(gorder(1024, 15, 101, 15));\n");
printList(ol);
//
printf("Adding New Order\n");
ol.push_back(gorder(1024, 15, 102, 30));
printf("ol.push_back(gorder(1024, 15, 102, 30));\n");
printList(ol);
//
printf("Adding New Order\n");
ol.push_back(gorder(1024, 15, 103, 45));
printf("ol.push_back(gorder(1024, 15, 103, 45));\n");
printList(ol);
//
int p = ol.begin();
printf("Acking The first Order\n");
ol.at(p).getAcked(25);
printf("int p = ol.begin();ol.at(p).getAcked(25);");
printList(ol);
//
ol.at(p).qUpdateAgainstBU(50);
ol.increment(p);
ol.at(p).getAcked(50);
printf("ol.at(p).qUpdateAgainstBU(50);ol.increment(p);ol.at(p).getAcked(50);");
printList(ol);
//
ol.at(ol.begin()).qUpdateAgainstBU(75);
ol.at(p).qUpdateAgainstBU(75);
ol.increment(p);
ol.at(p).getAcked(75);
printf("ol.at(p).qUpdateAgainstBU(75);ol.increment(p);ol.at(p).getAcked(75);");
printList(ol);
//
int j = ol.begin();
for(int i=0;i<3;i++,ol.increment(j)) ol.at(j).qUpdateAgainstBU(100);
ol.at(j).getAcked(100);
printf("for(int i=0, j= ol.begin();i<3;i++,ol.increment(j)) ol.at(j).qUpdateAgainstBU(100);ol.at(j).getAcked(100);");
printList(ol);
//
printf("Book Volume increases to 150\n");
j = ol.begin();
for(int i=0;i<4;i++,ol.increment(j)) ol.at(j).qUpdateAgainstBU(150);
printf("for(int i=0;i<4;i++,ol.increment(j)) ol.at(j).qUpdateAgainstBU(150);");
printList(ol);
//
printf("Book Volume decreases to 45\n");
j = ol.begin();
for(int i=0;i<4;i++,ol.increment(j)) ol.at(j).qUpdateAgainstBU(45);
printf("for(int i=0;i<4;i++,ol.increment(j)) ol.at(j).qUpdateAgainstBU(45);");
printList(ol);
//
printf("A trade of 60 with price == 1024 is coming against us!!!\n");
int filled_qty = 0, filled_pnl = 0, dq = 0, dqs = 0, tz = 60, prc = 1024;
int bz = 0;
for(j=ol.begin(); j!=ol.end();){
if(ol.at(j).price != prc){
ol.increment(j);
continue;
}
ol.at(j).qUpdateAgainstTrade(dq, dqs);
if(ol.at(j).filledAgainstTrade(tz, filled_qty, filled_pnl, dq, dqs, 1024, bz)){
j = ol.erase(j);
}
else ol.increment(j);
}
printList(ol);
}
int main(){
def_dvec(float) dev_out(1, 0);
hipLaunchKernelGGL(( test), dim3(1), dim3(1), 0, 0, );
hipDeviceSynchronize(); // wait for the kernel so its device-side printf output is flushed before exit
return 0;
}
|
ca99d9ad9fd4087f130abcaaed00cc53235e7656.cu
|
#include <iostream>
#include "../ginkgo/GOrder.h"
#include "../include/lglist.h"
#include <thrust/device_vector.h>
#define def_dvec(t) thrust::device_vector<t>
using namespace std;
const int MAX_LENGTH = 20;
typedef gpu_ginkgo::Order gorder;
__device__ void printList(gpu_linearized_stl::list<gpu_ginkgo::Order, MAX_LENGTH> &ol){
if(ol.empty()){
printf("\n===== This is an empty order list =====\n\n");
return;
}
for(auto p=ol.begin(); p!=ol.end(); ol.increment(p)) ol.at(p).showOrderInfo();
printf("\n");
}
__global__ void test(){
gpu_linearized_stl::list<gpu_ginkgo::Order, MAX_LENGTH> ol;
printList(ol);
//
printf("Adding New Order\n");
ol.push_back(gorder(1024, 15, 100, 0));
printf("ol.push_back(gorder(1024, 15, 100, 0));\n");
printList(ol);
//
printf("Adding New Order\n");
ol.push_back(gorder(1024, 15, 101, 15));
printf("ol.push_back(gorder(1024, 15, 101, 15));\n");
printList(ol);
//
printf("Adding New Order\n");
ol.push_back(gorder(1024, 15, 102, 30));
printf("ol.push_back(gorder(1024, 15, 102, 30));\n");
printList(ol);
//
printf("Adding New Order\n");
ol.push_back(gorder(1024, 15, 103, 45));
printf("ol.push_back(gorder(1024, 15, 103, 45));\n");
printList(ol);
//
int p = ol.begin();
printf("Acking The first Order\n");
ol.at(p).getAcked(25);
printf("int p = ol.begin();ol.at(p).getAcked(25);");
printList(ol);
//
ol.at(p).qUpdateAgainstBU(50);
ol.increment(p);
ol.at(p).getAcked(50);
printf("ol.at(p).qUpdateAgainstBU(50);ol.increment(p);ol.at(p).getAcked(50);");
printList(ol);
//
ol.at(ol.begin()).qUpdateAgainstBU(75);
ol.at(p).qUpdateAgainstBU(75);
ol.increment(p);
ol.at(p).getAcked(75);
printf("ol.at(p).qUpdateAgainstBU(75);ol.increment(p);ol.at(p).getAcked(75);");
printList(ol);
//
int j = ol.begin();
for(int i=0;i<3;i++,ol.increment(j)) ol.at(j).qUpdateAgainstBU(100);
ol.at(j).getAcked(100);
printf("for(int i=0, j= ol.begin();i<3;i++,ol.increment(j)) ol.at(j).qUpdateAgainstBU(100);ol.at(j).getAcked(100);");
printList(ol);
//
printf("Book Volume increases to 150\n");
j = ol.begin();
for(int i=0;i<4;i++,ol.increment(j)) ol.at(j).qUpdateAgainstBU(150);
printf("for(int i=0;i<4;i++,ol.increment(j)) ol.at(j).qUpdateAgainstBU(150);");
printList(ol);
//
printf("Book Volume decreases to 45\n");
j = ol.begin();
for(int i=0;i<4;i++,ol.increment(j)) ol.at(j).qUpdateAgainstBU(45);
printf("for(int i=0;i<4;i++,ol.increment(j)) ol.at(j).qUpdateAgainstBU(45);");
printList(ol);
//
printf("A trade of 60 with price == 1024 is coming against us!!!\n");
int filled_qty = 0, filled_pnl = 0, dq = 0, dqs = 0, tz = 60, prc = 1024;
int bz = 0;
for(j=ol.begin(); j!=ol.end();){
if(ol.at(j).price != prc){
ol.increment(j);
continue;
}
ol.at(j).qUpdateAgainstTrade(dq, dqs);
if(ol.at(j).filledAgainstTrade(tz, filled_qty, filled_pnl, dq, dqs, 1024, bz)){
j = ol.erase(j);
}
else ol.increment(j);
}
printList(ol);
}
int main(){
def_dvec(float) dev_out(1, 0);
test<<<1, 1>>>();
cudaDeviceSynchronize(); // wait for the kernel so its device-side printf output is flushed before exit
return 0;
}
|
e2f9fb8fb17163c1f2ddc3bb7bcc457baaad06b1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include<time.h>
#include<stdlib.h> // for malloc
__global__ void device_mulmatrix(int *dev_matrixA, int *dev_matrixB, int *dev_matrixRES)
{
 // one block per result element: blockIdx.x is the row, blockIdx.y the column
 int x = blockIdx.x;
 int y = blockIdx.y;
 int sum = 0;
 for(int i=0;i<100;i++){
  sum += dev_matrixA[x*100+i]*dev_matrixB[i*100+y];
 }
 dev_matrixRES[x*100+y] = sum;
}
int main(void)
{
clock_t begin, end;
double time_spent;
begin = clock(); //End of begin time measurement routine here...
int *dev_matrixA, *dev_matrixB,*dev_matrixRES;
 int *host_matA, *host_matB;   // host matrices stored as flat 100x100 arrays
 int i,j;
 host_matA = (int *) malloc(sizeof(int)*100*100);
 host_matB = (int *) malloc(sizeof(int)*100*100);
 for(i=0;i<100;i++){
  for(j=0;j<100;j++){
   host_matA[i*100+j] = 2*i+j;
   host_matB[i*100+j] = 2*j+i*i;
  }
 }
 hipMalloc((void**)&dev_matrixA,sizeof(int)*100*100);
 hipMalloc((void**)&dev_matrixB,sizeof(int)*100*100);
 hipMalloc((void**)&dev_matrixRES,sizeof(int)*100*100);
 dim3 grid(100,100);
 // copy the host matrices to the device (destination first, then source)
 hipMemcpy(dev_matrixA, host_matA, sizeof(int)*100*100, hipMemcpyHostToDevice);
 hipMemcpy(dev_matrixB, host_matB, sizeof(int)*100*100, hipMemcpyHostToDevice);
 hipLaunchKernelGGL(( device_mulmatrix), dim3(grid),dim3(1), 0, 0, dev_matrixA,dev_matrixB, dev_matrixRES);
 hipDeviceSynchronize(); // kernel launches are asynchronous; wait before stopping the timer
 //Measure end time and hence time req for app execution
 end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("Execution time: %lf",time_spent);
return 0;
}
|
e2f9fb8fb17163c1f2ddc3bb7bcc457baaad06b1.cu
|
#include <stdio.h>
#include<time.h>
#include<stdlib.h> // for malloc
__global__ void device_mulmatrix(int *dev_matrixA, int *dev_matrixB, int *dev_matrixRES)
{
 // one block per result element: blockIdx.x is the row, blockIdx.y the column
 int x = blockIdx.x;
 int y = blockIdx.y;
 int sum = 0;
 for(int i=0;i<100;i++){
  sum += dev_matrixA[x*100+i]*dev_matrixB[i*100+y];
 }
 dev_matrixRES[x*100+y] = sum;
}
int main(void)
{
clock_t begin, end;
double time_spent;
begin = clock(); //End of begin time measurement routine here...
int *dev_matrixA, *dev_matrixB,*dev_matrixRES;
 int *host_matA, *host_matB;   // host matrices stored as flat 100x100 arrays
 int i,j;
 host_matA = (int *) malloc(sizeof(int)*100*100);
 host_matB = (int *) malloc(sizeof(int)*100*100);
 for(i=0;i<100;i++){
  for(j=0;j<100;j++){
   host_matA[i*100+j] = 2*i+j;
   host_matB[i*100+j] = 2*j+i*i;
  }
 }
 cudaMalloc((void**)&dev_matrixA,sizeof(int)*100*100);
 cudaMalloc((void**)&dev_matrixB,sizeof(int)*100*100);
 cudaMalloc((void**)&dev_matrixRES,sizeof(int)*100*100);
 dim3 grid(100,100);
 // copy the host matrices to the device (destination first, then source)
 cudaMemcpy(dev_matrixA, host_matA, sizeof(int)*100*100, cudaMemcpyHostToDevice);
 cudaMemcpy(dev_matrixB, host_matB, sizeof(int)*100*100, cudaMemcpyHostToDevice);
 device_mulmatrix<<<grid,1>>>(dev_matrixA,dev_matrixB, dev_matrixRES);
 cudaDeviceSynchronize(); // kernel launches are asynchronous; wait before stopping the timer
 //Measure end time and hence time req for app execution
 end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("Execution time: %lf",time_spent);
return 0;
}
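/* Hedged verification sketch (illustrative, not part of the original assignment): a
   host-side reference product that the device result could be checked against after
   copying dev_matrixRES back with cudaMemcpy(..., cudaMemcpyDeviceToHost).  The
   original program never defines or calls this helper. */
static void host_mulmatrix(const int *a, const int *b, int *res)
{
    for (int x = 0; x < 100; x++) {
        for (int y = 0; y < 100; y++) {
            int sum = 0;
            for (int i = 0; i < 100; i++)
                sum += a[x * 100 + i] * b[i * 100 + y];
            res[x * 100 + y] = sum;
        }
    }
}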
|
9456d6b0b1f244bf7a1ab9f1f0e0c7076a1b5e8b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
int main(int argc, char *argv[])
{
if(argc != 2)
exit(2);
int N = atoi(argv[1]);
int first[N][N], second[N][N], multiply[N][N];
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
first[i][j] = rand()%10;
second[i][j] = rand()%10;
multiply[i][j] = 1;
}
}
//clock_t begin = clock();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
int sum = 0;
for (int k = 0; k < N; k++)
sum += first[i][k] * second[k][j];
multiply[i][j] = sum;
}
}
//clock_t end = clock();
//double time_spent = (double)(end - begin) / (CLOCKS_PER_SEC / 1000);
//fprintf(stdout, "%f", time_spent);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
fprintf(stdout, "%f", milliseconds);
return 0;
}
|
9456d6b0b1f244bf7a1ab9f1f0e0c7076a1b5e8b.cu
|
#include <assert.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
int main(int argc, char *argv[])
{
if(argc != 2)
exit(2);
int N = atoi(argv[1]);
int first[N][N], second[N][N], multiply[N][N];
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
first[i][j] = rand()%10;
second[i][j] = rand()%10;
multiply[i][j] = 1;
}
}
//clock_t begin = clock();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
int sum = 0;
for (int k = 0; k < N; k++)
sum += first[i][k] * second[k][j];
multiply[i][j] = sum;
}
}
//clock_t end = clock();
//double time_spent = (double)(end - begin) / (CLOCKS_PER_SEC / 1000);
//fprintf(stdout, "%f", time_spent);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
fprintf(stdout, "%f", milliseconds);
return 0;
}
|
24dfe6378283e24b58767414e3fab584be86e886.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Sample code for edge softmax.*/
#include <iostream>
#include <cstdlib>
#include <limits>
#include <time.h>
#include <hip/hip_runtime.h>
#include <minigun/minigun.h>
#include "../samples_utils.h"
#include "../samples_io.h"
struct EdgeMax;
struct MinusMaxExpSum;
struct GData {
int32_t dim = 0;
float* sum{nullptr}; // ndata
float* max{nullptr}; // ndata
float* score{nullptr};
int* eid_mapping{nullptr};
};
// Max
struct EdgeMax {
static __device__ __forceinline__ void ApplyEdge(
int32_t src, int32_t dst, int32_t eid, GData* gdata) {}
static __device__ __forceinline__ void ApplyEdgeReduce(
int32_t src, int32_t dst, int32_t eid, int32_t feat_idx, float& val, GData* gdata) {
const int32_t dim = gdata->dim;
val = max(val, gdata->score[gdata->eid_mapping[eid] * dim + feat_idx]);
}
static __device__ __forceinline__ int32_t GetFeatSize(GData* gdata) {
return gdata->dim;
}
static __device__ __forceinline__ float* GetOutBuf(GData* gdata) {
return gdata->max;
}
};
// minus max, exp and sum
struct MinusMaxExpSum {
static __device__ __forceinline__ void ApplyEdge(
int32_t src, int32_t dst, int32_t eid, GData* gdata) {}
static __device__ __forceinline__ void ApplyEdgeReduce(
int32_t src, int32_t dst, int32_t eid, int32_t feat_idx, float& val, GData* gdata) {
const int dim = gdata->dim;
gdata->score[gdata->eid_mapping[eid] * dim + feat_idx] =
expf(gdata->score[gdata->eid_mapping[eid] * dim + feat_idx] - gdata->max[dst * dim + feat_idx]);
val += gdata->score[gdata->eid_mapping[eid] * dim + feat_idx];
}
static __device__ __forceinline__ int32_t GetFeatSize(GData* gdata) {
return gdata->dim;
}
static __device__ __forceinline__ float* GetOutBuf(GData* gdata) {
return gdata->sum;
}
};
// norm
struct Norm {
static __device__ __forceinline__ void ApplyEdge(
int32_t src, int32_t dst, int32_t eid, GData* gdata) {
int32_t tx = blockIdx.x * blockDim.x + threadIdx.x;
int32_t stride_x = blockDim.x * gridDim.x;
const int32_t dim = gdata->dim;
while (tx < dim) {
gdata->score[eid * dim + tx] /= gdata->sum[dst * dim + tx];
tx += stride_x;
}
}
static __device__ __forceinline__ void ApplyEdgeReduce(
int32_t src, int32_t dst, int32_t eid, int32_t feat_idx, float& val, GData* gdata) {}
static __device__ __forceinline__ int32_t GetFeatSize(GData* gdata) {
return -1;
}
static __device__ __forceinline__ float* GetOutBuf(GData* gdata) {
return nullptr;
}
};
const int32_t D = 8; // number of heads
std::vector<float> GroundTruth(
const std::vector<int32_t>& row_offsets,
const std::vector<int32_t>& column_indices,
std::vector<float> score) {
const size_t N = row_offsets.size() - 1;
std::vector<float> tmp(N * D, 0.);
for (size_t i = 0; i < score.size(); ++i) {
score[i] = ::exp(score[i]);
}
for (size_t u = 0; u < row_offsets.size() - 1; ++u) {
for (int32_t eid = row_offsets[u]; eid < row_offsets[u+1]; ++eid) {
int32_t v = column_indices[eid];
for (int32_t idx = 0; idx < D; ++idx) {
tmp[v * D + idx] += score[eid * D + idx];
}
}
}
for (size_t eid = 0; eid < column_indices.size(); ++eid) {
for (int32_t i = 0; i < D; ++i) {
score[eid * D + i] /= tmp[column_indices[eid] * D + i];
}
}
return score;
}
int main(int argc, char** argv) {
srand(42);
// create graph
std::vector<int32_t> row_offsets, column_indices;
utils::CreateNPGraph(1000, 0.01, row_offsets, column_indices);
const int32_t N = row_offsets.size() - 1;
const int32_t M = column_indices.size();
std::cout << "#nodes: " << N << " #edges: " << M
<< " #feats: " << D << std::endl;
// copy graph to gpu
CUDA_CALL(hipSetDevice(0));
minigun::IntCsr csr;
csr.row_offsets.length = row_offsets.size();
CUDA_CALL(hipMalloc(&csr.row_offsets.data, sizeof(int32_t) * row_offsets.size()));
CUDA_CALL(hipMemcpy(csr.row_offsets.data, &row_offsets[0],
sizeof(int32_t) * row_offsets.size(), hipMemcpyHostToDevice));
csr.column_indices.length = column_indices.size();
CUDA_CALL(hipMalloc(&csr.column_indices.data, sizeof(int32_t) * column_indices.size()));
CUDA_CALL(hipMemcpy(csr.column_indices.data, &column_indices[0],
sizeof(int32_t) * column_indices.size(), hipMemcpyHostToDevice));
// Create raw eid_mapping
minigun::IntArray csr_mapping = utils::arange(0, M, kDLGPU);
// Create csr_t and coo
minigun::IntCsr csr_t;
auto pack = utils::ToReverseCsr(csr, csr_mapping, kDLGPU);
csr_t = pack.first;
minigun::IntArray csr_t_mapping = pack.second;
minigun::IntCoo coo;
coo = utils::ToCoo(csr, kDLGPU);
minigun::IntSpMat spmat = {&csr, &coo, &csr_t};
// Create stream
minigun::advance::RuntimeConfig config;
config.ctx = {kDLGPU, 0};
int nt = utils::_FindNumThreads(D, 32);
config.data_num_threads = nt;
config.data_num_blocks = (M + nt - 1) / nt;
CUDA_CALL(hipStreamCreate(&config.stream));
// Create feature data
std::vector<float> vvec(N * D), evec(M * D);
for (int32_t i = 0; i < N * D; ++i) {
vvec[i] = std::numeric_limits<float>::lowest();
}
for (int32_t i = 0; i < M * D; ++i) {
evec[i] = (float)rand() / RAND_MAX - 0.5;
}
//utils::VecPrint(evec);
// Copy feature data to gpu
GData gdata;
gdata.dim = D;
CUDA_CALL(hipMalloc(&gdata.sum, sizeof(float) * N * D));
CUDA_CALL(hipMemset(gdata.sum, 0, sizeof(float) * N * D));
CUDA_CALL(hipMalloc(&gdata.max, sizeof(float) * N * D));
CUDA_CALL(hipMemcpy(gdata.max, &vvec[0], sizeof(float) * N * D, hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc(&gdata.score, sizeof(float) * M * D));
CUDA_CALL(hipMemcpy(gdata.score, &evec[0], sizeof(float) * M * D, hipMemcpyHostToDevice));
gdata.eid_mapping = csr_t_mapping.data;
CUDA_CALL(hipDeviceSynchronize());
// Compute ground truth
std::vector<float> truth = GroundTruth(row_offsets, column_indices, evec);
//utils::VecPrint(truth);
typedef minigun::advance::Config<minigun::advance::kDst> ConfigDst;
typedef minigun::advance::Config<minigun::advance::kEdge> ConfigEdge;
minigun::advance::Advance<kDLGPU, int32_t, float, ConfigDst, GData, EdgeMax>(
config, spmat, &gdata);
minigun::advance::Advance<kDLGPU, int32_t, float, ConfigDst, GData, MinusMaxExpSum>(
config, spmat, &gdata);
minigun::advance::Advance<kDLGPU, int32_t, float, ConfigEdge, GData, Norm>(
config, spmat, &gdata);
CUDA_CALL(hipDeviceSynchronize());
// verify output
std::vector<float> rst(M * D);
CUDA_CALL(hipMemcpy(&rst[0], gdata.score, sizeof(float) * M * D, hipMemcpyDeviceToHost));
//utils::VecPrint(rst);
std::cout << "Correct? " << utils::VecEqual(truth, rst) << std::endl;
// free
return 0;
}
|
24dfe6378283e24b58767414e3fab584be86e886.cu
|
/* Sample code for edge softmax.*/
#include <iostream>
#include <cstdlib>
#include <limits>
#include <time.h>
#include <cuda_runtime.h>
#include <minigun/minigun.h>
#include "../samples_utils.h"
#include "../samples_io.h"
struct EdgeMax;
struct MinusMaxExpSum;
struct GData {
int32_t dim = 0;
float* sum{nullptr}; // ndata
float* max{nullptr}; // ndata
float* score{nullptr};
int* eid_mapping{nullptr};
};
// Max
struct EdgeMax {
static __device__ __forceinline__ void ApplyEdge(
int32_t src, int32_t dst, int32_t eid, GData* gdata) {}
static __device__ __forceinline__ void ApplyEdgeReduce(
int32_t src, int32_t dst, int32_t eid, int32_t feat_idx, float& val, GData* gdata) {
const int32_t dim = gdata->dim;
val = max(val, gdata->score[gdata->eid_mapping[eid] * dim + feat_idx]);
}
static __device__ __forceinline__ int32_t GetFeatSize(GData* gdata) {
return gdata->dim;
}
static __device__ __forceinline__ float* GetOutBuf(GData* gdata) {
return gdata->max;
}
};
// minus max, exp and sum
struct MinusMaxExpSum {
static __device__ __forceinline__ void ApplyEdge(
int32_t src, int32_t dst, int32_t eid, GData* gdata) {}
static __device__ __forceinline__ void ApplyEdgeReduce(
int32_t src, int32_t dst, int32_t eid, int32_t feat_idx, float& val, GData* gdata) {
const int dim = gdata->dim;
gdata->score[gdata->eid_mapping[eid] * dim + feat_idx] =
expf(gdata->score[gdata->eid_mapping[eid] * dim + feat_idx] - gdata->max[dst * dim + feat_idx]);
val += gdata->score[gdata->eid_mapping[eid] * dim + feat_idx];
}
static __device__ __forceinline__ int32_t GetFeatSize(GData* gdata) {
return gdata->dim;
}
static __device__ __forceinline__ float* GetOutBuf(GData* gdata) {
return gdata->sum;
}
};
// norm
struct Norm {
static __device__ __forceinline__ void ApplyEdge(
int32_t src, int32_t dst, int32_t eid, GData* gdata) {
int32_t tx = blockIdx.x * blockDim.x + threadIdx.x;
int32_t stride_x = blockDim.x * gridDim.x;
const int32_t dim = gdata->dim;
while (tx < dim) {
gdata->score[eid * dim + tx] /= gdata->sum[dst * dim + tx];
tx += stride_x;
}
}
static __device__ __forceinline__ void ApplyEdgeReduce(
int32_t src, int32_t dst, int32_t eid, int32_t feat_idx, float& val, GData* gdata) {}
static __device__ __forceinline__ int32_t GetFeatSize(GData* gdata) {
return -1;
}
static __device__ __forceinline__ float* GetOutBuf(GData* gdata) {
return nullptr;
}
};
const int32_t D = 8; // number of heads
std::vector<float> GroundTruth(
const std::vector<int32_t>& row_offsets,
const std::vector<int32_t>& column_indices,
std::vector<float> score) {
const size_t N = row_offsets.size() - 1;
std::vector<float> tmp(N * D, 0.);
for (size_t i = 0; i < score.size(); ++i) {
score[i] = std::exp(score[i]);
}
for (size_t u = 0; u < row_offsets.size() - 1; ++u) {
for (int32_t eid = row_offsets[u]; eid < row_offsets[u+1]; ++eid) {
int32_t v = column_indices[eid];
for (int32_t idx = 0; idx < D; ++idx) {
tmp[v * D + idx] += score[eid * D + idx];
}
}
}
for (size_t eid = 0; eid < column_indices.size(); ++eid) {
for (int32_t i = 0; i < D; ++i) {
score[eid * D + i] /= tmp[column_indices[eid] * D + i];
}
}
return score;
}
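/* Hedged note (illustrative summary, not additional functionality): GroundTruth above
   computes, for every edge e with destination v and head index i, the edge softmax
       score'[e][i] = exp(score[e][i]) / sum over {e' : dst(e') = v} of exp(score[e'][i]),
   which is what the three GPU passes below (EdgeMax, MinusMaxExpSum, Norm) compute in
   the numerically safer max-subtracted form. */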
int main(int argc, char** argv) {
srand(42);
// create graph
std::vector<int32_t> row_offsets, column_indices;
utils::CreateNPGraph(1000, 0.01, row_offsets, column_indices);
const int32_t N = row_offsets.size() - 1;
const int32_t M = column_indices.size();
std::cout << "#nodes: " << N << " #edges: " << M
<< " #feats: " << D << std::endl;
// copy graph to gpu
CUDA_CALL(cudaSetDevice(0));
minigun::IntCsr csr;
csr.row_offsets.length = row_offsets.size();
CUDA_CALL(cudaMalloc(&csr.row_offsets.data, sizeof(int32_t) * row_offsets.size()));
CUDA_CALL(cudaMemcpy(csr.row_offsets.data, &row_offsets[0],
sizeof(int32_t) * row_offsets.size(), cudaMemcpyHostToDevice));
csr.column_indices.length = column_indices.size();
CUDA_CALL(cudaMalloc(&csr.column_indices.data, sizeof(int32_t) * column_indices.size()));
CUDA_CALL(cudaMemcpy(csr.column_indices.data, &column_indices[0],
sizeof(int32_t) * column_indices.size(), cudaMemcpyHostToDevice));
// Create raw eid_mapping
minigun::IntArray csr_mapping = utils::arange(0, M, kDLGPU);
// Create csr_t and coo
minigun::IntCsr csr_t;
auto pack = utils::ToReverseCsr(csr, csr_mapping, kDLGPU);
csr_t = pack.first;
minigun::IntArray csr_t_mapping = pack.second;
minigun::IntCoo coo;
coo = utils::ToCoo(csr, kDLGPU);
minigun::IntSpMat spmat = {&csr, &coo, &csr_t};
// Create stream
minigun::advance::RuntimeConfig config;
config.ctx = {kDLGPU, 0};
int nt = utils::_FindNumThreads(D, 32);
config.data_num_threads = nt;
config.data_num_blocks = (M + nt - 1) / nt;
CUDA_CALL(cudaStreamCreate(&config.stream));
// Create feature data
std::vector<float> vvec(N * D), evec(M * D);
for (int32_t i = 0; i < N * D; ++i) {
vvec[i] = std::numeric_limits<float>::lowest();
}
for (int32_t i = 0; i < M * D; ++i) {
evec[i] = (float)rand() / RAND_MAX - 0.5;
}
//utils::VecPrint(evec);
// Copy feature data to gpu
GData gdata;
gdata.dim = D;
CUDA_CALL(cudaMalloc(&gdata.sum, sizeof(float) * N * D));
CUDA_CALL(cudaMemset(gdata.sum, 0, sizeof(float) * N * D));
CUDA_CALL(cudaMalloc(&gdata.max, sizeof(float) * N * D));
CUDA_CALL(cudaMemcpy(gdata.max, &vvec[0], sizeof(float) * N * D, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc(&gdata.score, sizeof(float) * M * D));
CUDA_CALL(cudaMemcpy(gdata.score, &evec[0], sizeof(float) * M * D, cudaMemcpyHostToDevice));
gdata.eid_mapping = csr_t_mapping.data;
CUDA_CALL(cudaDeviceSynchronize());
// Compute ground truth
std::vector<float> truth = GroundTruth(row_offsets, column_indices, evec);
//utils::VecPrint(truth);
typedef minigun::advance::Config<minigun::advance::kDst> ConfigDst;
typedef minigun::advance::Config<minigun::advance::kEdge> ConfigEdge;
minigun::advance::Advance<kDLGPU, int32_t, float, ConfigDst, GData, EdgeMax>(
config, spmat, &gdata);
  minigun::advance::Advance<kDLGPU, int32_t, float, ConfigDst, GData, MinusMaxExpSum>(
config, spmat, &gdata);
minigun::advance::Advance<kDLGPU, int32_t, float, ConfigEdge, GData, Norm>(
config, spmat, &gdata);
CUDA_CALL(cudaDeviceSynchronize());
// verify output
std::vector<float> rst(M * D);
CUDA_CALL(cudaMemcpy(&rst[0], gdata.score, sizeof(float) * M * D, cudaMemcpyDeviceToHost));
//utils::VecPrint(rst);
std::cout << "Correct? " << utils::VecEqual(truth, rst) << std::endl;
  // free device memory
  CUDA_CALL(cudaFree(gdata.sum));
  CUDA_CALL(cudaFree(gdata.max));
  CUDA_CALL(cudaFree(gdata.score));
  return 0;
}
|
c54bbcdba4ef92ca0a1440e5674e15296652bdc4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#include <time.h>
#include <math.h>
#include <string.h>
#include <limits.h>
static void HandleError(hipError_t err, const char *file, int line) {
if (err != hipSuccess) {
printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define WIDTH 256
#define HEIGHT 256
#define MR 3
#define MC 3
#define MASK_N 3
const int N = 1024;
const int threadsPerBlock = 1024;
const int blocksperGrid = 5;
int MaskSobelX[3][3] = { { -1,0,1 },
{ -2,0,2 },
{ -1,0,1 } };
int MaskSobelY[3][3] = { { 1,2,1 },
{ 0,0,0 },
{ -1,-2,-1 } };
__global__ void Sobel_Conv(unsigned char *d_InImg, int *d_ConvX, int *d_ConvY, int width, int height, int mr, int mc, int size) {
int outputX = 0, outputY = 0;
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
int i, j;
int MaskSobelX[3][3] = { { -1,0,1 },
{ -2,0,2 },
{ -1,0,1 } };
int MaskSobelY[3][3] = { { 1,2,1 },
{ 0,0,0 },
{ -1,-2,-1 } };
if ((0 < row && row < height -1) && (0 < col && col < width-1)) {
for (i = 0; i < mr; i++) {
for (j = 0; j < mc; j++) {
outputX += MaskSobelX[i][j] * d_InImg[(row + i - 1) * width + (col + j - 1)];
outputY += MaskSobelY[i][j] * d_InImg[(row + i - 1) * width + (col + j - 1)];
}
}
d_ConvX[row*width + col] = outputX;
d_ConvY[row*width + col] = outputY;
}
else {
d_ConvX[row*width + col] = 0;
d_ConvY[row*width + col] = 0;
}
}
__global__ void Detect_Edge(unsigned char *d_OrgImg, int *d_ConvX, int *d_ConvY,int *d_pImgSobel ,int width, int height,int *d_min,int *d_max) {
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
int temp1, temp2;
if ((row != 1 && row != height - 1) && (col != 1 && col != width - 1)) {
if (d_ConvX[row*width + col] < 0)
d_ConvX[row*width + col] = -d_ConvX[row*width + col];
if (d_ConvY[row*width + col] < 0)
d_ConvY[row*width + col] = -d_ConvY[row*width + col];
d_pImgSobel[row*width + col] = d_ConvX[row*width + col] + d_ConvY[row*width + col];
if (d_pImgSobel[row*width + col] < *d_min)
*d_min = d_pImgSobel[row*width + col];
if (d_pImgSobel[row*width + col] > *d_max)
*d_max = d_pImgSobel[row*width + col];
}
__syncthreads();
temp1 = (float)(255.0 / (*d_max - *d_min));
temp2 = (float)(-255.0**d_min / (*d_max - *d_min));
if ((row != 1 && row != height - 1) && (col != 1 && col != width - 1))
d_OrgImg[row*width + col] = (unsigned char)(temp1*d_pImgSobel[row*width + col] + temp2);
}
int main()
{
int min, max;
int *dev_min, *dev_max;
int *ConvX, *ConvY, *pImgSobel;
int *dev_ConvX, *dev_ConvY,*dev_pImgSobel;
float temp1, temp2;
unsigned char *InImg;
unsigned char *dev_InImg;
unsigned char *OrgImg;
unsigned char *dev_OrgImg;
int newValue, Xval, Yval;
int mr, mc;
clock_t PL_start, PL_end;
	min = INT_MAX;   // sentinel: any Sobel magnitude is smaller
	max = INT_MIN;   // sentinel: any Sobel magnitude is larger
InImg = (unsigned char*)malloc(sizeof(unsigned char)*(HEIGHT*WIDTH));
OrgImg = (unsigned char*)malloc(sizeof(unsigned char)*(HEIGHT*WIDTH));
FILE *infile = fopen("256x256.raw", "rb");
fread(InImg, sizeof(unsigned char), HEIGHT * WIDTH, infile);
fclose(infile);
pImgSobel = (int*)malloc(sizeof(int)*(HEIGHT*WIDTH));
ConvX = (int*)malloc(sizeof(int)*(HEIGHT*WIDTH));
ConvY = (int*)malloc(sizeof(int)*(HEIGHT*WIDTH));
memset(ConvX, 0, (HEIGHT*WIDTH) * sizeof(int));
memset(ConvY, 0, (HEIGHT*WIDTH) * sizeof(int));
memset(OrgImg, 0, (HEIGHT*WIDTH) * sizeof(unsigned char));
memset(pImgSobel, 0, (HEIGHT*WIDTH) * sizeof(int));
PL_start = clock();
HANDLE_ERROR(hipMalloc((void**)&dev_InImg, WIDTH * HEIGHT * sizeof(unsigned char)));
HANDLE_ERROR(hipMalloc((void**)&dev_ConvX, WIDTH * HEIGHT * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_ConvY, WIDTH * HEIGHT * sizeof(int)));
HANDLE_ERROR(hipMemcpy(dev_InImg, InImg, WIDTH * HEIGHT * sizeof(unsigned char), hipMemcpyHostToDevice));
Sobel_Conv << <blocksperGrid, threadsPerBlock >> > (dev_InImg, dev_ConvX, dev_ConvY, WIDTH, HEIGHT, MR, MC, WIDTH * HEIGHT);
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipMemcpy(ConvX, dev_ConvX, WIDTH * HEIGHT * sizeof(int), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(ConvY, dev_ConvY, WIDTH * HEIGHT * sizeof(int), hipMemcpyDeviceToHost));
for (int i = 1000; i < HEIGHT; i++)
for (int j = 1000; j < WIDTH; j++)
printf("[%d][%d] %d %d\n",i,j,ConvX[i*WIDTH + j],ConvY[i*WIDTH + j]);
/*for (int i = 1; i < HEIGHT - 1; i++) {
for (int j = 1; j < WIDTH - 1; j++) {
Xval = 0;
Yval = 0;
for (mr = 0; mr < MASK_N; mr++) {
for (mc = 0; mc < MASK_N; mc++) {
Xval += (MaskSobelX[mr][mc] * InImg[(i + mr - 1) * WIDTH + (j + mc - 1)]);
Yval += (MaskSobelY[mr][mc] * InImg[(i + mr - 1) * WIDTH + (j + mc - 1)]);
}
}
ConvX[i*WIDTH + j] = Xval;
ConvY[i*WIDTH + j] = Yval;
}
}*/
for (int i = 1; i < HEIGHT - 1; i++) {
for (int j = 1; j < WIDTH - 1; j++) {
temp1 = ConvX[i*WIDTH + j];
temp2 = ConvY[i*WIDTH + j];
if (temp1 < 0)
temp1 = -temp1;
if (temp2 < 0)
temp2 = -temp2;
pImgSobel[i*WIDTH + j] = temp1 + temp2;
}
}
	// search for the minimum and maximum values
for (int i = 1; i < HEIGHT - 1; i++) {
for (int j = 1; j < WIDTH - 1; j++) {
if (pImgSobel[i*WIDTH + j]<min)
min = pImgSobel[i*WIDTH + j];
if (pImgSobel[i*WIDTH + j]>max)
max = pImgSobel[i*WIDTH + j];
}
}
	// precompute the scaling constants for the conversion
temp1 = (float)(255.0 / (max - min));
temp2 = (float)(-255.0*min / (max - min));
for (int i = 1; i<HEIGHT - 1; i++) {
for (int j = 1; j<WIDTH - 1; j++) {
			//map values in [min,max] to [0,255]
newValue = pImgSobel[i*WIDTH + j];
newValue = temp1 * newValue + temp2;
OrgImg[i*WIDTH + j] = (unsigned char)newValue;
}
}
/*HANDLE_ERROR(hipMalloc((void**)&dev_OrgImg, WIDTH * HEIGHT * sizeof(unsigned char)));
HANDLE_ERROR(hipMalloc((void**)&dev_ConvX, WIDTH * HEIGHT * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_ConvY, WIDTH * HEIGHT * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_pImgSobel, WIDTH * HEIGHT * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_min, HEIGHT * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_max, HEIGHT * sizeof(int)));
HANDLE_ERROR(hipMemset(dev_min,0,sizeof(int)));
HANDLE_ERROR(hipMemset(dev_max, 0, sizeof(int)));
HANDLE_ERROR(hipMemcpy(dev_ConvX, ConvX, WIDTH * HEIGHT * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_ConvY, ConvY, WIDTH * HEIGHT * sizeof(int), hipMemcpyHostToDevice));
Detect_Edge << <HEIGHT, WIDTH>> > (dev_OrgImg, dev_ConvX, dev_ConvY, dev_pImgSobel, WIDTH, HEIGHT,dev_min,dev_max);
HANDLE_ERROR(hipMemcpy(OrgImg, dev_OrgImg, WIDTH * HEIGHT * sizeof(unsigned char), hipMemcpyDeviceToHost));*/
PL_end = clock();
FILE *outfile = fopen("CS.raw", "wb");
fwrite(OrgImg, sizeof(unsigned char), HEIGHT * WIDTH, outfile);
fclose(outfile);
printf("Finish\n");
printf("Block per Grid : %d\n",blocksperGrid);
printf("Thread per Block : %d\n",threadsPerBlock);
printf("Average Parallel Runtime : %f\n", (float)(PL_end - PL_start) / CLOCKS_PER_SEC);
hipFree(dev_ConvX);
hipFree(dev_ConvY);
//hipFree(dev_OrgImg);
//hipFree(dev_pImgSobel);
//hipFree(dev_min);
//hipFree(dev_max);
free(InImg);
free(OrgImg);
free(pImgSobel);
free(ConvX);
free(ConvY);
}
|
c54bbcdba4ef92ca0a1440e5674e15296652bdc4.cu
|
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <time.h>
#include <math.h>
#include <string.h>
#include <limits.h>
static void HandleError(cudaError_t err, const char *file, int line) {
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define WIDTH 256
#define HEIGHT 256
#define MR 3
#define MC 3
#define MASK_N 3
const int N = 1024;
const int threadsPerBlock = 1024;
const int blocksperGrid = 5;
int MaskSobelX[3][3] = { { -1,0,1 },
{ -2,0,2 },
{ -1,0,1 } };
int MaskSobelY[3][3] = { { 1,2,1 },
{ 0,0,0 },
{ -1,-2,-1 } };
__global__ void Sobel_Conv(unsigned char *d_InImg, int *d_ConvX, int *d_ConvY, int width, int height, int mr, int mc, int size) {
int outputX = 0, outputY = 0;
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
int i, j;
int MaskSobelX[3][3] = { { -1,0,1 },
{ -2,0,2 },
{ -1,0,1 } };
int MaskSobelY[3][3] = { { 1,2,1 },
{ 0,0,0 },
{ -1,-2,-1 } };
if ((0 < row && row < height -1) && (0 < col && col < width-1)) {
for (i = 0; i < mr; i++) {
for (j = 0; j < mc; j++) {
outputX += MaskSobelX[i][j] * d_InImg[(row + i - 1) * width + (col + j - 1)];
outputY += MaskSobelY[i][j] * d_InImg[(row + i - 1) * width + (col + j - 1)];
}
}
d_ConvX[row*width + col] = outputX;
d_ConvY[row*width + col] = outputY;
}
else {
d_ConvX[row*width + col] = 0;
d_ConvY[row*width + col] = 0;
}
}
__global__ void Detect_Edge(unsigned char *d_OrgImg, int *d_ConvX, int *d_ConvY,int *d_pImgSobel ,int width, int height,int *d_min,int *d_max) {
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
int temp1, temp2;
if ((row != 1 && row != height - 1) && (col != 1 && col != width - 1)) {
if (d_ConvX[row*width + col] < 0)
d_ConvX[row*width + col] = -d_ConvX[row*width + col];
if (d_ConvY[row*width + col] < 0)
d_ConvY[row*width + col] = -d_ConvY[row*width + col];
d_pImgSobel[row*width + col] = d_ConvX[row*width + col] + d_ConvY[row*width + col];
if (d_pImgSobel[row*width + col] < *d_min)
*d_min = d_pImgSobel[row*width + col];
if (d_pImgSobel[row*width + col] > *d_max)
*d_max = d_pImgSobel[row*width + col];
}
__syncthreads();
temp1 = (float)(255.0 / (*d_max - *d_min));
temp2 = (float)(-255.0**d_min / (*d_max - *d_min));
if ((row != 1 && row != height - 1) && (col != 1 && col != width - 1))
d_OrgImg[row*width + col] = (unsigned char)(temp1*d_pImgSobel[row*width + col] + temp2);
}
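// --- Illustrative sketch (not part of the original file) ------------------
// Detect_Edge above updates *d_min and *d_max from many threads with plain
// loads and stores, so the reduction is racy.  A conventional fix is to
// compute the range with atomics in a separate kernel and normalize
// afterwards.  MinMax_Reduce below is a hypothetical kernel showing only
// that first step; it assumes d_min and d_max were initialized on the host
// to INT_MAX and INT_MIN before launch.
__global__ void MinMax_Reduce(const int *d_pImgSobel, int width, int height, int *d_min, int *d_max) {
	int col = threadIdx.x + blockIdx.x * blockDim.x;
	int row = threadIdx.y + blockIdx.y * blockDim.y;
	// skip the one-pixel border, matching the host-side loops in main()
	if (row >= 1 && row < height - 1 && col >= 1 && col < width - 1) {
		int v = d_pImgSobel[row * width + col];
		atomicMin(d_min, v);	// global atomic minimum over interior pixels
		atomicMax(d_max, v);	// global atomic maximum over interior pixels
	}
}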
int main()
{
int min, max;
int *dev_min, *dev_max;
int *ConvX, *ConvY, *pImgSobel;
int *dev_ConvX, *dev_ConvY,*dev_pImgSobel;
float temp1, temp2;
unsigned char *InImg;
unsigned char *dev_InImg;
unsigned char *OrgImg;
unsigned char *dev_OrgImg;
int newValue, Xval, Yval;
int mr, mc;
clock_t PL_start, PL_end;
	min = INT_MAX;   // sentinel: any Sobel magnitude is smaller
	max = INT_MIN;   // sentinel: any Sobel magnitude is larger
InImg = (unsigned char*)malloc(sizeof(unsigned char)*(HEIGHT*WIDTH));
OrgImg = (unsigned char*)malloc(sizeof(unsigned char)*(HEIGHT*WIDTH));
FILE *infile = fopen("256x256.raw", "rb");
fread(InImg, sizeof(unsigned char), HEIGHT * WIDTH, infile);
fclose(infile);
pImgSobel = (int*)malloc(sizeof(int)*(HEIGHT*WIDTH));
ConvX = (int*)malloc(sizeof(int)*(HEIGHT*WIDTH));
ConvY = (int*)malloc(sizeof(int)*(HEIGHT*WIDTH));
memset(ConvX, 0, (HEIGHT*WIDTH) * sizeof(int));
memset(ConvY, 0, (HEIGHT*WIDTH) * sizeof(int));
memset(OrgImg, 0, (HEIGHT*WIDTH) * sizeof(unsigned char));
memset(pImgSobel, 0, (HEIGHT*WIDTH) * sizeof(int));
PL_start = clock();
HANDLE_ERROR(cudaMalloc((void**)&dev_InImg, WIDTH * HEIGHT * sizeof(unsigned char)));
HANDLE_ERROR(cudaMalloc((void**)&dev_ConvX, WIDTH * HEIGHT * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_ConvY, WIDTH * HEIGHT * sizeof(int)));
HANDLE_ERROR(cudaMemcpy(dev_InImg, InImg, WIDTH * HEIGHT * sizeof(unsigned char), cudaMemcpyHostToDevice));
Sobel_Conv << <blocksperGrid, threadsPerBlock >> > (dev_InImg, dev_ConvX, dev_ConvY, WIDTH, HEIGHT, MR, MC, WIDTH * HEIGHT);
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaMemcpy(ConvX, dev_ConvX, WIDTH * HEIGHT * sizeof(int), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(ConvY, dev_ConvY, WIDTH * HEIGHT * sizeof(int), cudaMemcpyDeviceToHost));
for (int i = 1000; i < HEIGHT; i++)
for (int j = 1000; j < WIDTH; j++)
printf("[%d][%d] %d %d\n",i,j,ConvX[i*WIDTH + j],ConvY[i*WIDTH + j]);
/*for (int i = 1; i < HEIGHT - 1; i++) {
for (int j = 1; j < WIDTH - 1; j++) {
Xval = 0;
Yval = 0;
for (mr = 0; mr < MASK_N; mr++) {
for (mc = 0; mc < MASK_N; mc++) {
Xval += (MaskSobelX[mr][mc] * InImg[(i + mr - 1) * WIDTH + (j + mc - 1)]);
Yval += (MaskSobelY[mr][mc] * InImg[(i + mr - 1) * WIDTH + (j + mc - 1)]);
}
}
ConvX[i*WIDTH + j] = Xval;
ConvY[i*WIDTH + j] = Yval;
}
}*/
for (int i = 1; i < HEIGHT - 1; i++) {
for (int j = 1; j < WIDTH - 1; j++) {
temp1 = ConvX[i*WIDTH + j];
temp2 = ConvY[i*WIDTH + j];
if (temp1 < 0)
temp1 = -temp1;
if (temp2 < 0)
temp2 = -temp2;
pImgSobel[i*WIDTH + j] = temp1 + temp2;
}
}
	// search for the minimum and maximum values
for (int i = 1; i < HEIGHT - 1; i++) {
for (int j = 1; j < WIDTH - 1; j++) {
if (pImgSobel[i*WIDTH + j]<min)
min = pImgSobel[i*WIDTH + j];
if (pImgSobel[i*WIDTH + j]>max)
max = pImgSobel[i*WIDTH + j];
}
}
	// precompute the scaling constants for the conversion
temp1 = (float)(255.0 / (max - min));
temp2 = (float)(-255.0*min / (max - min));
for (int i = 1; i<HEIGHT - 1; i++) {
for (int j = 1; j<WIDTH - 1; j++) {
			//map values in [min,max] to [0,255]
newValue = pImgSobel[i*WIDTH + j];
newValue = temp1 * newValue + temp2;
OrgImg[i*WIDTH + j] = (unsigned char)newValue;
}
}
/*HANDLE_ERROR(cudaMalloc((void**)&dev_OrgImg, WIDTH * HEIGHT * sizeof(unsigned char)));
HANDLE_ERROR(cudaMalloc((void**)&dev_ConvX, WIDTH * HEIGHT * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_ConvY, WIDTH * HEIGHT * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_pImgSobel, WIDTH * HEIGHT * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_min, HEIGHT * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_max, HEIGHT * sizeof(int)));
HANDLE_ERROR(cudaMemset(dev_min,0,sizeof(int)));
HANDLE_ERROR(cudaMemset(dev_max, 0, sizeof(int)));
HANDLE_ERROR(cudaMemcpy(dev_ConvX, ConvX, WIDTH * HEIGHT * sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_ConvY, ConvY, WIDTH * HEIGHT * sizeof(int), cudaMemcpyHostToDevice));
Detect_Edge << <HEIGHT, WIDTH>> > (dev_OrgImg, dev_ConvX, dev_ConvY, dev_pImgSobel, WIDTH, HEIGHT,dev_min,dev_max);
HANDLE_ERROR(cudaMemcpy(OrgImg, dev_OrgImg, WIDTH * HEIGHT * sizeof(unsigned char), cudaMemcpyDeviceToHost));*/
PL_end = clock();
FILE *outfile = fopen("CS.raw", "wb");
fwrite(OrgImg, sizeof(unsigned char), HEIGHT * WIDTH, outfile);
fclose(outfile);
printf("Finish\n");
printf("Block per Grid : %d\n",blocksperGrid);
printf("Thread per Block : %d\n",threadsPerBlock);
printf("Average Parallel Runtime : %f\n", (float)(PL_end - PL_start) / CLOCKS_PER_SEC);
cudaFree(dev_ConvX);
cudaFree(dev_ConvY);
//cudaFree(dev_OrgImg);
//cudaFree(dev_pImgSobel);
//cudaFree(dev_min);
//cudaFree(dev_max);
free(InImg);
free(OrgImg);
free(pImgSobel);
free(ConvX);
free(ConvY);
}
|
3c2841bc55a803514df6fbbef518a7994d842319.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <random>
#include "utils.hpp"
template <class T, class ErrorCorrection>
constexpr double error_threshold = 0.0;
template <>
constexpr double error_threshold<half , mtk::wmma::tcec::with_ec > = 1e-5;
template <>
constexpr double error_threshold<nvcuda::wmma::precision::tf32, mtk::wmma::tcec::with_ec > = 1e-5;
template <>
constexpr double error_threshold<half , mtk::wmma::tcec::without_ec> = 1e-2;
template <>
constexpr double error_threshold<nvcuda::wmma::precision::tf32, mtk::wmma::tcec::without_ec> = 1e-2;
template <>
constexpr double error_threshold<float , mtk::wmma::tcec::without_ec> = 1e-6;
template <unsigned N, class T, class Policy>
__global__ void matvec_kernel(float* const y_ptr, const float* const a_ptr, const float* const x_ptr) {
__shared__ float smem[N * N];
mtk::test_utils::fill_zero(smem, N * N);
mtk::wmma::tcec::fragment<nvcuda::wmma::matrix_a , N, N, N, T, nvcuda::wmma::col_major, Policy> frag_a;
mtk::wmma::tcec::fragment<nvcuda::wmma::matrix_b , N, N, N, T, nvcuda::wmma::col_major, Policy> frag_x;
mtk::wmma::tcec::fragment<nvcuda::wmma::accumulator, N, N, N, T, void , Policy> frag_y;
// Load A
mtk::test_utils::copy_matrix(smem, N, a_ptr, N, N, N);
mtk::wmma::tcec::load_matrix_sync(frag_a, smem, N);
// Load X
mtk::test_utils::copy_matrix(smem, N, x_ptr, N, N, 1);
mtk::wmma::tcec::fill_zero(frag_x);
mtk::wmma::tcec::load_vector(frag_x, smem);
// mma
mtk::wmma::tcec::mma_sync(frag_y, frag_a, frag_x);
// Store D
mtk::wmma::tcec::store_vector(smem, frag_y, nvcuda::wmma::mem_col_major);
mtk::test_utils::copy_matrix(y_ptr, N, smem, N, N, 1);
}
template <unsigned N, class T, class Policy>
void test_matvec() {
float *hX, *hY, *hA;
hipHostMalloc(&hX, N * sizeof(float));
hipHostMalloc(&hY, N * sizeof(float));
hipHostMalloc(&hA, N * N * sizeof(float));
std::mt19937 mt(std::random_device{}());
std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
for (unsigned i = 0; i < N * N; i++) {
hA[i] = dist(mt);
}
for (unsigned i = 0; i < N; i++) {
hX[i] = dist(mt);
}
hipDeviceSynchronize();
hipLaunchKernelGGL(( matvec_kernel<N, T, Policy>), dim3(1), dim3(mtk::test_utils::warp_size), 0, 0, hY, hA, hX);
hipDeviceSynchronize();
double max_error = 0.;
for (unsigned n = 0; n < N; n++) {
double cor_d = 0.;
for (unsigned k = 0; k < N; k++) {
cor_d += static_cast<double>(hA[k * N + n]) * static_cast<double>(hX[k]);
}
max_error = ::max(max_error, std::abs(cor_d - hY[n]));
}
std::printf(
"[Type:%5s, N:%3u, Policy<%7s,%9s,%2u,%2u,%2u>] max_error: %e (%6s)\n",
mtk::test_utils::to_string<T>().c_str(),
N,
mtk::test_utils::to_string<typename Policy::op>().c_str(),
std::is_same<typename Policy::error_correction, mtk::wmma::tcec::with_ec>::value ? "{w/ ec}" : "{w/o ec}",
Policy::m,
Policy::n,
Policy::k,
max_error,
(max_error < error_threshold<T, typename Policy::error_correction> ? "PASSED" : "FAILED")
);
hipHostFree(hA);
hipHostFree(hX);
hipHostFree(hY);
}
int main() {
// wmma FP16 test
test_matvec<32, half, typename mtk::wmma::tcec::detail::default_policy<half, mtk::wmma::tcec::with_ec , mtk::wmma::tcec::op_wmma>::type>();
test_matvec<32, half, typename mtk::wmma::tcec::detail::default_policy<half, mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_wmma>::type>();
#ifdef TEST_SIMT
// simt test
test_matvec<32, float, typename mtk::wmma::tcec::detail::default_policy<float, mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_simt>::type>();
#endif
#ifdef TEST_TF32
// wmma TF32 test
test_matvec<32, nvcuda::wmma::precision::tf32, typename mtk::wmma::tcec::detail::default_policy<nvcuda::wmma::precision::tf32, mtk::wmma::tcec::with_ec , mtk::wmma::tcec::op_wmma>::type>();
test_matvec<32, nvcuda::wmma::precision::tf32, typename mtk::wmma::tcec::detail::default_policy<nvcuda::wmma::precision::tf32, mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_wmma>::type>();
#endif
}
|
3c2841bc55a803514df6fbbef518a7994d842319.cu
|
#include <iostream>
#include <random>
#include "utils.hpp"
template <class T, class ErrorCorrection>
constexpr double error_threshold = 0.0;
template <>
constexpr double error_threshold<half , mtk::wmma::tcec::with_ec > = 1e-5;
template <>
constexpr double error_threshold<nvcuda::wmma::precision::tf32, mtk::wmma::tcec::with_ec > = 1e-5;
template <>
constexpr double error_threshold<half , mtk::wmma::tcec::without_ec> = 1e-2;
template <>
constexpr double error_threshold<nvcuda::wmma::precision::tf32, mtk::wmma::tcec::without_ec> = 1e-2;
template <>
constexpr double error_threshold<float , mtk::wmma::tcec::without_ec> = 1e-6;
template <unsigned N, class T, class Policy>
__global__ void matvec_kernel(float* const y_ptr, const float* const a_ptr, const float* const x_ptr) {
__shared__ float smem[N * N];
mtk::test_utils::fill_zero(smem, N * N);
mtk::wmma::tcec::fragment<nvcuda::wmma::matrix_a , N, N, N, T, nvcuda::wmma::col_major, Policy> frag_a;
mtk::wmma::tcec::fragment<nvcuda::wmma::matrix_b , N, N, N, T, nvcuda::wmma::col_major, Policy> frag_x;
mtk::wmma::tcec::fragment<nvcuda::wmma::accumulator, N, N, N, T, void , Policy> frag_y;
// Load A
mtk::test_utils::copy_matrix(smem, N, a_ptr, N, N, N);
mtk::wmma::tcec::load_matrix_sync(frag_a, smem, N);
// Load X
mtk::test_utils::copy_matrix(smem, N, x_ptr, N, N, 1);
mtk::wmma::tcec::fill_zero(frag_x);
mtk::wmma::tcec::load_vector(frag_x, smem);
// mma
mtk::wmma::tcec::mma_sync(frag_y, frag_a, frag_x);
// Store D
mtk::wmma::tcec::store_vector(smem, frag_y, nvcuda::wmma::mem_col_major);
mtk::test_utils::copy_matrix(y_ptr, N, smem, N, N, 1);
}
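// --- Illustrative sketch (not part of the original test) ------------------
// For comparison with the fragment-based kernel above, this is a plain CUDA
// matrix-vector product y = A * x for a column-major N x N matrix with one
// thread per output row.  naive_matvec_kernel is a hypothetical reference
// helper and is not part of the mtk::wmma API.
template <unsigned N>
__global__ void naive_matvec_kernel(float* const y_ptr, const float* const a_ptr, const float* const x_ptr) {
	const unsigned row = blockIdx.x * blockDim.x + threadIdx.x;
	if (row >= N) return;
	float acc = 0.0f;
	for (unsigned k = 0; k < N; k++) {
		// column-major layout: element (row, k) is stored at a_ptr[k * N + row]
		acc += a_ptr[k * N + row] * x_ptr[k];
	}
	y_ptr[row] = acc;
}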
template <unsigned N, class T, class Policy>
void test_matvec() {
float *hX, *hY, *hA;
cudaMallocHost(&hX, N * sizeof(float));
cudaMallocHost(&hY, N * sizeof(float));
cudaMallocHost(&hA, N * N * sizeof(float));
std::mt19937 mt(std::random_device{}());
std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
for (unsigned i = 0; i < N * N; i++) {
hA[i] = dist(mt);
}
for (unsigned i = 0; i < N; i++) {
hX[i] = dist(mt);
}
cudaDeviceSynchronize();
matvec_kernel<N, T, Policy><<<1, mtk::test_utils::warp_size>>>(hY, hA, hX);
cudaDeviceSynchronize();
double max_error = 0.;
for (unsigned n = 0; n < N; n++) {
double cor_d = 0.;
for (unsigned k = 0; k < N; k++) {
cor_d += static_cast<double>(hA[k * N + n]) * static_cast<double>(hX[k]);
}
max_error = std::max(max_error, std::abs(cor_d - hY[n]));
}
std::printf(
"[Type:%5s, N:%3u, Policy<%7s,%9s,%2u,%2u,%2u>] max_error: %e (%6s)\n",
mtk::test_utils::to_string<T>().c_str(),
N,
mtk::test_utils::to_string<typename Policy::op>().c_str(),
std::is_same<typename Policy::error_correction, mtk::wmma::tcec::with_ec>::value ? "{w/ ec}" : "{w/o ec}",
Policy::m,
Policy::n,
Policy::k,
max_error,
(max_error < error_threshold<T, typename Policy::error_correction> ? "PASSED" : "FAILED")
);
cudaFreeHost(hA);
cudaFreeHost(hX);
cudaFreeHost(hY);
}
int main() {
// wmma FP16 test
test_matvec<32, half, typename mtk::wmma::tcec::detail::default_policy<half, mtk::wmma::tcec::with_ec , mtk::wmma::tcec::op_wmma>::type>();
test_matvec<32, half, typename mtk::wmma::tcec::detail::default_policy<half, mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_wmma>::type>();
#ifdef TEST_SIMT
// simt test
test_matvec<32, float, typename mtk::wmma::tcec::detail::default_policy<float, mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_simt>::type>();
#endif
#ifdef TEST_TF32
// wmma TF32 test
test_matvec<32, nvcuda::wmma::precision::tf32, typename mtk::wmma::tcec::detail::default_policy<nvcuda::wmma::precision::tf32, mtk::wmma::tcec::with_ec , mtk::wmma::tcec::op_wmma>::type>();
test_matvec<32, nvcuda::wmma::precision::tf32, typename mtk::wmma::tcec::detail::default_policy<nvcuda::wmma::precision::tf32, mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_wmma>::type>();
#endif
}
|
2127daa2ecb0b89b4e71a9672b2520a15329d11d.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* kernel.cu: the implementations for the methods stated in kernel.h. Each operation has one (or more than one) host method(s) to call the kernel and one
* (or more than one) global kernel method(s) to run the parallel algorithm on GPU.
*/
#include <math.h>
#include <iostream>
#include "hip/hip_runtime.h"
#include "kernel.h"
#include "dev_array.h"
#include <stdlib.h>
//reference: https://www.quantstart.com/articles/Matrix-Matrix-Multiplication-on-the-GPU-with-Nvidia-CUDA/
using namespace std;
/**
* Do the atomic multiplication based on the atomicCAS given by CUDA
*
* @param address the address where the target variable is at
* val the multiplier
* @return the old value before being updated
*/
__device__ float atomicMul(float* address, float val)
{
int* address_as_i = (int*)address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = atomicCAS(address_as_i, assumed,
__float_as_int(val *
__int_as_float(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __int_as_float(old);
}
/**
* matrixAdditionKernel
*
* @param A the first Matrix (N*N)
* B the second Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
__global__ void matrixAdditionKernel(float* A, float* B, float* C, int N) {
int ROW = blockIdx.y * blockDim.y + threadIdx.y;
int COL = blockIdx.x * blockDim.x + threadIdx.x;
//there may be some redundant threads which won't be assigned any task
if (ROW < N && COL < N) {
// each thread computes one element of the block sub-matrix
C[ROW * N + COL] = A[ROW * N + COL]+B[ROW * N + COL];
		//printf("C[%d]==A[%d]+B[%d], %f\n", ROW * N + COL, ROW * N + COL, ROW * N + COL, C[ROW * N + COL]);
}
}
/**
* matrixSubtractionKernel
*
* @param A the first Matrix (N*N)
* B the second Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
__global__ void matrixSubtractionKernel(float* A, float* B, float* C, int N) {
int ROW = blockIdx.y * blockDim.y + threadIdx.y;
int COL = blockIdx.x * blockDim.x + threadIdx.x;
//there may be some redundant threads which won't be assigned any task
if (ROW < N && COL < N) {
// each thread computes one element of the block sub-matrix
C[ROW * N + COL] = A[ROW * N + COL] - B[ROW * N + COL];
}
}
/**
* scalarMultiplicationKernel
*
* @param A the first Matrix (N*N)
* scalar the multiplier
* C the result Matrix (N*N)
* N the side of the array
*/
__global__ void scalarMultiplicationKernel(float* A, float scalar, float* C, int N) {
int ROW = blockIdx.y * blockDim.y + threadIdx.y;
int COL = blockIdx.x * blockDim.x + threadIdx.x;
//there may be some redundant threads which won't be assigned any task
if (ROW < N && COL < N) {
// each thread computes one element of the block sub-matrix
C[ROW * N + COL] = A[ROW * N + COL] * scalar;
}
}
/**
* matrixMultiplicationKernel
*
* @param A the first Matrix (N*N)
* B the second Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
__global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int N) {
extern __shared__ float local[];
if(threadIdx.x==0 && threadIdx.y==0){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
local[i*N+j]=A[i*N+j];
}
}
for(int i=N;i<N*2;i++){
for(int j=0;j<N;j++){
local[i*N+j]=B[(i-N)*N+j];
}
}
}
__syncthreads();
int ROW = blockIdx.y * blockDim.y + threadIdx.y;
int COL = blockIdx.x * blockDim.x + threadIdx.x;
float tmpSum = 0;
//reduce: to be updated (limited by the total number of threads that can run concurrently, we didn't implement reduce method here.)
//there may be some redundant threads which won't be assigned any task
if (ROW < N && COL < N) {
// each thread computes one element of the block sub-matrix
for (int i = 0; i < N; i++) {
tmpSum += local[ROW * N + i] * local[(i+N) * N + COL];
}
}
C[ROW * N + COL] = tmpSum;
}
/**
* matrixTranspositionKernel
*
* @param A the given Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
__global__ void matrixTranspositionKernel(float* A, float* C, int N) {
int ROW = blockIdx.y * blockDim.y + threadIdx.y;
int COL = blockIdx.x * blockDim.x + threadIdx.x;
//there may be some redundant threads which won't be assigned any task
if (ROW < N && COL < N) {
// each thread computes one element of the block sub-matrix
C[COL * N + ROW] = A[ROW * N + COL];
}
}
/**
* decompose_multipliers
*
* @param A the given Matrix (N*N)
 *        rows_per_thread the number of rows each thread takes care of
* i the iterator for the outer loop (in the caller for this method)
* N the side of the array
*/
__global__ void decompose_multipliers(float *A, int rows_per_thread, int i, int N) {
extern __shared__ float local[];
if(threadIdx.x==0){
local[0]=A[i * N + i];
}
__syncthreads();
float tid = blockIdx.x * blockDim.x + threadIdx.x;
int jstart = (i + 1) + tid * rows_per_thread;
int jend = jstart + rows_per_thread;
for (int j = jstart; j < jend && j < N; j++) {
A[j * N + i] = A[j * N + i] / local[0]; // Computes the multipliers and updates L in A
//printf("new L in A[%d][%d] is %d\n", j, i, A[j*N+i]);
//printf("A[%d][%d] is %d\n",i,i,A[i*N+i]);
}
}
/**
* decompose_elimination
*
* @param A the given Matrix (N*N)
 *        rows_per_thread the number of rows each thread takes care of
* i the iterator for the outer loop (in the caller for this method)
* N the side of the array
*/
__global__ void decompose_elimination(float *A, int rows_per_thread, int i, int N) {
extern __shared__ float local[];
if(threadIdx.x==0){
for(int iteration=0;iteration<N;iteration++){
local[0*N+iteration]=A[i*N+iteration];
}
for(int iteration=0;iteration<N;iteration++){
local[1*N+iteration]=A[iteration*N+i];
}
}
__syncthreads();
float tid = blockIdx.x * blockDim.x + threadIdx.x;
float eid = blockIdx.y * blockDim.y + threadIdx.y;
int jstart = (i + 1) + tid * rows_per_thread;
int jend = jstart + rows_per_thread;
int kstart = (i + 1) + eid * rows_per_thread;
int kend = kstart + rows_per_thread;
for (int j = jstart; j < jend && j < N; j++) { // Iterates over the remaining rows
for (int k = kstart; k < kend && k < N; k++) { // iterates over the remaining columns
A[j * N + k] -= local[1 * N + j] * local[0 * N +k ]; // Updates U and L
//printf("new L and U in A[%d][%d] is %d\n", j, i, A[j*N+i]);
}
}
}
/**
* decompose_right_looking
*
* @param A the given Matrix (N*N)
 *        rows_per_thread the number of rows each thread takes care of
* i the iterator for the outer loop (in the caller for this method)
* N the side of the array
*/
__global__ void decompose_right_looking(float *A, int rows_per_thread, int i, int N){
float tid = blockIdx.x * blockDim.x + threadIdx.x;
float eid = blockIdx.y * blockDim.y + threadIdx.y;
int jstart = (i + 1) + tid * rows_per_thread;
int jend = jstart + rows_per_thread;
//int k = (i + 1) + eid;
//int kend = kstart + rows_per_thread;
for (int j = jstart; j < jend && j < N; j++) {
//update L
A[j * N + i] = A[j * N + i] / A[i * N + i];
for(int k = i+1; k < N; k++){
// iterates over the remaining columns
A[j * N + k] -= A[j * N + i] * A[i * N + k]; // Updates U and L
}
}
}
/**
* decompose_left_looking
*
* @param A the given Matrix (N*N)
 *        rows_per_thread the number of rows each thread takes care of
* i the iterator for the outer loop (in the caller for this method)
* N the side of the array
*/
__global__ void decompose_left_looking(float *A, int rows_per_thread, int i, int N){
float tid = blockIdx.x * blockDim.x + threadIdx.x;
//int jstart_0 = 0 + tid * rows_per_thread;
int jstart = (i+1) + tid * rows_per_thread;
//int jend_0 = jstart_0 + rows_per_thread;
int jend = jstart + rows_per_thread;
for (int j = 0; j < i; j++) {
//update L
//A[j * N + i] = A[j * N + i] / A[i * N + i];
for (int k = j + 1; k < N; k++) { // iterates over the remaining columns
A[k * N + i] -= A[k * N + j] * A[j * N + i]; // Updates U and L
}
}
//A[i * N + i] = 1/A[i * N + i];
for(int j=jstart; j < jend && j<N; j++){
A[j * N + i] = A[j * N + i] / A[i * N + i];
}
}
/**
* decompose_onepass
*
* @param A the given Matrix (N*N)
 *        rows_per_thread the number of rows each thread takes care of
* i the iterator for the outer loop (in the caller for this method)
* N the side of the array
*/
__global__ void decompose_onepass(float *A, int rows_per_thread, int i, int N){
extern __shared__ float local[];
if(threadIdx.x==0){
for(int iteration=0;iteration<N;iteration++){
local[0*N+iteration]=A[i*N+iteration];
}
for(int iteration=0;iteration<N;iteration++){
local[1*N+iteration]=A[iteration*N+i];
}
}
__syncthreads();
float tid = blockIdx.x * blockDim.x + threadIdx.x;
float eid = blockIdx.y * blockDim.y + threadIdx.y;
int jstart = (i + 1) + tid * rows_per_thread;
int jend = jstart + rows_per_thread;
int kstart = i + eid * rows_per_thread;
int kend = kstart + rows_per_thread;
for (int j = jstart; j < jend && j < N; j++) {
for (int k =i;k < N; k++) {// iterates over the remaining columns
if(i == k){//update L
A[j * N + i] = A[j * N + i] / local[0*N+i];
}
else{
A[j * N + k] -= local[1 * N + j] * local[0 * N +k ]/local[0*N+i]; // Updates U and L
}
}
}
}
/**
* getMatrixDeterminantKernel
*
* @param U the upper triangular matrix (N*N)
* determ the determinant to be calculated (initialized as 1)
* N the side of the array
*/
__global__ void getMatrixDeterminantKernel(float*U, float* determ,int N){
int ROW = blockIdx.x * blockDim.x + threadIdx.x;
//printf("cur det is %f\n",*determ);
if(ROW< N){
atomicMul(determ,U[ROW*N+ROW]);
//printf("cur det is %f, times %f\n",*determ,U[ROW*N+ROW]);
}
}
__global__ void pre_crout(float* A, int N) {
int COL = blockIdx.x * blockDim.x + threadIdx.x;
if(COL==0) A[0*N+0] = 1/A[0*N+0];
__syncthreads();
//there may be some redundant threads which won't be assigned any task
if (COL < N && COL > 1) {
// each thread computes one element of the block sub-matrix
A[0*N + COL] = A[0*N + COL] * A[0*N+0];
}
}
/**
* decompose_crout (deprecated)
*
* @param A the given Matrix (N*N)
 *        rows_per_thread the number of rows each thread takes care of
* i the iterator for the outer loop (in the caller for this method)
* N the side of the array
*/
__global__ void decompose_crout(float *A, int rows_per_thread, int i, int N){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int jstart = (i + 1) + tid * rows_per_thread;
int jend = jstart + rows_per_thread;
int jstart_0 = 0 + tid * rows_per_thread;
int jend_0 = jstart_0 + rows_per_thread;
for (int j = jstart_0; j < jend_0 && j < i; j++) {
for (int k = i; k < N; k++) {// iterates over the remaining columns
A[k * N + i] -= A[k * N + j] * A[j * N + i];
}
}
for(int k = 0; k < i; k++){
for(int j = jstart;j < jend && j < N; j++){
A[i * N + j] -= A[i * N + k] * A[k * N + j];
}
}
for(int k = 0;k < i; k++){
A[i * N + k] /= A[i * N + i];
}
}
/**
* upperTriangleInversionKernel
*
* @param U the upper triangular matrix (N*N)
* prev_U the original version of matrix U (N*N)
* N the side of the array
*/
__global__ void upperTriangleInversionKernel (float* U, float* prev_U,int N){
extern __shared__ float local[];
if(threadIdx.x==0 && threadIdx.y==0){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
local[i*N+j]=U[i*N+j];
}
}
for(int i=N;i<N*2;i++){
for(int j=0;j<N;j++){
local[i*N+j]=prev_U[(i-N)*N+j];
}
}
}
__syncthreads();
for(int dert=0;dert<N;dert++){
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = row+dert;
if(dert==0){
U[row*N+col]=1/local[(row+N)*N+col];
local[row*N+col]=U[row*N+col];
}
else{
if(row+dert<N){
float sum=0;
for(int k=row+1;k<=col;k++){
sum+=local[k*N+col]*local[(row+N)*N+k];
}
float update_val;
update_val=-sum/local[(row+N)*N+row];
U[row*N+col]=update_val;
local[row*N+col]=update_val;
}
}
__syncthreads();
}
}
/**
* solveUpperTriangleEquationsKernel
*
 * @param U the upper triangular matrix (N*N)
* x the solution vector for "Ux=y"
* y the right part of the equation
* N the side of the array
*/
__global__ void solveUpperTriangleEquationsKernel(float* U,float* x,float *y,int N){
extern __shared__ float local_x[];
local_x[threadIdx.x]=x[threadIdx.x];
__syncthreads();
for(int row=N-1;row>=0;row--){
if(threadIdx.x>row){
atomicAdd(&y[row],-local_x[threadIdx.x]*U[row*N+threadIdx.x]);
//printf("current_x is %f\n",y[row]);
}
__syncthreads();
if(threadIdx.x==N-1){
float update_val=y[row]/U[row*N+row];
x[row]=update_val;
local_x[row]=update_val;
//printf("x[%d]is %f\n",row,x[row]);
}
__syncthreads();
}
}
/**
* solveLowerTriangleEquationsKernel
*
 * @param L the lower triangular matrix (N*N)
* y the solution vector for "Ly=b"
* b the right part of the equation
* N the side of the array
*/
__global__ void solveLowerTriangleEquationsKernel(float* L,float* y,float *b,int N){
extern __shared__ float local_y[];
local_y[threadIdx.x]=y[threadIdx.x];
__syncthreads();
for(int row=0;row<N;row++){
if(threadIdx.x<row){
atomicAdd(&b[row],-local_y[threadIdx.x]*L[row*N+threadIdx.x]);
//printf("current_y is %f\n",b[row]);
}
__syncthreads();
if(threadIdx.x==0){
float update_val=b[row]/L[row*N+row];
y[row]=update_val;
local_y[row]=update_val;
//printf("y[%d]is %f\n",row,y[row]);
}
__syncthreads();
}
}
/**
* matrixAddition
*
* @param A the first Matrix (N*N)
* B the second Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
void matrixAddition(float* A, float* B, float* C, int N) {
// declare the number of blocks per grid and the number of threads per block
dim3 threadsPerBlock(N, N);
dim3 blocksPerGrid(1, 1);
if (N * N > 32 * 32) {
threadsPerBlock.x = 32;
threadsPerBlock.y = 32;
blocksPerGrid.x = ceil(double(N) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(N) / double(threadsPerBlock.y));
}
matrixAdditionKernel << <blocksPerGrid, threadsPerBlock >> > (A, B, C, N);
}
/**
* matrixSubtraction
*
* @param A the first Matrix (N*N)
* B the second Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
void matrixSubtraction(float* A, float* B, float* C, int N) {
// declare the number of blocks per grid and the number of threads per block
dim3 threadsPerBlock(N, N);
dim3 blocksPerGrid(1, 1);
if (N * N > 32 * 32) {
threadsPerBlock.x = 32;
threadsPerBlock.y = 32;
blocksPerGrid.x = ceil(double(N) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(N) / double(threadsPerBlock.y));
}
matrixSubtractionKernel << <blocksPerGrid, threadsPerBlock >> > (A, B, C, N);
}
/**
* scalarMultiplication
*
* @param A the first Matrix (N*N)
* scalar the multiplier
* C the result Matrix (N*N)
* N the side of the array
*/
void scalarMultiplication(float* A, float scalar, float* C, int N) {
// declare the number of blocks per grid and the number of threads per block
dim3 threadsPerBlock(N, N);
dim3 blocksPerGrid(1, 1);
if (N * N > 32 * 32) {
threadsPerBlock.x = 32;
threadsPerBlock.y = 32;
blocksPerGrid.x = ceil(double(N) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(N) / double(threadsPerBlock.y));
}
scalarMultiplicationKernel << <blocksPerGrid, threadsPerBlock >> > (A, scalar, C, N);
}
/**
* matrixMultiplication
*
* @param A the first Matrix (N*N)
* B the second Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
void matrixMultiplication(float* A, float* B, float* C, int N) {
// declare the number of blocks per grid and the number of threads per block
dim3 threadsPerBlock(N, N);
dim3 blocksPerGrid(1, 1);
if (N * N > 32*32) {
threadsPerBlock.x = 32;
threadsPerBlock.y = 32;
blocksPerGrid.x = ceil(double(N) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(N) / double(threadsPerBlock.y));
}
matrixMultiplicationKernel << <blocksPerGrid, threadsPerBlock,N*N*2*sizeof(float)>> > (A, B, C, N);
}
/**
* matrixTransposition
*
* @param A the given Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
void matrixTransposition(float* A, float* C, int N) {
// declare the number of blocks per grid and the number of threads per block
// use 1 to 512 threads per block
dim3 threadsPerBlock(N, N);
dim3 blocksPerGrid(1, 1);
if (N * N > 32 * 32) {
threadsPerBlock.x = 32;
threadsPerBlock.y = 32;
blocksPerGrid.x = ceil(double(N) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(N) / double(threadsPerBlock.y));
}
matrixTranspositionKernel << <blocksPerGrid, threadsPerBlock >> > (A, C, N);
}
/**
* LU_base
*
* @param A the given Matrix (N*N)
* N the side of the array
*/
void LU_base(float* A, int N) {
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
dim3 threadsPerBlockU(N,N);
dim3 blocksPerGridU(1,1);
if (N > 32 * 32){
threadsPerBlock.x=32*32;
blocksPerGrid.x=ceil(double(N) / double(threadsPerBlock.x));
}
if (N * N > 32 * 32) {
threadsPerBlockU.x = 32;
threadsPerBlockU.y = 32;
blocksPerGridU.x = ceil(double(N) / double(threadsPerBlockU.x));
blocksPerGridU.y = ceil(double(N) / double(threadsPerBlockU.y));
}
float ops_per_thread = ceil(double(N) / (double)(threadsPerBlock.x*blocksPerGrid.x));
for (int i = 0; i < N; i++) { // Iterates over the columns to remove
decompose_multipliers << <blocksPerGrid, threadsPerBlock,sizeof(float)>> > (A, ops_per_thread, i, N);
decompose_elimination << <blocksPerGridU, threadsPerBlockU,2*N*sizeof(float)>> > (A, ops_per_thread, i, N);
}
}
/**
* LU_right_looking
*
* @param A the given Matrix (N*N)
* N the side of the array
*/
void LU_right_looking(float*A, int N){
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
if (N > 32 * 32){
threadsPerBlock.x=32*32;
blocksPerGrid.x=ceil(double(N) / double(threadsPerBlock.x));
}
float ops_per_thread = ceil(double(N) / (double)(threadsPerBlock.x*blocksPerGrid.x));
for (int i = 0; i < N; i++) { // Iterates over the columns to remove
decompose_right_looking << <blocksPerGrid, threadsPerBlock >> > (A, ops_per_thread, i, N);
}
}
/**
* LU_left_looking
*
* @param A the given Matrix (N*N)
* N the side of the array
*/
void LU_left_looking(float*A, int N){
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
if (N > 32 * 32){
threadsPerBlock.x=32*32;
blocksPerGrid.x=ceil(double(N) / double(threadsPerBlock.x));
}
float ops_per_thread = ceil(double(N) / (double)(threadsPerBlock.x*blocksPerGrid.x));
for (int i = 0; i < N; i++) { // Iterates over the columns to remove
decompose_left_looking << <blocksPerGrid, threadsPerBlock >> > (A, ops_per_thread, i, N);
}
}
/**
* LU_onepass
*
* @param A the given Matrix (N*N)
* N the side of the array
*/
void LU_onepass(float*A, int N){
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
if (N > 32 * 32){
threadsPerBlock.x=32*32;
blocksPerGrid.x=ceil(double(N) / double(threadsPerBlock.x));
}
float ops_per_thread = ceil(double(N) / (double)(threadsPerBlock.x*blocksPerGrid.x));
for (int i = 0; i < N; i++) { // Iterates over the columns to remove
decompose_onepass << <blocksPerGrid, threadsPerBlock,2*N*sizeof(float)>> > (A, ops_per_thread, i, N);
}
}
/**
* LU_crout (deprecated)
*
* @param A the given Matrix (N*N)
* N the side of the array
*/
void LU_crout(float*A, int N){
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
if (N > 32 * 32){
threadsPerBlock.x=32*32;
blocksPerGrid.x=ceil(double(N) / double(threadsPerBlock.x));
}
float ops_per_thread = ceil(double(N) / (double)(threadsPerBlock.x*blocksPerGrid.x));
//pre_crout <<<blocksPerGrid, threadsPerBlock >> > (A, N);
for (int i = 0; i < N; i++) { // Iterates over the columns to remove
decompose_crout << <blocksPerGrid, threadsPerBlock >> > (A, ops_per_thread, i, N);
}
}
/**
* getMatrixDeterminant
*
* @param U the upper triangular matrix (N*N)
* N the side of the array
* @return the determinant value
*/
float getMatrixDeterminant(float* U,int N) {
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
float* ans=(float*)malloc(sizeof(float));
*ans=1;
float* d_ans;
hipMalloc((void**)&d_ans, sizeof(float));
hipMemcpy(d_ans, ans, sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( getMatrixDeterminantKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, U,d_ans,N);
hipDeviceSynchronize();
hipMemcpy(ans, d_ans, sizeof(float), hipMemcpyDeviceToHost);
return *ans;
}
/**
* upperTriangleInversion
*
* @param U the upper triangular matrix (N*N)
* N the side of the array
*/
void upperTriangleInversion(float *U,int N){
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
dev_array<float> prev_U(N*N);
hipMemcpy(prev_U.getData(), U, N*N*sizeof(float), hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( upperTriangleInversionKernel), dim3(blocksPerGrid), dim3(threadsPerBlock),2*N*N*sizeof(float), 0, U,prev_U.getData(),N);
}
/**
* matrixInversion
*
* @param C the result matrix (N*N)
* L the lower triangular matrix (N*N)
* U the upper triangular matrix (N*N)
* N the side of the array
*/
void matrixInversion(float* C,float* L,float *U,int N){
dev_array<float> d_trans_L(N*N);
upperTriangleInversion(U,N);
matrixTransposition(L,d_trans_L.getData(),N);
upperTriangleInversion(d_trans_L.getData(),N);
matrixTransposition(d_trans_L.getData(),L,N);
matrixMultiplication(U,L,C,N);
}
/**
* solveUpperTriangleEquations
*
* @param U the upper triangular matrix (N*N)
* x the solution vector for "Ux=y"
* y the right part of the equation
* N the side of the array
*/
void solveUpperTriangleEquations(float* U,float* x,float *y,int N){
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
hipLaunchKernelGGL(( solveUpperTriangleEquationsKernel), dim3(blocksPerGrid), dim3(threadsPerBlock),N*sizeof(float), 0, U,x,y,N);
}
/**
* solveLowerTriangleEquations
*
 * @param L the lower triangular matrix (N*N)
* y the solution vector for "Ly=b"
* b the right part of the equation
* N the side of the array
*/
void solveLowerTriangleEquations(float* L,float* y,float *b,int N){
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
hipLaunchKernelGGL(( solveLowerTriangleEquationsKernel), dim3(blocksPerGrid), dim3(threadsPerBlock),N*sizeof(float), 0, L,y,b,N);
}
/**
* solveEquations
*
 * @param L the lower triangular matrix (N*N)
* U the upper triangular matrix (N*N)
* x the solution vector for "Ax=b"
* b the right part of the equation
* N the side of the array
*/
void solveEquations(float* L,float* U,float* x,float *b,int N){
dev_array<float> d_y(N);
solveLowerTriangleEquations(L,d_y.getData(),b,N);
solveUpperTriangleEquations(U,x,d_y.getData(),N);
}
|
2127daa2ecb0b89b4e71a9672b2520a15329d11d.cu
|
/**
* kernel.cu: the implementations for the methods stated in kernel.h. Each operation has one (or more than one) host method(s) to call the kernel and one
* (or more than one) global kernel method(s) to run the parallel algorithm on GPU.
*/
#include <math.h>
#include <iostream>
#include "cuda_runtime.h"
#include "kernel.h"
#include "dev_array.h"
#include <stdlib.h>
//reference: https://www.quantstart.com/articles/Matrix-Matrix-Multiplication-on-the-GPU-with-Nvidia-CUDA/
using namespace std;
/**
* Do the atomic multiplication based on the atomicCAS given by CUDA
*
* @param address the address where the target variable is at
* val the multiplier
* @return the old value before being updated
*/
__device__ float atomicMul(float* address, float val)
{
int* address_as_i = (int*)address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = atomicCAS(address_as_i, assumed,
__float_as_int(val *
__int_as_float(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __int_as_float(old);
}
/**
* matrixAdditionKernel
*
* @param A the first Matrix (N*N)
* B the second Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
__global__ void matrixAdditionKernel(float* A, float* B, float* C, int N) {
int ROW = blockIdx.y * blockDim.y + threadIdx.y;
int COL = blockIdx.x * blockDim.x + threadIdx.x;
//there may be some redundant threads which won't be assigned any task
if (ROW < N && COL < N) {
// each thread computes one element of the block sub-matrix
C[ROW * N + COL] = A[ROW * N + COL]+B[ROW * N + COL];
		//printf("C[%d]==A[%d]+B[%d], %f\n", ROW * N + COL, ROW * N + COL, ROW * N + COL, C[ROW * N + COL]);
}
}
/**
* matrixSubtractionKernel
*
* @param A the first Matrix (N*N)
* B the second Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
__global__ void matrixSubtractionKernel(float* A, float* B, float* C, int N) {
int ROW = blockIdx.y * blockDim.y + threadIdx.y;
int COL = blockIdx.x * blockDim.x + threadIdx.x;
//there may be some redundant threads which won't be assigned any task
if (ROW < N && COL < N) {
// each thread computes one element of the block sub-matrix
C[ROW * N + COL] = A[ROW * N + COL] - B[ROW * N + COL];
}
}
/**
* scalarMultiplicationKernel
*
* @param A the first Matrix (N*N)
* scalar the multiplier
* C the result Matrix (N*N)
* N the side of the array
*/
__global__ void scalarMultiplicationKernel(float* A, float scalar, float* C, int N) {
int ROW = blockIdx.y * blockDim.y + threadIdx.y;
int COL = blockIdx.x * blockDim.x + threadIdx.x;
//there may be some redundant threads which won't be assigned any task
if (ROW < N && COL < N) {
// each thread computes one element of the block sub-matrix
C[ROW * N + COL] = A[ROW * N + COL] * scalar;
}
}
/**
* matrixMultiplicationKernel
*
* @param A the first Matrix (N*N)
* B the second Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
__global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int N) {
extern __shared__ float local[];
if(threadIdx.x==0 && threadIdx.y==0){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
local[i*N+j]=A[i*N+j];
}
}
for(int i=N;i<N*2;i++){
for(int j=0;j<N;j++){
local[i*N+j]=B[(i-N)*N+j];
}
}
}
__syncthreads();
int ROW = blockIdx.y * blockDim.y + threadIdx.y;
int COL = blockIdx.x * blockDim.x + threadIdx.x;
float tmpSum = 0;
//reduce: to be updated (limited by the total number of threads that can run concurrently, we didn't implement reduce method here.)
//there may be some redundant threads which won't be assigned any task
if (ROW < N && COL < N) {
// each thread computes one element of the block sub-matrix
for (int i = 0; i < N; i++) {
tmpSum += local[ROW * N + i] * local[(i+N) * N + COL];
}
}
C[ROW * N + COL] = tmpSum;
}
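/**
 * --- Illustrative sketch (not part of the original file) ---
 * The kernel above stages both whole matrices into shared memory from a
 * single thread, which only works while 2*N*N floats fit in one block's
 * shared memory and the grid has a single block.  The standard alternative
 * is a tiled product where each block cooperatively loads TILE x TILE
 * sub-blocks.  matrixMultiplicationTiledKernel is a hypothetical kernel
 * shown only as a sketch of that pattern; it is not called in this file.
 */
#define TILE 16
__global__ void matrixMultiplicationTiledKernel(const float* A, const float* B, float* C, int N) {
	__shared__ float As[TILE][TILE];
	__shared__ float Bs[TILE][TILE];
	int row = blockIdx.y * TILE + threadIdx.y;
	int col = blockIdx.x * TILE + threadIdx.x;
	float acc = 0.0f;
	for (int t = 0; t < (N + TILE - 1) / TILE; t++) {
		// cooperative load of one tile of A and one tile of B (zero-padded at the edges)
		int aCol = t * TILE + threadIdx.x;
		int bRow = t * TILE + threadIdx.y;
		As[threadIdx.y][threadIdx.x] = (row < N && aCol < N) ? A[row * N + aCol] : 0.0f;
		Bs[threadIdx.y][threadIdx.x] = (bRow < N && col < N) ? B[bRow * N + col] : 0.0f;
		__syncthreads();
		for (int k = 0; k < TILE; k++) {
			acc += As[threadIdx.y][k] * Bs[k][threadIdx.x];
		}
		__syncthreads();
	}
	if (row < N && col < N) {
		C[row * N + col] = acc;
	}
}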
/**
* matrixTranspositionKernel
*
* @param A the given Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
__global__ void matrixTranspositionKernel(float* A, float* C, int N) {
int ROW = blockIdx.y * blockDim.y + threadIdx.y;
int COL = blockIdx.x * blockDim.x + threadIdx.x;
//there may be some redundant threads which won't be assigned any task
if (ROW < N && COL < N) {
// each thread computes one element of the block sub-matrix
C[COL * N + ROW] = A[ROW * N + COL];
}
}
/**
* decompose_multipliers
*
* @param A the given Matrix (N*N)
 *        rows_per_thread the number of rows each thread takes care of
* i the iterator for the outer loop (in the caller for this method)
* N the side of the array
*/
__global__ void decompose_multipliers(float *A, int rows_per_thread, int i, int N) {
extern __shared__ float local[];
if(threadIdx.x==0){
local[0]=A[i * N + i];
}
__syncthreads();
float tid = blockIdx.x * blockDim.x + threadIdx.x;
int jstart = (i + 1) + tid * rows_per_thread;
int jend = jstart + rows_per_thread;
for (int j = jstart; j < jend && j < N; j++) {
A[j * N + i] = A[j * N + i] / local[0]; // Computes the multipliers and updates L in A
//printf("new L in A[%d][%d] is %d\n", j, i, A[j*N+i]);
//printf("A[%d][%d] is %d\n",i,i,A[i*N+i]);
}
}
/**
* decompose_elimination
*
* @param A the given Matrix (N*N)
 *        rows_per_thread the number of rows each thread takes care of
* i the iterator for the outer loop (in the caller for this method)
* N the side of the array
*/
__global__ void decompose_elimination(float *A, int rows_per_thread, int i, int N) {
extern __shared__ float local[];
if(threadIdx.x==0){
for(int iteration=0;iteration<N;iteration++){
local[0*N+iteration]=A[i*N+iteration];
}
for(int iteration=0;iteration<N;iteration++){
local[1*N+iteration]=A[iteration*N+i];
}
}
__syncthreads();
float tid = blockIdx.x * blockDim.x + threadIdx.x;
float eid = blockIdx.y * blockDim.y + threadIdx.y;
int jstart = (i + 1) + tid * rows_per_thread;
int jend = jstart + rows_per_thread;
int kstart = (i + 1) + eid * rows_per_thread;
int kend = kstart + rows_per_thread;
for (int j = jstart; j < jend && j < N; j++) { // Iterates over the remaining rows
for (int k = kstart; k < kend && k < N; k++) { // iterates over the remaining columns
A[j * N + k] -= local[1 * N + j] * local[0 * N +k ]; // Updates U and L
//printf("new L and U in A[%d][%d] is %d\n", j, i, A[j*N+i]);
}
}
}
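/**
 * --- Illustrative sketch (not part of the original file) ---
 * decompose_multipliers and decompose_elimination above together perform one
 * column step of in-place Doolittle LU factorization.  The sequential routine
 * below shows the whole algorithm in one place; lu_decompose_cpu is a
 * hypothetical reference helper that stores L (unit diagonal, strictly below
 * the diagonal) and U (on and above the diagonal) in A, the same layout the
 * kernels use.  Like the GPU code, it does no pivoting.
 */
static void lu_decompose_cpu(float *A, int N) {
	for (int i = 0; i < N; i++) {                      // column being eliminated
		for (int j = i + 1; j < N; j++) {
			A[j * N + i] /= A[i * N + i];              // multiplier, stored as L(j,i)
			for (int k = i + 1; k < N; k++) {
				A[j * N + k] -= A[j * N + i] * A[i * N + k];   // update the trailing row
			}
		}
	}
}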
/**
* decompose_right_looking
*
* @param A the given Matrix (N*N)
 *        rows_per_thread the number of rows each thread takes care of
* i the iterator for the outer loop (in the caller for this method)
* N the side of the array
*/
__global__ void decompose_right_looking(float *A, int rows_per_thread, int i, int N){
float tid = blockIdx.x * blockDim.x + threadIdx.x;
float eid = blockIdx.y * blockDim.y + threadIdx.y;
int jstart = (i + 1) + tid * rows_per_thread;
int jend = jstart + rows_per_thread;
//int k = (i + 1) + eid;
//int kend = kstart + rows_per_thread;
for (int j = jstart; j < jend && j < N; j++) {
//update L
A[j * N + i] = A[j * N + i] / A[i * N + i];
for(int k = i+1; k < N; k++){
// iterates over the remaining columns
A[j * N + k] -= A[j * N + i] * A[i * N + k]; // Updates U and L
}
}
}
/**
* decompose_left_looking
*
* @param A the given Matrix (N*N)
 *        rows_per_thread the number of rows each thread takes care of
* i the iterator for the outer loop (in the caller for this method)
* N the side of the array
*/
__global__ void decompose_left_looking(float *A, int rows_per_thread, int i, int N){
float tid = blockIdx.x * blockDim.x + threadIdx.x;
//int jstart_0 = 0 + tid * rows_per_thread;
int jstart = (i+1) + tid * rows_per_thread;
//int jend_0 = jstart_0 + rows_per_thread;
int jend = jstart + rows_per_thread;
for (int j = 0; j < i; j++) {
//update L
//A[j * N + i] = A[j * N + i] / A[i * N + i];
for (int k = j + 1; k < N; k++) { // iterates over the remaining columns
A[k * N + i] -= A[k * N + j] * A[j * N + i]; // Updates U and L
}
}
//A[i * N + i] = 1/A[i * N + i];
for(int j=jstart; j < jend && j<N; j++){
A[j * N + i] = A[j * N + i] / A[i * N + i];
}
}
/**
* decompose_onepass
*
* @param A the given Matrix (N*N)
 *        rows_per_thread the number of rows each thread takes care of
* i the iterator for the outer loop (in the caller for this method)
* N the side of the array
*/
__global__ void decompose_onepass(float *A, int rows_per_thread, int i, int N){
extern __shared__ float local[];
if(threadIdx.x==0){
for(int iteration=0;iteration<N;iteration++){
local[0*N+iteration]=A[i*N+iteration];
}
for(int iteration=0;iteration<N;iteration++){
local[1*N+iteration]=A[iteration*N+i];
}
}
__syncthreads();
float tid = blockIdx.x * blockDim.x + threadIdx.x;
float eid = blockIdx.y * blockDim.y + threadIdx.y;
int jstart = (i + 1) + tid * rows_per_thread;
int jend = jstart + rows_per_thread;
int kstart = i + eid * rows_per_thread;
int kend = kstart + rows_per_thread;
for (int j = jstart; j < jend && j < N; j++) {
for (int k =i;k < N; k++) {// iterates over the remaining columns
if(i == k){//update L
A[j * N + i] = A[j * N + i] / local[0*N+i];
}
else{
A[j * N + k] -= local[1 * N + j] * local[0 * N +k ]/local[0*N+i]; // Updates U and L
}
}
}
}
/**
* getMatrixDeterminantKernel
*
* @param U the upper triangular matrix (N*N)
* determ the determinant to be calculated (initialized as 1)
* N the side of the array
*/
__global__ void getMatrixDeterminantKernel(float*U, float* determ,int N){
int ROW = blockIdx.x * blockDim.x + threadIdx.x;
//printf("cur det is %f\n",*determ);
if(ROW< N){
atomicMul(determ,U[ROW*N+ROW]);
//printf("cur det is %f, times %f\n",*determ,U[ROW*N+ROW]);
}
}
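/**
 * --- Illustrative sketch (not part of the original file) ---
 * After an LU factorization without pivoting, det(A) = det(L) * det(U) and
 * det(L) = 1, so the determinant is the product of U's diagonal, which is
 * exactly what the kernel above accumulates with atomicMul.  determinant_cpu
 * is a hypothetical host-side reference that forms the same product from a
 * copy of U already in host memory.
 */
static float determinant_cpu(const float *U_host, int N) {
	float det = 1.0f;
	for (int i = 0; i < N; i++) {
		det *= U_host[i * N + i];   // multiply the diagonal entries
	}
	return det;
}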
__global__ void pre_crout(float* A, int N) {
int COL = blockIdx.x * blockDim.x + threadIdx.x;
if(COL==0) A[0*N+0] = 1/A[0*N+0];
__syncthreads();
//there may be some redundant threads which won't be assigned any task
if (COL < N && COL > 1) {
// each thread computes one element of the block sub-matrix
A[0*N + COL] = A[0*N + COL] * A[0*N+0];
}
}
/**
* decompose_crout (deprecated)
*
* @param A the given Matrix (N*N)
* rows_per_thread the number of rows each thread takes care of
* i the iterator for the outer loop (in the caller for this method)
* N the side of the array
*/
__global__ void decompose_crout(float *A, int rows_per_thread, int i, int N){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int jstart = (i + 1) + tid * rows_per_thread;
int jend = jstart + rows_per_thread;
int jstart_0 = 0 + tid * rows_per_thread;
int jend_0 = jstart_0 + rows_per_thread;
for (int j = jstart_0; j < jend_0 && j < i; j++) {
for (int k = i; k < N; k++) {// iterates over the remaining columns
A[k * N + i] -= A[k * N + j] * A[j * N + i];
}
}
for(int k = 0; k < i; k++){
for(int j = jstart;j < jend && j < N; j++){
A[i * N + j] -= A[i * N + k] * A[k * N + j];
}
}
for(int k = 0;k < i; k++){
A[i * N + k] /= A[i * N + i];
}
}
/**
* upperTriangleInversionKernel
*
* @param U the upper triangular matrix (N*N)
* prev_U the original version of matrix U (N*N)
* N the side of the array
*/
__global__ void upperTriangleInversionKernel (float* U, float* prev_U,int N){
extern __shared__ float local[];
if(threadIdx.x==0 && threadIdx.y==0){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
local[i*N+j]=U[i*N+j];
}
}
for(int i=N;i<N*2;i++){
for(int j=0;j<N;j++){
local[i*N+j]=prev_U[(i-N)*N+j];
}
}
}
__syncthreads();
for(int dert=0;dert<N;dert++){
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = row+dert;
if(dert==0){
U[row*N+col]=1/local[(row+N)*N+col];
local[row*N+col]=U[row*N+col];
}
else{
if(row+dert<N){
float sum=0;
for(int k=row+1;k<=col;k++){
sum+=local[k*N+col]*local[(row+N)*N+k];
}
float update_val;
update_val=-sum/local[(row+N)*N+row];
U[row*N+col]=update_val;
local[row*N+col]=update_val;
}
}
__syncthreads();
}
}
/**
* solveUpperTriangleEquationsKernel
*
* @param U the upper triangular matrix (N*N)
* x the solution vector for "Ux=y"
* y the right-hand side of the equation
* N the side of the array
*/
__global__ void solveUpperTriangleEquationsKernel(float* U,float* x,float *y,int N){
extern __shared__ float local_x[];
local_x[threadIdx.x]=x[threadIdx.x];
__syncthreads();
for(int row=N-1;row>=0;row--){
if(threadIdx.x>row){
atomicAdd(&y[row],-local_x[threadIdx.x]*U[row*N+threadIdx.x]);
//printf("current_x is %f\n",y[row]);
}
__syncthreads();
if(threadIdx.x==N-1){
float update_val=y[row]/U[row*N+row];
x[row]=update_val;
local_x[row]=update_val;
//printf("x[%d]is %f\n",row,x[row]);
}
__syncthreads();
}
}
/**
* solveLowerTriangleEquationsKernel
*
* @param L the lower triangular matrix (N*N)
* y the solution vector for "Ly=b"
* b the right-hand side of the equation
* N the side of the array
*/
__global__ void solveLowerTriangleEquationsKernel(float* L,float* y,float *b,int N){
extern __shared__ float local_y[];
local_y[threadIdx.x]=y[threadIdx.x];
__syncthreads();
for(int row=0;row<N;row++){
if(threadIdx.x<row){
atomicAdd(&b[row],-local_y[threadIdx.x]*L[row*N+threadIdx.x]);
//printf("current_y is %f\n",b[row]);
}
__syncthreads();
if(threadIdx.x==0){
float update_val=b[row]/L[row*N+row];
y[row]=update_val;
local_y[row]=update_val;
//printf("y[%d]is %f\n",row,y[row]);
}
__syncthreads();
}
}
/**
* matrixAddition
*
* @param A the first Matrix (N*N)
* B the second Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
void matrixAddition(float* A, float* B, float* C, int N) {
// declare the number of blocks per grid and the number of threads per block
dim3 threadsPerBlock(N, N);
dim3 blocksPerGrid(1, 1);
if (N * N > 32 * 32) {
threadsPerBlock.x = 32;
threadsPerBlock.y = 32;
blocksPerGrid.x = ceil(double(N) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(N) / double(threadsPerBlock.y));
}
matrixAdditionKernel << <blocksPerGrid, threadsPerBlock >> > (A, B, C, N);
}
/**
* matrixSubtraction
*
* @param A the first Matrix (N*N)
* B the second Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
void matrixSubtraction(float* A, float* B, float* C, int N) {
// declare the number of blocks per grid and the number of threads per block
dim3 threadsPerBlock(N, N);
dim3 blocksPerGrid(1, 1);
if (N * N > 32 * 32) {
threadsPerBlock.x = 32;
threadsPerBlock.y = 32;
blocksPerGrid.x = ceil(double(N) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(N) / double(threadsPerBlock.y));
}
matrixSubtractionKernel << <blocksPerGrid, threadsPerBlock >> > (A, B, C, N);
}
/**
* scalarMultiplication
*
* @param A the first Matrix (N*N)
* scalar the multiplier
* C the result Matrix (N*N)
* N the side of the array
*/
void scalarMultiplication(float* A, float scalar, float* C, int N) {
// declare the number of blocks per grid and the number of threads per block
dim3 threadsPerBlock(N, N);
dim3 blocksPerGrid(1, 1);
if (N * N > 32 * 32) {
threadsPerBlock.x = 32;
threadsPerBlock.y = 32;
blocksPerGrid.x = ceil(double(N) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(N) / double(threadsPerBlock.y));
}
scalarMultiplicationKernel << <blocksPerGrid, threadsPerBlock >> > (A, scalar, C, N);
}
/**
* matrixMultiplication
*
* @param A the first Matrix (N*N)
* B the second Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
void matrixMultiplication(float* A, float* B, float* C, int N) {
// declare the number of blocks per grid and the number of threads per block
dim3 threadsPerBlock(N, N);
dim3 blocksPerGrid(1, 1);
if (N * N > 32*32) {
threadsPerBlock.x = 32;
threadsPerBlock.y = 32;
blocksPerGrid.x = ceil(double(N) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(N) / double(threadsPerBlock.y));
}
matrixMultiplicationKernel << <blocksPerGrid, threadsPerBlock,N*N*2*sizeof(float)>> > (A, B, C, N);
}
/**
* matrixTransposition
*
* @param A the given Matrix (N*N)
* C the result Matrix (N*N)
* N the side of the array
*/
void matrixTransposition(float* A, float* C, int N) {
// declare the number of blocks per grid and the number of threads per block
// use up to 32*32 = 1024 threads per block
dim3 threadsPerBlock(N, N);
dim3 blocksPerGrid(1, 1);
if (N * N > 32 * 32) {
threadsPerBlock.x = 32;
threadsPerBlock.y = 32;
blocksPerGrid.x = ceil(double(N) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(N) / double(threadsPerBlock.y));
}
matrixTranspositionKernel << <blocksPerGrid, threadsPerBlock >> > (A, C, N);
}
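/**
* exampleMatrixArithmetic (usage sketch, not part of the original file)
*
* Shows how the wrappers above can be chained on device buffers to compute
* C = (A + B) * B^T. The function name and the host buffers h_A, h_B, h_C are
* hypothetical; dev_array<T> and the wrapper calls follow the patterns already
* used in this file. Note that matrixMultiplication requests 2*N*N floats of
* dynamic shared memory, which bounds N (roughly N <= 78 with a 48 KB limit).
*/
void exampleMatrixArithmetic(const float* h_A, const float* h_B, float* h_C, int N) {
dev_array<float> d_A(N * N);
dev_array<float> d_B(N * N);
dev_array<float> d_S(N * N); // S = A + B
dev_array<float> d_T(N * N); // T = B^T
dev_array<float> d_C(N * N); // C = S * T
cudaMemcpy(d_A.getData(), h_A, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B.getData(), h_B, N * N * sizeof(float), cudaMemcpyHostToDevice);
matrixAddition(d_A.getData(), d_B.getData(), d_S.getData(), N);
matrixTransposition(d_B.getData(), d_T.getData(), N);
matrixMultiplication(d_S.getData(), d_T.getData(), d_C.getData(), N);
cudaMemcpy(h_C, d_C.getData(), N * N * sizeof(float), cudaMemcpyDeviceToHost);
}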
/**
* LU_base
*
* @param A the given Matrix (N*N)
* N the side of the array
*/
void LU_base(float* A, int N) {
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
dim3 threadsPerBlockU(N,N);
dim3 blocksPerGridU(1,1);
if (N > 32 * 32){
threadsPerBlock.x=32*32;
blocksPerGrid.x=ceil(double(N) / double(threadsPerBlock.x));
}
if (N * N > 32 * 32) {
threadsPerBlockU.x = 32;
threadsPerBlockU.y = 32;
blocksPerGridU.x = ceil(double(N) / double(threadsPerBlockU.x));
blocksPerGridU.y = ceil(double(N) / double(threadsPerBlockU.y));
}
int ops_per_thread = (int)ceil(double(N) / (double)(threadsPerBlock.x*blocksPerGrid.x));
for (int i = 0; i < N; i++) { // Iterates over the columns to remove
decompose_multipliers << <blocksPerGrid, threadsPerBlock,sizeof(float)>> > (A, ops_per_thread, i, N);
decompose_elimination << <blocksPerGridU, threadsPerBlockU,2*N*sizeof(float)>> > (A, ops_per_thread, i, N);
}
}
/**
* LU_right_looking
*
* @param A the given Matrix (N*N)
* N the side of the array
*/
void LU_right_looking(float*A, int N){
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
if (N > 32 * 32){
threadsPerBlock.x=32*32;
blocksPerGrid.x=ceil(double(N) / double(threadsPerBlock.x));
}
int ops_per_thread = (int)ceil(double(N) / (double)(threadsPerBlock.x*blocksPerGrid.x));
for (int i = 0; i < N; i++) { // Iterates over the columns to remove
decompose_right_looking << <blocksPerGrid, threadsPerBlock >> > (A, ops_per_thread, i, N);
}
}
/**
* LU_left_looking
*
* @param A the given Matrix (N*N)
* N the side of the array
*/
void LU_left_looking(float*A, int N){
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
if (N > 32 * 32){
threadsPerBlock.x=32*32;
blocksPerGrid.x=ceil(double(N) / double(threadsPerBlock.x));
}
int ops_per_thread = (int)ceil(double(N) / (double)(threadsPerBlock.x*blocksPerGrid.x));
for (int i = 0; i < N; i++) { // Iterates over the columns to remove
decompose_left_looking << <blocksPerGrid, threadsPerBlock >> > (A, ops_per_thread, i, N);
}
}
/**
* LU_onepass
*
* @param A the given Matrix (N*N)
* N the side of the array
*/
void LU_onepass(float*A, int N){
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
if (N > 32 * 32){
threadsPerBlock.x=32*32;
blocksPerGrid.x=ceil(double(N) / double(threadsPerBlock.x));
}
int ops_per_thread = (int)ceil(double(N) / (double)(threadsPerBlock.x*blocksPerGrid.x));
for (int i = 0; i < N; i++) { // Iterates over the columns to remove
decompose_onepass << <blocksPerGrid, threadsPerBlock,2*N*sizeof(float)>> > (A, ops_per_thread, i, N);
}
}
/**
* LU_crout (deprecated)
*
* @param A the given Matrix (N*N)
* N the side of the array
*/
void LU_crout(float*A, int N){
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
if (N > 32 * 32){
threadsPerBlock.x=32*32;
blocksPerGrid.x=ceil(double(N) / double(threadsPerBlock.x));
}
int ops_per_thread = (int)ceil(double(N) / (double)(threadsPerBlock.x*blocksPerGrid.x));
//pre_crout <<<blocksPerGrid, threadsPerBlock >> > (A, N);
for (int i = 0; i < N; i++) { // Iterates over the columns to remove
decompose_crout << <blocksPerGrid, threadsPerBlock >> > (A, ops_per_thread, i, N);
}
}
/**
* getMatrixDeterminant
*
* @param U the upper triangular matrix (N*N)
* N the side of the array
* @return the determinant value
*/
float getMatrixDeterminant(float* U,int N) {
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
float ans = 1.0f;
float* d_ans;
cudaMalloc((void**)&d_ans, sizeof(float));
cudaMemcpy(d_ans, &ans, sizeof(float), cudaMemcpyHostToDevice);
getMatrixDeterminantKernel<<<blocksPerGrid, threadsPerBlock>>>(U,d_ans,N);
cudaDeviceSynchronize();
cudaMemcpy(&ans, d_ans, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_ans); // release the device scratch value
return ans;
}
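/**
* exampleDeterminant (usage sketch, not part of the original file)
*
* The factorization kernels in this file store a unit-diagonal L (Doolittle
* style; see decompose_right_looking), so det(A) equals the product of the
* diagonal of the in-place factored matrix and the result can be passed
* directly to getMatrixDeterminant. The function name and host buffer h_A are
* hypothetical; no pivoting is performed, and the sketch assumes LU_base
* follows the same storage convention as the kernels shown above.
*/
float exampleDeterminant(const float* h_A, int N) {
dev_array<float> d_A(N * N);
cudaMemcpy(d_A.getData(), h_A, N * N * sizeof(float), cudaMemcpyHostToDevice);
LU_base(d_A.getData(), N); // in-place factorization; the diagonal now holds diag(U)
return getMatrixDeterminant(d_A.getData(), N); // product of the diagonal entries
}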
/**
* upperTriangleInversion
*
* @param U the upper triangular matrix (N*N)
* N the side of the array
*/
void upperTriangleInversion(float *U,int N){
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
dev_array<float> prev_U(N*N);
cudaMemcpy(prev_U.getData(), U, N*N*sizeof(float), cudaMemcpyDeviceToDevice);
upperTriangleInversionKernel<<<blocksPerGrid, threadsPerBlock,2*N*N*sizeof(float)>>>(U,prev_U.getData(),N);
}
/**
* matrixInversion
*
* @param C the result matrix (N*N)
* L the lower triangular matrix (N*N)
* U the upper triangular matrix (N*N)
* N the side of the array
*/
void matrixInversion(float* C,float* L,float *U,int N){
dev_array<float> d_trans_L(N*N);
upperTriangleInversion(U,N);
matrixTransposition(L,d_trans_L.getData(),N);
upperTriangleInversion(d_trans_L.getData(),N);
matrixTransposition(d_trans_L.getData(),L,N);
matrixMultiplication(U,L,C,N);
}
/**
* solveUpperTriangleEquations
*
* @param U the upper triangular matrix (N*N)
* x the solution vector for "Ux=y"
* y the right-hand side of the equation
* N the side of the array
*/
void solveUpperTriangleEquations(float* U,float* x,float *y,int N){
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
solveUpperTriangleEquationsKernel<<<blocksPerGrid, threadsPerBlock,N*sizeof(float)>>>(U,x,y,N);
}
/**
* solveLowerTriangleEquations
*
* @param L the lower triangular matrix (N*N)
* y the solution vector for "Ly=b"
* b the right-hand side of the equation
* N the side of the array
*/
void solveLowerTriangleEquations(float* L,float* y,float *b,int N){
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
solveLowerTriangleEquationsKernel<<<blocksPerGrid, threadsPerBlock,N*sizeof(float)>>>(L,y,b,N);
}
/**
* solveEquations
*
* @param L the lower triangular matrix (N*N)
* U the upper triangular matrix (N*N)
* x the solution vector for "Ax=b"
* b the right-hand side of the equation
* N the side of the array
*/
void solveEquations(float* L,float* U,float* x,float *b,int N){
dev_array<float> d_y(N);
solveLowerTriangleEquations(L,d_y.getData(),b,N);
solveUpperTriangleEquations(U,x,d_y.getData(),N);
}
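/**
* exampleSolve (end-to-end usage sketch, not part of the original file)
*
* Solves A x = b with the helpers above: factor A in place with LU_base,
* split the combined result on the host into an explicit unit-diagonal L and
* an upper-triangular U, then call solveEquations. The function name and the
* host buffers are hypothetical; no partial pivoting is performed, so the
* sketch assumes A can be factored as written, and the host-side split is for
* clarity rather than performance.
*/
void exampleSolve(const float* h_A, const float* h_b, float* h_x, int N) {
dev_array<float> d_A(N * N);
dev_array<float> d_L(N * N);
dev_array<float> d_U(N * N);
dev_array<float> d_b(N);
dev_array<float> d_x(N);
cudaMemcpy(d_A.getData(), h_A, N * N * sizeof(float), cudaMemcpyHostToDevice);
LU_base(d_A.getData(), N); // d_A now holds L below the diagonal and U on/above it
float* h_LU = new float[N * N];
float* h_L = new float[N * N];
float* h_U = new float[N * N];
cudaMemcpy(h_LU, d_A.getData(), N * N * sizeof(float), cudaMemcpyDeviceToHost);
for (int r = 0; r < N; r++) {
for (int c = 0; c < N; c++) {
h_L[r * N + c] = (r > c) ? h_LU[r * N + c] : (r == c ? 1.0f : 0.0f);
h_U[r * N + c] = (r <= c) ? h_LU[r * N + c] : 0.0f;
}
}
cudaMemcpy(d_L.getData(), h_L, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_U.getData(), h_U, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b.getData(), h_b, N * sizeof(float), cudaMemcpyHostToDevice);
solveEquations(d_L.getData(), d_U.getData(), d_x.getData(), d_b.getData(), N);
cudaMemcpy(h_x, d_x.getData(), N * sizeof(float), cudaMemcpyDeviceToHost);
delete[] h_LU;
delete[] h_L;
delete[] h_U;
}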
|
642f03f2afef814ba04e4cbd619b88a5f882d31f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "spoc_max.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
double *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
const int size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(spoc_max, dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(spoc_max, dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(spoc_max, dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
642f03f2afef814ba04e4cbd619b88a5f882d31f.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "spoc_max.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
double *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
const int size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
spoc_max<<<gridBlock,threadBlock>>>(input,output,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
spoc_max<<<gridBlock,threadBlock>>>(input,output,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
spoc_max<<<gridBlock,threadBlock>>>(input,output,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
4e874a536ac2bf7faf1376dc6bdd7321b3c31985.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// System includes
#include "cudaRidge.h"
#include "mctable.h"
#include "kernels.hip"
//#define CUDA_OUTPUT_STEPS
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
extern "C" bool cudaGradient(const double* d, uint dim[3], double** gradients) {
// for (uint i=0; i<dim[0]*dim[1]*dim[2]; ++i)
// cout << d[i] << endl;
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Starting cuda method..." << std::endl;
#endif
if (hipSetDevice(0) != hipSuccess) {
std::cout << " CUDA: Could not set cuda device." << std::endl;
}
else {
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Cuda device set." << std::endl;
#endif
}
//timespec start, end;
uint numPoints=dim[0]*dim[1]*dim[2];
uint sg = 3*numPoints*sizeof(double);
uint sd = numPoints*sizeof(double);
uint sdim = 3*sizeof(uint);
uint *dim_d;
double *g_h = new double[3*numPoints];
double *g_d, *d_d;
//clock_gettime(CLOCK_MONOTONIC, &start);
gpuErrchk(hipMalloc((void**)&d_d, sd));
gpuErrchk(hipMalloc((void**)&g_d, sg));
gpuErrchk(hipMalloc((void**)&dim_d, sdim));
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Memory allocated." << std::endl;
#endif
gpuErrchk(hipMemcpy(d_d, d, sd, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(dim_d, dim, sdim, hipMemcpyHostToDevice));
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Memory copied." << std::endl;
#endif
uint blockSize = 128;
int nBlocks = numPoints/blockSize + (numPoints%blockSize == 0?0:1);
hipLaunchKernelGGL(( processPoints) , dim3(nBlocks), dim3(blockSize) , 0, 0, numPoints, d_d, g_d, dim_d);
gpuErrchk(hipMemcpy(g_h, g_d, sg, hipMemcpyDeviceToHost));
//clock_gettime(CLOCK_MONOTONIC, &end);
gpuErrchk(hipFree(dim_d));
gpuErrchk(hipFree(d_d));
gpuErrchk(hipFree(g_d));
*gradients = g_h;
//cout << "Time used for GPU calculation in milliseconds:" << double(end.tv_nsec-start.tv_nsec)/1000000.0 << endl;
return true;
}
extern "C" bool cudaIsosurface(const double c, const double* in_data, double* in_grads, int in_size[3], uint* out_numVerts, double** out_verts, double** out_grads)
{
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Starting cuda method..." << std::endl;
#endif
if (hipSetDevice(0) != hipSuccess) {
std::cout << " CUDA: Could not set cuda device." << std::endl;
}
else {
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Cuda device set." << std::endl;
#endif
}
//---Init---------------------------------------------------------------------
//timespec start, end;
uint sdataB, sgradsB, sdimB, sbitmasksB, eTB, tTB, nVTB;
double *data, *g;
int *dim;
uint *bitmasks, bitmaskCnt, *eT, *tT, *nVT;
bitmaskCnt = in_size[0]*in_size[1]*in_size[2];
uint *bm = new uint[2*bitmaskCnt];
sdataB = in_size[0]*in_size[1]*in_size[2]*sizeof(double);
sgradsB = 3*in_size[0]*in_size[1]*in_size[2]*sizeof(double);
sdimB = 3*sizeof(int);
sbitmasksB = 2*bitmaskCnt*sizeof(uint);
eTB = 256*sizeof(uint);
tTB = 256*16*sizeof(uint);
nVTB = 256*sizeof(uint);
gpuErrchk(hipMalloc( (void**)&data, sdataB));
gpuErrchk(hipMalloc( (void**)&dim, sdimB));
gpuErrchk(hipMalloc( (void**)&bitmasks, sbitmasksB));
gpuErrchk(hipMalloc( (void**)&eT, eTB));
gpuErrchk(hipMalloc( (void**)&tT, tTB));
gpuErrchk(hipMalloc( (void**)&nVT, nVTB));
gpuErrchk(hipMalloc( (void**)&g, sgradsB));
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Memory allocated." << std::endl;
#endif
gpuErrchk(hipMemcpy( data, in_data, sdataB, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy( dim, in_size, sdimB, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy( bitmasks, bm, sbitmasksB, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy( eT, edgeTable, eTB, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy( tT, triTable, tTB, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy( nVT, numVertsTable, nVTB, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy( g, in_grads, sgradsB, hipMemcpyHostToDevice));
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Memory copied." << std::endl;
#endif
//---Calculation--------------------------------------------------------------
//clock_gettime(CLOCK_MONOTONIC, &start);
int blockSize = 128;
int nBlocks = bitmaskCnt/blockSize + (bitmaskCnt%blockSize == 0?0:1);
hipLaunchKernelGGL(( processDataArray) , dim3(nBlocks), dim3(blockSize) , 0, 0, bitmaskCnt, c, dim, data, bitmasks, nVT);
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Calculation done." << std::endl;
#endif
gpuErrchk(hipMemcpy(bm, bitmasks, sbitmasksB, hipMemcpyDeviceToHost));
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Result retrieved." << std::endl;
#endif
int addOne = 0;
int addTris = 0;
if (bm[2*bitmaskCnt-1]) {
addOne = 1; //doing exclusive scan, if last element is set, we need one more space
addTris = bm[2*bitmaskCnt-1]; //and the tris of course
}
uint *pfArrays = new uint[2*bitmaskCnt];
thrust::exclusive_scan(&bm[bitmaskCnt], bm+2*bitmaskCnt, &pfArrays[bitmaskCnt]); //index of next cubes
thrust::exclusive_scan(&bm[0], bm+bitmaskCnt, &pfArrays[0]);
uint numIsoSurfaceCubes=pfArrays[2*bitmaskCnt-1]+addOne;
uint numVertices=pfArrays[bitmaskCnt-1]+addTris;
#ifdef CUDA_OUTPUT_STEPS
cout << " CUDA: numIsoSurfaceCubes: " << numIsoSurfaceCubes << endl;
cout << " CUDA: numVertices: " << numVertices << endl;
#endif
size_t iAB, vAB, gAB, pfAB;
uint *pfA, *iA;
double *vA, *vertexArray = new double[numVertices*3];
double *gA, *gradientArray = new double[numVertices*3];
iAB = numIsoSurfaceCubes*sizeof(uint);
vAB = numVertices*3*sizeof(double);
gAB = numVertices*3*sizeof(double);
pfAB = sbitmasksB;
gpuErrchk(hipMalloc( (void**)&iA, iAB));
gpuErrchk(hipMalloc( (void**)&vA, vAB));
gpuErrchk(hipMalloc( (void**)&gA, gAB));
gpuErrchk(hipMalloc((void**)&pfA, pfAB));
gpuErrchk(hipMemcpy( pfA, pfArrays, pfAB, hipMemcpyHostToDevice)); //copy prefix array for second pass
hipLaunchKernelGGL(( getIsoIndices) , dim3(nBlocks), dim3(blockSize) , 0, 0, bitmaskCnt, iA, bitmasks, pfA);
nBlocks = numIsoSurfaceCubes/blockSize + (numIsoSurfaceCubes%blockSize == 0?0:1);
hipLaunchKernelGGL(( processIsoCubes) , dim3(nBlocks), dim3(blockSize) , 0, 0, numIsoSurfaceCubes, c, iA, pfA, data, g, vA, gA, nVT, tT, bitmasks, dim);
gpuErrchk(hipMemcpy(vertexArray, vA, vAB, hipMemcpyDeviceToHost ));
gpuErrchk(hipMemcpy(gradientArray, gA, gAB, hipMemcpyDeviceToHost ));
//clock_gettime(CLOCK_MONOTONIC, &end);
//---Cleanup------------------------------------------------------------------
hipFree(data);
hipFree(g);
hipFree(dim);
hipFree(bitmasks);
hipFree(eT);
hipFree(tT);
hipFree(nVT);
hipFree(pfA);
hipFree(iA);
hipFree(vA);
hipFree(gA);
delete [] pfArrays;
delete [] bm;
*out_verts = vertexArray;
*out_grads = gradientArray; // hand the per-vertex gradients back to the caller as well
*out_numVerts = numVertices;
#ifdef CUDA_OUTPUT_STEPS
cout << " CUDA: Cuda calculation done." << endl;
#endif
//cout << "Time used for GPU calculation in milliseconds:" << double(end.tv_nsec-start.tv_nsec)/1000000.0 << endl;
return true;
}
|
4e874a536ac2bf7faf1376dc6bdd7321b3c31985.cu
|
// System includes
#include "cudaRidge.h"
#include "mctable.h"
#include "kernels.cu"
//#define CUDA_OUTPUT_STEPS
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
extern "C" bool cudaGradient(const double* d, uint dim[3], double** gradients) {
// for (uint i=0; i<dim[0]*dim[1]*dim[2]; ++i)
// cout << d[i] << endl;
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Starting cuda method..." << std::endl;
#endif
if (cudaSetDevice(0) != cudaSuccess) {
std::cout << " CUDA: Could not set cuda device." << std::endl;
}
else {
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Cuda device set." << std::endl;
#endif
}
//timespec start, end;
uint numPoints=dim[0]*dim[1]*dim[2];
uint sg = 3*numPoints*sizeof(double);
uint sd = numPoints*sizeof(double);
uint sdim = 3*sizeof(uint);
uint *dim_d;
double *g_h = new double[3*numPoints];
double *g_d, *d_d;
//clock_gettime(CLOCK_MONOTONIC, &start);
gpuErrchk(cudaMalloc((void**)&d_d, sd));
gpuErrchk(cudaMalloc((void**)&g_d, sg));
gpuErrchk(cudaMalloc((void**)&dim_d, sdim));
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Memory allocated." << std::endl;
#endif
gpuErrchk(cudaMemcpy(d_d, d, sd, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(dim_d, dim, sdim, cudaMemcpyHostToDevice));
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Memory copied." << std::endl;
#endif
uint blockSize = 128;
int nBlocks = numPoints/blockSize + (numPoints%blockSize == 0?0:1);
processPoints <<< nBlocks, blockSize >>> (numPoints, d_d, g_d, dim_d);
gpuErrchk(cudaMemcpy(g_h, g_d, sg, cudaMemcpyDeviceToHost));
//clock_gettime(CLOCK_MONOTONIC, &end);
gpuErrchk(cudaFree(dim_d));
gpuErrchk(cudaFree(d_d));
gpuErrchk(cudaFree(g_d));
*gradients = g_h;
//cout << "Time used for GPU calculation in milliseconds:" << double(end.tv_nsec-start.tv_nsec)/1000000.0 << endl;
return true;
}
extern "C" bool cudaIsosurface(const double c, const double* in_data, double* in_grads, int in_size[3], uint* out_numVerts, double** out_verts, double** out_grads)
{
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Starting cuda method..." << std::endl;
#endif
if (cudaSetDevice(0) != cudaSuccess) {
std::cout << " CUDA: Could not set cuda device." << std::endl;
}
else {
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Cuda device set." << std::endl;
#endif
}
//---Init---------------------------------------------------------------------
//timespec start, end;
uint sdataB, sgradsB, sdimB, sbitmasksB, eTB, tTB, nVTB;
double *data, *g;
int *dim;
uint *bitmasks, bitmaskCnt, *eT, *tT, *nVT;
bitmaskCnt = in_size[0]*in_size[1]*in_size[2];
uint *bm = new uint[2*bitmaskCnt];
sdataB = in_size[0]*in_size[1]*in_size[2]*sizeof(double);
sgradsB = 3*in_size[0]*in_size[1]*in_size[2]*sizeof(double);
sdimB = 3*sizeof(int);
sbitmasksB = 2*bitmaskCnt*sizeof(uint);
eTB = 256*sizeof(uint);
tTB = 256*16*sizeof(uint);
nVTB = 256*sizeof(uint);
gpuErrchk(cudaMalloc( (void**)&data, sdataB));
gpuErrchk(cudaMalloc( (void**)&dim, sdimB));
gpuErrchk(cudaMalloc( (void**)&bitmasks, sbitmasksB));
gpuErrchk(cudaMalloc( (void**)&eT, eTB));
gpuErrchk(cudaMalloc( (void**)&tT, tTB));
gpuErrchk(cudaMalloc( (void**)&nVT, nVTB));
gpuErrchk(cudaMalloc( (void**)&g, sgradsB));
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Memory allocated." << std::endl;
#endif
gpuErrchk(cudaMemcpy( data, in_data, sdataB, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy( dim, in_size, sdimB, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy( bitmasks, bm, sbitmasksB, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy( eT, edgeTable, eTB, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy( tT, triTable, tTB, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy( nVT, numVertsTable, nVTB, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy( g, in_grads, sgradsB, cudaMemcpyHostToDevice));
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Memory copied." << std::endl;
#endif
//---Calculation--------------------------------------------------------------
//clock_gettime(CLOCK_MONOTONIC, &start);
int blockSize = 128;
int nBlocks = bitmaskCnt/blockSize + (bitmaskCnt%blockSize == 0?0:1);
processDataArray <<< nBlocks, blockSize >>> (bitmaskCnt, c, dim, data, bitmasks, nVT);
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Calculation done." << std::endl;
#endif
gpuErrchk(cudaMemcpy(bm, bitmasks, sbitmasksB, cudaMemcpyDeviceToHost));
#ifdef CUDA_OUTPUT_STEPS
std::cout << " CUDA: Result retrieved." << std::endl;
#endif
int addOne = 0;
int addTris = 0;
if (bm[2*bitmaskCnt-1]) {
addOne = 1; //doing exclusive scan, if last element is set, we need one more space
addTris = bm[2*bitmaskCnt-1]; //and the tris of course
}
uint *pfArrays = new uint[2*bitmaskCnt];
thrust::exclusive_scan(&bm[bitmaskCnt], bm+2*bitmaskCnt, &pfArrays[bitmaskCnt]); //index of next cubes
thrust::exclusive_scan(&bm[0], bm+bitmaskCnt, &pfArrays[0]);
uint numIsoSurfaceCubes=pfArrays[2*bitmaskCnt-1]+addOne;
uint numVertices=pfArrays[bitmaskCnt-1]+addTris;
#ifdef CUDA_OUTPUT_STEPS
cout << " CUDA: numIsoSurfaceCubes: " << numIsoSurfaceCubes << endl;
cout << " CUDA: numVertices: " << numVertices << endl;
#endif
size_t iAB, vAB, gAB, pfAB;
uint *pfA, *iA;
double *vA, *vertexArray = new double[numVertices*3];
double *gA, *gradientArray = new double[numVertices*3];
iAB = numIsoSurfaceCubes*sizeof(uint);
vAB = numVertices*3*sizeof(double);
gAB = numVertices*3*sizeof(double);
pfAB = sbitmasksB;
gpuErrchk(cudaMalloc( (void**)&iA, iAB));
gpuErrchk(cudaMalloc( (void**)&vA, vAB));
gpuErrchk(cudaMalloc( (void**)&gA, gAB));
gpuErrchk(cudaMalloc((void**)&pfA, pfAB));
gpuErrchk(cudaMemcpy( pfA, pfArrays, pfAB, cudaMemcpyHostToDevice)); //copy prefix array for second pass
getIsoIndices <<< nBlocks, blockSize >>> (bitmaskCnt, iA, bitmasks, pfA);
nBlocks = numIsoSurfaceCubes/blockSize + (numIsoSurfaceCubes%blockSize == 0?0:1);
processIsoCubes <<< nBlocks, blockSize >>> (numIsoSurfaceCubes, c, iA, pfA, data, g, vA, gA, nVT, tT, bitmasks, dim);
gpuErrchk(cudaMemcpy(vertexArray, vA, vAB, cudaMemcpyDeviceToHost ));
gpuErrchk(cudaMemcpy(gradientArray, gA, gAB, cudaMemcpyDeviceToHost ));
//clock_gettime(CLOCK_MONOTONIC, &end);
//---Cleanup------------------------------------------------------------------
cudaFree(data);
cudaFree(g);
cudaFree(dim);
cudaFree(bitmasks);
cudaFree(eT);
cudaFree(tT);
cudaFree(nVT);
cudaFree(pfA);
cudaFree(iA);
cudaFree(vA);
cudaFree(gA);
delete [] pfArrays;
delete [] bm;
*out_verts = vertexArray;
*out_grads = gradientArray; // hand the per-vertex gradients back to the caller as well
*out_numVerts = numVertices;
#ifdef CUDA_OUTPUT_STEPS
cout << " CUDA: Cuda calculation done." << endl;
#endif
//cout << "Time used for GPU calculation in milliseconds:" << double(end.tv_nsec-start.tv_nsec)/1000000.0 << endl;
return true;
}
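// Usage sketch (not part of the original translation unit): chains the two
// entry points above to extract an isosurface from a small synthetic scalar
// field. The function name exampleExtractIsosurface and the placeholder field
// are hypothetical; the calls follow the signatures defined above, and the
// returned vertex/gradient arrays are new[]-allocated and owned by the caller.
void exampleExtractIsosurface(double iso) {
uint dim[3] = {32, 32, 32};
int idim[3] = {32, 32, 32};
uint numPoints = dim[0]*dim[1]*dim[2];
double* field = new double[numPoints];
for (uint i = 0; i < numPoints; ++i) field[i] = double(i % 17); // placeholder data
double* gradients = NULL;
cudaGradient(field, dim, &gradients); // 3 doubles per grid point
uint numVerts = 0;
double *verts = NULL, *grads = NULL;
cudaIsosurface(iso, field, gradients, idim, &numVerts, &verts, &grads);
std::cout << "extracted " << numVerts << " vertices at iso value " << iso << std::endl;
delete [] field;
delete [] gradients;
delete [] verts;
delete [] grads;
}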
|
458a1d008a0d9c10141ab3f936210709670c752c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2021 by Contributors
*/
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <GPUTreeShap/gpu_treeshap.h>
#include <memory>
#include "xgboost/data.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
#include "xgboost/host_device_vector.h"
#include "../gbm/gbtree_model.h"
#include "../data/ellpack_page.cuh"
#include "../data/device_adapter.cuh"
#include "../common/common.h"
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
struct SparsePageView {
common::Span<const Entry> d_data;
common::Span<const bst_row_t> d_row_ptr;
bst_feature_t num_features;
SparsePageView() = default;
XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
common::Span<const bst_row_t> row_ptr,
bst_feature_t num_features)
: d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {}
__device__ float GetElement(size_t ridx, size_t fidx) const {
// Binary search
auto begin_ptr = d_data.begin() + d_row_ptr[ridx];
auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1];
if (end_ptr - begin_ptr == this->NumCols()) {
// Bypass span check for dense data
return d_data.data()[d_row_ptr[ridx] + fidx].fvalue;
}
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; }
XGBOOST_DEVICE size_t NumCols() const { return num_features; }
};
struct SparsePageLoader {
bool use_shared;
SparsePageView data;
float* smem;
size_t entry_start;
__device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start)
: use_shared(use_shared),
data(data),
entry_start(entry_start) {
extern __shared__ float _smem[];
smem = _smem;
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * data.num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = data.d_row_ptr[global_idx];
bst_uint elem_end = data.d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = data.d_data[elem_idx - entry_start];
smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * data.num_features + fidx];
} else {
return data.GetElement(ridx, fidx);
}
}
};
struct EllpackLoader {
EllpackDeviceAccessor const& matrix;
XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool,
bst_feature_t, bst_row_t, size_t)
: matrix{m} {}
__device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
auto gidx = matrix.GetBinIndex(ridx, fidx);
if (gidx == -1) {
return nan("");
}
// The gradient index needs to be shifted by one as min values are not included in the
// cuts.
if (gidx == matrix.feature_segments[fidx]) {
return matrix.min_fvalue[fidx];
}
return matrix.gidx_fvalue_map[gidx - 1];
}
};
template <typename Batch>
struct DeviceAdapterLoader {
Batch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
using BatchT = Batch;
XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows,
size_t entry_start) :
batch{batch},
columns{num_features},
use_shared{use_shared} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
auto beg = global_idx * columns;
auto end = (global_idx + 1) * columns;
for (size_t i = beg; i < end; ++i) {
smem[threadIdx.x * num_features + (i - beg)] = batch.GetElement(i).value;
}
}
}
__syncthreads();
}
XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
return batch.GetElement(ridx * columns + fidx).value;
}
};
template <typename Loader>
__device__ float GetLeafWeight(bst_row_t ridx, const RegTree::Node* tree,
common::Span<FeatureType const> split_types,
common::Span<RegTree::Segment const> d_cat_ptrs,
common::Span<uint32_t const> d_categories,
Loader* loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader->GetElement(ridx, n.SplitIndex());
// Missing value
if (common::CheckNAN(fvalue)) {
nidx = n.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(split_types, nidx)) {
auto categories = d_categories.subspan(d_cat_ptrs[nidx].beg,
d_cat_ptrs[nidx].size);
go_left = Decision(categories, common::AsCat(fvalue));
} else {
go_left = fvalue < n.SplitCond();
}
if (go_left) {
nidx = n.LeftChild();
} else {
nidx = n.RightChild();
}
}
n = tree[nidx];
}
return tree[nidx].LeafValue();
}
template <typename Loader>
__device__ bst_node_t GetLeafIndex(bst_row_t ridx, const RegTree::Node* tree,
Loader const& loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader.GetElement(ridx, n.SplitIndex());
// Missing value
if (isnan(fvalue)) {
nidx = n.DefaultChild();
n = tree[nidx];
} else {
if (fvalue < n.SplitCond()) {
nidx = n.LeftChild();
n = tree[nidx];
} else {
nidx = n.RightChild();
n = tree[nidx];
}
}
}
return nidx;
}
template <typename Loader, typename Data>
__global__ void PredictLeafKernel(Data data,
common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
size_t tree_begin, size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start, bool use_shared) {
bst_row_t ridx = blockDim.x * blockIdx.x + threadIdx.x;
if (ridx >= num_rows) {
return;
}
Loader loader(data, use_shared, num_features, num_rows, entry_start);
for (int tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]];
auto leaf = GetLeafIndex(ridx, d_tree, loader);
d_out_predictions[ridx * (tree_end - tree_begin) + tree_idx] = leaf;
}
}
template <typename Loader, typename Data>
__global__ void
PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<int const> d_tree_group,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories, size_t tree_begin,
size_t tree_end, size_t num_features, size_t num_rows,
size_t entry_start, bool use_shared, int num_group) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
Loader loader(data, use_shared, num_features, num_rows, entry_start);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const RegTree::Node* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
auto tree_cat_ptrs = d_cat_node_segments.subspan(
d_tree_segments[tree_idx - tree_begin],
d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin]);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
auto tree_split_types =
d_tree_split_types.subspan(d_tree_segments[tree_idx - tree_begin],
d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin]);
float leaf = GetLeafWeight(global_idx, d_tree, tree_split_types,
tree_cat_ptrs,
tree_categories,
&loader);
sum += leaf;
}
d_out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const RegTree::Node* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
auto tree_cat_ptrs = d_cat_node_segments.subspan(
d_tree_segments[tree_idx - tree_begin],
d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin]);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, d_tree_split_types,
tree_cat_ptrs,
tree_categories,
&loader);
}
}
}
class DeviceModel {
public:
// Need to lazily construct the vectors because GPU id is only known at runtime
HostDeviceVector<RTreeNodeStat> stats;
HostDeviceVector<size_t> tree_segments;
HostDeviceVector<RegTree::Node> nodes;
HostDeviceVector<int> tree_group;
HostDeviceVector<FeatureType> split_types;
// Pointer to each tree, segmenting the node array.
HostDeviceVector<uint32_t> categories_tree_segments;
// Pointer to each node, segmenting categories array.
HostDeviceVector<RegTree::Segment> categories_node_segments;
HostDeviceVector<uint32_t> categories;
size_t tree_beg_; // NOLINT
size_t tree_end_; // NOLINT
int num_group;
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
dh::safe_cuda(hipSetDevice(gpu_id));
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
auto& h_tree_segments = tree_segments.HostVector();
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(),
gpu_id));
stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(),
RTreeNodeStat(), gpu_id));
auto d_nodes = nodes.DevicePointer();
auto d_stats = stats.DevicePointer();
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
auto& src_stats = model.trees.at(tree_idx)->GetStats();
dh::safe_cuda(hipMemcpyAsync(
d_nodes + h_tree_segments[tree_idx - tree_begin], src_nodes.data(),
sizeof(RegTree::Node) * src_nodes.size(), hipMemcpyDefault));
dh::safe_cuda(hipMemcpyAsync(
d_stats + h_tree_segments[tree_idx - tree_begin], src_stats.data(),
sizeof(RTreeNodeStat) * src_stats.size(), hipMemcpyDefault));
}
tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id));
auto& h_tree_group = tree_group.HostVector();
std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
// Initialize categorical splits.
split_types.SetDevice(gpu_id);
std::vector<FeatureType>& h_split_types = split_types.HostVector();
h_split_types.resize(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_st = model.trees.at(tree_idx)->GetSplitTypes();
std::copy(src_st.cbegin(), src_st.cend(),
h_split_types.begin() + h_tree_segments[tree_idx - tree_begin]);
}
categories = HostDeviceVector<uint32_t>({}, gpu_id);
categories_tree_segments = HostDeviceVector<uint32_t>(1, 0, gpu_id);
std::vector<uint32_t> &h_categories = categories.HostVector();
std::vector<uint32_t> &h_split_cat_segments = categories_tree_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_cats = model.trees.at(tree_idx)->GetSplitCategories();
size_t orig_size = h_categories.size();
h_categories.resize(orig_size + src_cats.size());
std::copy(src_cats.cbegin(), src_cats.cend(),
h_categories.begin() + orig_size);
h_split_cat_segments.push_back(h_categories.size());
}
categories_node_segments =
HostDeviceVector<RegTree::Segment>(h_tree_segments.back(), {}, gpu_id);
std::vector<RegTree::Segment> &h_categories_node_segments =
categories_node_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const &src_cats_ptr = model.trees.at(tree_idx)->GetSplitCategoriesPtr();
std::copy(src_cats_ptr.cbegin(), src_cats_ptr.cend(),
h_categories_node_segments.begin() +
h_tree_segments[tree_idx - tree_begin]);
}
this->tree_beg_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group = model.learner_model_param->num_output_group;
}
};
struct PathInfo {
int64_t leaf_position; // -1 not a leaf
size_t length;
size_t tree_idx;
};
// Transform model into path element form for GPUTreeShap
void ExtractPaths(dh::device_vector<gpu_treeshap::PathElement>* paths,
const gbm::GBTreeModel& model, size_t tree_limit,
int gpu_id) {
DeviceModel device_model;
device_model.Init(model, 0, tree_limit, gpu_id);
dh::caching_device_vector<PathInfo> info(device_model.nodes.Size());
dh::XGBCachingDeviceAllocator<PathInfo> alloc;
auto d_nodes = device_model.nodes.ConstDeviceSpan();
auto d_tree_segments = device_model.tree_segments.ConstDeviceSpan();
auto nodes_transform = dh::MakeTransformIterator<PathInfo>(
thrust::make_counting_iterator(0ull), [=] __device__(size_t idx) {
auto n = d_nodes[idx];
if (!n.IsLeaf() || n.IsDeleted()) {
return PathInfo{-1, 0, 0};
}
size_t tree_idx =
dh::SegmentId(d_tree_segments.begin(), d_tree_segments.end(), idx);
size_t tree_offset = d_tree_segments[tree_idx];
size_t path_length = 1;
while (!n.IsRoot()) {
n = d_nodes[n.Parent() + tree_offset];
path_length++;
}
return PathInfo{int64_t(idx), path_length, tree_idx};
});
auto end = thrust::copy_if(
thrust::hip::par(alloc), nodes_transform,
nodes_transform + d_nodes.size(), info.begin(),
[=] __device__(const PathInfo& e) { return e.leaf_position != -1; });
info.resize(end - info.begin());
auto length_iterator = dh::MakeTransformIterator<size_t>(
info.begin(),
[=] __device__(const PathInfo& info) { return info.length; });
dh::caching_device_vector<size_t> path_segments(info.size() + 1);
thrust::exclusive_scan(thrust::hip::par(alloc), length_iterator,
length_iterator + info.size() + 1,
path_segments.begin());
paths->resize(path_segments.back());
auto d_paths = paths->data().get();
auto d_info = info.data().get();
auto d_stats = device_model.stats.ConstDeviceSpan();
auto d_tree_group = device_model.tree_group.ConstDeviceSpan();
auto d_path_segments = path_segments.data().get();
dh::LaunchN(gpu_id, info.size(), [=] __device__(size_t idx) {
auto path_info = d_info[idx];
size_t tree_offset = d_tree_segments[path_info.tree_idx];
int group = d_tree_group[path_info.tree_idx];
size_t child_idx = path_info.leaf_position;
auto child = d_nodes[child_idx];
float v = child.LeafValue();
const float inf = std::numeric_limits<float>::infinity();
size_t output_position = d_path_segments[idx + 1] - 1;
while (!child.IsRoot()) {
size_t parent_idx = tree_offset + child.Parent();
double child_cover = d_stats[child_idx].sum_hess;
double parent_cover = d_stats[parent_idx].sum_hess;
double zero_fraction = child_cover / parent_cover;
auto parent = d_nodes[parent_idx];
bool is_left_path = (tree_offset + parent.LeftChild()) == child_idx;
bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) ||
(parent.DefaultLeft() && is_left_path);
float lower_bound = is_left_path ? -inf : parent.SplitCond();
float upper_bound = is_left_path ? parent.SplitCond() : inf;
d_paths[output_position--] = {
idx, parent.SplitIndex(), group, lower_bound,
upper_bound, is_missing_path, zero_fraction, v};
child_idx = parent_idx;
child = parent;
}
// Root node has feature -1
d_paths[output_position] = {idx, -1, group, -inf, inf, false, 1.0, v};
});
}
namespace {
template <size_t kBlockThreads>
size_t SharedMemoryBytes(size_t cols, size_t max_shared_memory_bytes) {
// max_shared_memory_bytes is never expected to be 0 here.
CHECK_GT(max_shared_memory_bytes, 0);
size_t shared_memory_bytes =
static_cast<size_t>(sizeof(float) * cols * kBlockThreads);
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
}
return shared_memory_bytes;
}
} // anonymous namespace
class GPUPredictor : public xgboost::Predictor {
private:
void PredictInternal(const SparsePage& batch,
DeviceModel const& model,
size_t num_features,
HostDeviceVector<bst_float>* predictions,
size_t batch_offset) const {
batch.offset.SetDevice(generic_param_->gpu_id);
batch.data.SetDevice(generic_param_->gpu_id);
const uint32_t BLOCK_THREADS = 128;
size_t num_rows = batch.Size();
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
auto max_shared_memory_bytes = ConfigureDevice(generic_param_->gpu_id);
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(num_features, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
num_features);
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<SparsePageLoader, SparsePageView>, data,
model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
num_features, num_rows, entry_start, use_shared, model.num_group);
}
void PredictInternal(EllpackDeviceAccessor const& batch,
DeviceModel const& model,
HostDeviceVector<bst_float>* out_preds,
size_t batch_offset) const {
const uint32_t BLOCK_THREADS = 256;
size_t num_rows = batch.n_rows;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
DeviceModel d_model;
bool use_shared = false;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch,
model.nodes.ConstDeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
batch.NumFeatures(), num_rows, entry_start, use_shared,
model.num_group);
}
void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) const {
if (tree_end - tree_begin == 0) {
return;
}
out_preds->SetDevice(generic_param_->gpu_id);
auto const& info = dmat->Info();
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, generic_param_->gpu_id);
if (dmat->PageExists<SparsePage>()) {
size_t batch_offset = 0;
for (auto &batch : dmat->GetBatches<SparsePage>()) {
this->PredictInternal(batch, d_model, model.learner_model_param->num_feature,
out_preds, batch_offset);
batch_offset += batch.Size() * model.learner_model_param->num_output_group;
}
} else {
size_t batch_offset = 0;
for (auto const& page : dmat->GetBatches<EllpackPage>()) {
this->PredictInternal(
page.Impl()->GetDeviceAccessor(generic_param_->gpu_id),
d_model,
out_preds,
batch_offset);
batch_offset += page.Impl()->n_rows;
}
}
}
public:
explicit GPUPredictor(GenericParameter const* generic_param) :
Predictor::Predictor{generic_param} {}
~GPUPredictor() override {
if (generic_param_->gpu_id >= 0 && generic_param_->gpu_id < common::AllVisibleGPUs()) {
dh::safe_cuda(hipSetDevice(generic_param_->gpu_id));
}
}
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
const gbm::GBTreeModel& model, uint32_t tree_begin,
uint32_t tree_end = 0) const override {
int device = generic_param_->gpu_id;
CHECK_GE(device, 0) << "Set `gpu_id' to positive value for processing GPU data.";
auto* out_preds = &predts->predictions;
if (out_preds->Size() == 0 && dmat->Info().num_row_ != 0) {
CHECK_EQ(predts->version, 0);
}
if (tree_end == 0) {
tree_end = model.trees.size();
}
if (predts->version == 0) {
// out_preds->Size() can be non-zero as it's initialized here before any tree is
// built at the 0^th iterator.
this->InitOutPredictions(dmat->Info(), out_preds, model);
}
if (tree_end - tree_begin == 0) {
return;
}
this->DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
template <typename Adapter, typename Loader>
void DispatchedInplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel &model, float,
PredictionCacheEntry *out_preds,
uint32_t tree_begin, uint32_t tree_end) const {
uint32_t const output_groups = model.learner_model_param->num_output_group;
auto m = dmlc::get<std::shared_ptr<Adapter>>(x);
CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature)
<< "Number of columns in data must equal to trained model.";
CHECK_EQ(dh::CurrentDevice(), m->DeviceIdx())
<< "XGBoost is running on device: " << this->generic_param_->gpu_id << ", "
<< "but data is on: " << m->DeviceIdx();
if (p_m) {
p_m->Info().num_row_ = m->NumRows();
this->InitOutPredictions(p_m->Info(), &(out_preds->predictions), model);
} else {
MetaInfo info;
info.num_row_ = m->NumRows();
this->InitOutPredictions(info, &(out_preds->predictions), model);
}
out_preds->predictions.SetDevice(m->DeviceIdx());
const uint32_t BLOCK_THREADS = 128;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(m->NumRows(), BLOCK_THREADS));
auto max_shared_memory_bytes = dh::MaxSharedMemory(m->DeviceIdx());
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(m->NumColumns(), max_shared_memory_bytes);
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, m->DeviceIdx());
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<Loader, typename Loader::BatchT>, m->Value(),
d_model.nodes.ConstDeviceSpan(), out_preds->predictions.DeviceSpan(),
d_model.tree_segments.ConstDeviceSpan(), d_model.tree_group.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(), tree_begin, tree_end, m->NumColumns(),
m->NumRows(), entry_start, use_shared, output_groups);
}
bool InplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel &model, float missing,
PredictionCacheEntry *out_preds, uint32_t tree_begin,
unsigned tree_end) const override {
if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) {
this->DispatchedInplacePredict<
data::CupyAdapter, DeviceAdapterLoader<data::CupyAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) {
this->DispatchedInplacePredict<
data::CudfAdapter, DeviceAdapterLoader<data::CudfAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else {
return false;
}
return true;
}
void PredictContribution(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned tree_end,
std::vector<bst_float>*,
bool approximate, int,
unsigned) const override {
if (approximate) {
LOG(FATAL) << "Approximated contribution is not implemented in GPU Predictor.";
}
dh::safe_cuda(hipSetDevice(generic_param_->gpu_id));
out_contribs->SetDevice(generic_param_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement> device_paths;
ExtractPaths(&device_paths, model, tree_end, generic_param_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
gpu_treeshap::GPUTreeShap(
X, device_paths.begin(), device_paths.end(), ngroup,
phis.data() + batch.base_rowid * contributions_columns, phis.size());
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(generic_param_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
dh::LaunchN(
generic_param_->gpu_id,
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
phis[(idx + 1) * contributions_columns - 1] +=
margin.empty() ? base_score : margin[idx];
});
}
void PredictInteractionContributions(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned tree_end,
std::vector<bst_float>*,
bool approximate) const override {
if (approximate) {
LOG(FATAL) << "[Internal error]: " << __func__
<< " approximate is not implemented in GPU Predictor.";
}
dh::safe_cuda(hipSetDevice(generic_param_->gpu_id));
out_contribs->SetDevice(generic_param_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement> device_paths;
ExtractPaths(&device_paths, model, tree_end, generic_param_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
gpu_treeshap::GPUTreeShapInteractions(
X, device_paths.begin(), device_paths.end(), ngroup,
phis.data() + batch.base_rowid * contributions_columns, phis.size());
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(generic_param_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
size_t n_features = model.learner_model_param->num_feature;
dh::LaunchN(
generic_param_->gpu_id,
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
size_t group = idx % ngroup;
size_t row_idx = idx / ngroup;
phis[gpu_treeshap::IndexPhiInteractions(
row_idx, ngroup, group, n_features, n_features, n_features)] +=
margin.empty() ? base_score : margin[idx];
});
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n_classes = model.learner_model_param->num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->SetDevice(generic_param_->gpu_id);
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(base_margin.Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.learner_model_param->base_score);
}
}
void PredictInstance(const SparsePage::Inst&,
std::vector<bst_float>*,
const gbm::GBTreeModel&, unsigned) const override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictLeaf(DMatrix *p_fmat, HostDeviceVector<bst_float> *predictions,
const gbm::GBTreeModel &model,
unsigned tree_end) const override {
dh::safe_cuda(hipSetDevice(generic_param_->gpu_id));
auto max_shared_memory_bytes = ConfigureDevice(generic_param_->gpu_id);
const MetaInfo& info = p_fmat->Info();
constexpr uint32_t kBlockThreads = 128;
size_t shared_memory_bytes = SharedMemoryBytes<kBlockThreads>(
info.num_col_, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
bst_feature_t num_features = info.num_col_;
bst_row_t num_rows = info.num_row_;
size_t entry_start = 0;
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
predictions->SetDevice(generic_param_->gpu_id);
predictions->Resize(num_rows * tree_end);
DeviceModel d_model;
d_model.Init(model, 0, tree_end, this->generic_param_->gpu_id);
if (p_fmat->PageExists<SparsePage>()) {
for (auto const& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
bst_row_t batch_offset = 0;
SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<SparsePageLoader, SparsePageView>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared);
batch_offset += batch.Size();
}
} else {
for (auto const& batch : p_fmat->GetBatches<EllpackPage>()) {
bst_row_t batch_offset = 0;
EllpackDeviceAccessor data{batch.Impl()->GetDeviceAccessor(generic_param_->gpu_id)};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<EllpackLoader, EllpackDeviceAccessor>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared);
batch_offset += batch.Size();
}
}
}
void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
Predictor::Configure(cfg);
}
private:
/*! \brief Reconfigure the device when GPU is changed. */
static size_t ConfigureDevice(int device) {
if (device >= 0) {
return dh::MaxSharedMemory(device);
}
return 0;
}
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](GenericParameter const* generic_param) {
return new GPUPredictor(generic_param);
});
} // namespace predictor
} // namespace xgboost
|
458a1d008a0d9c10141ab3f936210709670c752c.cu
|
/*!
* Copyright 2017-2021 by Contributors
*/
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <GPUTreeShap/gpu_treeshap.h>
#include <memory>
#include "xgboost/data.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
#include "xgboost/host_device_vector.h"
#include "../gbm/gbtree_model.h"
#include "../data/ellpack_page.cuh"
#include "../data/device_adapter.cuh"
#include "../common/common.h"
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
struct SparsePageView {
common::Span<const Entry> d_data;
common::Span<const bst_row_t> d_row_ptr;
bst_feature_t num_features;
SparsePageView() = default;
XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
common::Span<const bst_row_t> row_ptr,
bst_feature_t num_features)
: d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {}
__device__ float GetElement(size_t ridx, size_t fidx) const {
// Binary search
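    // The search runs over the CSR slice [d_row_ptr[ridx], d_row_ptr[ridx + 1])
    // of this row, whose entries are sorted by feature index; if fidx is not
    // present the loop exits without a match and NaN is returned below, which
    // callers treat as a missing value.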
auto begin_ptr = d_data.begin() + d_row_ptr[ridx];
auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1];
if (end_ptr - begin_ptr == this->NumCols()) {
// Bypass span check for dense data
return d_data.data()[d_row_ptr[ridx] + fidx].fvalue;
}
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; }
XGBOOST_DEVICE size_t NumCols() const { return num_features; }
};
struct SparsePageLoader {
bool use_shared;
SparsePageView data;
float* smem;
size_t entry_start;
__device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start)
: use_shared(use_shared),
data(data),
entry_start(entry_start) {
extern __shared__ float _smem[];
smem = _smem;
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * data.num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = data.d_row_ptr[global_idx];
bst_uint elem_end = data.d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = data.d_data[elem_idx - entry_start];
smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * data.num_features + fidx];
} else {
return data.GetElement(ridx, fidx);
}
}
};
struct EllpackLoader {
EllpackDeviceAccessor const& matrix;
XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool,
bst_feature_t, bst_row_t, size_t)
: matrix{m} {}
__device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
auto gidx = matrix.GetBinIndex(ridx, fidx);
if (gidx == -1) {
return nan("");
}
// The gradient index needs to be shifted by one as min values are not included in the
// cuts.
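    // For example, if feature_segments[fidx] == 10, bin 10 (the first bin of
    // this feature) maps to min_fvalue[fidx], while bin 12 maps to
    // gidx_fvalue_map[11].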
if (gidx == matrix.feature_segments[fidx]) {
return matrix.min_fvalue[fidx];
}
return matrix.gidx_fvalue_map[gidx - 1];
}
};
template <typename Batch>
struct DeviceAdapterLoader {
Batch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
using BatchT = Batch;
XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows,
size_t entry_start) :
batch{batch},
columns{num_features},
use_shared{use_shared} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
auto beg = global_idx * columns;
auto end = (global_idx + 1) * columns;
for (size_t i = beg; i < end; ++i) {
smem[threadIdx.x * num_features + (i - beg)] = batch.GetElement(i).value;
}
}
}
__syncthreads();
}
XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
return batch.GetElement(ridx * columns + fidx).value;
}
};
template <typename Loader>
__device__ float GetLeafWeight(bst_row_t ridx, const RegTree::Node* tree,
common::Span<FeatureType const> split_types,
common::Span<RegTree::Segment const> d_cat_ptrs,
common::Span<uint32_t const> d_categories,
Loader* loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader->GetElement(ridx, n.SplitIndex());
// Missing value
if (common::CheckNAN(fvalue)) {
nidx = n.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(split_types, nidx)) {
auto categories = d_categories.subspan(d_cat_ptrs[nidx].beg,
d_cat_ptrs[nidx].size);
go_left = Decision(categories, common::AsCat(fvalue));
} else {
go_left = fvalue < n.SplitCond();
}
if (go_left) {
nidx = n.LeftChild();
} else {
nidx = n.RightChild();
}
}
n = tree[nidx];
}
return tree[nidx].LeafValue();
}
template <typename Loader>
__device__ bst_node_t GetLeafIndex(bst_row_t ridx, const RegTree::Node* tree,
Loader const& loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader.GetElement(ridx, n.SplitIndex());
// Missing value
if (isnan(fvalue)) {
nidx = n.DefaultChild();
n = tree[nidx];
} else {
if (fvalue < n.SplitCond()) {
nidx = n.LeftChild();
n = tree[nidx];
} else {
nidx = n.RightChild();
n = tree[nidx];
}
}
}
return nidx;
}
template <typename Loader, typename Data>
__global__ void PredictLeafKernel(Data data,
common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
size_t tree_begin, size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start, bool use_shared) {
bst_row_t ridx = blockDim.x * blockIdx.x + threadIdx.x;
if (ridx >= num_rows) {
return;
}
Loader loader(data, use_shared, num_features, num_rows, entry_start);
for (int tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]];
auto leaf = GetLeafIndex(ridx, d_tree, loader);
d_out_predictions[ridx * (tree_end - tree_begin) + tree_idx] = leaf;
}
}
template <typename Loader, typename Data>
__global__ void
PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<int const> d_tree_group,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories, size_t tree_begin,
size_t tree_end, size_t num_features, size_t num_rows,
size_t entry_start, bool use_shared, int num_group) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
Loader loader(data, use_shared, num_features, num_rows, entry_start);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const RegTree::Node* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
auto tree_cat_ptrs = d_cat_node_segments.subspan(
d_tree_segments[tree_idx - tree_begin],
d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin]);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
auto tree_split_types =
d_tree_split_types.subspan(d_tree_segments[tree_idx - tree_begin],
d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin]);
float leaf = GetLeafWeight(global_idx, d_tree, tree_split_types,
tree_cat_ptrs,
tree_categories,
&loader);
sum += leaf;
}
d_out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const RegTree::Node* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
auto tree_cat_ptrs = d_cat_node_segments.subspan(
d_tree_segments[tree_idx - tree_begin],
d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin]);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, d_tree_split_types,
tree_cat_ptrs,
tree_categories,
&loader);
}
}
}
class DeviceModel {
public:
// Need to lazily construct the vectors because GPU id is only known at runtime
HostDeviceVector<RTreeNodeStat> stats;
HostDeviceVector<size_t> tree_segments;
HostDeviceVector<RegTree::Node> nodes;
HostDeviceVector<int> tree_group;
HostDeviceVector<FeatureType> split_types;
  // Pointer to each tree, segmenting the categories array.
HostDeviceVector<uint32_t> categories_tree_segments;
// Pointer to each node, segmenting categories array.
HostDeviceVector<RegTree::Segment> categories_node_segments;
HostDeviceVector<uint32_t> categories;
size_t tree_beg_; // NOLINT
size_t tree_end_; // NOLINT
int num_group;
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
dh::safe_cuda(cudaSetDevice(gpu_id));
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
auto& h_tree_segments = tree_segments.HostVector();
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
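    // For example, trees with 7, 15 and 9 nodes yield
    // h_tree_segments = {0, 7, 22, 31}; the nodes of tree t then occupy
    // [h_tree_segments[t], h_tree_segments[t + 1]) in the flat node array.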
nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(),
gpu_id));
stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(),
RTreeNodeStat(), gpu_id));
auto d_nodes = nodes.DevicePointer();
auto d_stats = stats.DevicePointer();
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
auto& src_stats = model.trees.at(tree_idx)->GetStats();
dh::safe_cuda(cudaMemcpyAsync(
d_nodes + h_tree_segments[tree_idx - tree_begin], src_nodes.data(),
sizeof(RegTree::Node) * src_nodes.size(), cudaMemcpyDefault));
dh::safe_cuda(cudaMemcpyAsync(
d_stats + h_tree_segments[tree_idx - tree_begin], src_stats.data(),
sizeof(RTreeNodeStat) * src_stats.size(), cudaMemcpyDefault));
}
tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id));
auto& h_tree_group = tree_group.HostVector();
std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
// Initialize categorical splits.
split_types.SetDevice(gpu_id);
std::vector<FeatureType>& h_split_types = split_types.HostVector();
h_split_types.resize(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_st = model.trees.at(tree_idx)->GetSplitTypes();
std::copy(src_st.cbegin(), src_st.cend(),
h_split_types.begin() + h_tree_segments[tree_idx - tree_begin]);
}
categories = HostDeviceVector<uint32_t>({}, gpu_id);
categories_tree_segments = HostDeviceVector<uint32_t>(1, 0, gpu_id);
std::vector<uint32_t> &h_categories = categories.HostVector();
std::vector<uint32_t> &h_split_cat_segments = categories_tree_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_cats = model.trees.at(tree_idx)->GetSplitCategories();
size_t orig_size = h_categories.size();
h_categories.resize(orig_size + src_cats.size());
std::copy(src_cats.cbegin(), src_cats.cend(),
h_categories.begin() + orig_size);
h_split_cat_segments.push_back(h_categories.size());
}
categories_node_segments =
HostDeviceVector<RegTree::Segment>(h_tree_segments.back(), {}, gpu_id);
std::vector<RegTree::Segment> &h_categories_node_segments =
categories_node_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const &src_cats_ptr = model.trees.at(tree_idx)->GetSplitCategoriesPtr();
std::copy(src_cats_ptr.cbegin(), src_cats_ptr.cend(),
h_categories_node_segments.begin() +
h_tree_segments[tree_idx - tree_begin]);
}
this->tree_beg_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group = model.learner_model_param->num_output_group;
}
};
struct PathInfo {
int64_t leaf_position; // -1 not a leaf
size_t length;
size_t tree_idx;
};
// Transform model into path element form for GPUTreeShap
void ExtractPaths(dh::device_vector<gpu_treeshap::PathElement>* paths,
const gbm::GBTreeModel& model, size_t tree_limit,
int gpu_id) {
DeviceModel device_model;
device_model.Init(model, 0, tree_limit, gpu_id);
dh::caching_device_vector<PathInfo> info(device_model.nodes.Size());
dh::XGBCachingDeviceAllocator<PathInfo> alloc;
auto d_nodes = device_model.nodes.ConstDeviceSpan();
auto d_tree_segments = device_model.tree_segments.ConstDeviceSpan();
auto nodes_transform = dh::MakeTransformIterator<PathInfo>(
thrust::make_counting_iterator(0ull), [=] __device__(size_t idx) {
auto n = d_nodes[idx];
if (!n.IsLeaf() || n.IsDeleted()) {
return PathInfo{-1, 0, 0};
}
size_t tree_idx =
dh::SegmentId(d_tree_segments.begin(), d_tree_segments.end(), idx);
size_t tree_offset = d_tree_segments[tree_idx];
size_t path_length = 1;
while (!n.IsRoot()) {
n = d_nodes[n.Parent() + tree_offset];
path_length++;
}
return PathInfo{int64_t(idx), path_length, tree_idx};
});
auto end = thrust::copy_if(
thrust::cuda::par(alloc), nodes_transform,
nodes_transform + d_nodes.size(), info.begin(),
[=] __device__(const PathInfo& e) { return e.leaf_position != -1; });
info.resize(end - info.begin());
auto length_iterator = dh::MakeTransformIterator<size_t>(
info.begin(),
[=] __device__(const PathInfo& info) { return info.length; });
dh::caching_device_vector<size_t> path_segments(info.size() + 1);
thrust::exclusive_scan(thrust::cuda::par(alloc), length_iterator,
length_iterator + info.size() + 1,
path_segments.begin());
paths->resize(path_segments.back());
auto d_paths = paths->data().get();
auto d_info = info.data().get();
auto d_stats = device_model.stats.ConstDeviceSpan();
auto d_tree_group = device_model.tree_group.ConstDeviceSpan();
auto d_path_segments = path_segments.data().get();
dh::LaunchN(gpu_id, info.size(), [=] __device__(size_t idx) {
auto path_info = d_info[idx];
size_t tree_offset = d_tree_segments[path_info.tree_idx];
int group = d_tree_group[path_info.tree_idx];
size_t child_idx = path_info.leaf_position;
auto child = d_nodes[child_idx];
float v = child.LeafValue();
const float inf = std::numeric_limits<float>::infinity();
size_t output_position = d_path_segments[idx + 1] - 1;
while (!child.IsRoot()) {
size_t parent_idx = tree_offset + child.Parent();
double child_cover = d_stats[child_idx].sum_hess;
double parent_cover = d_stats[parent_idx].sum_hess;
double zero_fraction = child_cover / parent_cover;
auto parent = d_nodes[parent_idx];
bool is_left_path = (tree_offset + parent.LeftChild()) == child_idx;
bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) ||
(parent.DefaultLeft() && is_left_path);
float lower_bound = is_left_path ? -inf : parent.SplitCond();
float upper_bound = is_left_path ? parent.SplitCond() : inf;
d_paths[output_position--] = {
idx, parent.SplitIndex(), group, lower_bound,
upper_bound, is_missing_path, zero_fraction, v};
child_idx = parent_idx;
child = parent;
}
// Root node has feature -1
d_paths[output_position] = {idx, -1, group, -inf, inf, false, 1.0, v};
});
}
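// Illustrative sketch of the layout produced above: for a single stump that
// splits on feature 3 at 0.5, the left leaf with value v contributes two
// consecutive path elements,
//   {idx, -1, group, -inf, inf, false, 1.0, v}      // root terminator, feature -1
//   {idx, 3, group, -inf, 0.5, is_missing, f, v}    // split node; f = child/parent cover
// which is the form consumed by gpu_treeshap::GPUTreeShap.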
namespace {
template <size_t kBlockThreads>
size_t SharedMemoryBytes(size_t cols, size_t max_shared_memory_bytes) {
  // max_shared_memory_bytes is never expected to be 0 here.
CHECK_GT(max_shared_memory_bytes, 0);
size_t shared_memory_bytes =
static_cast<size_t>(sizeof(float) * cols * kBlockThreads);
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
}
return shared_memory_bytes;
}
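// Worked example (assuming a 48 KiB per-block shared-memory limit): with
// kBlockThreads = 128 and cols = 50, 4 * 50 * 128 = 25,600 bytes fits, so the
// per-thread row cache lives in shared memory; with cols = 200,
// 4 * 200 * 128 = 102,400 bytes exceeds the limit, the function returns 0 and
// the loaders fall back to reading directly from global memory.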
} // anonymous namespace
class GPUPredictor : public xgboost::Predictor {
private:
void PredictInternal(const SparsePage& batch,
DeviceModel const& model,
size_t num_features,
HostDeviceVector<bst_float>* predictions,
size_t batch_offset) const {
batch.offset.SetDevice(generic_param_->gpu_id);
batch.data.SetDevice(generic_param_->gpu_id);
const uint32_t BLOCK_THREADS = 128;
size_t num_rows = batch.Size();
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
auto max_shared_memory_bytes = ConfigureDevice(generic_param_->gpu_id);
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(num_features, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
num_features);
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<SparsePageLoader, SparsePageView>, data,
model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
num_features, num_rows, entry_start, use_shared, model.num_group);
}
void PredictInternal(EllpackDeviceAccessor const& batch,
DeviceModel const& model,
HostDeviceVector<bst_float>* out_preds,
size_t batch_offset) const {
const uint32_t BLOCK_THREADS = 256;
size_t num_rows = batch.n_rows;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
DeviceModel d_model;
bool use_shared = false;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch,
model.nodes.ConstDeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
batch.NumFeatures(), num_rows, entry_start, use_shared,
model.num_group);
}
void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) const {
if (tree_end - tree_begin == 0) {
return;
}
out_preds->SetDevice(generic_param_->gpu_id);
auto const& info = dmat->Info();
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, generic_param_->gpu_id);
if (dmat->PageExists<SparsePage>()) {
size_t batch_offset = 0;
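      // Predictions are laid out row-major as [row][output_group], so each
      // batch advances the write offset by batch.Size() * num_output_group;
      // e.g. with 3 output groups, row r of the second batch lands at
      // out_preds[(rows_in_first_batch + r) * 3 + group].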
for (auto &batch : dmat->GetBatches<SparsePage>()) {
this->PredictInternal(batch, d_model, model.learner_model_param->num_feature,
out_preds, batch_offset);
batch_offset += batch.Size() * model.learner_model_param->num_output_group;
}
} else {
size_t batch_offset = 0;
for (auto const& page : dmat->GetBatches<EllpackPage>()) {
this->PredictInternal(
page.Impl()->GetDeviceAccessor(generic_param_->gpu_id),
d_model,
out_preds,
batch_offset);
batch_offset += page.Impl()->n_rows;
}
}
}
public:
explicit GPUPredictor(GenericParameter const* generic_param) :
Predictor::Predictor{generic_param} {}
~GPUPredictor() override {
if (generic_param_->gpu_id >= 0 && generic_param_->gpu_id < common::AllVisibleGPUs()) {
dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id));
}
}
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
const gbm::GBTreeModel& model, uint32_t tree_begin,
uint32_t tree_end = 0) const override {
int device = generic_param_->gpu_id;
CHECK_GE(device, 0) << "Set `gpu_id' to positive value for processing GPU data.";
auto* out_preds = &predts->predictions;
if (out_preds->Size() == 0 && dmat->Info().num_row_ != 0) {
CHECK_EQ(predts->version, 0);
}
if (tree_end == 0) {
tree_end = model.trees.size();
}
if (predts->version == 0) {
// out_preds->Size() can be non-zero as it's initialized here before any tree is
      // built at the 0^th iteration.
this->InitOutPredictions(dmat->Info(), out_preds, model);
}
if (tree_end - tree_begin == 0) {
return;
}
this->DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
template <typename Adapter, typename Loader>
void DispatchedInplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel &model, float,
PredictionCacheEntry *out_preds,
uint32_t tree_begin, uint32_t tree_end) const {
uint32_t const output_groups = model.learner_model_param->num_output_group;
auto m = dmlc::get<std::shared_ptr<Adapter>>(x);
CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature)
<< "Number of columns in data must equal to trained model.";
CHECK_EQ(dh::CurrentDevice(), m->DeviceIdx())
<< "XGBoost is running on device: " << this->generic_param_->gpu_id << ", "
<< "but data is on: " << m->DeviceIdx();
if (p_m) {
p_m->Info().num_row_ = m->NumRows();
this->InitOutPredictions(p_m->Info(), &(out_preds->predictions), model);
} else {
MetaInfo info;
info.num_row_ = m->NumRows();
this->InitOutPredictions(info, &(out_preds->predictions), model);
}
out_preds->predictions.SetDevice(m->DeviceIdx());
const uint32_t BLOCK_THREADS = 128;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(m->NumRows(), BLOCK_THREADS));
auto max_shared_memory_bytes = dh::MaxSharedMemory(m->DeviceIdx());
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(m->NumColumns(), max_shared_memory_bytes);
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, m->DeviceIdx());
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<Loader, typename Loader::BatchT>, m->Value(),
d_model.nodes.ConstDeviceSpan(), out_preds->predictions.DeviceSpan(),
d_model.tree_segments.ConstDeviceSpan(), d_model.tree_group.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(), tree_begin, tree_end, m->NumColumns(),
m->NumRows(), entry_start, use_shared, output_groups);
}
bool InplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel &model, float missing,
PredictionCacheEntry *out_preds, uint32_t tree_begin,
unsigned tree_end) const override {
if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) {
this->DispatchedInplacePredict<
data::CupyAdapter, DeviceAdapterLoader<data::CupyAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) {
this->DispatchedInplacePredict<
data::CudfAdapter, DeviceAdapterLoader<data::CudfAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else {
return false;
}
return true;
}
void PredictContribution(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned tree_end,
std::vector<bst_float>*,
bool approximate, int,
unsigned) const override {
if (approximate) {
LOG(FATAL) << "Approximated contribution is not implemented in GPU Predictor.";
}
dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id));
out_contribs->SetDevice(generic_param_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement> device_paths;
ExtractPaths(&device_paths, model, tree_end, generic_param_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
gpu_treeshap::GPUTreeShap(
X, device_paths.begin(), device_paths.end(), ngroup,
phis.data() + batch.base_rowid * contributions_columns, phis.size());
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(generic_param_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
dh::LaunchN(
generic_param_->gpu_id,
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
phis[(idx + 1) * contributions_columns - 1] +=
margin.empty() ? base_score : margin[idx];
});
}
void PredictInteractionContributions(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned tree_end,
std::vector<bst_float>*,
bool approximate) const override {
if (approximate) {
LOG(FATAL) << "[Internal error]: " << __func__
<< " approximate is not implemented in GPU Predictor.";
}
dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id));
out_contribs->SetDevice(generic_param_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
    // allocate space for (number of features + bias)^2 entries for each row and output group
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement> device_paths;
ExtractPaths(&device_paths, model, tree_end, generic_param_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
gpu_treeshap::GPUTreeShapInteractions(
X, device_paths.begin(), device_paths.end(), ngroup,
phis.data() + batch.base_rowid * contributions_columns, phis.size());
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(generic_param_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
size_t n_features = model.learner_model_param->num_feature;
dh::LaunchN(
generic_param_->gpu_id,
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
size_t group = idx % ngroup;
size_t row_idx = idx / ngroup;
phis[gpu_treeshap::IndexPhiInteractions(
row_idx, ngroup, group, n_features, n_features, n_features)] +=
margin.empty() ? base_score : margin[idx];
});
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n_classes = model.learner_model_param->num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->SetDevice(generic_param_->gpu_id);
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(base_margin.Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.learner_model_param->base_score);
}
}
void PredictInstance(const SparsePage::Inst&,
std::vector<bst_float>*,
const gbm::GBTreeModel&, unsigned) const override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictLeaf(DMatrix *p_fmat, HostDeviceVector<bst_float> *predictions,
const gbm::GBTreeModel &model,
unsigned tree_end) const override {
dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id));
auto max_shared_memory_bytes = ConfigureDevice(generic_param_->gpu_id);
const MetaInfo& info = p_fmat->Info();
constexpr uint32_t kBlockThreads = 128;
size_t shared_memory_bytes = SharedMemoryBytes<kBlockThreads>(
info.num_col_, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
bst_feature_t num_features = info.num_col_;
bst_row_t num_rows = info.num_row_;
size_t entry_start = 0;
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
predictions->SetDevice(generic_param_->gpu_id);
predictions->Resize(num_rows * tree_end);
DeviceModel d_model;
d_model.Init(model, 0, tree_end, this->generic_param_->gpu_id);
if (p_fmat->PageExists<SparsePage>()) {
for (auto const& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
bst_row_t batch_offset = 0;
SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<SparsePageLoader, SparsePageView>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared);
batch_offset += batch.Size();
}
} else {
for (auto const& batch : p_fmat->GetBatches<EllpackPage>()) {
bst_row_t batch_offset = 0;
EllpackDeviceAccessor data{batch.Impl()->GetDeviceAccessor(generic_param_->gpu_id)};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<EllpackLoader, EllpackDeviceAccessor>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared);
batch_offset += batch.Size();
}
}
}
void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
Predictor::Configure(cfg);
}
private:
/*! \brief Reconfigure the device when GPU is changed. */
static size_t ConfigureDevice(int device) {
if (device >= 0) {
return dh::MaxSharedMemory(device);
}
return 0;
}
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](GenericParameter const* generic_param) {
return new GPUPredictor(generic_param);
});
} // namespace predictor
} // namespace xgboost
|
1b594324850db03bd8ea4f86ea2b175e3728030d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <vector>
#include "dot_adder.h"
#include "main.h"
CUDA_CALLABLE_MEMBER void Dot_Adder::add(int id, int _b)
{
dot_sum[id] += dot_sum[id + _b];
}
CUDA_CALLABLE_MEMBER Dot_Adder::Dot_Adder()
{
}
CUDA_CALLABLE_MEMBER void Dot_Adder::init(int base)
{
dot_sum = new double[(int)exp2f(base)];
}
__device__ Dot_Adder dot_adders[12];
__global__ void Dot_Adder_Initialize()
{
for (int i = 1; i < 12; i++)
{
dot_adders[i - 1].init(i);
}
}
__global__ void dot_adder(int base, int _b)
{
dot_adders[base - 1].add(threadIdx.x, _b);
}
__global__ void dot_adder_C(Dot_Adder* d_a, int _b)
{
d_a->add(threadIdx.x, _b);
}
|
1b594324850db03bd8ea4f86ea2b175e3728030d.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <vector>
#include "dot_adder.h"
#include "main.h"
CUDA_CALLABLE_MEMBER void Dot_Adder::add(int id, int _b)
{
dot_sum[id] += dot_sum[id + _b];
}
CUDA_CALLABLE_MEMBER Dot_Adder::Dot_Adder()
{
}
CUDA_CALLABLE_MEMBER void Dot_Adder::init(int base)
{
dot_sum = new double[(int)exp2f(base)];
}
__device__ Dot_Adder dot_adders[12];
__global__ void Dot_Adder_Initialize()
{
for (int i = 1; i < 12; i++)
{
dot_adders[i - 1].init(i);
}
}
__global__ void dot_adder(int base, int _b)
{
dot_adders[base - 1].add(threadIdx.x, _b);
}
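// Sketch of the intended host-side reduction (an assumption based on add()'s
// stride pattern, not code from this file): after filling
// dot_adders[base - 1].dot_sum with 2^base partial products, halve the stride
// each pass so dot_sum[0] ends up holding the full sum:
//   for (int b = 1 << (base - 1); b >= 1; b >>= 1)
//     dot_adder<<<1, b>>>(base, b);
//   cudaDeviceSynchronize();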
__global__ void dot_adder_C(Dot_Adder* d_a, int _b)
{
d_a->add(threadIdx.x, _b);
}
|
c84aafee842968cb403f730340f71619ef206bf6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <R.h>
#include <stdint.h>
#define min(A,B) ((A) < (B) ? (A) : (B))
#define max(A,B) ((A) > (B) ? (A) : (B))
#define Data(i,j) data[(j) * (*row) + (i)] //R uses column-major order
#define Func(i,j) func[(i) * m + (j)]
#define numThread 256
#define MaxN 65536
#define MaxCol 256
static unsigned *count, *tmpCount, *f1, *f2, **gpuCount, **gpuF1, **gpuF2;
static float *func, **gpuFunc;
__global__
void kernel(float *func, unsigned *count, unsigned n, unsigned m,
unsigned *f1, unsigned *f2) {
__shared__ float minVector[MaxCol];
__shared__ float maxVector[MaxCol];
unsigned myFunc, i, j;
float funcValue;
if (threadIdx.x < m) {
funcValue = Func(f1[blockIdx.x], threadIdx.x); //func 1
minVector[threadIdx.x] = funcValue;
maxVector[threadIdx.x] = funcValue;
funcValue = Func(f2[blockIdx.x], threadIdx.x); //func 2
minVector[threadIdx.x] = min(minVector[threadIdx.x], funcValue);
maxVector[threadIdx.x] = max(maxVector[threadIdx.x], funcValue);
}
__syncthreads();
for (i = 0; i < n; i += blockDim.x) {
myFunc = i + threadIdx.x;
if (myFunc < n) {
for (j = 0; j < m; j++) {
funcValue = Func(myFunc, j);
if (funcValue < minVector[j] || funcValue > maxVector[j])
break;
}
if (j == m)
atomicAdd(count + myFunc, 1);
}
}
}
extern "C"
void multigpuBD2(int *row, int *col, double *data, double *depth) {
unsigned n, m, chunk, size;
uint64_t i, j, k, numPairs;
int numGPU;
n = *row;
m = *col;
hipGetDeviceCount(&numGPU);
if (n > MaxN) {
fprintf(stderr, "number of rows cannot be more than %u\n", MaxN);
exit(1);
}
if (m > MaxCol) {
fprintf(stderr, "number of columns cannot be more than %u\n", MaxCol);
exit(1);
}
if (numGPU < 2) {
fprintf(stderr, "need more than 1 GPU\n");
exit(1);
}
count = (unsigned*)malloc(sizeof(unsigned) * n);
tmpCount = (unsigned*)malloc(sizeof(unsigned) * n);
func = (float*)malloc(sizeof(float) * n * m);
for (i = 0; i < n; i++) {
count[i] = 0;
for (j = 0; j < m; j++)
Func(i, j) = Data(i, j);
//data: column major, double
//func: row major, float
}
numPairs = (uint64_t)n * (n - 1) / 2;
f1 = (unsigned*)malloc(sizeof(unsigned) * numPairs);
f2 = (unsigned*)malloc(sizeof(unsigned) * numPairs);
for (i = 0, k = 0; i < n; i++)
for (j = i + 1; j < n; j++)
f1[k] = i, f2[k++] = j;
chunk = (numPairs + numGPU - 1) / numGPU;
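	// Worked example: n = 4 curves give numPairs = 6 pairs
	// (0,1),(0,2),(0,3),(1,2),(1,3),(2,3); with numGPU = 2, chunk = 3, so GPU 0
	// processes pairs 0-2 and GPU 1 pairs 3-5. Each pair becomes one block of
	// the kernel, whose threads test every curve against that pair's envelope
	// and atomically bump count[] for curves lying entirely inside the band.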
gpuCount = (unsigned**)malloc(numGPU * sizeof(unsigned*));
gpuF1 = (unsigned**)malloc(numGPU * sizeof(unsigned*));
gpuF2 = (unsigned**)malloc(numGPU * sizeof(unsigned*));
gpuFunc = (float**)malloc(numGPU * sizeof(float*));
for (i = 0; i < numGPU; i++) {
hipSetDevice(i);
hipMalloc((void**)&gpuCount[i], sizeof(unsigned) * n);
hipMalloc((void**)&gpuFunc[i], sizeof(float) * n * m);
hipMalloc((void**)&gpuF1[i], sizeof(unsigned) * chunk);
hipMalloc((void**)&gpuF2[i], sizeof(unsigned) * chunk);
size = (i == numGPU - 1) ? (numPairs - i * chunk) : chunk;
hipMemcpy(gpuCount[i], count, sizeof(unsigned) * n,
hipMemcpyHostToDevice);
hipMemcpy(gpuFunc[i], func, sizeof(float) * n * m,
hipMemcpyHostToDevice);
hipMemcpy(gpuF1[i], &f1[i * chunk], sizeof(unsigned) * size,
hipMemcpyHostToDevice);
hipMemcpy(gpuF2[i], &f2[i * chunk], sizeof(unsigned) * size,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel), dim3(size), dim3(numThread), 0, 0, gpuFunc[i], gpuCount[i], n, m,
gpuF1[i], gpuF2[i]);
}
for (i = 0; i < numGPU; i++) {
hipSetDevice(i);
hipDeviceSynchronize();
hipMemcpy(tmpCount, gpuCount[i], sizeof(unsigned) * n,
hipMemcpyDeviceToHost);
for (j = 0; j < n; j++)
count[j] += tmpCount[j];
hipFree(gpuCount[i]);
hipFree(gpuFunc[i]);
hipFree(gpuF1[i]);
hipFree(gpuF2[i]);
}
for (i = 0; i < n; i++)
depth[i] = (double)count[i] / (n * (n - 1.0) / 2.0);
free(count);
free(tmpCount);
free(func);
free(f1);
free(f2);
free(gpuCount);
free(gpuFunc);
free(gpuF1);
free(gpuF2);
}
|
c84aafee842968cb403f730340f71619ef206bf6.cu
|
#include <R.h>
#include <stdint.h>
#define min(A,B) ((A) < (B) ? (A) : (B))
#define max(A,B) ((A) > (B) ? (A) : (B))
#define Data(i,j) data[(j) * (*row) + (i)] //R uses column-major order
#define Func(i,j) func[(i) * m + (j)]
#define numThread 256
#define MaxN 65536
#define MaxCol 256
static unsigned *count, *tmpCount, *f1, *f2, **gpuCount, **gpuF1, **gpuF2;
static float *func, **gpuFunc;
__global__
void kernel(float *func, unsigned *count, unsigned n, unsigned m,
unsigned *f1, unsigned *f2) {
__shared__ float minVector[MaxCol];
__shared__ float maxVector[MaxCol];
unsigned myFunc, i, j;
float funcValue;
if (threadIdx.x < m) {
funcValue = Func(f1[blockIdx.x], threadIdx.x); //func 1
minVector[threadIdx.x] = funcValue;
maxVector[threadIdx.x] = funcValue;
funcValue = Func(f2[blockIdx.x], threadIdx.x); //func 2
minVector[threadIdx.x] = min(minVector[threadIdx.x], funcValue);
maxVector[threadIdx.x] = max(maxVector[threadIdx.x], funcValue);
}
__syncthreads();
for (i = 0; i < n; i += blockDim.x) {
myFunc = i + threadIdx.x;
if (myFunc < n) {
for (j = 0; j < m; j++) {
funcValue = Func(myFunc, j);
if (funcValue < minVector[j] || funcValue > maxVector[j])
break;
}
if (j == m)
atomicAdd(count + myFunc, 1);
}
}
}
extern "C"
void multigpuBD2(int *row, int *col, double *data, double *depth) {
unsigned n, m, chunk, size;
uint64_t i, j, k, numPairs;
int numGPU;
n = *row;
m = *col;
cudaGetDeviceCount(&numGPU);
if (n > MaxN) {
fprintf(stderr, "number of rows cannot be more than %u\n", MaxN);
exit(1);
}
if (m > MaxCol) {
fprintf(stderr, "number of columns cannot be more than %u\n", MaxCol);
exit(1);
}
if (numGPU < 2) {
fprintf(stderr, "need more than 1 GPU\n");
exit(1);
}
count = (unsigned*)malloc(sizeof(unsigned) * n);
tmpCount = (unsigned*)malloc(sizeof(unsigned) * n);
func = (float*)malloc(sizeof(float) * n * m);
for (i = 0; i < n; i++) {
count[i] = 0;
for (j = 0; j < m; j++)
Func(i, j) = Data(i, j);
//data: column major, double
//func: row major, float
}
numPairs = (uint64_t)n * (n - 1) / 2;
f1 = (unsigned*)malloc(sizeof(unsigned) * numPairs);
f2 = (unsigned*)malloc(sizeof(unsigned) * numPairs);
for (i = 0, k = 0; i < n; i++)
for (j = i + 1; j < n; j++)
f1[k] = i, f2[k++] = j;
chunk = (numPairs + numGPU - 1) / numGPU;
gpuCount = (unsigned**)malloc(numGPU * sizeof(unsigned*));
gpuF1 = (unsigned**)malloc(numGPU * sizeof(unsigned*));
gpuF2 = (unsigned**)malloc(numGPU * sizeof(unsigned*));
gpuFunc = (float**)malloc(numGPU * sizeof(float*));
for (i = 0; i < numGPU; i++) {
cudaSetDevice(i);
cudaMalloc((void**)&gpuCount[i], sizeof(unsigned) * n);
cudaMalloc((void**)&gpuFunc[i], sizeof(float) * n * m);
cudaMalloc((void**)&gpuF1[i], sizeof(unsigned) * chunk);
cudaMalloc((void**)&gpuF2[i], sizeof(unsigned) * chunk);
size = (i == numGPU - 1) ? (numPairs - i * chunk) : chunk;
cudaMemcpy(gpuCount[i], count, sizeof(unsigned) * n,
cudaMemcpyHostToDevice);
cudaMemcpy(gpuFunc[i], func, sizeof(float) * n * m,
cudaMemcpyHostToDevice);
cudaMemcpy(gpuF1[i], &f1[i * chunk], sizeof(unsigned) * size,
cudaMemcpyHostToDevice);
cudaMemcpy(gpuF2[i], &f2[i * chunk], sizeof(unsigned) * size,
cudaMemcpyHostToDevice);
kernel<<<size, numThread>>>(gpuFunc[i], gpuCount[i], n, m,
gpuF1[i], gpuF2[i]);
}
for (i = 0; i < numGPU; i++) {
cudaSetDevice(i);
cudaThreadSynchronize();
cudaMemcpy(tmpCount, gpuCount[i], sizeof(unsigned) * n,
cudaMemcpyDeviceToHost);
for (j = 0; j < n; j++)
count[j] += tmpCount[j];
cudaFree(gpuCount[i]);
cudaFree(gpuFunc[i]);
cudaFree(gpuF1[i]);
cudaFree(gpuF2[i]);
}
for (i = 0; i < n; i++)
depth[i] = (double)count[i] / (n * (n - 1.0) / 2.0);
free(count);
free(tmpCount);
free(func);
free(f1);
free(f2);
free(gpuCount);
free(gpuFunc);
free(gpuF1);
free(gpuF2);
}
|
03f1621f94af098744ee8a970416e7624b1295b1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 3
* of the programming guide with some additions like error checking.
*
*/
// Includes
#include <stdio.h>
#include <cutil_inline.h>
#include <shrQATest.h>
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
// Device code
__global__ void VecAdd(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
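// With the launch parameters used below (N = 50000, 256 threads per block) the
// grid rounds up to 196 blocks = 50176 threads, so the `i < N` guard leaves the
// trailing 176 threads idle rather than writing past the end of C.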
// Host code
/*
int main(int argc, char** argv)
{
shrQAStart(argc, argv);
printf("Vector Addition\n");
int N = 50000;
size_t size = N * sizeof(float);
ParseArguments(argc, argv);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
cutilSafeCall( hipMalloc((void**)&d_A, size) );
cutilSafeCall( hipMalloc((void**)&d_B, size) );
cutilSafeCall( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
cutilSafeCall( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
// Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
cutilCheckMsg("kernel launch failure");
#ifdef _DEBUG
cutilSafeCall( cutilDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
cutilSafeCall( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
// Verify result
int i;
for (i = 0; i < N; ++i) {
float sum = h_A[i] + h_B[i];
if (fabs(h_C[i] - sum) > 1e-5)
break;
}
CleanupResources();
shrQAFinishExit(argc, (const char **)argv, (i==N) ? QA_PASSED : QA_FAILED);
}
*/
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
cutilDeviceReset();
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i)
data[i] = rand() / (float)RAND_MAX;
}
// Parse program arguments
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i)
if (strcmp(argv[i], "--noprompt") == 0 ||
strcmp(argv[i], "-noprompt") == 0)
{
noprompt = true;
break;
}
}
|
03f1621f94af098744ee8a970416e7624b1295b1.cu
|
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 3
* of the programming guide with some additions like error checking.
*
*/
// Includes
#include <stdio.h>
#include <cutil_inline.h>
#include <shrQATest.h>
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
// Device code
__global__ void VecAdd(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
// Host code
/*
int main(int argc, char** argv)
{
shrQAStart(argc, argv);
printf("Vector Addition\n");
int N = 50000;
size_t size = N * sizeof(float);
ParseArguments(argc, argv);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
cutilSafeCall( cudaMalloc((void**)&d_A, size) );
cutilSafeCall( cudaMalloc((void**)&d_B, size) );
cutilSafeCall( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
cutilSafeCall( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
// Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
cutilCheckMsg("kernel launch failure");
#ifdef _DEBUG
cutilSafeCall( cutilDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
cutilSafeCall( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
// Verify result
int i;
for (i = 0; i < N; ++i) {
float sum = h_A[i] + h_B[i];
if (fabs(h_C[i] - sum) > 1e-5)
break;
}
CleanupResources();
shrQAFinishExit(argc, (const char **)argv, (i==N) ? QA_PASSED : QA_FAILED);
}
*/
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
cutilDeviceReset();
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i)
data[i] = rand() / (float)RAND_MAX;
}
// Parse program arguments
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i)
if (strcmp(argv[i], "--noprompt") == 0 ||
strcmp(argv[i], "-noprompt") == 0)
{
noprompt = true;
break;
}
}
|
f94ca6894b74360c099ff162638a70b9f5500d77.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//---------------------------------*-CUDA-*----------------------------------//
// Copyright 2020 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file EPlusGG.cu
//---------------------------------------------------------------------------//
#include "EPlusGG.hh"
#include "base/Assert.hh"
#include "base/KernelParamCalculator.cuda.hh"
#include "random/cuda/RngEngine.hh"
#include "physics/base/ModelInterface.hh"
#include "physics/base/ParticleTrackView.hh"
#include "physics/base/PhysicsTrackView.hh"
#include "physics/base/SecondaryAllocatorView.hh"
#include "EPlusGGInteractor.hh"
namespace celeritas
{
namespace detail
{
namespace
{
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
/*!
* Interact using the EPlusGG model on applicable tracks.
*/
__global__ void eplusgg_interact_kernel(const EPlusGGPointers epgg,
const ModelInteractPointers model)
{
// Get the thread id
auto tid = celeritas::KernelParamCalculator::thread_id();
if (tid.get() >= model.states.size())
return;
// Get views to this Secondary, Particle, and Physics
SecondaryAllocatorView allocate_secondaries(model.secondaries);
ParticleTrackView particle(
model.params.particle, model.states.particle, tid);
PhysicsTrackView physics(model.params.physics,
model.states.physics,
particle.def_id(),
MaterialDefId{},
tid);
// This interaction only applies if the EPlusGG model was selected
if (physics.model_id() != epgg.model_id)
return;
// Do the interaction
EPlusGGInteractor interact(
epgg, particle, model.states.direction[tid.get()], allocate_secondaries);
RngEngine rng(model.states.rng, tid);
model.result[tid.get()] = interact(rng);
CELER_ENSURE(model.result[tid.get()]);
}
} // namespace
//---------------------------------------------------------------------------//
// LAUNCHERS
//---------------------------------------------------------------------------//
/*!
* Launch the EPlusGG interaction.
*/
void eplusgg_interact(const EPlusGGPointers& eplusgg,
const ModelInteractPointers& model)
{
CELER_EXPECT(eplusgg);
CELER_EXPECT(model);
// Calculate kernel launch params
auto params = KernelParamCalculator()(model.states.size());
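    // Presumably this rounds the grid up so grid_size * block_size covers
    // model.states.size(); the tid >= size() early-return in the kernel then
    // discards the surplus threads of the last block.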
// Launch the kernel
hipLaunchKernelGGL(( eplusgg_interact_kernel), dim3(params.grid_size), dim3(params.block_size), 0, 0, eplusgg,
model);
CELER_CUDA_CHECK_ERROR();
}
//---------------------------------------------------------------------------//
} // namespace detail
} // namespace celeritas
|
f94ca6894b74360c099ff162638a70b9f5500d77.cu
|
//---------------------------------*-CUDA-*----------------------------------//
// Copyright 2020 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file EPlusGG.cu
//---------------------------------------------------------------------------//
#include "EPlusGG.hh"
#include "base/Assert.hh"
#include "base/KernelParamCalculator.cuda.hh"
#include "random/cuda/RngEngine.hh"
#include "physics/base/ModelInterface.hh"
#include "physics/base/ParticleTrackView.hh"
#include "physics/base/PhysicsTrackView.hh"
#include "physics/base/SecondaryAllocatorView.hh"
#include "EPlusGGInteractor.hh"
namespace celeritas
{
namespace detail
{
namespace
{
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
/*!
* Interact using the EPlusGG model on applicable tracks.
*/
__global__ void eplusgg_interact_kernel(const EPlusGGPointers epgg,
const ModelInteractPointers model)
{
// Get the thread id
auto tid = celeritas::KernelParamCalculator::thread_id();
if (tid.get() >= model.states.size())
return;
// Get views to this Secondary, Particle, and Physics
SecondaryAllocatorView allocate_secondaries(model.secondaries);
ParticleTrackView particle(
model.params.particle, model.states.particle, tid);
PhysicsTrackView physics(model.params.physics,
model.states.physics,
particle.def_id(),
MaterialDefId{},
tid);
// This interaction only applies if the EPlusGG model was selected
if (physics.model_id() != epgg.model_id)
return;
// Do the interaction
EPlusGGInteractor interact(
epgg, particle, model.states.direction[tid.get()], allocate_secondaries);
RngEngine rng(model.states.rng, tid);
model.result[tid.get()] = interact(rng);
CELER_ENSURE(model.result[tid.get()]);
}
} // namespace
//---------------------------------------------------------------------------//
// LAUNCHERS
//---------------------------------------------------------------------------//
/*!
* Launch the EPlusGG interaction.
*/
void eplusgg_interact(const EPlusGGPointers& eplusgg,
const ModelInteractPointers& model)
{
CELER_EXPECT(eplusgg);
CELER_EXPECT(model);
// Calculate kernel launch params
auto params = KernelParamCalculator()(model.states.size());
// Launch the kernel
eplusgg_interact_kernel<<<params.grid_size, params.block_size>>>(eplusgg,
model);
CELER_CUDA_CHECK_ERROR();
}
//---------------------------------------------------------------------------//
} // namespace detail
} // namespace celeritas
|
693782ed48521600399e065930b87f4a29a79b46.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "include.cuh"
#include "PJKIP.cuh"
PJKIP::PJKIP(System* sys)
{
system = sys;
careful = false;
tolerance = 1e-4;
maxIterations = 1000;
iterations = 0;
totalKrylovIterations = 0;
// spike stuff
partitions = 1;
solverOptions.safeFactorization = true;
solverOptions.trackReordering = true;
solverOptions.maxNumIterations = 5000;
preconditionerUpdateModulus = -1; // the preconditioner updates every ___ time steps
preconditionerMaxKrylovIterations = -1; // the preconditioner updates if Krylov iterations are greater than ____ iterations
mySolver = new SpikeSolver(partitions, solverOptions);
//m_spmv = new MySpmv(grad_f, grad_f_T, system->D, system->DT, system->mass, lambda, lambdaTmp, Dinv, M_hat, gammaTmp, system->f_contact, system->tmp);
stepKrylovIterations = 0;
precUpdated = 0;
// end spike stuff
}
int PJKIP::setup()
{
// vectors: x, y, dx, dy, d, b
x_d = system->a_h;
y_d = system->a_h;
dx_d = system->a_h;
dy_d = system->a_h;
d_d = system->a_h;
b_d = system->a_h;
r_d = system->a_h;
tmp_d = system->a_h;
thrust::device_ptr<double> wrapped_device_x(CASTD1(x_d));
thrust::device_ptr<double> wrapped_device_y(CASTD1(y_d));
thrust::device_ptr<double> wrapped_device_dx(CASTD1(dx_d));
thrust::device_ptr<double> wrapped_device_dy(CASTD1(dy_d));
thrust::device_ptr<double> wrapped_device_d(CASTD1(d_d));
thrust::device_ptr<double> wrapped_device_b(CASTD1(b_d));
thrust::device_ptr<double> wrapped_device_r(CASTD1(r_d));
thrust::device_ptr<double> wrapped_device_tmp(CASTD1(tmp_d));
x = DeviceValueArrayView(wrapped_device_x, wrapped_device_x + x_d.size());
y = DeviceValueArrayView(wrapped_device_y, wrapped_device_y + y_d.size());
dx = DeviceValueArrayView(wrapped_device_dx, wrapped_device_dx + dx_d.size());
dy = DeviceValueArrayView(wrapped_device_dy, wrapped_device_dy + dy_d.size());
d = DeviceValueArrayView(wrapped_device_d, wrapped_device_d + d_d.size());
b = DeviceValueArrayView(wrapped_device_b, wrapped_device_b + b_d.size());
r = DeviceValueArrayView(wrapped_device_r, wrapped_device_r + r_d.size());
tmp = DeviceValueArrayView(wrapped_device_tmp, wrapped_device_tmp + tmp_d.size());
return 0;
}
void PJKIP::setSolverType(int solverType)
{
switch(solverType) {
case 0:
solverOptions.solverType = spike::BiCGStab;
break;
case 1:
solverOptions.solverType = spike::BiCGStab1;
break;
case 2:
solverOptions.solverType = spike::BiCGStab2;
break;
case 3:
solverOptions.solverType = spike::MINRES;
break;
case 4:
solverOptions.solverType = spike::CG_C;
break;
case 5:
solverOptions.solverType = spike::CR_C;
break;
}
}
void PJKIP::setPrecondType(int useSpike)
{
solverOptions.precondType = useSpike ? spike::Spike : spike::None;
}
void PJKIP::printSolverParams()
{
// printf("Step size: %e\n", h);
// printf("Newton tolerance: %e\n", tol);
printf("Krylov relTol: %e abdTol: %e\n", solverOptions.relTol, solverOptions.absTol);
printf("Max. Krylov iterations: %d\n", solverOptions.maxNumIterations);
printf("----------------------------\n");
}
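// Restart direction for the interior-point iterate: yBar is proportional to (xn, -xt1, -xt2) and
// scaled so that <x, yBar> = s; d = (yBar - y)/s, so the caller's update y += s*d lands y at yBar.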
__global__ void initialize_d2(double* x, double* y, double* d, double s, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double xn = x[3*index];
double xt1 = x[3*index+1];
double xt2 = x[3*index+2];
double y0 = y[3*index];
double y1 = y[3*index+1];
double y2 = y[3*index+2];
double yBar0 = s/(xn-(pow(xt1,2.0)+pow(xt2,2.0))/xn);
double yBar1 = -(yBar0/xn)*xt1;
double yBar2 = -(yBar0/xn)*xt2;
d[3*index] = (yBar0-y0)/s;
d[3*index+1] = (yBar1-y1)/s;
d[3*index+2] = (yBar2-y2)/s;
}
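// Jordan-algebra inverse for the Lorentz cone: dst = (xn, -xt1, -xt2)/d with d = (xn^2 - xt1^2 - xt2^2)/2.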
__global__ void getInverse2(double* src, double* dst, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double xn = src[3*index];
double xt1 = src[3*index+1];
double xt2 = src[3*index+2];
double d = 0.5*(pow(xn,2.0)-(pow(xt1,2.0)+pow(xt2,2.0)));
dst[3*index] = xn/d;
dst[3*index+1] = -xt1/d;
dst[3*index+2] = -xt2/d;
}
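// Cone-feasibility violation: slot 0 gets max(0, ||xt|| - xn) (0 if NaN); slots 1-2 get large
// negative sentinels so a subsequent max-reduction only sees slot 0.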
__global__ void getFeasible2(double* src, double* dst, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double xn = src[3*index];
double xt1 = src[3*index+1];
double xt2 = src[3*index+2];
xn = xn-sqrt(pow(xt1,2.0)+pow(xt2,2.0));
if(xn!=xn) xn = 0.0;
dst[3*index] = -fmin(0.0,xn);
dst[3*index+1] = -10e30;
dst[3*index+2] = -10e30;
}
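// Per-contact complementarity residual: slot 0 gets the dot product <src, gamma> over the 3 components; slots 1-2 are zeroed.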
__global__ void getResidual_PJKIP(double* src, double* gamma, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
src[3*index] = src[3*index]*gamma[3*index]+src[3*index+1]*gamma[3*index+1]+src[3*index+2]*gamma[3*index+2];
src[3*index+1] = 0;
src[3*index+2] = 0;
}
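// Fraction-to-the-boundary rule: per contact, the largest steps tx, ty (with a 0.99 safety factor)
// keeping x + tx*dx and y + ty*dy inside the cone; stored as (tx, ty, 1) for a later min-reduction.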
__global__ void getStepLength2(double* x, double* dx, double* y, double* dy, double* dst, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double tx;
double ty;
double aux;
double xn = x[3*index];
double xt1 = x[3*index+1];
double xt2 = x[3*index+2];
double detx = 0.5*(pow(xn,2.0)-(pow(xt1,2.0)+pow(xt2,2.0)));
double dxn = dx[3*index];
double dxt1 = dx[3*index+1];
double dxt2 = dx[3*index+2];
double detdx = 0.5*(pow(dxn,2.0)-(pow(dxt1,2.0)+pow(dxt2,2.0)));
if(detdx>0 && dxn>0) {
tx = 1.0;
} else {
tx = 0.5*(dxn*xn-dxt1*xt1-dxt2*xt2);
aux = pow(tx,2.0)-detdx*detx;
if(aux>0.0) {
tx = 0.99*detx/(sqrt(aux)-tx);
} else {
tx = -0.99*detx/tx;
}
}
xn = y[3*index];
xt1 = y[3*index+1];
xt2 = y[3*index+2];
detx = 0.5*(pow(xn,2.0)-(pow(xt1,2.0)+pow(xt2,2.0)));
dxn = dy[3*index];
dxt1 = dy[3*index+1];
dxt2 = dy[3*index+2];
detdx = 0.5*(pow(dxn,2.0)-(pow(dxt1,2.0)+pow(dxt2,2.0)));
if(detdx>0 && dxn>0) {
ty = 1.0;
} else {
ty = 0.5*(dxn*xn-dxt1*xt1-dxt2*xt2);
aux = pow(ty,2.0)-detdx*detx;
if(aux>0.0) {
ty = 0.99*detx/(sqrt(aux)-ty);
} else {
ty = -0.99*detx/ty;
}
}
dst[3*index] = tx;
dst[3*index+1] = ty;
dst[3*index+2] = 1.0;
}
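// Log-barrier contribution: slot 0 gets log(xn^2 - xt1^2 - xt2^2), i.e. the log of twice the Jordan determinant.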
__global__ void getDeterminant2(double* src, double* dst, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double xn = src[3*index];
double xt1 = src[3*index+1];
double xt2 = src[3*index+2];
double d = 0.5*(pow(xn,2.0)-(pow(xt1,2.0)+pow(xt2,2.0)));
dst[3*index] = log(2.0*d);
dst[3*index+1] = 0.0;
dst[3*index+2] = 0.0;
}
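// Assembles the 3x3 per-contact scaling block Pw as COO triplets from the current x and y;
// NaN square-root determinants are clamped to zero.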
__global__ void constructPw2(int* PwI, int* PwJ, double* Pw, double* x, double* y, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double x0 = x[3*index];
double x1 = x[3*index+1];
double x2 = x[3*index+2];
double y0 = y[3*index];
double y1 = y[3*index+1];
double y2 = y[3*index+2];
double sqrtDetx = sqrt(0.5*(pow(x0,2.0) - (pow(x1,2.0)+pow(x2,2.0))));
double sqrtDety = sqrt(0.5*(pow(y0,2.0) - (pow(y1,2.0)+pow(y2,2.0))));
if(sqrtDetx!=sqrtDetx) sqrtDetx = 0.0;
if(sqrtDety!=sqrtDety) sqrtDety = 0.0;
PwI[9*index] = 3*index;
PwJ[9*index] = 3*index;
Pw[9*index] = pow(y0 + (sqrtDety*x0)/sqrtDetx,2.0)/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety) - sqrtDety/sqrtDetx;
PwI[9*index+1] = 3*index;
PwJ[9*index+1] = 3*index+1;
Pw[9*index+1] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y1 - (sqrtDety*x1)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
PwI[9*index+2] = 3*index;
PwJ[9*index+2] = 3*index+2;
Pw[9*index+2] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
PwI[9*index+3] = 3*index+1;
PwJ[9*index+3] = 3*index;
Pw[9*index+3] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y1 - (sqrtDety*x1)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
PwI[9*index+4] = 3*index+1;
PwJ[9*index+4] = 3*index+1;
Pw[9*index+4] = pow(y1 - (sqrtDety*x1)/sqrtDetx,2.0)/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety) + sqrtDety/sqrtDetx;
PwI[9*index+5] = 3*index+1;
PwJ[9*index+5] = 3*index+2;
Pw[9*index+5] = ((y1 - (sqrtDety*x1)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
PwI[9*index+6] = 3*index+2;
PwJ[9*index+6] = 3*index;
Pw[9*index+6] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
PwI[9*index+7] = 3*index+2;
PwJ[9*index+7] = 3*index+1;
Pw[9*index+7] = ((y1 - (sqrtDety*x1)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
PwI[9*index+8] = 3*index+2;
PwJ[9*index+8] = 3*index+2;
Pw[9*index+8] = pow(y2 - (sqrtDety*x2)/sqrtDetx,2.0)/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety) + sqrtDety/sqrtDetx;
}
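// Refreshes the Pw values in place (same sparsity pattern as constructPw2), taking |det| under the
// square roots to tolerate slightly negative determinants.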
__global__ void updatePw2(double* Pw, double* x, double* y, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double x0 = x[3*index];
double x1 = x[3*index+1];
double x2 = x[3*index+2];
double y0 = y[3*index];
double y1 = y[3*index+1];
double y2 = y[3*index+2];
double sqrtDetx = sqrt(abs(0.5*(pow(x0,2.0) - (pow(x1,2.0)+pow(x2,2.0)))));
double sqrtDety = sqrt(abs(0.5*(pow(y0,2.0) - (pow(y1,2.0)+pow(y2,2.0)))));
//if(sqrtDetx!=sqrtDetx) sqrtDetx = 1e-4;//0.0;
//if(sqrtDety!=sqrtDety) sqrtDety = 0.0;
Pw[9*index] = pow(y0 + (sqrtDety*x0)/sqrtDetx,2.0)/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety) - sqrtDety/sqrtDetx;
Pw[9*index+1] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y1 - (sqrtDety*x1)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
Pw[9*index+2] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
Pw[9*index+3] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y1 - (sqrtDety*x1)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
Pw[9*index+4] = pow(y1 - (sqrtDety*x1)/sqrtDetx,2.0)/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety) + sqrtDety/sqrtDetx;
Pw[9*index+5] = ((y1 - (sqrtDety*x1)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
Pw[9*index+6] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
Pw[9*index+7] = ((y1 - (sqrtDety*x1)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
Pw[9*index+8] = pow(y2 - (sqrtDety*x2)/sqrtDetx,2.0)/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety) + sqrtDety/sqrtDetx;
}
int PJKIP::initializePw() {
hipLaunchKernelGGL(( constructPw2), dim3(BLOCKS(system->collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTI1(PwI_d), CASTI1(PwJ_d), CASTD1(Pw_d), CASTD1(x_d), CASTD1(y_d), system->collisionDetector->numCollisions);
// create Pw using cusp library
thrust::device_ptr<int> wrapped_device_I(CASTI1(PwI_d));
DeviceIndexArrayView row_indices = DeviceIndexArrayView(wrapped_device_I, wrapped_device_I + PwI_d.size());
thrust::device_ptr<int> wrapped_device_J(CASTI1(PwJ_d));
DeviceIndexArrayView column_indices = DeviceIndexArrayView(wrapped_device_J, wrapped_device_J + PwJ_d.size());
thrust::device_ptr<double> wrapped_device_V(CASTD1(Pw_d));
DeviceValueArrayView values = DeviceValueArrayView(wrapped_device_V, wrapped_device_V + Pw_d.size());
Pw = DeviceView(3*system->collisionDetector->numCollisions, 3*system->collisionDetector->numCollisions, Pw_d.size(), row_indices, column_indices, values);
// end create Pw
return 0;
}
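// Starts each contact impulse at (mu, 0, 0), interior to the cone whenever mu > 0.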
__global__ void initializeImpulseVector2(double* src, double* friction, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double mu = friction[index];
src[3*index ] = mu;
src[3*index+1] = 0.0;
src[3*index+2] = 0.0;
}
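// Builds the per-contact diagonal scalings as COO triplets: invTx = diag(1/mu, 1, 1) and Ty = diag(1, mu, mu).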
__global__ void constructT2(int* invTxI, int* invTxJ, double* invTx, int* TyI, int* TyJ, double* Ty, double* friction, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double mu = friction[index];
invTxI[3*index] = 3*index;
invTxJ[3*index] = 3*index;
invTx[3*index] = 1.0/mu;
invTxI[3*index+1] = 3*index+1;
invTxJ[3*index+1] = 3*index+1;
invTx[3*index+1] = 1.0;
invTxI[3*index+2] = 3*index+2;
invTxJ[3*index+2] = 3*index+2;
invTx[3*index+2] = 1.0;
TyI[3*index] = 3*index;
TyJ[3*index] = 3*index;
Ty[3*index] = 1.0;
TyI[3*index+1] = 3*index+1;
TyJ[3*index+1] = 3*index+1;
Ty[3*index+1] = mu;
TyI[3*index+2] = 3*index+2;
TyJ[3*index+2] = 3*index+2;
Ty[3*index+2] = mu;
}
int PJKIP::initializeT() {
hipLaunchKernelGGL(( constructT2), dim3(BLOCKS(system->collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTI1(invTxI_d), CASTI1(invTxJ_d), CASTD1(invTx_d), CASTI1(TyI_d), CASTI1(TyJ_d), CASTD1(Ty_d), CASTD1(system->friction_d), system->collisionDetector->numCollisions);
{
// create invTx using cusp library
thrust::device_ptr<int> wrapped_device_I(CASTI1(invTxI_d));
DeviceIndexArrayView row_indices = DeviceIndexArrayView(wrapped_device_I, wrapped_device_I + invTxI_d.size());
thrust::device_ptr<int> wrapped_device_J(CASTI1(invTxJ_d));
DeviceIndexArrayView column_indices = DeviceIndexArrayView(wrapped_device_J, wrapped_device_J + invTxJ_d.size());
thrust::device_ptr<double> wrapped_device_V(CASTD1(invTx_d));
DeviceValueArrayView values = DeviceValueArrayView(wrapped_device_V, wrapped_device_V + invTx_d.size());
invTx = DeviceView(3*system->collisionDetector->numCollisions, 3*system->collisionDetector->numCollisions, invTx_d.size(), row_indices, column_indices, values);
// end create invTx
}
{
// create Ty using cusp library
thrust::device_ptr<int> wrapped_device_I(CASTI1(TyI_d));
DeviceIndexArrayView row_indices = DeviceIndexArrayView(wrapped_device_I, wrapped_device_I + TyI_d.size());
thrust::device_ptr<int> wrapped_device_J(CASTI1(TyJ_d));
DeviceIndexArrayView column_indices = DeviceIndexArrayView(wrapped_device_J, wrapped_device_J + TyJ_d.size());
thrust::device_ptr<double> wrapped_device_V(CASTD1(Ty_d));
DeviceValueArrayView values = DeviceValueArrayView(wrapped_device_V, wrapped_device_V + Ty_d.size());
Ty = DeviceView(3*system->collisionDetector->numCollisions, 3*system->collisionDetector->numCollisions, Ty_d.size(), row_indices, column_indices, values);
// end create Ty
}
return 0;
}
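// Picks the centering parameter from a proximity measure: the duality gap <x,y>+s versus the
// log-barrier of x, y (and s), with milder steps when 'careful' is set; returns beta*(<x,y>+s)/(2(n+1)).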
double PJKIP::updateAlpha(double s) {
double alphaCen = 0.1;
double alpha1 = 10;
double betaCen;
double beta1;
double betaBD;
double fcentering;
double barrier;
double beta;
if(careful) {
betaCen=0.1;
beta1=0.5;
betaBD=1.0;
} else {
betaCen=1e-30;
beta1=0.1;
betaBD=0.5;
}
// choose centering force
double n = system->collisionDetector->numCollisions;
double dotprod = cusp::blas::dot(x,y)+s;
if(s > 0) {
fcentering=2.0*(n+1.0)*log(dotprod/(n+1.0));
barrier = log(pow(s,2.0));
} else {
fcentering=2.0*(n)*log(dotprod/n);
barrier = 0.0;
}
hipLaunchKernelGGL(( getDeterminant2), dim3(BLOCKS(system->collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTD1(x_d), CASTD1(tmp_d), system->collisionDetector->numCollisions);
barrier+= Thrust_Total(tmp_d);
hipLaunchKernelGGL(( getDeterminant2), dim3(BLOCKS(system->collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTD1(y_d), CASTD1(tmp_d), system->collisionDetector->numCollisions);
barrier+= Thrust_Total(tmp_d);
fcentering = fcentering - barrier;
if(fcentering != fcentering) fcentering = 0.0;
if(fcentering < alphaCen) {
beta = betaCen;
} else if(fcentering <= alpha1) {
beta = beta1;
} else {
beta = betaBD;
}
return 0.5*beta*dotprod/(n+1.0);
}
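// Matrix-free application of the scaled operator Ty*D*mass*DT*invTx to src; the result is left in
// tmp, while tmp2 and the system work vectors are clobbered as scratch (see the notes at call sites).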
int PJKIP::performSchurComplementProduct(DeviceValueArrayView src, DeviceValueArrayView tmp2) {
cusp::multiply(invTx,src,tmp);
cusp::multiply(system->DT,tmp,system->f_contact);
cusp::multiply(system->mass,system->f_contact,system->tmp);
cusp::multiply(system->D,system->tmp,tmp2);
cusp::multiply(Ty,tmp2,tmp);
return 0;
}
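// Forms N = Ty * (D * mass * DT) * invTx explicitly, reusing MinvDT as scratch storage.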
int PJKIP::buildSchurMatrix() {
// build N
cusp::multiply(system->mass,system->DT,system->MinvDT);
cusp::multiply(system->D,system->MinvDT,system->N);
cusp::multiply(system->N,invTx,system->MinvDT);
cusp::multiply(Ty,system->MinvDT,system->N);
return 0;
}
int PJKIP::solve() {
solverOptions.relTol = ::min(0.01 * tolerance, 1e-6);
solverOptions.absTol = 1e-10;
// Initialize scalars
double residual = 10e30;
// vectors: x, y, dx, dy, d, b
system->gamma_d.resize(3*system->collisionDetector->numCollisions);
x_d.resize(3*system->collisionDetector->numCollisions);
y_d.resize(3*system->collisionDetector->numCollisions);
dx_d.resize(3*system->collisionDetector->numCollisions);
dy_d.resize(3*system->collisionDetector->numCollisions);
d_d.resize(3*system->collisionDetector->numCollisions);
b_d.resize(3*system->collisionDetector->numCollisions);
r_d.resize(3*system->collisionDetector->numCollisions);
tmp_d.resize(3*system->collisionDetector->numCollisions);
// matrices: invTx, Ty, Pw, R(?)
invTxI_d.resize(3*system->collisionDetector->numCollisions);
invTxJ_d.resize(3*system->collisionDetector->numCollisions);
invTx_d.resize(3*system->collisionDetector->numCollisions);
TyI_d.resize(3*system->collisionDetector->numCollisions);
TyJ_d.resize(3*system->collisionDetector->numCollisions);
Ty_d.resize(3*system->collisionDetector->numCollisions);
PwI_d.resize(9*system->collisionDetector->numCollisions);
PwJ_d.resize(9*system->collisionDetector->numCollisions);
Pw_d.resize(9*system->collisionDetector->numCollisions);
// TODO: There's got to be a better way to do this...
// vectors: x, y, dx, dy, d, b
thrust::device_ptr<double> wrapped_device_gamma(CASTD1(system->gamma_d));
thrust::device_ptr<double> wrapped_device_x(CASTD1(x_d));
thrust::device_ptr<double> wrapped_device_y(CASTD1(y_d));
thrust::device_ptr<double> wrapped_device_dx(CASTD1(dx_d));
thrust::device_ptr<double> wrapped_device_dy(CASTD1(dy_d));
thrust::device_ptr<double> wrapped_device_d(CASTD1(d_d));
thrust::device_ptr<double> wrapped_device_b(CASTD1(b_d));
thrust::device_ptr<double> wrapped_device_r(CASTD1(r_d));
thrust::device_ptr<double> wrapped_device_tmp(CASTD1(tmp_d));
system->gamma = DeviceValueArrayView(wrapped_device_gamma, wrapped_device_gamma + system->gamma_d.size());
x = DeviceValueArrayView(wrapped_device_x, wrapped_device_x + x_d.size());
y = DeviceValueArrayView(wrapped_device_y, wrapped_device_y + y_d.size());
dx = DeviceValueArrayView(wrapped_device_dx, wrapped_device_dx + dx_d.size());
dy = DeviceValueArrayView(wrapped_device_dy, wrapped_device_dy + dy_d.size());
d = DeviceValueArrayView(wrapped_device_d, wrapped_device_d + d_d.size());
b = DeviceValueArrayView(wrapped_device_b, wrapped_device_b + b_d.size());
r = DeviceValueArrayView(wrapped_device_r, wrapped_device_r + r_d.size());
tmp = DeviceValueArrayView(wrapped_device_tmp, wrapped_device_tmp + tmp_d.size());
// initialize matrices and vectors
initializeT();
if(verbose) {
cusp::print(invTx);
cusp::print(Ty);
cin.get();
}
initializePw();
buildSchurMatrix();
cusp::multiply(Ty,system->r,r);
hipLaunchKernelGGL(( initializeImpulseVector2), dim3(BLOCKS(system->collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTD1(x_d), CASTD1(system->friction_d), system->collisionDetector->numCollisions);
performSchurComplementProduct(x,y); //NOTE: y is destroyed here
cusp::blas::axpby(tmp,r,y,1.0,1.0);
// determine initial alpha
double alpha = cusp::blas::dot(x,y);
alpha = 0.5*abs(alpha)/((double) system->collisionDetector->numCollisions);
if(verbose) {
cusp::print(system->DT);
cusp::print(system->mass);
cusp::print(system->D);
cusp::print(r);
cusp::print(x);
cusp::print(y);
cout << "alpha: " << alpha << endl;
}
// determine d vector
double s = 2*alpha;
hipLaunchKernelGGL(( initialize_d2), dim3(BLOCKS(system->collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTD1(x_d), CASTD1(y_d), CASTD1(d_d), s, system->collisionDetector->numCollisions);
cusp::blas::axpy(d,y,s);
bool feasible = false;
alpha = updateAlpha(s);
if(verbose) {
cusp::print(x);
cusp::print(y);
cusp::print(d);
cout << "alpha: " << alpha << endl;
}
double ds = 0;
int k;
totalKrylovIterations = 0;
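// Main interior-point loop: refresh the scaling Pw and A = N + Pw, build b = alpha*x^{-1} - y - ds*d,
// solve A*dx = b with the Spike solver, recover dy from the Schur product of dx plus ds*d, take a
// fraction-to-the-boundary step, then update s, the feasibility/optimality residuals, and alpha.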
for (k=0; k < maxIterations; k++) {
hipLaunchKernelGGL(( updatePw2), dim3(BLOCKS(system->collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTD1(Pw_d), CASTD1(x_d), CASTD1(y_d), system->collisionDetector->numCollisions);
cusp::add(system->N,Pw,A);
if(verbose) {
cusp::print(Pw);
cin.get();
}
if(feasible) {
ds = 0.0;
} else {
ds = 2.0*alpha-s;
}
if(verbose) {
cout << "ds: " << ds << endl;
cin.get();
}
hipLaunchKernelGGL(( getInverse2), dim3(BLOCKS(system->collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTD1(x_d), CASTD1(b_d), system->collisionDetector->numCollisions);
cusp::blas::axpbypcz(b,y,d,b,alpha,-1.0,-ds);
if(verbose) {
cusp::print(b);
cin.get();
system->buildSchurMatrix(); //TODO: remove this!
cusp::print(system->N);
cin.get();
}
// solve system
delete mySolver;
//m_spmv = new MySpmvJKIP(system->mass, system->D, system->DT, Pw, Ty, invTx, system->tmp, system->f_contact, tmp);
mySolver = new SpikeSolver(partitions, solverOptions);
mySolver->setup(A); //TODO: Use preconditioning here! Need to build full matrix...
cusp::blas::fill(dx, 0.0);
bool success = mySolver->solve(A, b, dx);
spike::Stats stats = mySolver->getStats();
if(verbose) {
cusp::print(dx);
cin.get();
}
performSchurComplementProduct(dx,dy); //NOTE: dy is destroyed here
cusp::blas::axpby(tmp,d,dy,1.0,ds);
if(verbose) {
cusp::print(dy);
cin.get();
}
hipLaunchKernelGGL(( getStepLength2), dim3(BLOCKS(system->collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTD1(x_d), CASTD1(dx_d), CASTD1(y_d), CASTD1(dy_d), CASTD1(tmp_d), system->collisionDetector->numCollisions);
double theta = fmin(Thrust_Min(tmp_d),1.0);
if(verbose) {
cusp::print(x);
cusp::print(dx);
cusp::print(tmp);
std::cout << "theta: " << theta << std::endl;
cin.get();
}
cusp::blas::axpy(dx,x,theta);
cusp::blas::axpy(dy,y,theta);
s+=theta*ds;
// check feasible and optimal
hipLaunchKernelGGL(( getFeasible2), dim3(BLOCKS(system->collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTD1(x_d), CASTD1(tmp_d), system->collisionDetector->numCollisions);
double feasibleX = Thrust_Max(tmp_d);
cusp::blas::axpby(y,d,tmp,1.0,-s);
//double optim = abs(cusp::blas::dot(x,tmp))/((double) system->collisionDetector->numCollisions);
hipLaunchKernelGGL(( getResidual_PJKIP), dim3(BLOCKS(system->collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTD1(tmp_d), CASTD1(x_d), system->collisionDetector->numCollisions);
double optim = cusp::blas::nrmmax(tmp);
cusp::blas::axpby(y,d,tmp,1.0,-s);
hipLaunchKernelGGL(( getFeasible2), dim3(BLOCKS(system->collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTD1(tmp_d), CASTD1(tmp_d), system->collisionDetector->numCollisions);
double feasibleY = Thrust_Max(tmp_d);
if(feasible==false && feasibleY == 0) {
cusp::blas::axpy(d,y,-s);
s = 0.0;
feasible = true;
}
residual = fmax(feasibleX,feasibleY);
residual = fmax(residual,optim);
if (residual < tolerance) break;
if(verbose) cusp::print(x);
alpha = updateAlpha(s);
totalKrylovIterations += stats.numIterations;
if(verbose) {
cout << " Iterations: " << k << " Residual: " << residual << " Total Krylov iters: " << totalKrylovIterations << endl;
cin.get();
}
}
cusp::multiply(invTx,x,system->gamma);
iterations = k;
cout << " Iterations: " << k << " Residual: " << residual << " Total Krylov iters: " << totalKrylovIterations << endl;
return 0;
}
|
693782ed48521600399e065930b87f4a29a79b46.cu
|
#include <algorithm>
#include <vector>
#include "include.cuh"
#include "PJKIP.cuh"
PJKIP::PJKIP(System* sys)
{
system = sys;
careful = false;
tolerance = 1e-4;
maxIterations = 1000;
iterations = 0;
totalKrylovIterations = 0;
// spike stuff
partitions = 1;
solverOptions.safeFactorization = true;
solverOptions.trackReordering = true;
solverOptions.maxNumIterations = 5000;
preconditionerUpdateModulus = -1; // the preconditioner updates every ___ time steps
preconditionerMaxKrylovIterations = -1; // the preconditioner updates if Krylov iterations are greater than ____ iterations
mySolver = new SpikeSolver(partitions, solverOptions);
//m_spmv = new MySpmv(grad_f, grad_f_T, system->D, system->DT, system->mass, lambda, lambdaTmp, Dinv, M_hat, gammaTmp, system->f_contact, system->tmp);
stepKrylovIterations = 0;
precUpdated = 0;
// end spike stuff
}
int PJKIP::setup()
{
// vectors: x, y, dx, dy, d, b
x_d = system->a_h;
y_d = system->a_h;
dx_d = system->a_h;
dy_d = system->a_h;
d_d = system->a_h;
b_d = system->a_h;
r_d = system->a_h;
tmp_d = system->a_h;
thrust::device_ptr<double> wrapped_device_x(CASTD1(x_d));
thrust::device_ptr<double> wrapped_device_y(CASTD1(y_d));
thrust::device_ptr<double> wrapped_device_dx(CASTD1(dx_d));
thrust::device_ptr<double> wrapped_device_dy(CASTD1(dy_d));
thrust::device_ptr<double> wrapped_device_d(CASTD1(d_d));
thrust::device_ptr<double> wrapped_device_b(CASTD1(b_d));
thrust::device_ptr<double> wrapped_device_r(CASTD1(r_d));
thrust::device_ptr<double> wrapped_device_tmp(CASTD1(tmp_d));
x = DeviceValueArrayView(wrapped_device_x, wrapped_device_x + x_d.size());
y = DeviceValueArrayView(wrapped_device_y, wrapped_device_y + y_d.size());
dx = DeviceValueArrayView(wrapped_device_dx, wrapped_device_dx + dx_d.size());
dy = DeviceValueArrayView(wrapped_device_dy, wrapped_device_dy + dy_d.size());
d = DeviceValueArrayView(wrapped_device_d, wrapped_device_d + d_d.size());
b = DeviceValueArrayView(wrapped_device_b, wrapped_device_b + b_d.size());
r = DeviceValueArrayView(wrapped_device_r, wrapped_device_r + r_d.size());
tmp = DeviceValueArrayView(wrapped_device_tmp, wrapped_device_tmp + tmp_d.size());
return 0;
}
void PJKIP::setSolverType(int solverType)
{
switch(solverType) {
case 0:
solverOptions.solverType = spike::BiCGStab;
break;
case 1:
solverOptions.solverType = spike::BiCGStab1;
break;
case 2:
solverOptions.solverType = spike::BiCGStab2;
break;
case 3:
solverOptions.solverType = spike::MINRES;
break;
case 4:
solverOptions.solverType = spike::CG_C;
break;
case 5:
solverOptions.solverType = spike::CR_C;
break;
}
}
void PJKIP::setPrecondType(int useSpike)
{
solverOptions.precondType = useSpike ? spike::Spike : spike::None;
}
void PJKIP::printSolverParams()
{
// printf("Step size: %e\n", h);
// printf("Newton tolerance: %e\n", tol);
printf("Krylov relTol: %e abdTol: %e\n", solverOptions.relTol, solverOptions.absTol);
printf("Max. Krylov iterations: %d\n", solverOptions.maxNumIterations);
printf("----------------------------\n");
}
__global__ void initialize_d2(double* x, double* y, double* d, double s, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double xn = x[3*index];
double xt1 = x[3*index+1];
double xt2 = x[3*index+2];
double y0 = y[3*index];
double y1 = y[3*index+1];
double y2 = y[3*index+2];
double yBar0 = s/(xn-(pow(xt1,2.0)+pow(xt2,2.0))/xn);
double yBar1 = -(yBar0/xn)*xt1;
double yBar2 = -(yBar0/xn)*xt2;
d[3*index] = (yBar0-y0)/s;
d[3*index+1] = (yBar1-y1)/s;
d[3*index+2] = (yBar2-y2)/s;
}
__global__ void getInverse2(double* src, double* dst, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double xn = src[3*index];
double xt1 = src[3*index+1];
double xt2 = src[3*index+2];
double d = 0.5*(pow(xn,2.0)-(pow(xt1,2.0)+pow(xt2,2.0)));
dst[3*index] = xn/d;
dst[3*index+1] = -xt1/d;
dst[3*index+2] = -xt2/d;
}
__global__ void getFeasible2(double* src, double* dst, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double xn = src[3*index];
double xt1 = src[3*index+1];
double xt2 = src[3*index+2];
xn = xn-sqrt(pow(xt1,2.0)+pow(xt2,2.0));
if(xn!=xn) xn = 0.0;
dst[3*index] = -fmin(0.0,xn);
dst[3*index+1] = -10e30;
dst[3*index+2] = -10e30;
}
__global__ void getResidual_PJKIP(double* src, double* gamma, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
src[3*index] = src[3*index]*gamma[3*index]+src[3*index+1]*gamma[3*index+1]+src[3*index+2]*gamma[3*index+2];
src[3*index+1] = 0;
src[3*index+2] = 0;
}
__global__ void getStepLength2(double* x, double* dx, double* y, double* dy, double* dst, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double tx;
double ty;
double aux;
double xn = x[3*index];
double xt1 = x[3*index+1];
double xt2 = x[3*index+2];
double detx = 0.5*(pow(xn,2.0)-(pow(xt1,2.0)+pow(xt2,2.0)));
double dxn = dx[3*index];
double dxt1 = dx[3*index+1];
double dxt2 = dx[3*index+2];
double detdx = 0.5*(pow(dxn,2.0)-(pow(dxt1,2.0)+pow(dxt2,2.0)));
if(detdx>0 && dxn>0) {
tx = 1.0;
} else {
tx = 0.5*(dxn*xn-dxt1*xt1-dxt2*xt2);
aux = pow(tx,2.0)-detdx*detx;
if(aux>0.0) {
tx = 0.99*detx/(sqrt(aux)-tx);
} else {
tx = -0.99*detx/tx;
}
}
xn = y[3*index];
xt1 = y[3*index+1];
xt2 = y[3*index+2];
detx = 0.5*(pow(xn,2.0)-(pow(xt1,2.0)+pow(xt2,2.0)));
dxn = dy[3*index];
dxt1 = dy[3*index+1];
dxt2 = dy[3*index+2];
detdx = 0.5*(pow(dxn,2.0)-(pow(dxt1,2.0)+pow(dxt2,2.0)));
if(detdx>0 && dxn>0) {
ty = 1.0;
} else {
ty = 0.5*(dxn*xn-dxt1*xt1-dxt2*xt2);
aux = pow(ty,2.0)-detdx*detx;
if(aux>0.0) {
ty = 0.99*detx/(sqrt(aux)-ty);
} else {
ty = -0.99*detx/ty;
}
}
dst[3*index] = tx;
dst[3*index+1] = ty;
dst[3*index+2] = 1.0;
}
__global__ void getDeterminant2(double* src, double* dst, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double xn = src[3*index];
double xt1 = src[3*index+1];
double xt2 = src[3*index+2];
double d = 0.5*(pow(xn,2.0)-(pow(xt1,2.0)+pow(xt2,2.0)));
dst[3*index] = log(2.0*d);
dst[3*index+1] = 0.0;
dst[3*index+2] = 0.0;
}
__global__ void constructPw2(int* PwI, int* PwJ, double* Pw, double* x, double* y, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double x0 = x[3*index];
double x1 = x[3*index+1];
double x2 = x[3*index+2];
double y0 = y[3*index];
double y1 = y[3*index+1];
double y2 = y[3*index+2];
double sqrtDetx = sqrt(0.5*(pow(x0,2.0) - (pow(x1,2.0)+pow(x2,2.0))));
double sqrtDety = sqrt(0.5*(pow(y0,2.0) - (pow(y1,2.0)+pow(y2,2.0))));
if(sqrtDetx!=sqrtDetx) sqrtDetx = 0.0;
if(sqrtDety!=sqrtDety) sqrtDety = 0.0;
PwI[9*index] = 3*index;
PwJ[9*index] = 3*index;
Pw[9*index] = pow(y0 + (sqrtDety*x0)/sqrtDetx,2.0)/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety) - sqrtDety/sqrtDetx;
PwI[9*index+1] = 3*index;
PwJ[9*index+1] = 3*index+1;
Pw[9*index+1] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y1 - (sqrtDety*x1)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
PwI[9*index+2] = 3*index;
PwJ[9*index+2] = 3*index+2;
Pw[9*index+2] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
PwI[9*index+3] = 3*index+1;
PwJ[9*index+3] = 3*index;
Pw[9*index+3] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y1 - (sqrtDety*x1)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
PwI[9*index+4] = 3*index+1;
PwJ[9*index+4] = 3*index+1;
Pw[9*index+4] = pow(y1 - (sqrtDety*x1)/sqrtDetx,2.0)/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety) + sqrtDety/sqrtDetx;
PwI[9*index+5] = 3*index+1;
PwJ[9*index+5] = 3*index+2;
Pw[9*index+5] = ((y1 - (sqrtDety*x1)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
PwI[9*index+6] = 3*index+2;
PwJ[9*index+6] = 3*index;
Pw[9*index+6] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
PwI[9*index+7] = 3*index+2;
PwJ[9*index+7] = 3*index+1;
Pw[9*index+7] = ((y1 - (sqrtDety*x1)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
PwI[9*index+8] = 3*index+2;
PwJ[9*index+8] = 3*index+2;
Pw[9*index+8] = pow(y2 - (sqrtDety*x2)/sqrtDetx,2.0)/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety) + sqrtDety/sqrtDetx;
}
__global__ void updatePw2(double* Pw, double* x, double* y, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double x0 = x[3*index];
double x1 = x[3*index+1];
double x2 = x[3*index+2];
double y0 = y[3*index];
double y1 = y[3*index+1];
double y2 = y[3*index+2];
double sqrtDetx = sqrt(abs(0.5*(pow(x0,2.0) - (pow(x1,2.0)+pow(x2,2.0)))));
double sqrtDety = sqrt(abs(0.5*(pow(y0,2.0) - (pow(y1,2.0)+pow(y2,2.0)))));
//if(sqrtDetx!=sqrtDetx) sqrtDetx = 1e-4;//0.0;
//if(sqrtDety!=sqrtDety) sqrtDety = 0.0;
Pw[9*index] = pow(y0 + (sqrtDety*x0)/sqrtDetx,2.0)/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety) - sqrtDety/sqrtDetx;
Pw[9*index+1] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y1 - (sqrtDety*x1)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
Pw[9*index+2] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
Pw[9*index+3] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y1 - (sqrtDety*x1)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
Pw[9*index+4] = pow(y1 - (sqrtDety*x1)/sqrtDetx,2.0)/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety) + sqrtDety/sqrtDetx;
Pw[9*index+5] = ((y1 - (sqrtDety*x1)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
Pw[9*index+6] = ((y0 + (sqrtDety*x0)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
Pw[9*index+7] = ((y1 - (sqrtDety*x1)/sqrtDetx)*(y2 - (sqrtDety*x2)/sqrtDetx))/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety);
Pw[9*index+8] = pow(y2 - (sqrtDety*x2)/sqrtDetx,2.0)/(x0*y0 + x1*y1 + x2*y2 + 2*sqrtDetx*sqrtDety) + sqrtDety/sqrtDetx;
}
int PJKIP::initializePw() {
constructPw2<<<BLOCKS(system->collisionDetector->numCollisions),THREADS>>>(CASTI1(PwI_d), CASTI1(PwJ_d), CASTD1(Pw_d), CASTD1(x_d), CASTD1(y_d), system->collisionDetector->numCollisions);
// create Pw using cusp library
thrust::device_ptr<int> wrapped_device_I(CASTI1(PwI_d));
DeviceIndexArrayView row_indices = DeviceIndexArrayView(wrapped_device_I, wrapped_device_I + PwI_d.size());
thrust::device_ptr<int> wrapped_device_J(CASTI1(PwJ_d));
DeviceIndexArrayView column_indices = DeviceIndexArrayView(wrapped_device_J, wrapped_device_J + PwJ_d.size());
thrust::device_ptr<double> wrapped_device_V(CASTD1(Pw_d));
DeviceValueArrayView values = DeviceValueArrayView(wrapped_device_V, wrapped_device_V + Pw_d.size());
Pw = DeviceView(3*system->collisionDetector->numCollisions, 3*system->collisionDetector->numCollisions, Pw_d.size(), row_indices, column_indices, values);
// end create Pw
return 0;
}
__global__ void initializeImpulseVector2(double* src, double* friction, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double mu = friction[index];
src[3*index ] = mu;
src[3*index+1] = 0.0;
src[3*index+2] = 0.0;
}
__global__ void constructT2(int* invTxI, int* invTxJ, double* invTx, int* TyI, int* TyJ, double* Ty, double* friction, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double mu = friction[index];
invTxI[3*index] = 3*index;
invTxJ[3*index] = 3*index;
invTx[3*index] = 1.0/mu;
invTxI[3*index+1] = 3*index+1;
invTxJ[3*index+1] = 3*index+1;
invTx[3*index+1] = 1.0;
invTxI[3*index+2] = 3*index+2;
invTxJ[3*index+2] = 3*index+2;
invTx[3*index+2] = 1.0;
TyI[3*index] = 3*index;
TyJ[3*index] = 3*index;
Ty[3*index] = 1.0;
TyI[3*index+1] = 3*index+1;
TyJ[3*index+1] = 3*index+1;
Ty[3*index+1] = mu;
TyI[3*index+2] = 3*index+2;
TyJ[3*index+2] = 3*index+2;
Ty[3*index+2] = mu;
}
int PJKIP::initializeT() {
constructT2<<<BLOCKS(system->collisionDetector->numCollisions),THREADS>>>(CASTI1(invTxI_d), CASTI1(invTxJ_d), CASTD1(invTx_d), CASTI1(TyI_d), CASTI1(TyJ_d), CASTD1(Ty_d), CASTD1(system->friction_d), system->collisionDetector->numCollisions);
{
// create invTx using cusp library
thrust::device_ptr<int> wrapped_device_I(CASTI1(invTxI_d));
DeviceIndexArrayView row_indices = DeviceIndexArrayView(wrapped_device_I, wrapped_device_I + invTxI_d.size());
thrust::device_ptr<int> wrapped_device_J(CASTI1(invTxJ_d));
DeviceIndexArrayView column_indices = DeviceIndexArrayView(wrapped_device_J, wrapped_device_J + invTxJ_d.size());
thrust::device_ptr<double> wrapped_device_V(CASTD1(invTx_d));
DeviceValueArrayView values = DeviceValueArrayView(wrapped_device_V, wrapped_device_V + invTx_d.size());
invTx = DeviceView(3*system->collisionDetector->numCollisions, 3*system->collisionDetector->numCollisions, invTx_d.size(), row_indices, column_indices, values);
// end create invTx
}
{
// create Ty using cusp library
thrust::device_ptr<int> wrapped_device_I(CASTI1(TyI_d));
DeviceIndexArrayView row_indices = DeviceIndexArrayView(wrapped_device_I, wrapped_device_I + TyI_d.size());
thrust::device_ptr<int> wrapped_device_J(CASTI1(TyJ_d));
DeviceIndexArrayView column_indices = DeviceIndexArrayView(wrapped_device_J, wrapped_device_J + TyJ_d.size());
thrust::device_ptr<double> wrapped_device_V(CASTD1(Ty_d));
DeviceValueArrayView values = DeviceValueArrayView(wrapped_device_V, wrapped_device_V + Ty_d.size());
Ty = DeviceView(3*system->collisionDetector->numCollisions, 3*system->collisionDetector->numCollisions, Ty_d.size(), row_indices, column_indices, values);
// end create Ty
}
return 0;
}
double PJKIP::updateAlpha(double s) {
double alphaCen = 0.1;
double alpha1 = 10;
double betaCen;
double beta1;
double betaBD;
double fcentering;
double barrier;
double beta;
if(careful) {
betaCen=0.1;
beta1=0.5;
betaBD=1.0;
} else {
betaCen=1e-30;
beta1=0.1;
betaBD=0.5;
}
// choose centering force
double n = system->collisionDetector->numCollisions;
double dotprod = cusp::blas::dot(x,y)+s;
if(s > 0) {
fcentering=2.0*(n+1.0)*log(dotprod/(n+1.0));
barrier = log(pow(s,2.0));
} else {
fcentering=2.0*(n)*log(dotprod/n);
barrier = 0.0;
}
getDeterminant2<<<BLOCKS(system->collisionDetector->numCollisions),THREADS>>>(CASTD1(x_d), CASTD1(tmp_d), system->collisionDetector->numCollisions);
barrier+= Thrust_Total(tmp_d);
getDeterminant2<<<BLOCKS(system->collisionDetector->numCollisions),THREADS>>>(CASTD1(y_d), CASTD1(tmp_d), system->collisionDetector->numCollisions);
barrier+= Thrust_Total(tmp_d);
fcentering = fcentering - barrier;
if(fcentering != fcentering) fcentering = 0.0;
if(fcentering < alphaCen) {
beta = betaCen;
} else if(fcentering <= alpha1) {
beta = beta1;
} else {
beta = betaBD;
}
return 0.5*beta*dotprod/(n+1.0);
}
int PJKIP::performSchurComplementProduct(DeviceValueArrayView src, DeviceValueArrayView tmp2) {
cusp::multiply(invTx,src,tmp);
cusp::multiply(system->DT,tmp,system->f_contact);
cusp::multiply(system->mass,system->f_contact,system->tmp);
cusp::multiply(system->D,system->tmp,tmp2);
cusp::multiply(Ty,tmp2,tmp);
return 0;
}
int PJKIP::buildSchurMatrix() {
// build N
cusp::multiply(system->mass,system->DT,system->MinvDT);
cusp::multiply(system->D,system->MinvDT,system->N);
cusp::multiply(system->N,invTx,system->MinvDT);
cusp::multiply(Ty,system->MinvDT,system->N);
return 0;
}
int PJKIP::solve() {
solverOptions.relTol = std::min(0.01 * tolerance, 1e-6);
solverOptions.absTol = 1e-10;
// Initialize scalars
double residual = 10e30;
// vectors: x, y, dx, dy, d, b
system->gamma_d.resize(3*system->collisionDetector->numCollisions);
x_d.resize(3*system->collisionDetector->numCollisions);
y_d.resize(3*system->collisionDetector->numCollisions);
dx_d.resize(3*system->collisionDetector->numCollisions);
dy_d.resize(3*system->collisionDetector->numCollisions);
d_d.resize(3*system->collisionDetector->numCollisions);
b_d.resize(3*system->collisionDetector->numCollisions);
r_d.resize(3*system->collisionDetector->numCollisions);
tmp_d.resize(3*system->collisionDetector->numCollisions);
// matrices: invTx, Ty, Pw, R(?)
invTxI_d.resize(3*system->collisionDetector->numCollisions);
invTxJ_d.resize(3*system->collisionDetector->numCollisions);
invTx_d.resize(3*system->collisionDetector->numCollisions);
TyI_d.resize(3*system->collisionDetector->numCollisions);
TyJ_d.resize(3*system->collisionDetector->numCollisions);
Ty_d.resize(3*system->collisionDetector->numCollisions);
PwI_d.resize(9*system->collisionDetector->numCollisions);
PwJ_d.resize(9*system->collisionDetector->numCollisions);
Pw_d.resize(9*system->collisionDetector->numCollisions);
// TODO: There's got to be a better way to do this...
// vectors: x, y, dx, dy, d, b
thrust::device_ptr<double> wrapped_device_gamma(CASTD1(system->gamma_d));
thrust::device_ptr<double> wrapped_device_x(CASTD1(x_d));
thrust::device_ptr<double> wrapped_device_y(CASTD1(y_d));
thrust::device_ptr<double> wrapped_device_dx(CASTD1(dx_d));
thrust::device_ptr<double> wrapped_device_dy(CASTD1(dy_d));
thrust::device_ptr<double> wrapped_device_d(CASTD1(d_d));
thrust::device_ptr<double> wrapped_device_b(CASTD1(b_d));
thrust::device_ptr<double> wrapped_device_r(CASTD1(r_d));
thrust::device_ptr<double> wrapped_device_tmp(CASTD1(tmp_d));
system->gamma = DeviceValueArrayView(wrapped_device_gamma, wrapped_device_gamma + system->gamma_d.size());
x = DeviceValueArrayView(wrapped_device_x, wrapped_device_x + x_d.size());
y = DeviceValueArrayView(wrapped_device_y, wrapped_device_y + y_d.size());
dx = DeviceValueArrayView(wrapped_device_dx, wrapped_device_dx + dx_d.size());
dy = DeviceValueArrayView(wrapped_device_dy, wrapped_device_dy + dy_d.size());
d = DeviceValueArrayView(wrapped_device_d, wrapped_device_d + d_d.size());
b = DeviceValueArrayView(wrapped_device_b, wrapped_device_b + b_d.size());
r = DeviceValueArrayView(wrapped_device_r, wrapped_device_r + r_d.size());
tmp = DeviceValueArrayView(wrapped_device_tmp, wrapped_device_tmp + tmp_d.size());
// initialize matrices and vectors
initializeT();
if(verbose) {
cusp::print(invTx);
cusp::print(Ty);
cin.get();
}
initializePw();
buildSchurMatrix();
cusp::multiply(Ty,system->r,r);
initializeImpulseVector2<<<BLOCKS(system->collisionDetector->numCollisions),THREADS>>>(CASTD1(x_d), CASTD1(system->friction_d), system->collisionDetector->numCollisions);
performSchurComplementProduct(x,y); //NOTE: y is destroyed here
cusp::blas::axpby(tmp,r,y,1.0,1.0);
// determine initial alpha
double alpha = cusp::blas::dot(x,y);
alpha = 0.5*abs(alpha)/((double) system->collisionDetector->numCollisions);
if(verbose) {
cusp::print(system->DT);
cusp::print(system->mass);
cusp::print(system->D);
cusp::print(r);
cusp::print(x);
cusp::print(y);
cout << "alpha: " << alpha << endl;
}
// determine d vector
double s = 2*alpha;
initialize_d2<<<BLOCKS(system->collisionDetector->numCollisions),THREADS>>>(CASTD1(x_d), CASTD1(y_d), CASTD1(d_d), s, system->collisionDetector->numCollisions);
cusp::blas::axpy(d,y,s);
bool feasible = false;
alpha = updateAlpha(s);
if(verbose) {
cusp::print(x);
cusp::print(y);
cusp::print(d);
cout << "alpha: " << alpha << endl;
}
double ds = 0;
int k;
totalKrylovIterations = 0;
for (k=0; k < maxIterations; k++) {
updatePw2<<<BLOCKS(system->collisionDetector->numCollisions),THREADS>>>(CASTD1(Pw_d), CASTD1(x_d), CASTD1(y_d), system->collisionDetector->numCollisions);
cusp::add(system->N,Pw,A);
if(verbose) {
cusp::print(Pw);
cin.get();
}
if(feasible) {
ds = 0.0;
} else {
ds = 2.0*alpha-s;
}
if(verbose) {
cout << "ds: " << ds << endl;
cin.get();
}
getInverse2<<<BLOCKS(system->collisionDetector->numCollisions),THREADS>>>(CASTD1(x_d), CASTD1(b_d), system->collisionDetector->numCollisions);
cusp::blas::axpbypcz(b,y,d,b,alpha,-1.0,-ds);
if(verbose) {
cusp::print(b);
cin.get();
system->buildSchurMatrix(); //TODO: remove this!
cusp::print(system->N);
cin.get();
}
// solve system
delete mySolver;
//m_spmv = new MySpmvJKIP(system->mass, system->D, system->DT, Pw, Ty, invTx, system->tmp, system->f_contact, tmp);
mySolver = new SpikeSolver(partitions, solverOptions);
mySolver->setup(A); //TODO: Use preconditioning here! Need to build full matrix...
cusp::blas::fill(dx, 0.0);
bool success = mySolver->solve(A, b, dx);
spike::Stats stats = mySolver->getStats();
if(verbose) {
cusp::print(dx);
cin.get();
}
performSchurComplementProduct(dx,dy); //NOTE: dy is destroyed here
cusp::blas::axpby(tmp,d,dy,1.0,ds);
if(verbose) {
cusp::print(dy);
cin.get();
}
getStepLength2<<<BLOCKS(system->collisionDetector->numCollisions),THREADS>>>(CASTD1(x_d), CASTD1(dx_d), CASTD1(y_d), CASTD1(dy_d), CASTD1(tmp_d), system->collisionDetector->numCollisions);
double theta = fmin(Thrust_Min(tmp_d),1.0);
if(verbose) {
cusp::print(x);
cusp::print(dx);
cusp::print(tmp);
std::cout << "theta: " << theta << std::endl;
cin.get();
}
cusp::blas::axpy(dx,x,theta);
cusp::blas::axpy(dy,y,theta);
s+=theta*ds;
// check feasible and optimal
getFeasible2<<<BLOCKS(system->collisionDetector->numCollisions),THREADS>>>(CASTD1(x_d), CASTD1(tmp_d), system->collisionDetector->numCollisions);
double feasibleX = Thrust_Max(tmp_d);
cusp::blas::axpby(y,d,tmp,1.0,-s);
//double optim = abs(cusp::blas::dot(x,tmp))/((double) system->collisionDetector->numCollisions);
getResidual_PJKIP<<<BLOCKS(system->collisionDetector->numCollisions),THREADS>>>(CASTD1(tmp_d), CASTD1(x_d), system->collisionDetector->numCollisions);
double optim = cusp::blas::nrmmax(tmp);
cusp::blas::axpby(y,d,tmp,1.0,-s);
getFeasible2<<<BLOCKS(system->collisionDetector->numCollisions),THREADS>>>(CASTD1(tmp_d), CASTD1(tmp_d), system->collisionDetector->numCollisions);
double feasibleY = Thrust_Max(tmp_d);
if(feasible==false && feasibleY == 0) {
cusp::blas::axpy(d,y,-s);
s = 0.0;
feasible = true;
}
residual = fmax(feasibleX,feasibleY);
residual = fmax(residual,optim);
if (residual < tolerance) break;
if(verbose) cusp::print(x);
alpha = updateAlpha(s);
totalKrylovIterations += stats.numIterations;
if(verbose) {
cout << " Iterations: " << k << " Residual: " << residual << " Total Krylov iters: " << totalKrylovIterations << endl;
cin.get();
}
}
cusp::multiply(invTx,x,system->gamma);
iterations = k;
cout << " Iterations: " << k << " Residual: " << residual << " Total Krylov iters: " << totalKrylovIterations << endl;
return 0;
}
|
25b6f45edf56ef34c9ba1f28c823153ee795e663.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
#include <stdlib.h>
#include "cdist_grad_impl.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
template <typename T>
__device__ __forceinline__ T sign(T val) {
return ((0.0) < static_cast<float>(val)) - (static_cast<float>(val) < (0.0));
}
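// Grid-stride loop that zero-fills the output gradient buffer.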
template <typename T>
__global__ void InitOutput(T *grad, const size_t size) {
T zero = 0.0;
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
grad[pos] = zero;
}
return;
}
// p == 1: L1 (Manhattan) distance gradient; each coordinate contributes grad_k * sign(diff)
template <typename T>
__global__ void CdistGradOne(T *grad, T *dist, T *t1, T *t2, T *res, double p, int64_t r1, int64_t r2, int64_t col,
int64_t count, size_t r_size, int64_t x1_size, int64_t x2_size) {
const int current = (blockIdx.y * gridDim.z + blockIdx.z) * blockDim.y + threadIdx.y;
const int current_i = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (current >= count || current_i >= col) {
return;
}
const T grad_k = grad[current];
const int current_l = current / r_size;
const int current_k = current % r_size;
int64_t m = current_k / r2;
int64_t n = current_k % r2;
const T * const start = t1 + current_l * x1_size + m * col;
const T * const end = start + col;
const T * self_m = start + current_i;
const T * self_n = t2 + current_l * x2_size + n * col + current_i;
T * res_m = res + current_l * x1_size + m * col + current_i;
for (; self_m < end; self_m += stride, self_n += stride, res_m += stride) {
T res = grad_k * sign(*self_m - *self_n);
MsAtomicAdd(res_m, res);
}
}
// p < 2: term is sign(diff) * |diff|^(p-1) * grad_k / dist^(p-1) (skipped when dist == 0 or p < 1)
template <typename T>
__global__ void CdistGradLessthanTwo(T *grad, T *dist, T *t1, T *t2, T *res, double p, int64_t r1,
int64_t r2, int64_t col, int64_t count, size_t r_size, int64_t x1_size,
int64_t x2_size) {
const int current = (blockIdx.y * gridDim.z + blockIdx.z) * blockDim.y + threadIdx.y;
const int current_i = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (current >= count || current_i >= col) {
return;
}
const T grad_k = grad[current];
const T dist_k = dist[current];
if (dist_k != 0.0 && p >= 1) {
const int current_l = current / r_size;
const int current_k = current % r_size;
int64_t m = current_k / r2;
int64_t n = current_k % r2;
const T * const start = t1 + current_l * x1_size + m * col;
const T * const end = start + col;
const T * self_m = start + current_i;
const T * self_n = t2 + current_l * x2_size + n * col + current_i;
T * res_m = res + current_l * x1_size + m * col + current_i;
double dist_k_pow = pow(static_cast<double>(dist_k), p - 1);
for (; self_m < end; self_m += stride, self_n += stride, res_m += stride) {
const T diff = *self_m - *self_n;
T res = static_cast<T>(sign(diff) * pow(static_cast<double>(abs(diff)), p - 1) * grad_k / dist_k_pow);
MsAtomicAdd(res_m, res);
}
}
}
// p == 2: Euclidean distance gradient; term is grad_k * diff / dist (skipped when dist == 0)
template <typename T>
__global__ void CdistGradTwo(T *grad, T *dist, T *t1, T *t2, T *res, double p, int64_t r1, int64_t r2, int64_t col,
int64_t count, size_t r_size, int64_t x1_size, int64_t x2_size) {
const int current = (blockIdx.y * gridDim.z + blockIdx.z) * blockDim.y + threadIdx.y;
const int current_i = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (current >= count || current_i >= col) {
return;
}
const T grad_k = grad[current];
const T dist_k = dist[current];
if (dist_k != 0.0) {
const int current_l = current / r_size;
const int current_k = current % r_size;
int64_t m = current_k / r2;
int64_t n = current_k % r2;
const T * const start = t1 + current_l * x1_size + m * col;
const T * const end = start + col;
const T * self_m = start + current_i;
const T * self_n = t2 + current_l * x2_size + n * col + current_i;
T * res_m = res + current_l * x1_size + m * col + current_i;
for (; self_m < end; self_m += stride, self_n += stride, res_m += stride) {
T res = grad_k * (*self_m - *self_n) / dist_k;
MsAtomicAdd(res_m, res);
}
}
}
// general finite p > 2: term is diff * |diff|^(p-2) * grad_k / dist^(p-1) (skipped when dist == 0)
template <typename T>
__global__ void CdistGradP(T *grad, T *dist, T *t1, T *t2, T *res, double p, int64_t r1, int64_t r2, int64_t col,
int64_t count, size_t r_size, int64_t x1_size, int64_t x2_size) {
const int current = (blockIdx.y * gridDim.z + blockIdx.z) * blockDim.y + threadIdx.y;
const int current_i = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (current >= count || current_i >= col) {
return;
}
const T grad_k = grad[current];
const T dist_k = dist[current];
if (dist_k != 0.0) {
const int current_l = current / r_size;
const int current_k = current % r_size;
int64_t m = current_k / r2;
int64_t n = current_k % r2;
const T * const start = t1 + current_l * x1_size + m * col;
const T * const end = start + col;
const T * self_m = start + current_i;
const T * self_n = t2 + current_l * x2_size + n * col + current_i;
T * res_m = res + current_l * x1_size + m * col + current_i;
double dist_k_pow = pow(static_cast<double>(dist_k), p - 1);
for (; self_m < end; self_m += stride, self_n += stride, res_m += stride) {
const T diff = *self_m - *self_n;
T res_num = static_cast<T>(diff * pow(static_cast<double>(abs(diff)), p - 2) * grad_k / dist_k_pow);
MsAtomicAdd(res_m, res_num);
}
}
}
// p == inf: gradient flows only through coordinates where |diff| equals the distance
template <typename T>
__global__ void CdistGradInf(T *grad, T *dist, T *t1, T *t2, T *res, double p, int64_t r1, int64_t r2, int64_t col,
int64_t count, size_t r_size, int64_t x1_size, int64_t x2_size) {
const int current = (blockIdx.y * gridDim.z + blockIdx.z) * blockDim.y + threadIdx.y;
const int current_i = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (current >= count || current_i >= col) {
return;
}
const T grad_k = grad[current];
const T dist_k = dist[current];
const int current_l = current / r_size;
const int current_k = current % r_size;
int64_t m = current_k / r2;
int64_t n = current_k % r2;
const T * const start = t1 + current_l * x1_size + m * col;
const T * const end = start + col;
const T * self_m = start + current_i;
const T * self_n = t2 + current_l * x2_size + n * col + current_i;
T * res_m = res + current_l * x1_size + m * col + current_i;
for (; self_m < end; self_m += stride, self_n += stride, res_m += stride) {
T diff = *self_m - *self_n;
T res = grad_k * sign(diff) * (abs(diff) == (dist_k));
MsAtomicAdd(res_m, res);
}
}
// Host launcher: zero the output buffer, then dispatch the kernel matching the norm order p
template <typename T>
void CalCdistGrad(size_t out_size, int64_t l1_size, int64_t l2_size, T *grad_start, T *dist_start, T *t1_start,
T *t2_start, T *res_start, int64_t m, double p, int64_t r1, int64_t r2, int64_t batch,
const uint32_t &device_id, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( InitOutput), dim3(CUDA_BLOCKS(device_id, out_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, res_start, out_size);
if (p == 0.0 || out_size == 0 || l1_size == 0 || l2_size == 0) {
return;
}
const int block_x = 64;
const int block_y = 16;
const int grid_x = (m + block_x * 8 - 1) / (block_x * 8);
const int64_t count = batch * r2 * r1;
const int64_t grid_temp = (count + block_y - 1) / block_y;
const int grid_y = (grid_temp - 1) / 65535 + 1;
const int grid_z = (grid_temp - 1) / grid_y + 1;
const dim3 grid(grid_x, grid_y, grid_z);
const dim3 block(block_x, block_y);
const int64_t r_size = r1 * r2;
if (std::isinf(p)) {
hipLaunchKernelGGL(( CdistGradInf<T>), dim3(grid), dim3(block), 0, cuda_stream, grad_start, dist_start, t1_start, t2_start, res_start,
p, r1, r2, m, count, r_size, l1_size, l2_size);
} else if (p == 1.0) {
hipLaunchKernelGGL(( CdistGradOne<T>), dim3(grid), dim3(block), 0, cuda_stream, grad_start, dist_start, t1_start, t2_start, res_start,
p, r1, r2, m, count, r_size, l1_size, l2_size);
} else if (p < 2.0) {
hipLaunchKernelGGL(( CdistGradLessthanTwo<T>), dim3(grid), dim3(block), 0, cuda_stream, grad_start, dist_start, t1_start, t2_start,
res_start, p, r1, r2, m, count, r_size,
l1_size, l2_size);
} else if (p == 2.0) {
hipLaunchKernelGGL(( CdistGradTwo<T>), dim3(grid), dim3(block), 0, cuda_stream, grad_start, dist_start, t1_start, t2_start, res_start,
p, r1, r2, m, count, r_size, l1_size, l2_size);
} else {
hipLaunchKernelGGL(( CdistGradP<T>), dim3(grid), dim3(block), 0, cuda_stream, grad_start, dist_start, t1_start, t2_start, res_start,
p, r1, r2, m, count, r_size, l1_size, l2_size);
}
return;
}
template
CUDA_LIB_EXPORT void CalCdistGrad<float>(size_t out_size, int64_t l1_size, int64_t l2_size, float *grad_start,
float *dist_start, float *t1_start, float *t2_start, float *res_start,
int64_t m, double p, int64_t r1, int64_t r2, int64_t batch,
const uint32_t &device_id, hipStream_t cuda_stream);
template
CUDA_LIB_EXPORT void CalCdistGrad<double>(size_t out_size, int64_t l1_size, int64_t l2_size, double *grad_start,
double *dist_start, double *t1_start, double *t2_start, double *res_start,
int64_t m, double p, int64_t r1, int64_t r2, int64_t batch,
const uint32_t &device_id, hipStream_t cuda_stream);
|
25b6f45edf56ef34c9ba1f28c823153ee795e663.cu
|
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
#include <stdlib.h>
#include "cdist_grad_impl.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
template <typename T>
__device__ __forceinline__ T sign(T val) {
return ((0.0) < static_cast<float>(val)) - (static_cast<float>(val) < (0.0));
}
template <typename T>
__global__ void InitOutput(T *grad, const size_t size) {
T zero = 0.0;
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
grad[pos] = zero;
}
return;
}
// p == 1: L1 (Manhattan) distance gradient; each coordinate contributes grad_k * sign(diff)
template <typename T>
__global__ void CdistGradOne(T *grad, T *dist, T *t1, T *t2, T *res, double p, int64_t r1, int64_t r2, int64_t col,
int64_t count, size_t r_size, int64_t x1_size, int64_t x2_size) {
const int current = (blockIdx.y * gridDim.z + blockIdx.z) * blockDim.y + threadIdx.y;
const int current_i = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (current >= count || current_i >= col) {
return;
}
const T grad_k = grad[current];
const int current_l = current / r_size;
const int current_k = current % r_size;
int64_t m = current_k / r2;
int64_t n = current_k % r2;
const T * const start = t1 + current_l * x1_size + m * col;
const T * const end = start + col;
const T * self_m = start + current_i;
const T * self_n = t2 + current_l * x2_size + n * col + current_i;
T * res_m = res + current_l * x1_size + m * col + current_i;
for (; self_m < end; self_m += stride, self_n += stride, res_m += stride) {
T res = grad_k * sign(*self_m - *self_n);
MsAtomicAdd(res_m, res);
}
}
// p < 2: term is sign(diff) * |diff|^(p-1) * grad_k / dist^(p-1) (skipped when dist == 0 or p < 1)
template <typename T>
__global__ void CdistGradLessthanTwo(T *grad, T *dist, T *t1, T *t2, T *res, double p, int64_t r1,
int64_t r2, int64_t col, int64_t count, size_t r_size, int64_t x1_size,
int64_t x2_size) {
const int current = (blockIdx.y * gridDim.z + blockIdx.z) * blockDim.y + threadIdx.y;
const int current_i = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (current >= count || current_i >= col) {
return;
}
const T grad_k = grad[current];
const T dist_k = dist[current];
if (dist_k != 0.0 && p >= 1) {
const int current_l = current / r_size;
const int current_k = current % r_size;
int64_t m = current_k / r2;
int64_t n = current_k % r2;
const T * const start = t1 + current_l * x1_size + m * col;
const T * const end = start + col;
const T * self_m = start + current_i;
const T * self_n = t2 + current_l * x2_size + n * col + current_i;
T * res_m = res + current_l * x1_size + m * col + current_i;
double dist_k_pow = pow(static_cast<double>(dist_k), p - 1);
for (; self_m < end; self_m += stride, self_n += stride, res_m += stride) {
const T diff = *self_m - *self_n;
T res = static_cast<T>(sign(diff) * pow(static_cast<double>(abs(diff)), p - 1) * grad_k / dist_k_pow);
MsAtomicAdd(res_m, res);
}
}
}
// 2
template <typename T>
__global__ void CdistGradTwo(T *grad, T *dist, T *t1, T *t2, T *res, double p, int64_t r1, int64_t r2, int64_t col,
int64_t count, size_t r_size, int64_t x1_size, int64_t x2_size) {
const int current = (blockIdx.y * gridDim.z + blockIdx.z) * blockDim.y + threadIdx.y;
const int current_i = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (current >= count || current_i >= col) {
return;
}
const T grad_k = grad[current];
const T dist_k = dist[current];
if (dist_k != 0.0) {
const int current_l = current / r_size;
const int current_k = current % r_size;
int64_t m = current_k / r2;
int64_t n = current_k % r2;
const T * const start = t1 + current_l * x1_size + m * col;
const T * const end = start + col;
const T * self_m = start + current_i;
const T * self_n = t2 + current_l * x2_size + n * col + current_i;
T * res_m = res + current_l * x1_size + m * col + current_i;
for (; self_m < end; self_m += stride, self_n += stride, res_m += stride) {
T res = grad_k * (*self_m - *self_n) / dist_k;
MsAtomicAdd(res_m, res);
}
}
}
// P
template <typename T>
__global__ void CdistGradP(T *grad, T *dist, T *t1, T *t2, T *res, double p, int64_t r1, int64_t r2, int64_t col,
int64_t count, size_t r_size, int64_t x1_size, int64_t x2_size) {
const int current = (blockIdx.y * gridDim.z + blockIdx.z) * blockDim.y + threadIdx.y;
const int current_i = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (current >= count || current_i >= col) {
return;
}
const T grad_k = grad[current];
const T dist_k = dist[current];
if (dist_k != 0.0) {
const int current_l = current / r_size;
const int current_k = current % r_size;
int64_t m = current_k / r2;
int64_t n = current_k % r2;
const T * const start = t1 + current_l * x1_size + m * col;
const T * const end = start + col;
const T * self_m = start + current_i;
const T * self_n = t2 + current_l * x2_size + n * col + current_i;
T * res_m = res + current_l * x1_size + m * col + current_i;
double dist_k_pow = pow(static_cast<double>(dist_k), p - 1);
for (; self_m < end; self_m += stride, self_n += stride, res_m += stride) {
const T diff = *self_m - *self_n;
T res_num = static_cast<T>(diff * pow(static_cast<double>(abs(diff)), p - 2) * grad_k / dist_k_pow);
MsAtomicAdd(res_m, res_num);
}
}
}
// INF
template <typename T>
__global__ void CdistGradInf(T *grad, T *dist, T *t1, T *t2, T *res, double p, int64_t r1, int64_t r2, int64_t col,
int64_t count, size_t r_size, int64_t x1_size, int64_t x2_size) {
const int current = (blockIdx.y * gridDim.z + blockIdx.z) * blockDim.y + threadIdx.y;
const int current_i = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (current >= count || current_i >= col) {
return;
}
const T grad_k = grad[current];
const T dist_k = dist[current];
const int current_l = current / r_size;
const int current_k = current % r_size;
int64_t m = current_k / r2;
int64_t n = current_k % r2;
const T * const start = t1 + current_l * x1_size + m * col;
const T * const end = start + col;
const T * self_m = start + current_i;
const T * self_n = t2 + current_l * x2_size + n * col + current_i;
T * res_m = res + current_l * x1_size + m * col + current_i;
for (; self_m < end; self_m += stride, self_n += stride, res_m += stride) {
T diff = *self_m - *self_n;
T res = grad_k * sign(diff) * (abs(diff) == (dist_k));
MsAtomicAdd(res_m, res);
}
}
// CAL
template <typename T>
void CalCdistGrad(size_t out_size, int64_t l1_size, int64_t l2_size, T *grad_start, T *dist_start, T *t1_start,
T *t2_start, T *res_start, int64_t m, double p, int64_t r1, int64_t r2, int64_t batch,
const uint32_t &device_id, cudaStream_t cuda_stream) {
InitOutput<<<CUDA_BLOCKS(device_id, out_size), CUDA_THREADS(device_id), 0, cuda_stream>>>(res_start, out_size);
if (p == 0.0 || out_size == 0 || l1_size == 0 || l2_size == 0) {
return;
}
const int block_x = 64;
const int block_y = 16;
const int grid_x = (m + block_x * 8 - 1) / (block_x * 8);
const int64_t count = batch * r2 * r1;
const int64_t grid_temp = (count + block_y - 1) / block_y;
const int grid_y = (grid_temp - 1) / 65535 + 1;
const int grid_z = (grid_temp - 1) / grid_y + 1;
const dim3 grid(grid_x, grid_y, grid_z);
const dim3 block(block_x, block_y);
const int64_t r_size = r1 * r2;
if (std::isinf(p)) {
CdistGradInf<T><<<grid, block, 0, cuda_stream>>>(grad_start, dist_start, t1_start, t2_start, res_start,
p, r1, r2, m, count, r_size, l1_size, l2_size);
} else if (p == 1.0) {
CdistGradOne<T><<<grid, block, 0, cuda_stream>>>(grad_start, dist_start, t1_start, t2_start, res_start,
p, r1, r2, m, count, r_size, l1_size, l2_size);
} else if (p < 2.0) {
CdistGradLessthanTwo<T><<<grid, block, 0, cuda_stream>>>(grad_start, dist_start, t1_start, t2_start,
res_start, p, r1, r2, m, count, r_size,
l1_size, l2_size);
} else if (p == 2.0) {
CdistGradTwo<T><<<grid, block, 0, cuda_stream>>>(grad_start, dist_start, t1_start, t2_start, res_start,
p, r1, r2, m, count, r_size, l1_size, l2_size);
} else {
CdistGradP<T><<<grid, block, 0, cuda_stream>>>(grad_start, dist_start, t1_start, t2_start, res_start,
p, r1, r2, m, count, r_size, l1_size, l2_size);
}
return;
}
template
CUDA_LIB_EXPORT void CalCdistGrad<float>(size_t out_size, int64_t l1_size, int64_t l2_size, float *grad_start,
float *dist_start, float *t1_start, float *t2_start, float *res_start,
int64_t m, double p, int64_t r1, int64_t r2, int64_t batch,
const uint32_t &device_id, cudaStream_t cuda_stream);
template
CUDA_LIB_EXPORT void CalCdistGrad<double>(size_t out_size, int64_t l1_size, int64_t l2_size, double *grad_start,
double *dist_start, double *t1_start, double *t2_start, double *res_start,
int64_t m, double p, int64_t r1, int64_t r2, int64_t batch,
const uint32_t &device_id, cudaStream_t cuda_stream);
|
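The launch set-up in CalCdistGrad above splits the required block count over gridDim.y and gridDim.z because each of those grid dimensions is limited to 65535; the kernels then rebuild a linear block index from (blockIdx.y, blockIdx.z) and bounds-check it against count. A minimal standalone sketch of the same idiom, with all names below invented for illustration:

__global__ void linearized(float *data, int count) {
  // Recover the linear block id from the (y, z) split, then the element id.
  const int block = blockIdx.y * gridDim.z + blockIdx.z;
  const int idx = block * blockDim.y + threadIdx.y;
  if (idx >= count) return;  // guard the padded tail
  data[idx] += 1.0f;
}

void launch_linearized(float *d_data, int count, cudaStream_t stream) {
  const int block_y = 16;
  const int blocks = (count + block_y - 1) / block_y;  // total blocks needed
  const int grid_y = (blocks - 1) / 65535 + 1;         // y slices, each <= 65535
  const int grid_z = (blocks - 1) / grid_y + 1;        // z blocks per slice, also <= 65535
  linearized<<<dim3(1, grid_y, grid_z), dim3(1, block_y), 0, stream>>>(d_data, count);
}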
52cdc5804824b295b31d86e224b7ce8c73343fa0.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test_utils.h"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <metrics/contingencyMatrix.cuh>
#include <raft/cudart_utils.h>
#include <random>
#include <rmm/device_uvector.hpp>
namespace MLCommon {
namespace Metrics {
struct ContingencyMatrixParam {
int nElements;
int minClass;
int maxClass;
bool calcCardinality;
bool skipLabels;
float tolerance;
};
template <typename T>
class ContingencyMatrixTest : public ::testing::TestWithParam<ContingencyMatrixParam> {
protected:
ContingencyMatrixTest()
: pWorkspace(0, stream),
dY(0, stream),
dYHat(0, stream),
dComputedOutput(0, stream),
dGoldenOutput(0, stream)
{
}
void SetUp() override
{
params = ::testing::TestWithParam<ContingencyMatrixParam>::GetParam();
int numElements = params.nElements;
int lowerLabelRange = params.minClass;
int upperLabelRange = params.maxClass;
std::vector<int> y(numElements, 0);
std::vector<int> y_hat(numElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
std::generate(y.begin(), y.end(), [&]() { return intGenerator(dre); });
std::generate(y_hat.begin(), y_hat.end(), [&]() { return intGenerator(dre); });
if (params.skipLabels) {
      // remove two label values from the input arrays
int y1 = (upperLabelRange - lowerLabelRange) / 2;
int y2 = y1 + (upperLabelRange - lowerLabelRange) / 4;
// replacement values
int y1_R = y1 + 1;
int y2_R = y2 + 1;
std::replace(y.begin(), y.end(), y1, y1_R);
std::replace(y.begin(), y.end(), y2, y2_R);
std::replace(y_hat.begin(), y_hat.end(), y1, y1_R);
std::replace(y_hat.begin(), y_hat.end(), y2, y2_R);
}
RAFT_CUDA_TRY(hipStreamCreate(&stream));
dY.resize(numElements, stream);
dYHat.resize(numElements, stream);
raft::update_device(dYHat.data(), &y_hat[0], numElements, stream);
raft::update_device(dY.data(), &y[0], numElements, stream);
if (params.calcCardinality) {
MLCommon::Metrics::getInputClassCardinality(
dY.data(), numElements, stream, minLabel, maxLabel);
} else {
minLabel = lowerLabelRange;
maxLabel = upperLabelRange;
}
numUniqueClasses = maxLabel - minLabel + 1;
dComputedOutput.resize(numUniqueClasses * numUniqueClasses, stream);
dGoldenOutput.resize(numUniqueClasses * numUniqueClasses, stream);
// generate golden output on CPU
size_t sizeOfMat = numUniqueClasses * numUniqueClasses * sizeof(int);
std::vector<int> hGoldenOutput(sizeOfMat, 0);
for (int i = 0; i < numElements; i++) {
auto row = y[i] - minLabel;
auto column = y_hat[i] - minLabel;
hGoldenOutput[row * numUniqueClasses + column] += 1;
}
raft::update_device(
dGoldenOutput.data(), hGoldenOutput.data(), numUniqueClasses * numUniqueClasses, stream);
workspaceSz = MLCommon::Metrics::getContingencyMatrixWorkspaceSize(
numElements, dY.data(), stream, minLabel, maxLabel);
pWorkspace.resize(workspaceSz, stream);
raft::interruptible::synchronize(stream);
}
void TearDown() override { RAFT_CUDA_TRY(hipStreamDestroy(stream)); }
void RunTest()
{
int numElements = params.nElements;
MLCommon::Metrics::contingencyMatrix(dY.data(),
dYHat.data(),
numElements,
dComputedOutput.data(),
stream,
(void*)pWorkspace.data(),
workspaceSz,
minLabel,
maxLabel);
ASSERT_TRUE(raft::devArrMatch(dComputedOutput.data(),
dGoldenOutput.data(),
numUniqueClasses * numUniqueClasses,
raft::Compare<T>()));
}
ContingencyMatrixParam params;
int numUniqueClasses = -1;
T minLabel, maxLabel;
hipStream_t stream = 0;
size_t workspaceSz;
rmm::device_uvector<char> pWorkspace;
rmm::device_uvector<T> dY, dYHat;
rmm::device_uvector<int> dComputedOutput, dGoldenOutput;
};
const std::vector<ContingencyMatrixParam> inputs = {
{10000, 1, 10, true, false, 0.000001},
{10000, 1, 5000, true, false, 0.000001},
{10000, 1, 10000, true, false, 0.000001},
{10000, 1, 20000, true, false, 0.000001},
{10000, 1, 10, false, false, 0.000001},
{10000, 1, 5000, false, false, 0.000001},
{10000, 1, 10000, false, false, 0.000001},
{10000, 1, 20000, false, false, 0.000001},
{100000, 1, 100, false, false, 0.000001},
{1000000, 1, 1200, true, false, 0.000001},
{1000000, 1, 10000, false, false, 0.000001},
{100000, 1, 100, false, true, 0.000001},
};
typedef ContingencyMatrixTest<int> ContingencyMatrixTestS;
TEST_P(ContingencyMatrixTestS, Result) { RunTest(); }
INSTANTIATE_TEST_CASE_P(ContingencyMatrix, ContingencyMatrixTestS, ::testing::ValuesIn(inputs));
} // namespace Metrics
} // namespace MLCommon
|
52cdc5804824b295b31d86e224b7ce8c73343fa0.cu
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test_utils.h"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <metrics/contingencyMatrix.cuh>
#include <raft/cudart_utils.h>
#include <random>
#include <rmm/device_uvector.hpp>
namespace MLCommon {
namespace Metrics {
struct ContingencyMatrixParam {
int nElements;
int minClass;
int maxClass;
bool calcCardinality;
bool skipLabels;
float tolerance;
};
template <typename T>
class ContingencyMatrixTest : public ::testing::TestWithParam<ContingencyMatrixParam> {
protected:
ContingencyMatrixTest()
: pWorkspace(0, stream),
dY(0, stream),
dYHat(0, stream),
dComputedOutput(0, stream),
dGoldenOutput(0, stream)
{
}
void SetUp() override
{
params = ::testing::TestWithParam<ContingencyMatrixParam>::GetParam();
int numElements = params.nElements;
int lowerLabelRange = params.minClass;
int upperLabelRange = params.maxClass;
std::vector<int> y(numElements, 0);
std::vector<int> y_hat(numElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
std::generate(y.begin(), y.end(), [&]() { return intGenerator(dre); });
std::generate(y_hat.begin(), y_hat.end(), [&]() { return intGenerator(dre); });
if (params.skipLabels) {
      // remove two label values from the input arrays
int y1 = (upperLabelRange - lowerLabelRange) / 2;
int y2 = y1 + (upperLabelRange - lowerLabelRange) / 4;
// replacement values
int y1_R = y1 + 1;
int y2_R = y2 + 1;
std::replace(y.begin(), y.end(), y1, y1_R);
std::replace(y.begin(), y.end(), y2, y2_R);
std::replace(y_hat.begin(), y_hat.end(), y1, y1_R);
std::replace(y_hat.begin(), y_hat.end(), y2, y2_R);
}
RAFT_CUDA_TRY(cudaStreamCreate(&stream));
dY.resize(numElements, stream);
dYHat.resize(numElements, stream);
raft::update_device(dYHat.data(), &y_hat[0], numElements, stream);
raft::update_device(dY.data(), &y[0], numElements, stream);
if (params.calcCardinality) {
MLCommon::Metrics::getInputClassCardinality(
dY.data(), numElements, stream, minLabel, maxLabel);
} else {
minLabel = lowerLabelRange;
maxLabel = upperLabelRange;
}
numUniqueClasses = maxLabel - minLabel + 1;
dComputedOutput.resize(numUniqueClasses * numUniqueClasses, stream);
dGoldenOutput.resize(numUniqueClasses * numUniqueClasses, stream);
// generate golden output on CPU
size_t sizeOfMat = numUniqueClasses * numUniqueClasses * sizeof(int);
std::vector<int> hGoldenOutput(sizeOfMat, 0);
for (int i = 0; i < numElements; i++) {
auto row = y[i] - minLabel;
auto column = y_hat[i] - minLabel;
hGoldenOutput[row * numUniqueClasses + column] += 1;
}
raft::update_device(
dGoldenOutput.data(), hGoldenOutput.data(), numUniqueClasses * numUniqueClasses, stream);
workspaceSz = MLCommon::Metrics::getContingencyMatrixWorkspaceSize(
numElements, dY.data(), stream, minLabel, maxLabel);
pWorkspace.resize(workspaceSz, stream);
raft::interruptible::synchronize(stream);
}
void TearDown() override { RAFT_CUDA_TRY(cudaStreamDestroy(stream)); }
void RunTest()
{
int numElements = params.nElements;
MLCommon::Metrics::contingencyMatrix(dY.data(),
dYHat.data(),
numElements,
dComputedOutput.data(),
stream,
(void*)pWorkspace.data(),
workspaceSz,
minLabel,
maxLabel);
ASSERT_TRUE(raft::devArrMatch(dComputedOutput.data(),
dGoldenOutput.data(),
numUniqueClasses * numUniqueClasses,
raft::Compare<T>()));
}
ContingencyMatrixParam params;
int numUniqueClasses = -1;
T minLabel, maxLabel;
cudaStream_t stream = 0;
size_t workspaceSz;
rmm::device_uvector<char> pWorkspace;
rmm::device_uvector<T> dY, dYHat;
rmm::device_uvector<int> dComputedOutput, dGoldenOutput;
};
const std::vector<ContingencyMatrixParam> inputs = {
{10000, 1, 10, true, false, 0.000001},
{10000, 1, 5000, true, false, 0.000001},
{10000, 1, 10000, true, false, 0.000001},
{10000, 1, 20000, true, false, 0.000001},
{10000, 1, 10, false, false, 0.000001},
{10000, 1, 5000, false, false, 0.000001},
{10000, 1, 10000, false, false, 0.000001},
{10000, 1, 20000, false, false, 0.000001},
{100000, 1, 100, false, false, 0.000001},
{1000000, 1, 1200, true, false, 0.000001},
{1000000, 1, 10000, false, false, 0.000001},
{100000, 1, 100, false, true, 0.000001},
};
typedef ContingencyMatrixTest<int> ContingencyMatrixTestS;
TEST_P(ContingencyMatrixTestS, Result) { RunTest(); }
INSTANTIATE_TEST_CASE_P(ContingencyMatrix, ContingencyMatrixTestS, ::testing::ValuesIn(inputs));
} // namespace Metrics
} // namespace MLCommon
|
7d195551b94050c0ee4586cc8bf9d628d90ae29a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "projektcuda.h"
#include "project_comm.h"
//#include "mex.h"
/* Kernel to square elements of the array on the GPU */
__global__ void norm_elements(t_ve* in,t_ve* out, unsigned int N)
{
__shared__ float Cs[VECTOR_BLOCK_SIZE];
int idx = blockIdx.x*blockDim.x+threadIdx.x;
Cs[threadIdx.x] = 0;
if ( idx < N ) {
Cs[threadIdx.x] = in[ idx ] * in[ idx ];
}
t_ve blocksum = 0;
if(threadIdx.x==0){
out[blockIdx.x]=0;
}
__syncthreads();
if(threadIdx.x==0){
for ( int i = 0; i < blockDim.x; i++ ) {
blocksum += Cs[i];
}
out[blockIdx.x]=blocksum;
}
__syncthreads();
if ( idx == 0 ) {
for ( int i = 1; i < gridDim.x; i++ ) {
out[0] += out[i];
}
out[0] = sqrt(out[0]);
}
}
|
7d195551b94050c0ee4586cc8bf9d628d90ae29a.cu
|
#include "cuda.h"
#include <stdio.h>
#include "projektcuda.h"
#include "project_comm.h"
//#include "mex.h"
/* Kernel to square elements of the array on the GPU */
__global__ void norm_elements(t_ve* in,t_ve* out, unsigned int N)
{
__shared__ float Cs[VECTOR_BLOCK_SIZE];
int idx = blockIdx.x*blockDim.x+threadIdx.x;
Cs[threadIdx.x] = 0;
if ( idx < N ) {
Cs[threadIdx.x] = in[ idx ] * in[ idx ];
}
t_ve blocksum = 0;
if(threadIdx.x==0){
out[blockIdx.x]=0;
}
__syncthreads();
if(threadIdx.x==0){
for ( int i = 0; i < blockDim.x; i++ ) {
blocksum += Cs[i];
}
out[blockIdx.x]=blocksum;
}
__syncthreads();
if ( idx == 0 ) {
for ( int i = 1; i < gridDim.x; i++ ) {
out[0] += out[i];
}
out[0] = sqrt(out[0]);
}
}
|
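A host-side sketch of how the norm_elements kernel above might be driven, assuming t_ve is float and that the launch block size equals VECTOR_BLOCK_SIZE from projektcuda.h (both assumptions, since that header is not shown here); d_out needs one slot per block, because each block writes a partial sum there before thread 0 of block 0 folds them into out[0] and takes the square root:

// Hedged usage sketch only; t_ve and VECTOR_BLOCK_SIZE are taken from projektcuda.h.
float euclidean_norm(const float *h_in, unsigned int N) {
  const unsigned int block = VECTOR_BLOCK_SIZE;
  const unsigned int grid = (N + block - 1) / block;
  float *d_in, *d_out, result = 0.0f;
  cudaMalloc(&d_in, N * sizeof(float));
  cudaMalloc(&d_out, grid * sizeof(float));        // one partial sum per block
  cudaMemcpy(d_in, h_in, N * sizeof(float), cudaMemcpyHostToDevice);
  norm_elements<<<grid, block>>>(d_in, d_out, N);  // out[0] ends up as sqrt(sum of squares)
  cudaMemcpy(&result, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(d_in);
  cudaFree(d_out);
  return result;
}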
e647b79b15673231caf97980185fbfc289433b97.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Derived from the nVIDIA CUDA 8.0 samples by
*
* Eyal Rozenberg <[email protected]>
*
* The derivation is specifically permitted in the nVIDIA CUDA Samples EULA
* and the deriver is the owner of this code according to the EULA.
*
* Use this reasonably. If you want to discuss licensing formalities, please
* contact the author.
*
* Modified by VinInn for testing math funcs
*/
/* to run test
foreach f ( $CMSSW_BASE/test/$SCRAM_ARCH/DFM_Vector* )
echo $f; $f
end
*/
#include <algorithm>
#include <cassert>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <memory>
#include <random>
#include "cuda/api_wrappers.h"
#include "DataFormats/Math/interface/approx_atan2.h"
constexpr float xmin=-100.001; // avoid 0
constexpr float incr = 0.04;
constexpr int Nsteps = 2.*std::abs(xmin)/incr;
template<int DEGREE>
__global__ void diffAtan(int * diffs)
{
auto mdiff = &diffs[0];
auto idiff = &diffs[1];
auto sdiff = &diffs[2];
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
auto x = xmin +incr*i;
auto y = xmin +incr*j;
auto approx = unsafe_atan2f<DEGREE>(y,x);
auto iapprox = unsafe_atan2i<DEGREE>(y,x);
auto sapprox = unsafe_atan2s<DEGREE>(y,x);
auto std = std::atan2(y,x);
auto fd = std::abs(std-approx);
atomicMax(mdiff, int(fd*1.e7) );
atomicMax(idiff, std::abs(phi2int(std)-iapprox));
short dd = std::abs(phi2short(std)-sapprox);
atomicMax(sdiff,int(dd));
}
template<int DEGREE>
void go() {
auto start = std::chrono::high_resolution_clock::now();
auto delta = start - start;
auto current_device = cuda::device::current::get();
// atan2
delta -= (std::chrono::high_resolution_clock::now()-start);
auto diff_d = cuda::memory::device::make_unique<int[]>(current_device,3);
int diffs[3];
cuda::memory::device::zero(diff_d.get(),3*4);
// Launch the diff CUDA Kernel
dim3 threadsPerBlock(32,32,1);
dim3 blocksPerGrid((Nsteps + threadsPerBlock.x - 1) / threadsPerBlock.x,
(Nsteps + threadsPerBlock.y - 1) / threadsPerBlock.y,
1);
std::cout
<< "CUDA kernel 'diff' launch with " << blocksPerGrid.x
<< " blocks of " << threadsPerBlock.y << " threads\n";
cuda::launch(
diffAtan<DEGREE>,
{ blocksPerGrid, threadsPerBlock },
diff_d.get());
cuda::memory::copy(diffs, diff_d.get(), 3*4);
delta += (std::chrono::high_resolution_clock::now()-start);
float mdiff = diffs[0]*1.e-7;
int idiff = diffs[1];
int sdiff = diffs[2];
std::cout << "for degree " << DEGREE << " max diff is " << mdiff
<< ' ' << idiff << ' ' << int2phi(idiff)
<< ' ' << sdiff << ' ' << short2phi(sdiff) << std::endl;
std::cout << "cuda computation took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(delta).count()
<< " ms" << std::endl;
}
int main() {
int count = 0;
auto status = hipGetDeviceCount(& count);
if (status != hipSuccess) {
std::cerr << "Failed to initialise the CUDA runtime, the test will be skipped." << "\n";
exit(EXIT_SUCCESS);
}
if (count == 0) {
std::cerr << "No CUDA devices on this system, the test will be skipped." << "\n";
exit(EXIT_SUCCESS);
}
try {
go<3>();
go<5>();
go<7>();
go<9>();
} catch(cuda::runtime_error &ex) {
std::cerr << "CUDA error: " << ex.what() << std::endl;
exit(EXIT_FAILURE);
} catch(...) {
std::cerr << "A non-CUDA error occurred" << std::endl;
exit(EXIT_FAILURE);
}
return EXIT_SUCCESS;
}
|
e647b79b15673231caf97980185fbfc289433b97.cu
|
/**
* Derived from the nVIDIA CUDA 8.0 samples by
*
* Eyal Rozenberg <[email protected]>
*
* The derivation is specifically permitted in the nVIDIA CUDA Samples EULA
* and the deriver is the owner of this code according to the EULA.
*
* Use this reasonably. If you want to discuss licensing formalities, please
* contact the author.
*
* Modified by VinInn for testing math funcs
*/
/* to run test
foreach f ( $CMSSW_BASE/test/$SCRAM_ARCH/DFM_Vector* )
echo $f; $f
end
*/
#include <algorithm>
#include <cassert>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <memory>
#include <random>
#include "cuda/api_wrappers.h"
#include "DataFormats/Math/interface/approx_atan2.h"
constexpr float xmin=-100.001; // avoid 0
constexpr float incr = 0.04;
constexpr int Nsteps = 2.*std::abs(xmin)/incr;
template<int DEGREE>
__global__ void diffAtan(int * diffs)
{
auto mdiff = &diffs[0];
auto idiff = &diffs[1];
auto sdiff = &diffs[2];
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
auto x = xmin +incr*i;
auto y = xmin +incr*j;
auto approx = unsafe_atan2f<DEGREE>(y,x);
auto iapprox = unsafe_atan2i<DEGREE>(y,x);
auto sapprox = unsafe_atan2s<DEGREE>(y,x);
auto std = std::atan2(y,x);
auto fd = std::abs(std-approx);
atomicMax(mdiff, int(fd*1.e7) );
atomicMax(idiff, std::abs(phi2int(std)-iapprox));
short dd = std::abs(phi2short(std)-sapprox);
atomicMax(sdiff,int(dd));
}
template<int DEGREE>
void go() {
auto start = std::chrono::high_resolution_clock::now();
auto delta = start - start;
auto current_device = cuda::device::current::get();
// atan2
delta -= (std::chrono::high_resolution_clock::now()-start);
auto diff_d = cuda::memory::device::make_unique<int[]>(current_device,3);
int diffs[3];
cuda::memory::device::zero(diff_d.get(),3*4);
// Launch the diff CUDA Kernel
dim3 threadsPerBlock(32,32,1);
dim3 blocksPerGrid((Nsteps + threadsPerBlock.x - 1) / threadsPerBlock.x,
(Nsteps + threadsPerBlock.y - 1) / threadsPerBlock.y,
1);
std::cout
<< "CUDA kernel 'diff' launch with " << blocksPerGrid.x
<< " blocks of " << threadsPerBlock.y << " threads\n";
cuda::launch(
diffAtan<DEGREE>,
{ blocksPerGrid, threadsPerBlock },
diff_d.get());
cuda::memory::copy(diffs, diff_d.get(), 3*4);
delta += (std::chrono::high_resolution_clock::now()-start);
float mdiff = diffs[0]*1.e-7;
int idiff = diffs[1];
int sdiff = diffs[2];
std::cout << "for degree " << DEGREE << " max diff is " << mdiff
<< ' ' << idiff << ' ' << int2phi(idiff)
<< ' ' << sdiff << ' ' << short2phi(sdiff) << std::endl;
std::cout << "cuda computation took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(delta).count()
<< " ms" << std::endl;
}
int main() {
int count = 0;
auto status = cudaGetDeviceCount(& count);
if (status != cudaSuccess) {
std::cerr << "Failed to initialise the CUDA runtime, the test will be skipped." << "\n";
exit(EXIT_SUCCESS);
}
if (count == 0) {
std::cerr << "No CUDA devices on this system, the test will be skipped." << "\n";
exit(EXIT_SUCCESS);
}
try {
go<3>();
go<5>();
go<7>();
go<9>();
} catch(cuda::runtime_error &ex) {
std::cerr << "CUDA error: " << ex.what() << std::endl;
exit(EXIT_FAILURE);
} catch(...) {
std::cerr << "A non-CUDA error occurred" << std::endl;
exit(EXIT_FAILURE);
}
return EXIT_SUCCESS;
}
|
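The diffAtan kernel above tracks the maximum error with atomicMax on integers, scaling the float difference by 1e7 first, because CUDA provides no atomicMax overload for float. A minimal sketch of that idiom (kernel name and scale factor are illustrative only):

__global__ void maxAbsErrorScaled(const float *a, const float *b, int n, int *max_scaled) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i >= n) return;
  float err = fabsf(a[i] - b[i]);
  atomicMax(max_scaled, int(err * 1.e7f));  // keep ~7 decimal digits of the error
}
// Host side: copy *max_scaled back and divide by 1e7 to recover the float bound,
// exactly as go() does with diffs[0] above.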
71cf09cc15147f211c933f913a6587e53c1fe3fa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void local_update1_gpu_kernel(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
int total = filter_num * location_num * output_num;
CUDA_KERNEL_LOOP(index, total) {
int p = index % location_num;
int n = (index / location_num) % filter_num;
int q = (index / location_num) / filter_num;
data_R[index] += data_A[q*location_num+p] * data_B[n*location_num+p];
}
}
template <typename Dtype>
void local_update1_gpu(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
// data_A is output_num x location_num
// data_B is filter_num x location_num
// data_R is output_num x filter_num x location_num,
// the update performed is Rqnp += Aqp * Bnp
const int nthreads = filter_num * location_num * output_num;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(local_update1_gpu_kernel<Dtype>, dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      data_A, data_B, data_R, filter_num, location_num, output_num);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void local_update1_gpu<float>(
const float* data_A, const float* data_B,
float* data_R, const int filter_num,
const int location_num, const int output_num);
template void local_update1_gpu<double>(
const double* data_A, const double* data_B,
double* data_R, const int filter_num,
const int location_num, const int output_num);
template <typename Dtype>
__global__ void local_update2_gpu_kernel(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
int total = filter_num * location_num;
CUDA_KERNEL_LOOP(index, total) {
int p = index % location_num;
int n = (index / location_num);
for (int q = 0; q < output_num; q++) {
data_R[index] +=
data_A[q*location_num+p] * data_B[(q*filter_num+n)*location_num+p];
}
}
}
template <typename Dtype>
void local_update2_gpu(const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
// data_A is output_num x location_num
// data_B is output_num x filter_num x location_num
// data_R is filter_num x location_num,
// the update performed is Rnp += \sum_q(Aqp * Bqnp)
int nthreads = filter_num * location_num;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(local_update2_gpu_kernel<Dtype>, dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      data_A, data_B, data_R, filter_num,
location_num, output_num);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void local_update2_gpu<float>(
const float* data_A, const float* data_B,
float* data_R, const int filter_num,
const int location_num, const int output_num);
template void local_update2_gpu<double>(
const double* data_A, const double* data_B,
double* data_R, const int filter_num,
const int location_num, const int output_num);
/// @brief refer to CPU forward -- the BLAS implementation is the same.
template <typename Dtype>
void LocallyConnectedLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* x_data = col_buffer_.mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Blob<Dtype> E;
E.Reshape(1, 1, 1, K_);
FillerParameter filler_param;
filler_param.set_value(1);
ConstantFiller<Dtype> filler(filler_param);
filler.Fill(&E);
Blob<Dtype> intermediate;
intermediate.Reshape(1, 1, K_, N_);
for (int n = 0; n < num_; n++) {
im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_,
width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, x_data);
for (int m = 0; m < num_output_; m++) {
caffe_gpu_mul(K_*N_, x_data, weight+this->blobs_[0]->offset(m),
intermediate.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, N_, K_,
(Dtype)1., E.gpu_data(), intermediate.gpu_data(),
(Dtype)0., top_data + top[0]->offset(n, m));
}
if (bias_term_) {
caffe_gpu_add(M_ * N_, this->blobs_[1]->gpu_data(),
top_data + top[0]->offset(n),
top_data + top[0]->offset(n));
}
}
}
/// @brief refer to CPU backward -- the BLAS implementation is the same.
template <typename Dtype>
void LocallyConnectedLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* x_data = col_buffer_.mutable_gpu_data();
Dtype* x_diff = col_buffer_.mutable_gpu_diff();
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* bias_diff = NULL;
Blob<Dtype> intermediate;
intermediate.Reshape(1, 1, 1, N_);
Blob<Dtype> xt;
xt.Reshape(1, 1, K_, N_);
Dtype* xt_data = xt.mutable_gpu_data();
if (bias_term_) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
caffe_gpu_set(this->blobs_[1]->count(), Dtype(0.), bias_diff);
for (int n = 0; n < num_; ++n) {
caffe_gpu_add(M_ * N_, bias_diff,
top_diff + top[0]->offset(n),
bias_diff);
}
}
Blob<Dtype> buf;
buf.Reshape(1, 1, K_, N_);
Dtype* buf_data = buf.mutable_gpu_data();
caffe_gpu_set(this->blobs_[0]->count(), Dtype(0.), weight_diff);
for (int n = 0; n < num_; n++) {
im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_,
width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, x_data);
local_update1_gpu(
top_diff+top[0]->offset(n), x_data,
weight_diff, K_, N_, M_);
if (propagate_down[0]) {
caffe_gpu_set(col_buffer_.count(), Dtype(0.), x_diff);
local_update2_gpu(top_diff+top[0]->offset(n), weight, x_diff, K_, N_, M_);
// col2im back to the data
col2im_gpu(x_diff, channels_, height_, width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, bottom_diff + bottom[0]->offset(n));
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LocallyConnectedLayer);
} // namespace caffe
|
71cf09cc15147f211c933f913a6587e53c1fe3fa.cu
|
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void local_update1_gpu_kernel(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
int total = filter_num * location_num * output_num;
CUDA_KERNEL_LOOP(index, total) {
int p = index % location_num;
int n = (index / location_num) % filter_num;
int q = (index / location_num) / filter_num;
data_R[index] += data_A[q*location_num+p] * data_B[n*location_num+p];
}
}
template <typename Dtype>
void local_update1_gpu(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
// data_A is output_num x location_num
// data_B is filter_num x location_num
// data_R is output_num x filter_num x location_num,
// the update performed is Rqnp += Aqp * Bnp
const int nthreads = filter_num * location_num * output_num;
local_update1_gpu_kernel<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
data_A, data_B, data_R, filter_num, location_num, output_num);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void local_update1_gpu<float>(
const float* data_A, const float* data_B,
float* data_R, const int filter_num,
const int location_num, const int output_num);
template void local_update1_gpu<double>(
const double* data_A, const double* data_B,
double* data_R, const int filter_num,
const int location_num, const int output_num);
template <typename Dtype>
__global__ void local_update2_gpu_kernel(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
int total = filter_num * location_num;
CUDA_KERNEL_LOOP(index, total) {
int p = index % location_num;
int n = (index / location_num);
for (int q = 0; q < output_num; q++) {
data_R[index] +=
data_A[q*location_num+p] * data_B[(q*filter_num+n)*location_num+p];
}
}
}
template <typename Dtype>
void local_update2_gpu(const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
// data_A is output_num x location_num
// data_B is output_num x filter_num x location_num
// data_R is filter_num x location_num,
// the update performed is Rnp += \sum_q(Aqp * Bqnp)
int nthreads = filter_num * location_num;
local_update2_gpu_kernel<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
data_A, data_B, data_R, filter_num,
location_num, output_num);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void local_update2_gpu<float>(
const float* data_A, const float* data_B,
float* data_R, const int filter_num,
const int location_num, const int output_num);
template void local_update2_gpu<double>(
const double* data_A, const double* data_B,
double* data_R, const int filter_num,
const int location_num, const int output_num);
/// @brief refer to CPU forward -- the BLAS implementation is the same.
template <typename Dtype>
void LocallyConnectedLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* x_data = col_buffer_.mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Blob<Dtype> E;
E.Reshape(1, 1, 1, K_);
FillerParameter filler_param;
filler_param.set_value(1);
ConstantFiller<Dtype> filler(filler_param);
filler.Fill(&E);
Blob<Dtype> intermediate;
intermediate.Reshape(1, 1, K_, N_);
for (int n = 0; n < num_; n++) {
im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_,
width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, x_data);
for (int m = 0; m < num_output_; m++) {
caffe_gpu_mul(K_*N_, x_data, weight+this->blobs_[0]->offset(m),
intermediate.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, N_, K_,
(Dtype)1., E.gpu_data(), intermediate.gpu_data(),
(Dtype)0., top_data + top[0]->offset(n, m));
}
if (bias_term_) {
caffe_gpu_add(M_ * N_, this->blobs_[1]->gpu_data(),
top_data + top[0]->offset(n),
top_data + top[0]->offset(n));
}
}
}
/// @brief refer to CPU backward -- the BLAS implementation is the same.
template <typename Dtype>
void LocallyConnectedLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* x_data = col_buffer_.mutable_gpu_data();
Dtype* x_diff = col_buffer_.mutable_gpu_diff();
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* bias_diff = NULL;
Blob<Dtype> intermediate;
intermediate.Reshape(1, 1, 1, N_);
Blob<Dtype> xt;
xt.Reshape(1, 1, K_, N_);
Dtype* xt_data = xt.mutable_gpu_data();
if (bias_term_) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
caffe_gpu_set(this->blobs_[1]->count(), Dtype(0.), bias_diff);
for (int n = 0; n < num_; ++n) {
caffe_gpu_add(M_ * N_, bias_diff,
top_diff + top[0]->offset(n),
bias_diff);
}
}
Blob<Dtype> buf;
buf.Reshape(1, 1, K_, N_);
Dtype* buf_data = buf.mutable_gpu_data();
caffe_gpu_set(this->blobs_[0]->count(), Dtype(0.), weight_diff);
for (int n = 0; n < num_; n++) {
im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_,
width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, x_data);
local_update1_gpu(
top_diff+top[0]->offset(n), x_data,
weight_diff, K_, N_, M_);
if (propagate_down[0]) {
caffe_gpu_set(col_buffer_.count(), Dtype(0.), x_diff);
local_update2_gpu(top_diff+top[0]->offset(n), weight, x_diff, K_, N_, M_);
// col2im back to the data
col2im_gpu(x_diff, channels_, height_, width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, bottom_diff + bottom[0]->offset(n));
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LocallyConnectedLayer);
} // namespace caffe
|
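The comments inside local_update1_gpu describe the update Rqnp += Aqp * Bnp over an output_num x filter_num x location_num result. A plain CPU reference loop, written here only to make the row-major index layout explicit (it is not part of the layer), computes the same thing:

// Hedged CPU reference for local_update1: A is output_num x location_num,
// B is filter_num x location_num, R is output_num x filter_num x location_num.
void local_update1_cpu(const float *A, const float *B, float *R,
                       int filter_num, int location_num, int output_num) {
  for (int q = 0; q < output_num; ++q)
    for (int n = 0; n < filter_num; ++n)
      for (int p = 0; p < location_num; ++p)
        R[(q * filter_num + n) * location_num + p] +=
            A[q * location_num + p] * B[n * location_num + p];
}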
4bc4db45c24dd9f7eaa6ae62c9c158ac7a0a76ae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file MersenneTwister_d.cu
*
* @ingroup Simulator/Utils/RNG
*
* @brief MersenneTwister logic from Nvidia
*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*
*
* Edited by Warner Smidt Sep 4th 2011
* ds_MT now stores the seed state after each call to the random number generator.
* Each consecutive call to the random number generator will not produce the same
* results now.
* Note: iState has replaced seed in mt_struct_stripped, therefore the .dat files
* last parameter which was for the seed is now used for the iState.
* Also added RandomNormGPU which combines RandomGPU and BoxMuller for normalized
* random numbers without extra global memory transfers.
*
* Edit Sep 14th 2011
* MT_RNG_COUNT is the max total threads that will be used. initMTGP is now used
* to setup RandomNormGPU/RandomGPU to be called from normalMTGPU/uniformMTGPU.
* Allows the random number generation to be more dynamic without relying as much
* on #defines as well as being able to make the calculations for the needed data
 * at initialization only once, and not every time the random numbers are needed.
*/
#include <iostream>
#include <stdio.h>
using namespace std;
#include "MersenneTwister_d.h"
__device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT];
static mt_struct_stripped h_MT[MT_RNG_COUNT];
__device__ unsigned int mt[MT_RNG_COUNT*MT_NN];
//#define MT_DATAFILE "MersenneTwister/data/MersenneTwister.dat"
/*
//globals
__device__ static mt_struct_stripped * ds_MT;
static mt_struct_stripped * h_MT;
__device__ unsigned int * mt;
*/
unsigned int mt_rng_count;
unsigned int mt_blocks;
unsigned int mt_threads;
unsigned int mt_nPerRng;
//Load twister configurations
void loadMTGPU(const char *fname){
FILE *fd = fopen(fname, "rb");
if(!fd){
cerr << "initMTGPU(): failed to open " << fname << endl << "FAILED" << endl;
exit(0);
}
if( !fread(h_MT, mt_rng_count*sizeof(mt_struct_stripped), 1, fd) ){
cerr << "initMTGPU(): failed to load " << fname << endl << "FAILED" << endl;
exit(0);
}
fclose(fd);
}
//initialize the seed to mt[]
__global__ void seedMTGPUState(unsigned int seed){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
int iState;
mt[MT_NN*tid] = seed;
for (iState = MT_NN*tid+1; iState < MT_NN*(1+tid); iState++)
mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;
}
//Initialize/seed twister for current GPU context
void seedMTGPU(unsigned int seed){
int i;
//Need to be thread-safe
mt_struct_stripped *MT = (mt_struct_stripped *)malloc(mt_rng_count * sizeof(mt_struct_stripped));
for(i = 0; i < mt_rng_count; i++){
MT[i] = h_MT[i];
MT[i].iState = i*MT_NN;
}
//seed does need to be used to initialize mt[] elements.
int threadsPerBlock = 256;
//get ceil of MT_RNG_COUNT/threadsPerBlock
int blocksPerGrid = (mt_rng_count+threadsPerBlock-1)/threadsPerBlock;
hipLaunchKernelGGL(( seedMTGPUState), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, seed);
if(hipMemcpyToSymbol(ds_MT, MT, mt_rng_count*sizeof(mt_struct_stripped))!=hipSuccess){
cerr << "seedMTGP failed" << endl;
exit(0);
}
free(MT);
}
////////////////////////////////////////////////////////////////////////////////
// Write MT_RNG_COUNT vertical lanes of nPerRng random numbers to *d_Random.
// For coalesced global writes MT_RNG_COUNT should be a multiple of warp size.
// Initial states for each generator are the same, since the states are
// initialized from the global seed. In order to improve distribution properties
// on small NPerRng supply dedicated (local) seed to each twister.
// The local seeds, in their turn, can be extracted from global seed
// by means of any simple random number generator, like LCG.
////////////////////////////////////////////////////////////////////////////////
__global__ void RandomGPU(
float *d_Random,
int nPerRng, int mt_rng_count)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int matrix_a, mask_b, mask_c;
//Load bit-vector Mersenne Twister parameters
matrix_a = ds_MT[tid].matrix_a;
mask_b = ds_MT[tid].mask_b;
mask_c = ds_MT[tid].mask_c;
iState = ds_MT[tid].iState;
mti1 = mt[iState];
for (iOut = 0; iOut < nPerRng; iOut++) {
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN*(1+tid)) iState1 -= MT_NN;
if(iStateM >= MT_NN*(1+tid)) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
// MT recurrence
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & mask_b;
x ^= (x << MT_SHIFTC) & mask_c;
x ^= (x >> MT_SHIFT1);
//Convert to (0, 1] float and write to global memory
d_Random[tid + iOut * mt_rng_count] = ((float)x + 1.0f) / 4294967296.0f;
}
ds_MT[tid].iState = iState;
}
////////////////////////////////////////////////////////////////////////////////
// Transform each of MT_RNG_COUNT lanes of nPerRng uniformly distributed
// random samples, produced by RandomGPU(), to normally distributed lanes
// using Cartesian form of Box-Muller transformation.
// nPerRng must be even.
////////////////////////////////////////////////////////////////////////////////
#define PI 3.14159265358979f
__device__ inline void BoxMuller(float& u1, float& u2){
float r = sqrtf(-2.0f * logf(u1));
float phi = 2 * PI * u2;
u1 = r * __cosf(phi);
u2 = r * __sinf(phi);
}
__global__ void BoxMullerGPU(float *d_Random, int nPerRng, int mt_rng_count){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int iOut = 0; iOut < nPerRng; iOut += 2)
BoxMuller(
d_Random[tid + (iOut + 0) * mt_rng_count],
d_Random[tid + (iOut + 1) * mt_rng_count]
);
}
//skip the separate BoxMullerGPU for increased speed (uses register memory).
//nPerRng must be a multiple of 2
__global__ void RandomNormGPU(
float *d_Random,
int nPerRng, int mt_rng_count)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int matrix_a, mask_b, mask_c;
float regVal1, regVal2; //need 2 values for boxmuller
bool boxFlag = false; //will perform boxmuller transform on true
//Load bit-vector Mersenne Twister parameters
matrix_a = ds_MT[tid].matrix_a;
mask_b = ds_MT[tid].mask_b;
mask_c = ds_MT[tid].mask_c;
iState = ds_MT[tid].iState;
mti1 = mt[iState];
for (iOut = 0; iOut < nPerRng; iOut++) {
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN*(1+tid)) iState1 -= MT_NN;
if(iStateM >= MT_NN*(1+tid)) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
// MT recurrence
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & mask_b;
x ^= (x << MT_SHIFTC) & mask_c;
x ^= (x >> MT_SHIFT1);
if(boxFlag){
regVal2 = ((float)x + 1.0f) / 4294967296.0f;
BoxMuller(regVal1,regVal2);
d_Random[tid + (iOut-1) * mt_rng_count] = regVal1;
d_Random[tid + iOut * mt_rng_count] = regVal2;
boxFlag = false;
}else{
regVal1 = ((float)x + 1.0f) / 4294967296.0f;
boxFlag = true;
}
}
ds_MT[tid].iState = iState;
}
extern "C" void uniformMTGPU(float * d_random){
hipLaunchKernelGGL(( RandomGPU), dim3(mt_blocks),dim3(mt_threads), 0, 0, d_random, mt_nPerRng, mt_rng_count);
}
extern "C" void normalMTGPU(float * d_random){
hipLaunchKernelGGL(( RandomNormGPU), dim3(mt_blocks),dim3(mt_threads), 0, 0, d_random, mt_nPerRng, mt_rng_count);
}
//initialize globals and setup state
//Note: mt_rng_count must equal blocks*threads. mt_rng_count*nPerRng should equal the total number of random numbers to be generated
extern "C" void initMTGPU(unsigned int seed, unsigned int blocks, unsigned int threads, unsigned int nPerRng, unsigned int mt_rng_c){
mt_blocks = blocks;
mt_threads = threads;
mt_nPerRng = nPerRng;
mt_rng_count = mt_rng_c;
loadMTGPU(MT_DATAFILE);
seedMTGPU(seed);
}
|
4bc4db45c24dd9f7eaa6ae62c9c158ac7a0a76ae.cu
|
/**
* @file MersenneTwister_d.cu
*
* @ingroup Simulator/Utils/RNG
*
* @brief MersenneTwister logic from Nvidia
*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*
*
* Edited by Warner Smidt Sep 4th 2011
* ds_MT now stores the seed state after each call to the random number generator.
* Each consecutive call to the random number generator will not produce the same
* results now.
* Note: iState has replaced seed in mt_struct_stripped, therefore the .dat files
* last parameter which was for the seed is now used for the iState.
* Also added RandomNormGPU which combines RandomGPU and BoxMuller for normalized
* random numbers without extra global memory transfers.
*
* Edit Sep 14th 2011
* MT_RNG_COUNT is the max total threads that will be used. initMTGP is now used
* to setup RandomNormGPU/RandomGPU to be called from normalMTGPU/uniformMTGPU.
* Allows the random number generation to be more dynamic without relying as much
* on #defines as well as being able to make the calculations for the needed data
 * at initialization only once, and not every time the random numbers are needed.
*/
#include <iostream>
#include <stdio.h>
using namespace std;
#include "MersenneTwister_d.h"
__device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT];
static mt_struct_stripped h_MT[MT_RNG_COUNT];
__device__ unsigned int mt[MT_RNG_COUNT*MT_NN];
//#define MT_DATAFILE "MersenneTwister/data/MersenneTwister.dat"
/*
//globals
__device__ static mt_struct_stripped * ds_MT;
static mt_struct_stripped * h_MT;
__device__ unsigned int * mt;
*/
unsigned int mt_rng_count;
unsigned int mt_blocks;
unsigned int mt_threads;
unsigned int mt_nPerRng;
//Load twister configurations
void loadMTGPU(const char *fname){
FILE *fd = fopen(fname, "rb");
if(!fd){
cerr << "initMTGPU(): failed to open " << fname << endl << "FAILED" << endl;
exit(0);
}
if( !fread(h_MT, mt_rng_count*sizeof(mt_struct_stripped), 1, fd) ){
cerr << "initMTGPU(): failed to load " << fname << endl << "FAILED" << endl;
exit(0);
}
fclose(fd);
}
//initialize the seed to mt[]
__global__ void seedMTGPUState(unsigned int seed){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
int iState;
mt[MT_NN*tid] = seed;
for (iState = MT_NN*tid+1; iState < MT_NN*(1+tid); iState++)
mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;
}
//Initialize/seed twister for current GPU context
void seedMTGPU(unsigned int seed){
int i;
//Need to be thread-safe
mt_struct_stripped *MT = (mt_struct_stripped *)malloc(mt_rng_count * sizeof(mt_struct_stripped));
for(i = 0; i < mt_rng_count; i++){
MT[i] = h_MT[i];
MT[i].iState = i*MT_NN;
}
//seed does need to be used to initialize mt[] elements.
int threadsPerBlock = 256;
//get ceil of MT_RNG_COUNT/threadsPerBlock
int blocksPerGrid = (mt_rng_count+threadsPerBlock-1)/threadsPerBlock;
seedMTGPUState<<<blocksPerGrid,threadsPerBlock>>>(seed);
if(cudaMemcpyToSymbol(ds_MT, MT, mt_rng_count*sizeof(mt_struct_stripped))!=cudaSuccess){
cerr << "seedMTGP failed" << endl;
exit(0);
}
free(MT);
}
////////////////////////////////////////////////////////////////////////////////
// Write MT_RNG_COUNT vertical lanes of nPerRng random numbers to *d_Random.
// For coalesced global writes MT_RNG_COUNT should be a multiple of warp size.
// Initial states for each generator are the same, since the states are
// initialized from the global seed. In order to improve distribution properties
// on small NPerRng supply dedicated (local) seed to each twister.
// The local seeds, in their turn, can be extracted from global seed
// by means of any simple random number generator, like LCG.
////////////////////////////////////////////////////////////////////////////////
__global__ void RandomGPU(
float *d_Random,
int nPerRng, int mt_rng_count)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int matrix_a, mask_b, mask_c;
//Load bit-vector Mersenne Twister parameters
matrix_a = ds_MT[tid].matrix_a;
mask_b = ds_MT[tid].mask_b;
mask_c = ds_MT[tid].mask_c;
iState = ds_MT[tid].iState;
mti1 = mt[iState];
for (iOut = 0; iOut < nPerRng; iOut++) {
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN*(1+tid)) iState1 -= MT_NN;
if(iStateM >= MT_NN*(1+tid)) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
// MT recurrence
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & mask_b;
x ^= (x << MT_SHIFTC) & mask_c;
x ^= (x >> MT_SHIFT1);
//Convert to (0, 1] float and write to global memory
d_Random[tid + iOut * mt_rng_count] = ((float)x + 1.0f) / 4294967296.0f;
}
ds_MT[tid].iState = iState;
}
////////////////////////////////////////////////////////////////////////////////
// Transform each of MT_RNG_COUNT lanes of nPerRng uniformly distributed
// random samples, produced by RandomGPU(), to normally distributed lanes
// using Cartesian form of Box-Muller transformation.
// nPerRng must be even.
////////////////////////////////////////////////////////////////////////////////
#define PI 3.14159265358979f
__device__ inline void BoxMuller(float& u1, float& u2){
float r = sqrtf(-2.0f * logf(u1));
float phi = 2 * PI * u2;
u1 = r * __cosf(phi);
u2 = r * __sinf(phi);
}
__global__ void BoxMullerGPU(float *d_Random, int nPerRng, int mt_rng_count){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int iOut = 0; iOut < nPerRng; iOut += 2)
BoxMuller(
d_Random[tid + (iOut + 0) * mt_rng_count],
d_Random[tid + (iOut + 1) * mt_rng_count]
);
}
//skip the separate BoxMullerGPU for increased speed (uses register memory).
//nPerRng must be a multiple of 2
__global__ void RandomNormGPU(
float *d_Random,
int nPerRng, int mt_rng_count)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int matrix_a, mask_b, mask_c;
float regVal1, regVal2; //need 2 values for boxmuller
bool boxFlag = false; //will perform boxmuller transform on true
//Load bit-vector Mersenne Twister parameters
matrix_a = ds_MT[tid].matrix_a;
mask_b = ds_MT[tid].mask_b;
mask_c = ds_MT[tid].mask_c;
iState = ds_MT[tid].iState;
mti1 = mt[iState];
for (iOut = 0; iOut < nPerRng; iOut++) {
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN*(1+tid)) iState1 -= MT_NN;
if(iStateM >= MT_NN*(1+tid)) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
// MT recurrence
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & mask_b;
x ^= (x << MT_SHIFTC) & mask_c;
x ^= (x >> MT_SHIFT1);
if(boxFlag){
regVal2 = ((float)x + 1.0f) / 4294967296.0f;
BoxMuller(regVal1,regVal2);
d_Random[tid + (iOut-1) * mt_rng_count] = regVal1;
d_Random[tid + iOut * mt_rng_count] = regVal2;
boxFlag = false;
}else{
regVal1 = ((float)x + 1.0f) / 4294967296.0f;
boxFlag = true;
}
}
ds_MT[tid].iState = iState;
}
extern "C" void uniformMTGPU(float * d_random){
RandomGPU<<<mt_blocks,mt_threads>>>(d_random, mt_nPerRng, mt_rng_count);
}
extern "C" void normalMTGPU(float * d_random){
RandomNormGPU<<<mt_blocks,mt_threads>>>(d_random, mt_nPerRng, mt_rng_count);
}
//initialize globals and setup state
//Note: mt_rng_count must equal blocks*threads. mt_rng_count*nPerRng should equal the total number of random numbers to be generated
extern "C" void initMTGPU(unsigned int seed, unsigned int blocks, unsigned int threads, unsigned int nPerRng, unsigned int mt_rng_c){
mt_blocks = blocks;
mt_threads = threads;
mt_nPerRng = nPerRng;
mt_rng_count = mt_rng_c;
loadMTGPU(MT_DATAFILE);
seedMTGPU(seed);
}
|
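A hedged host-side sketch of driving the interface above: per the note on initMTGPU, blocks*threads must equal mt_rng_count and mt_rng_count*nPerRng should equal the total number of values requested, and RandomNormGPU additionally needs nPerRng to be even. The block/thread counts below are placeholders, and d_random is assumed to already have room for `total` floats:

// Illustrative only: fill d_random with `total` normally distributed floats.
void fillNormal(float *d_random, unsigned int total, unsigned int seed) {
  const unsigned int blocks = 32;
  const unsigned int threads = 128;
  const unsigned int rngs = blocks * threads;       // becomes mt_rng_count
  const unsigned int nPerRng = total / rngs;        // assumed divisible and even
  initMTGPU(seed, blocks, threads, nPerRng, rngs);  // loads MT_DATAFILE and seeds the state
  normalMTGPU(d_random);                            // writes rngs * nPerRng values
}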
5bb81a11733dbf1fae8e6fbc93e69717dd2f1fd2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ops.cuh"
#define DEBUG0
#define CEIL(a,b) (1 + ((a - 1) / b))
using namespace std;
__host__ dim3* getTransposeGridDims(size_t height, size_t width, size_t tiledim)
{
dim3 dimBlock(min(min(tiledim, size_t(32)), width),
min(min(tiledim, size_t(32)), height));
dim3 dimGrid(max(CEIL(width, dimBlock.x), size_t(1)),
max(CEIL(height, dimBlock.y), size_t(1)));
dim3 shMem(dimBlock.x*(dimBlock.y+1)*sizeof(double));
return new dim3[3]{dimGrid, dimBlock, shMem};
}
__host__ dim3* getDotGridDims(size_t Arows, size_t Bcols, size_t tiledim)
{
tiledim = min(tiledim, size_t(32));
dim3 dimBlock(tiledim, tiledim);
dim3 dimGrid(CEIL(Bcols, tiledim), CEIL(Arows, tiledim));
dim3 shMem(2*tiledim*(tiledim+1)*sizeof(double));
return new dim3[3]{ dimGrid, dimBlock, shMem};
}
__host__ dim3* getMaxGrid(size_t rows, size_t cols)
{
size_t tiledim = min(max(rows,cols), size_t(32));
const dim3 dimBlock(tiledim, tiledim);
const dim3 dimGrid(CEIL(cols, tiledim), CEIL(rows, tiledim));
return new dim3[2]{ dimGrid, dimBlock };
}
__host__ dim3* getOneDims(size_t size)
{
int dim = min(size, size_t(1024));
dim = (dim % 2 == 0) ? dim: dim + 1;
dim3 dimBlock(dim);
dim3 dimGrid(CEIL(size,dimBlock.x));
dim3 shMem(dimBlock.x * sizeof(double));
return new dim3[3]{ dimGrid, dimBlock, shMem };
}
__global__ void deviceFilter(const size_t cols, const size_t rows, double* unfiltered, double* filtered, int * remap)
{
const size_t tx = blockDim.x*blockIdx.x + threadIdx.x;
const size_t ty = blockDim.y*blockIdx.y + threadIdx.y;
if (tx >= cols || ty >= rows) return;
filtered[tx*rows + ty] = unfiltered[remap[tx]*rows+ty];
}
__global__ void deviceTranspose(double *odata, const double *idata, int width, int height)
{
extern __shared__ double tile[];
int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
int index_in = xIndex*height + yIndex;
int index_out = yIndex*width + xIndex;
if (index_out >= width*height) return;
#ifdef DEBUG
if (index_in == 0)
printf("Grid: (%d,%d); Block: (%d,%d)\n", gridDim.x,
gridDim.y, blockDim.x, blockDim.y);
__syncthreads();
#endif
int ix = threadIdx.y*blockDim.x + threadIdx.x;
tile[ix] = idata[index_in];
__syncthreads();
odata[index_out] = tile[ix];
}
__global__ void eye(double *values, int rows, int cols) {
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
if (y < rows && x < cols)
values[x*rows+y] = (x == y) ? 1.0 : 0.0;
}
__global__ void deviceDot(double* A, double* B, double* C, size_t Acols, size_t Bcols, size_t Arows)
{
size_t tiledim = blockDim.x;
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
double* Csub = &C[Bcols*tiledim*blockRow + tiledim*blockCol];
double Cvalue = 0;
int row = threadIdx.y;
int col = threadIdx.x;
for (int m = 0; m <= (Acols / tiledim); ++m) {
double* Asub = &A[Acols*tiledim*blockRow + tiledim*m];
double* Bsub = &B[Bcols*tiledim*m + tiledim*blockCol];
extern __shared__ double shmem[];
double *Bs = (double*)&shmem;
double *As = (double*)&shmem[tiledim * (tiledim+1)];
As[row*tiledim+col] = Asub[row*Acols + col];
Bs[row*tiledim+col] = Bsub[row*Bcols + col];
__syncthreads();
for (int e = 0; e < tiledim; ++e)
Cvalue += As[row*tiledim+e] * Bs[e*tiledim+col];
__syncthreads();
}
if (col + blockDim.x*blockIdx.x < Bcols && row + blockDim.y*blockIdx.y < Arows)
Csub[row*Bcols + col] = Cvalue;
}
__global__ void deviceCopy(double *A, size_t lda, double *B, size_t ldb, size_t row, size_t col, size_t dimy, size_t dimx)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dimx || y >= dimy) return;
B[x*ldb+y] = A[(col+x)*lda + y+row];
}
//First stream should be assigned to the handle, second stream should be free
void invPipelineCUDA(size_t dim, hipStream_t *streams, hipsolverDnHandle_t handle, double * d_values, double * &d_result)
{
int *d_Ipiv, *d_info;
int lwork, info;
double *d_work;
cusolve(hipsolverDnDgetrf_bufferSize(handle, dim, dim, d_values, dim, &lwork));
getCudaEyeAsynch(d_result, dim, streams[1]);
cud(hipMalloc((void**)&d_Ipiv, sizeof(int) * dim));
cud(hipMalloc((void**)&d_info, sizeof(int)));
cud(hipMalloc((void**)&d_work, sizeof(double)*lwork));
cusolve(hipsolverDnDgetrf(handle, dim, dim, d_values, dim, d_work, d_Ipiv, d_info));
cud(hipMemcpyAsync(&info, d_info, sizeof(int), hipMemcpyDeviceToHost, streams[0]));
cud(hipStreamSynchronize(streams[0]));
if (info < 0){
printf("Error #%d after factorization, aborting.\n", info);
goto CLEAR;
}
cud(hipStreamSynchronize(streams[1]));
cusolve(hipsolverDnDgetrs(handle, HIPBLAS_OP_N, dim, dim, d_values, dim, d_Ipiv, d_result, dim, d_info));
cud(hipStreamSynchronize(streams[0]));
cud(hipMemcpyAsync(&info, d_info, sizeof(int), hipMemcpyDeviceToHost, streams[0]));
cud(hipStreamSynchronize(streams[0]));
if (info < 0) printf("Error #%d after solving, aborting.\n", info);
CLEAR:
cud(hipFree(d_Ipiv));
cud(hipFree(d_info));
cud(hipFree(d_work));
}
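//Hedged usage sketch (not part of the original source): per the comment above the
//function, streams[0] must be the stream already attached to `handle` via
//hipsolverDnSetStream, and streams[1] must be idle so getCudaEyeAsynch can build the
//identity on it. The names below (solverStream, spareStream, d_A, d_Ainv, dim) are
//illustrative assumptions only.
//
//  hipStream_t pair[2] = { solverStream, spareStream };
//  double *d_A = nullptr, *d_Ainv = nullptr;              // dim x dim, device memory
//  cud(hipMalloc(&d_A,    sizeof(double) * dim * dim));
//  cud(hipMalloc(&d_Ainv, sizeof(double) * dim * dim));
//  /* ... fill d_A ... */
//  invPipelineCUDA(dim, pair, solverHandle, d_A, d_Ainv); // d_A ends up holding its LU factors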
void CUDAPipeline(Matrix& Xt, Matrix& Xval, Matrix& Yt, Matrix& Yval, Matrix& X, Matrix& Y,
int *d_remap, int newcols, hipStream_t * streams, hipblasHandle_t *cublasHandles,
hipsolverDnHandle_t *cuSolverHandles, double *d_num_t, double *d_num_val, double** pointers)
{
//size_t newcols = 0;
//size_t cols = Xt.ncols(), rows = Xt.nrows();
//int *remap = (int*)malloc(sizeof(int)*cols);
////Create a mask
//for (int i = 0; i < cols; ++i)
// if (curr_mask[i]) {
// remap[newcols] = i;
// newcols++;
// }
size_t rows = Xt.nrows();
dim3 * dims = getMaxGrid(Xt.nrows(), newcols);
//cud(hipStreamSynchronize(streams[4]));
//cud(hipMemcpyAsync((int*)pointers[s.d_remap], remap, sizeof(int)*cols, hipMemcpyHostToDevice, streams[4]));
//cud(hipStreamSynchronize(streams[4]));
//Filter X matrices
cud(hipStreamSynchronize(streams[0]));
cud(hipStreamSynchronize(streams[1]));
hipLaunchKernelGGL(( deviceFilter) , dim3(dims[0]), dim3(dims[1]), 0, streams[0] , newcols, rows, Xt.values, (double*)pointers[s.d_Xt_filtered], d_remap);
hipLaunchKernelGGL(( deviceFilter) , dim3(dims[0]), dim3(dims[1]), 0, streams[1] , newcols, rows, Xval.values, (double*)pointers[s.d_Xval_filtered], d_remap);
//Compute X^T * Y for both splits
cud(hipStreamSynchronize(streams[2]));
cud(hipStreamSynchronize(streams[3]));
cublas(hipblasDgemm(cublasHandles[2], HIPBLAS_OP_T, HIPBLAS_OP_N, newcols, 1, rows, &one,
(double*)pointers[s.d_Xt_filtered], rows, Yt.values, rows, &zero, (double*)pointers[s.d_XTdotY_t], newcols));
cublas(hipblasDgemm(cublasHandles[3], HIPBLAS_OP_T, HIPBLAS_OP_N, newcols, 1, rows, &one,
(double*)pointers[s.d_Xval_filtered], rows, Yval.values, rows, &zero, (double*)pointers[s.d_XTdotY_val], newcols));
//Synchronize for further gemm
cud(hipStreamSynchronize(streams[0]));
cud(hipStreamSynchronize(streams[1]));
cublas(hipblasDgemm(cublasHandles[0], HIPBLAS_OP_T, HIPBLAS_OP_N, newcols, newcols, rows, &one,
(double*)pointers[s.d_Xt_filtered], rows, (double*)pointers[s.d_Xt_filtered], rows, &zero, (double*)pointers[s.d_XTdotX_t], newcols));
cublas(hipblasDgemm(cublasHandles[1], HIPBLAS_OP_T, HIPBLAS_OP_N, newcols, newcols, rows, &one,
(double*)pointers[s.d_Xval_filtered], rows, (double*)pointers[s.d_Xval_filtered], rows, &zero, (double*)pointers[s.d_XTdotX_val], newcols));
if (newcols<10) {
cud(hipStreamSynchronize(streams[0]));
cud(hipStreamSynchronize(streams[1]));
cublas(hipblasDgetrfBatched(cublasHandles[0], newcols, (double**)pointers[s.d_matrices], newcols, (int*)pointers[s.d_piv], (int*)pointers[s.d_info], 2));
cud(hipStreamSynchronize(streams[0]));
cublas(hipblasDgetriBatched(cublasHandles[0], newcols, (const double **)pointers[s.d_matrices], newcols,
(int*)pointers[s.d_piv], (double**)pointers[s.d_inverted], newcols, (int*)pointers[s.d_info], 2));
}
else {
hipStream_t *streams_t = new hipStream_t[2]{ streams[0], streams[4] };
hipStream_t *streams_val = new hipStream_t[2]{ streams[1], streams[5] };
cud(hipStreamSynchronize(streams[0]));
cud(hipStreamSynchronize(streams[1]));
cud(hipStreamSynchronize(streams[4]));
cud(hipStreamSynchronize(streams[5]));
//Synchronize again, this time for inversion
thread th1 = thread(invPipelineCUDA, newcols, ref(streams_t), cuSolverHandles[0], (double*)pointers[s.d_XTdotX_t], ref((double*&)pointers[s.d_XTdotX_inv_t]));
thread th2 = thread(invPipelineCUDA, newcols, ref(streams_val), cuSolverHandles[1], (double*)pointers[s.d_XTdotX_val], ref((double*&)pointers[s.d_XTdotX_inv_val]));
th1.join(); th2.join();
delete[] streams_t;
delete[] streams_val;
}
//Gemm to get coefs
cud(hipStreamSynchronize(streams[0]));
cud(hipStreamSynchronize(streams[1]));
cublas(hipblasDgemm(cublasHandles[0], HIPBLAS_OP_N, HIPBLAS_OP_N, newcols, 1, newcols, &one,
(double*)pointers[s.d_XTdotX_inv_t], newcols, (double*)pointers[s.d_XTdotY_t], newcols, &zero, (double*)pointers[s.d_coefs_t], newcols));
cublas(hipblasDgemm(cublasHandles[1], HIPBLAS_OP_N, HIPBLAS_OP_N, newcols, 1, newcols, &one,
(double*)pointers[s.d_XTdotX_inv_val], newcols, (double*)pointers[s.d_XTdotY_val], newcols, &zero, (double*)pointers[s.d_coefs_val], newcols));
//Gemm to infer Ys
cud(hipStreamSynchronize(streams[0]));
cud(hipStreamSynchronize(streams[1]));
cublas(hipblasDgemm(cublasHandles[0], HIPBLAS_OP_N, HIPBLAS_OP_N, rows, 1, newcols, &one,
(double*)pointers[s.d_Xt_filtered], rows, (double*)pointers[s.d_coefs_t], rows, &zero, (double*)pointers[s.d_Y_hat_t], rows));
cublas(hipblasDgemm(cublasHandles[1], HIPBLAS_OP_N, HIPBLAS_OP_N, rows, 1, newcols, &one,
(double*)pointers[s.d_Xval_filtered], rows, (double*)pointers[s.d_coefs_val], rows, &zero, (double*)pointers[s.d_Y_hat_val], rows));
//Get error
dim3 * rowDim = getOneDims(rows);
cud(hipStreamSynchronize(streams[0]));
cud(hipStreamSynchronize(streams[1]));
elementwise << < rowDim[0], rowDim[1], 0, streams[0] >> > ( (double*)pointers[s.d_Y_hat_t], Yt.values, rows, (double*)pointers[s.diff_t], diff<double>());
elementwise << < rowDim[0], rowDim[1], 0, streams[1] >> > ( (double*)pointers[s.d_Y_hat_val], Yval.values, rows, (double*)pointers[s.diff_val], diff<double>());
//Square the differences
cud(hipStreamSynchronize(streams[0]));
cud(hipStreamSynchronize(streams[1]));
elementwise << < rowDim[0], rowDim[1], 0, streams[0] >> > ( (double*)pointers[s.diff_t], (double*)pointers[s.diff_t], rows, (double*)pointers[s.d_diff_t_sq], product<double>());
elementwise << < rowDim[0], rowDim[1], 0, streams[1] >> > ( (double*)pointers[s.diff_val], (double*)pointers[s.diff_val], rows, (double*)pointers[s.d_diff_val_sq], product<double>());
//Reduce
cud(hipStreamSynchronize(streams[0]));
cud(hipStreamSynchronize(streams[1]));
reduce((double*)pointers[s.d_diff_t_sq], rows, sum<double>(), &d_num_t, streams[0]);
reduce((double*)pointers[s.d_diff_val_sq], rows, sum<double>(), &d_num_val, streams[1]);
}
void cudaPrecomputeDenominators(Matrix& Yt, Matrix Yval, double &denom_t, double &denom_val, hipStream_t *streams)
{
size_t rows = Yt.rows;
dim3 * rowDim = getOneDims(rows);
double *d_denom_t, *d_denom_val;
double *d_Yt_sq, *d_Yval_sq;
cud(hipMalloc(&d_Yt_sq, sizeof(double)*rows));
cud(hipMalloc(&d_Yval_sq, sizeof(double)*rows));
cud(hipStreamSynchronize(streams[0]));
cud(hipStreamSynchronize(streams[1]));
elementwise << < rowDim[0], rowDim[1], 0, streams[0] >> > (Yt.values, Yt.values, rows, d_Yt_sq, product<double>());
elementwise << < rowDim[0], rowDim[1], 0, streams[1] >> > (Yval.values, Yval.values, rows, d_Yval_sq, product<double>());
cud(hipStreamSynchronize(streams[0]));
cud(hipStreamSynchronize(streams[1]));
cud(hipMalloc((void **)&d_denom_t, sizeof(double)));
cud(hipMalloc((void **)&d_denom_val, sizeof(double)));
reduce(d_Yt_sq, rows, sum<double>(), &d_denom_t, streams[0]);
reduce(d_Yval_sq, rows, sum<double>(), &d_denom_val, streams[1]);
cud(hipStreamSynchronize(streams[0]));
cud(hipStreamSynchronize(streams[1]));
cud(hipMemcpy(&denom_t, d_denom_t, sizeof(double), hipMemcpyDeviceToHost));
cud(hipMemcpy(&denom_val, d_denom_val, sizeof(double), hipMemcpyDeviceToHost));
cud(hipFree(d_denom_t));
cud(hipFree(d_denom_val));
}
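//Hedged note (an assumption, not stated in this file): CUDAPipeline reduces the squared
//residuals into d_num_t / d_num_val, and this routine reduces the squared targets into
//denom_t / denom_val, so the caller presumably combines them into an uncentered
//R^2-style score, e.g.
//
//  double score_t = 1.0 - num_t / denom_t;   // num_t copied back from d_num_t
//
//The actual metric is computed outside this file, so treat this only as a sketch.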
double** allocateDeviceMemory(size_t rows, size_t cols)
{
double** pointers = (double**)malloc(sizeof(double*) * 23);
cud(hipMalloc((void**)&pointers[s.d_XTdotX_inv_t], sizeof(double)*cols*cols));
cud(hipMalloc((void**)&pointers[s.d_XTdotX_inv_val], sizeof(double)*cols*cols));
cud(hipMalloc((void**)&pointers[s.d_remap], sizeof(int)*cols));
cud(hipMalloc((void**)&pointers[s.d_Xt_filtered], sizeof(double)*cols*rows));
cud(hipMalloc((void**)&pointers[s.d_Xval_filtered], sizeof(double)*cols*rows));
cud(hipMalloc((void**)&pointers[s.d_XTdotX_t], sizeof(double)*cols*cols));
cud(hipMalloc((void**)&pointers[s.d_XTdotX_val], sizeof(double)*cols*cols));
cud(hipMalloc((void**)&pointers[s.d_XTdotY_t], sizeof(double)*cols));
cud(hipMalloc((void**)&pointers[s.d_XTdotY_val], sizeof(double)*cols));
cud(hipMalloc((void**)&pointers[s.d_matrices], 2 * sizeof(double *)));
cud(hipMalloc((void**)&pointers[s.d_inverted], 2 * sizeof(double *)));
double ** matrices = new double*[2]{ pointers[s.d_XTdotX_t], pointers[s.d_XTdotX_val] };
double ** inverted = new double*[2]{ pointers[s.d_XTdotX_inv_t], pointers[s.d_XTdotX_inv_val] };
cud(hipMemcpy(pointers[s.d_matrices], matrices, sizeof(double*) * 2, hipMemcpyHostToDevice));
cud(hipMemcpy(pointers[s.d_inverted], inverted, sizeof(double*) * 2, hipMemcpyHostToDevice));
cud(hipMalloc((void**)&pointers[s.d_piv], 2 * cols * sizeof(int)));
cud(hipMalloc((void**)&pointers[s.d_info], 2 * sizeof(int)));
cud(hipMalloc((void**)&pointers[s.d_coefs_t], sizeof(double)*cols));
cud(hipMalloc((void**)&pointers[s.d_coefs_val], sizeof(double)*cols));
cud(hipMalloc((void**)&pointers[s.d_Y_hat_t], sizeof(double)*rows));
cud(hipMalloc((void**)&pointers[s.d_Y_hat_val], sizeof(double)*rows));
cud(hipMalloc((void**)&pointers[s.diff_t], sizeof(double)*rows));
cud(hipMalloc((void**)&pointers[s.diff_val], sizeof(double)*rows));
cud(hipMalloc((void**)&pointers[s.d_diff_t_sq], sizeof(double)*rows));
cud(hipMalloc((void**)&pointers[s.d_diff_val_sq], sizeof(double)*rows));
cud(hipMalloc((void**)&pointers[s.d_Yt_sq], sizeof(double)*rows));
cud(hipMalloc((void**)&pointers[s.d_Yval_sq], sizeof(double)*rows));
delete[] matrices;
delete[] inverted;
return pointers;
}
void deallocateDeviceMemory(void** allocations)
{
for (int i = 0; i < 23; ++i) cud(hipFree(allocations[i]));
}
void createCUDAWorkspace(hipStream_t * &streams, hipsolverDnHandle_t * &cusdolverHandles, hipblasHandle_t * &cublasHandles)
{
streams = (hipStream_t*)malloc(sizeof(hipStream_t) * 6);
cusdolverHandles = (hipsolverDnHandle_t*)malloc(sizeof(hipsolverDnHandle_t) * 2);
cublasHandles = (hipblasHandle_t*)malloc(sizeof(hipblasHandle_t) * 4);
for (int i = 0; i < 6; ++i) {
streams[i] = hipStream_t();
hipStreamCreate(&streams[i]);
if (i<4) {
cublasHandles[i] = hipblasHandle_t();
hipblasCreate(&cublasHandles[i]);
hipblasSetStream(cublasHandles[i], streams[i]);
if (i<2) {
cusdolverHandles[i] = hipsolverDnHandle_t();
hipsolverDnCreate(&cusdolverHandles[i]);
hipsolverDnSetStream(cusdolverHandles[i], streams[i]);
}
}
}
}
void destroyCUDAWorkspace(hipStream_t * &streams, hipsolverDnHandle_t * &cusdolverHandles, hipblasHandle_t * &cublasHandles)
{
for (int i = 0; i < 6; ++i) {
if (i < 4) {
if (i < 2)
hipsolverDnDestroy(cusdolverHandles[i]);
hipblasDestroy(cublasHandles[i]);
}
hipStreamDestroy(streams[i]);
}
free(cublasHandles);
free(cusdolverHandles);
free(streams);
}
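//Hedged usage sketch (assumed call order, not shown in this file): the workspace helpers
//are intended to bracket the pipeline:
//
//  hipStream_t *streams = nullptr;
//  hipsolverDnHandle_t *solverHandles = nullptr;
//  hipblasHandle_t *blasHandles = nullptr;
//  createCUDAWorkspace(streams, solverHandles, blasHandles);   // 6 streams, 4 cuBLAS, 2 cuSOLVER handles
//  double **pointers = allocateDeviceMemory(rows, cols);
//  /* ... CUDAPipeline(...), cudaPrecomputeDenominators(...) ... */
//  deallocateDeviceMemory((void**)pointers);
//  free(pointers);                                             // the host array itself is malloc'ed
//  destroyCUDAWorkspace(streams, solverHandles, blasHandles);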
|
5bb81a11733dbf1fae8e6fbc93e69717dd2f1fd2.cu
|
#include "ops.cuh"
#define DEBUG0
#define CEIL(a,b) (1 + ((a - 1) / b))
using namespace std;
__host__ dim3* getTransposeGridDims(size_t height, size_t width, size_t tiledim)
{
dim3 dimBlock(min(min(tiledim, size_t(32)), width),
min(min(tiledim, size_t(32)), height));
dim3 dimGrid(max(CEIL(width, dimBlock.x), size_t(1)),
max(CEIL(height, dimBlock.y), size_t(1)));
dim3 shMem(dimBlock.x*(dimBlock.y+1)*sizeof(double));
return new dim3[3]{dimGrid, dimBlock, shMem};
}
__host__ dim3* getDotGridDims(size_t Arows, size_t Bcols, size_t tiledim)
{
tiledim = min(tiledim, size_t(32));
dim3 dimBlock(tiledim, tiledim);
dim3 dimGrid(CEIL(Bcols, tiledim), CEIL(Arows, tiledim));
dim3 shMem(2*tiledim*(tiledim+1)*sizeof(double));
return new dim3[3]{ dimGrid, dimBlock, shMem};
}
__host__ dim3* getMaxGrid(size_t rows, size_t cols)
{
size_t tiledim = min(max(rows,cols), size_t(32));
const dim3 dimBlock(tiledim, tiledim);
const dim3 dimGrid(CEIL(cols, tiledim), CEIL(rows, tiledim));
return new dim3[2]{ dimGrid, dimBlock };
}
__host__ dim3* getOneDims(size_t size)
{
int dim = min(size, size_t(1024));
dim = (dim % 2 == 0) ? dim: dim + 1;
dim3 dimBlock(dim);
dim3 dimGrid(CEIL(size,dimBlock.x));
dim3 shMem(dimBlock.x * sizeof(double));
return new dim3[3]{ dimGrid, dimBlock, shMem };
}
__global__ void deviceFilter(const size_t cols, const size_t rows, double* unfiltered, double* filtered, int * remap)
{
const size_t tx = blockDim.x*blockIdx.x + threadIdx.x;
const size_t ty = blockDim.y*blockIdx.y + threadIdx.y;
if (tx >= cols || ty >= rows) return;
filtered[tx*rows + ty] = unfiltered[remap[tx]*rows+ty];
}
__global__ void deviceTranspose(double *odata, const double *idata, int width, int height)
{
extern __shared__ double tile[];
int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
int index_in = xIndex*height + yIndex;
int index_out = yIndex*width + xIndex;
if (index_out >= width*height) return;
#ifdef DEBUG
if (index_in == 0)
printf("Grid: (%d,%d); Block: (%d,%d)\n", gridDim.x,
gridDim.y, blockDim.x, blockDim.y);
__syncthreads();
#endif
int ix = threadIdx.y*blockDim.x + threadIdx.x;
tile[ix] = idata[index_in];
__syncthreads();
odata[index_out] = tile[ix];
}
__global__ void eye(double *values, int rows, int cols) {
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
if (y < rows && x < cols)
values[x*rows+y] = (x == y) ? 1.0 : 0.0;
}
__global__ void deviceDot(double* A, double* B, double* C, size_t Acols, size_t Bcols, size_t Arows)
{
size_t tiledim = blockDim.x;
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
double* Csub = &C[Bcols*tiledim*blockRow + tiledim*blockCol];
double Cvalue = 0;
int row = threadIdx.y;
int col = threadIdx.x;
for (int m = 0; m <= (Acols / tiledim); ++m) {
double* Asub = &A[Acols*tiledim*blockRow + tiledim*m];
double* Bsub = &B[Bcols*tiledim*m + tiledim*blockCol];
extern __shared__ double shmem[];
double *Bs = (double*)&shmem;
double *As = (double*)&shmem[tiledim * (tiledim+1)];
As[row*tiledim+col] = Asub[row*Acols + col];
Bs[row*tiledim+col] = Bsub[row*Bcols + col];
__syncthreads();
for (int e = 0; e < tiledim; ++e)
Cvalue += As[row*tiledim+e] * Bs[e*tiledim+col];
__syncthreads();
}
if (col + blockDim.x*blockIdx.x < Bcols && row + blockDim.y*blockIdx.y < Arows)
Csub[row*Bcols + col] = Cvalue;
}
__global__ void deviceCopy(double *A, size_t lda, double *B, size_t ldb, size_t row, size_t col, size_t dimy, size_t dimx)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dimx || y >= dimy) return;
B[x*ldb+y] = A[(col+x)*lda + y+row];
}
//First stream should be assigned to the handle, second stream should be free
void invPipelineCUDA(size_t dim, cudaStream_t *streams, cusolverDnHandle_t handle, double * d_values, double * &d_result)
{
int *d_Ipiv, *d_info;
int lwork, info;
double *d_work;
cusolve(cusolverDnDgetrf_bufferSize(handle, dim, dim, d_values, dim, &lwork));
getCudaEyeAsynch(d_result, dim, streams[1]);
cud(cudaMalloc((void**)&d_Ipiv, sizeof(int) * dim));
cud(cudaMalloc((void**)&d_info, sizeof(int)));
cud(cudaMalloc((void**)&d_work, sizeof(double)*lwork));
cusolve(cusolverDnDgetrf(handle, dim, dim, d_values, dim, d_work, d_Ipiv, d_info));
cud(cudaMemcpyAsync(&info, d_info, sizeof(int), cudaMemcpyDeviceToHost, streams[0]));
cud(cudaStreamSynchronize(streams[0]));
if (info < 0){
printf("Error #%d after factorization, aborting.\n", info);
goto CLEAR;
}
cud(cudaStreamSynchronize(streams[1]));
cusolve(cusolverDnDgetrs(handle, CUBLAS_OP_N, dim, dim, d_values, dim, d_Ipiv, d_result, dim, d_info));
cud(cudaStreamSynchronize(streams[0]));
cud(cudaMemcpyAsync(&info, d_info, sizeof(int), cudaMemcpyDeviceToHost, streams[0]));
cud(cudaStreamSynchronize(streams[0]));
if (info < 0) printf("Error #%d after solving, aborting.\n", info);
CLEAR:
cud(cudaFree(d_Ipiv));
cud(cudaFree(d_info));
cud(cudaFree(d_work));
}
void CUDAPipeline(Matrix& Xt, Matrix& Xval, Matrix& Yt, Matrix& Yval, Matrix& X, Matrix& Y,
int *d_remap, int newcols, cudaStream_t * streams, cublasHandle_t *cublasHandles,
cusolverDnHandle_t *cuSolverHandles, double *d_num_t, double *d_num_val, double** pointers)
{
//size_t newcols = 0;
//size_t cols = Xt.ncols(), rows = Xt.nrows();
//int *remap = (int*)malloc(sizeof(int)*cols);
////Create a mask
//for (int i = 0; i < cols; ++i)
// if (curr_mask[i]) {
// remap[newcols] = i;
// newcols++;
// }
size_t rows = Xt.nrows();
dim3 * dims = getMaxGrid(Xt.nrows(), newcols);
//cud(cudaStreamSynchronize(streams[4]));
//cud(cudaMemcpyAsync((int*)pointers[s.d_remap], remap, sizeof(int)*cols, cudaMemcpyHostToDevice, streams[4]));
//cud(cudaStreamSynchronize(streams[4]));
//Filter X matrices
cud(cudaStreamSynchronize(streams[0]));
cud(cudaStreamSynchronize(streams[1]));
deviceFilter <<< dims[0], dims[1], 0, streams[0] >>> (newcols, rows, Xt.values, (double*)pointers[s.d_Xt_filtered], d_remap);
deviceFilter <<< dims[0], dims[1], 0, streams[1] >>> (newcols, rows, Xval.values, (double*)pointers[s.d_Xval_filtered], d_remap);
//Compute X^T * Y for both splits
cud(cudaStreamSynchronize(streams[2]));
cud(cudaStreamSynchronize(streams[3]));
cublas(cublasDgemm(cublasHandles[2], CUBLAS_OP_T, CUBLAS_OP_N, newcols, 1, rows, &one,
(double*)pointers[s.d_Xt_filtered], rows, Yt.values, rows, &zero, (double*)pointers[s.d_XTdotY_t], newcols));
cublas(cublasDgemm(cublasHandles[3], CUBLAS_OP_T, CUBLAS_OP_N, newcols, 1, rows, &one,
(double*)pointers[s.d_Xval_filtered], rows, Yval.values, rows, &zero, (double*)pointers[s.d_XTdotY_val], newcols));
//Synchronize for further gemm
cud(cudaStreamSynchronize(streams[0]));
cud(cudaStreamSynchronize(streams[1]));
cublas(cublasDgemm(cublasHandles[0], CUBLAS_OP_T, CUBLAS_OP_N, newcols, newcols, rows, &one,
(double*)pointers[s.d_Xt_filtered], rows, (double*)pointers[s.d_Xt_filtered], rows, &zero, (double*)pointers[s.d_XTdotX_t], newcols));
cublas(cublasDgemm(cublasHandles[1], CUBLAS_OP_T, CUBLAS_OP_N, newcols, newcols, rows, &one,
(double*)pointers[s.d_Xval_filtered], rows, (double*)pointers[s.d_Xval_filtered], rows, &zero, (double*)pointers[s.d_XTdotX_val], newcols));
if (newcols<10) {
cud(cudaStreamSynchronize(streams[0]));
cud(cudaStreamSynchronize(streams[1]));
cublas(cublasDgetrfBatched(cublasHandles[0], newcols, (double**)pointers[s.d_matrices], newcols, (int*)pointers[s.d_piv], (int*)pointers[s.d_info], 2));
cud(cudaStreamSynchronize(streams[0]));
cublas(cublasDgetriBatched(cublasHandles[0], newcols, (const double **)pointers[s.d_matrices], newcols,
(int*)pointers[s.d_piv], (double**)pointers[s.d_inverted], newcols, (int*)pointers[s.d_info], 2));
}
else {
cudaStream_t *streams_t = new cudaStream_t[2]{ streams[0], streams[4] };
cudaStream_t *streams_val = new cudaStream_t[2]{ streams[1], streams[5] };
cud(cudaStreamSynchronize(streams[0]));
cud(cudaStreamSynchronize(streams[1]));
cud(cudaStreamSynchronize(streams[4]));
cud(cudaStreamSynchronize(streams[5]));
//Synchronize again, this time for inversion
thread th1 = thread(invPipelineCUDA, newcols, ref(streams_t), cuSolverHandles[0], (double*)pointers[s.d_XTdotX_t], ref((double*&)pointers[s.d_XTdotX_inv_t]));
thread th2 = thread(invPipelineCUDA, newcols, ref(streams_val), cuSolverHandles[1], (double*)pointers[s.d_XTdotX_val], ref((double*&)pointers[s.d_XTdotX_inv_val]));
th1.join(); th2.join();
delete[] streams_t;
delete[] streams_val;
}
//Gemm to get coefs
cud(cudaStreamSynchronize(streams[0]));
cud(cudaStreamSynchronize(streams[1]));
cublas(cublasDgemm(cublasHandles[0], CUBLAS_OP_N, CUBLAS_OP_N, newcols, 1, newcols, &one,
(double*)pointers[s.d_XTdotX_inv_t], newcols, (double*)pointers[s.d_XTdotY_t], newcols, &zero, (double*)pointers[s.d_coefs_t], newcols));
cublas(cublasDgemm(cublasHandles[1], CUBLAS_OP_N, CUBLAS_OP_N, newcols, 1, newcols, &one,
(double*)pointers[s.d_XTdotX_inv_val], newcols, (double*)pointers[s.d_XTdotY_val], newcols, &zero, (double*)pointers[s.d_coefs_val], newcols));
//Gemm to infer Ys
cud(cudaStreamSynchronize(streams[0]));
cud(cudaStreamSynchronize(streams[1]));
cublas(cublasDgemm(cublasHandles[0], CUBLAS_OP_N, CUBLAS_OP_N, rows, 1, newcols, &one,
(double*)pointers[s.d_Xt_filtered], rows, (double*)pointers[s.d_coefs_t], rows, &zero, (double*)pointers[s.d_Y_hat_t], rows));
cublas(cublasDgemm(cublasHandles[1], CUBLAS_OP_N, CUBLAS_OP_N, rows, 1, newcols, &one,
(double*)pointers[s.d_Xval_filtered], rows, (double*)pointers[s.d_coefs_val], rows, &zero, (double*)pointers[s.d_Y_hat_val], rows));
//Get error
dim3 * rowDim = getOneDims(rows);
cud(cudaStreamSynchronize(streams[0]));
cud(cudaStreamSynchronize(streams[1]));
elementwise << < rowDim[0], rowDim[1], 0, streams[0] >> > ( (double*)pointers[s.d_Y_hat_t], Yt.values, rows, (double*)pointers[s.diff_t], diff<double>());
elementwise << < rowDim[0], rowDim[1], 0, streams[1] >> > ( (double*)pointers[s.d_Y_hat_val], Yval.values, rows, (double*)pointers[s.diff_val], diff<double>());
//Square the differences
cud(cudaStreamSynchronize(streams[0]));
cud(cudaStreamSynchronize(streams[1]));
elementwise << < rowDim[0], rowDim[1], 0, streams[0] >> > ( (double*)pointers[s.diff_t], (double*)pointers[s.diff_t], rows, (double*)pointers[s.d_diff_t_sq], product<double>());
elementwise << < rowDim[0], rowDim[1], 0, streams[1] >> > ( (double*)pointers[s.diff_val], (double*)pointers[s.diff_val], rows, (double*)pointers[s.d_diff_val_sq], product<double>());
//Reduce
cud(cudaStreamSynchronize(streams[0]));
cud(cudaStreamSynchronize(streams[1]));
reduce((double*)pointers[s.d_diff_t_sq], rows, sum<double>(), &d_num_t, streams[0]);
reduce((double*)pointers[s.d_diff_val_sq], rows, sum<double>(), &d_num_val, streams[1]);
}
void cudaPrecomputeDenominators(Matrix& Yt, Matrix Yval, double &denom_t, double &denom_val, cudaStream_t *streams)
{
size_t rows = Yt.rows;
dim3 * rowDim = getOneDims(rows);
double *d_denom_t, *d_denom_val;
double *d_Yt_sq, *d_Yval_sq;
cud(cudaMalloc(&d_Yt_sq, sizeof(double)*rows));
cud(cudaMalloc(&d_Yval_sq, sizeof(double)*rows));
cud(cudaStreamSynchronize(streams[0]));
cud(cudaStreamSynchronize(streams[1]));
elementwise << < rowDim[0], rowDim[1], 0, streams[0] >> > (Yt.values, Yt.values, rows, d_Yt_sq, product<double>());
elementwise << < rowDim[0], rowDim[1], 0, streams[1] >> > (Yval.values, Yval.values, rows, d_Yval_sq, product<double>());
cud(cudaStreamSynchronize(streams[0]));
cud(cudaStreamSynchronize(streams[1]));
cud(cudaMalloc((void **)&d_denom_t, sizeof(double)));
cud(cudaMalloc((void **)&d_denom_val, sizeof(double)));
reduce(d_Yt_sq, rows, sum<double>(), &d_denom_t, streams[0]);
reduce(d_Yval_sq, rows, sum<double>(), &d_denom_val, streams[1]);
cud(cudaStreamSynchronize(streams[0]));
cud(cudaStreamSynchronize(streams[1]));
cud(cudaMemcpy(&denom_t, d_denom_t, sizeof(double), cudaMemcpyDeviceToHost));
cud(cudaMemcpy(&denom_val, d_denom_val, sizeof(double), cudaMemcpyDeviceToHost));
cud(cudaFree(d_denom_t));
cud(cudaFree(d_denom_val));
}
double** allocateDeviceMemory(size_t rows, size_t cols)
{
double** pointers = (double**)malloc(sizeof(double*) * 23);
cud(cudaMalloc((void**)&pointers[s.d_XTdotX_inv_t], sizeof(double)*cols*cols));
cud(cudaMalloc((void**)&pointers[s.d_XTdotX_inv_val], sizeof(double)*cols*cols));
cud(cudaMalloc((void**)&pointers[s.d_remap], sizeof(int)*cols));
cud(cudaMalloc((void**)&pointers[s.d_Xt_filtered], sizeof(double)*cols*rows));
cud(cudaMalloc((void**)&pointers[s.d_Xval_filtered], sizeof(double)*cols*rows));
cud(cudaMalloc((void**)&pointers[s.d_XTdotX_t], sizeof(double)*cols*cols));
cud(cudaMalloc((void**)&pointers[s.d_XTdotX_val], sizeof(double)*cols*cols));
cud(cudaMalloc((void**)&pointers[s.d_XTdotY_t], sizeof(double)*cols));
cud(cudaMalloc((void**)&pointers[s.d_XTdotY_val], sizeof(double)*cols));
cud(cudaMalloc((void**)&pointers[s.d_matrices], 2 * sizeof(double *)));
cud(cudaMalloc((void**)&pointers[s.d_inverted], 2 * sizeof(double *)));
double ** matrices = new double*[2]{ pointers[s.d_XTdotX_t], pointers[s.d_XTdotX_val] };
double ** inverted = new double*[2]{ pointers[s.d_XTdotX_inv_t], pointers[s.d_XTdotX_inv_val] };
cud(cudaMemcpy(pointers[s.d_matrices], matrices, sizeof(double*) * 2, cudaMemcpyHostToDevice));
cud(cudaMemcpy(pointers[s.d_inverted], inverted, sizeof(double*) * 2, cudaMemcpyHostToDevice));
cud(cudaMalloc((void**)&pointers[s.d_piv], 2 * cols * sizeof(int)));
cud(cudaMalloc((void**)&pointers[s.d_info], 2 * sizeof(int)));
cud(cudaMalloc((void**)&pointers[s.d_coefs_t], sizeof(double)*cols));
cud(cudaMalloc((void**)&pointers[s.d_coefs_val], sizeof(double)*cols));
cud(cudaMalloc((void**)&pointers[s.d_Y_hat_t], sizeof(double)*rows));
cud(cudaMalloc((void**)&pointers[s.d_Y_hat_val], sizeof(double)*rows));
cud(cudaMalloc((void**)&pointers[s.diff_t], sizeof(double)*rows));
cud(cudaMalloc((void**)&pointers[s.diff_val], sizeof(double)*rows));
cud(cudaMalloc((void**)&pointers[s.d_diff_t_sq], sizeof(double)*rows));
cud(cudaMalloc((void**)&pointers[s.d_diff_val_sq], sizeof(double)*rows));
cud(cudaMalloc((void**)&pointers[s.d_Yt_sq], sizeof(double)*rows));
cud(cudaMalloc((void**)&pointers[s.d_Yval_sq], sizeof(double)*rows));
delete[] matrices;
delete[] inverted;
return pointers;
}
void deallocateDeviceMemory(void** allocations)
{
for (int i = 0; i < 23; ++i) cud(cudaFree(allocations[i]));
}
void createCUDAWorkspace(cudaStream_t * &streams, cusolverDnHandle_t * &cusdolverHandles, cublasHandle_t * &cublasHandles)
{
streams = (cudaStream_t*)malloc(sizeof(cudaStream_t) * 6);
cusdolverHandles = (cusolverDnHandle_t*)malloc(sizeof(cusolverDnHandle_t) * 2);
cublasHandles = (cublasHandle_t*)malloc(sizeof(cublasHandle_t) * 4);
for (int i = 0; i < 6; ++i) {
streams[i] = cudaStream_t();
cudaStreamCreate(&streams[i]);
if (i<4) {
cublasHandles[i] = cublasHandle_t();
cublasCreate_v2(&cublasHandles[i]);
cublasSetStream_v2(cublasHandles[i], streams[i]);
if (i<2) {
cusdolverHandles[i] = cusolverDnHandle_t();
cusolverDnCreate(&cusdolverHandles[i]);
cusolverDnSetStream(cusdolverHandles[i], streams[i]);
}
}
}
}
void destroyCUDAWorkspace(cudaStream_t * &streams, cusolverDnHandle_t * &cusdolverHandles, cublasHandle_t * &cublasHandles)
{
for (int i = 0; i < 6; ++i) {
if (i < 4) {
if (i < 2)
cusolverDnDestroy(cusdolverHandles[i]);
cublasDestroy_v2(cublasHandles[i]);
}
cudaStreamDestroy(streams[i]);
}
free(cublasHandles);
free(cusdolverHandles);
free(streams);
}
|
5ca17555090059e2ae677fd2645903a4cb01623e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <chrono>
/*1-20*/
#define BLOCK_WIDTH 2
#define BLOCK_SIZE 4
using namespace std;
/*
//BlockTranspose
__global__
void BlockTranspose(float *A_elements, int A_width, int A_height) {
__shared__ float blockA[BLOCK_WIDTH][BLOCK_WIDTH];
int baseIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
baseIdx += (blockIdx.y * BLOCK_SIZE + threadIdx.y) * A_width;
blockA[threadIdx.y][threadIdx.x] = A_elements[baseIdx];
A_elements[baseIdx] = blockA[threadIdx.x][threadIdx.y];
}
*/
/*BlockTranspose_Kernel*/
__global__
void BlockTranspose_Kernel(float *A_elements, int A_width, int A_height) {
__shared__ float blockA[BLOCK_WIDTH][BLOCK_WIDTH];
int baseIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
baseIdx += (blockIdx.y * BLOCK_SIZE + threadIdx.y) * A_width;
blockA[threadIdx.y][threadIdx.x] = A_elements[baseIdx];
__syncthreads();
A_elements[baseIdx] = blockA[threadIdx.x][threadIdx.y];
}
/*BlockTranspose_GPU*/
void BlockTranspose_GPU(float* h_A, int A_width, int A_height) {
int size = A_width * A_height * sizeof(float);
float *d_A;
hipMalloc(&d_A, size);
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
dim3 blockDim(BLOCK_WIDTH, BLOCK_WIDTH);
dim3 gridDim(A_width / blockDim.x, A_height / blockDim.y);
hipLaunchKernelGGL(( BlockTranspose_Kernel) , dim3(gridDim), dim3(blockDim) , 0, 0, d_A, A_width, A_height);
hipMemcpy(h_A, d_A, size, hipMemcpyDeviceToHost);
hipFree(d_A);
}
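//Hedged note on the kernel launched above (my reading, not from the original source):
//the kernel computes baseIdx with BLOCK_SIZE (4) while each block is only
//BLOCK_WIDTH x BLOCK_WIDTH (2 x 2) threads, so the in-place transpose is only well
//defined when BLOCK_SIZE == BLOCK_WIDTH; otherwise blocks stride past the tiles they
//load and can index outside the matrix. A consistent indexing sketch would use the
//actual block dimensions:
//
//  int baseIdx = blockIdx.x * blockDim.x + threadIdx.x
//              + (blockIdx.y * blockDim.y + threadIdx.y) * A_width;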
int main() {
//Host
float *h_A;
int A_width = 8;
int A_height = 8;
h_A = (float*)malloc(A_width*A_height * sizeof(float));
//Create
for (int i = 0; i < A_width*A_height; i++) {
h_A[i] = i + 1.0f;
}
//Print BlockTranspose
for (int i = 0; i < A_height; i++) {
for (int j = 0; j < A_width; j++) {
cout << h_A[i*A_width + j] << " ";
}
cout << endl;
}
cout << endl;
//BlockTranspose (Main)
chrono::time_point<chrono::system_clock> BlockTranspose_GPU_Start, BlockTranspose_GPU_End;
BlockTranspose_GPU_Start = chrono::system_clock::now();
BlockTranspose_GPU(h_A, A_width, A_height);
BlockTranspose_GPU_End = chrono::system_clock::now();
cout << "BlockTranspose_GPU: " << chrono::duration_cast<chrono::nanoseconds>(BlockTranspose_GPU_End - BlockTranspose_GPU_Start).count() << "ns." << endl;
//Print BlockTranspose
for (int i = 0; i < A_height; i++) {
for (int j = 0; j < A_width; j++) {
cout << h_A[i*A_width + j] << " ";
}
cout << endl;
}
cout << endl;
//Free
free(h_A);
return 0;
}
|
5ca17555090059e2ae677fd2645903a4cb01623e.cu
|
#include <cuda.h>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <chrono>
/*1-20*/
#define BLOCK_WIDTH 2
#define BLOCK_SIZE 4
using namespace std;
/*
//BlockTranspose
__global__
void BlockTranspose(float *A_elements, int A_width, int A_height) {
__shared__ float blockA[BLOCK_WIDTH][BLOCK_WIDTH];
int baseIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
baseIdx += (blockIdx.y * BLOCK_SIZE + threadIdx.y) * A_width;
blockA[threadIdx.y][threadIdx.x] = A_elements[baseIdx];
A_elements[baseIdx] = blockA[threadIdx.x][threadIdx.y];
}
*/
/*BlockTranspose_Kernel*/
__global__
void BlockTranspose_Kernel(float *A_elements, int A_width, int A_height) {
__shared__ float blockA[BLOCK_WIDTH][BLOCK_WIDTH];
int baseIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
baseIdx += (blockIdx.y * BLOCK_SIZE + threadIdx.y) * A_width;
blockA[threadIdx.y][threadIdx.x] = A_elements[baseIdx];
__syncthreads();
A_elements[baseIdx] = blockA[threadIdx.x][threadIdx.y];
}
/*BlockTranspose_GPU*/
void BlockTranspose_GPU(float* h_A, int A_width, int A_height) {
int size = A_width * A_height * sizeof(float);
float *d_A;
cudaMalloc(&d_A, size);
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
dim3 blockDim(BLOCK_WIDTH, BLOCK_WIDTH);
dim3 gridDim(A_width / blockDim.x, A_height / blockDim.y);
BlockTranspose_Kernel <<< gridDim, blockDim >>> (d_A, A_width, A_height);
cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
cudaFree(d_A);
}
int main() {
//Host
float *h_A;
int A_width = 8;
int A_height = 8;
h_A = (float*)malloc(A_width*A_height * sizeof(float));
//Create
for (int i = 0; i < A_width*A_height; i++) {
h_A[i] = i + 1.0f;
}
//Print BlockTranspose
for (int i = 0; i < A_height; i++) {
for (int j = 0; j < A_width; j++) {
cout << h_A[i*A_width + j] << " ";
}
cout << endl;
}
cout << endl;
//BlockTranspose (Main)
chrono::time_point<chrono::system_clock> BlockTranspose_GPU_Start, BlockTranspose_GPU_End;
BlockTranspose_GPU_Start = chrono::system_clock::now();
BlockTranspose_GPU(h_A, A_width, A_height);
BlockTranspose_GPU_End = chrono::system_clock::now();
cout << "BlockTranspose_GPU: " << chrono::duration_cast<chrono::nanoseconds>(BlockTranspose_GPU_End - BlockTranspose_GPU_Start).count() << "ns." << endl;
//Print BlockTranspose
for (int i = 0; i < A_height; i++) {
for (int j = 0; j < A_width; j++) {
cout << h_A[i*A_width + j] << " ";
}
cout << endl;
}
cout << endl;
//Free
free(h_A);
return 0;
}
|
16ecb0e41e92e4d1c3bf927e148bf6ef3a34066a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <device_launch_parameters.h>
constexpr auto PI = 3.14f;
//fill two matrices with data
__global__ void fill_array2D(float *a, float *b, int N, int M)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < N && col < M)
{
a[row*N + col] = powf(sinf(2 * PI * row / N), 2) + powf(cosf(2 * PI * col / M), 2); //any expression we need can go after the equals sign
b[row*N + col] = powf(cosf(2 * PI * row / N), 2) + powf(sinf(2 * PI * col / M), 2); //any expression we need can go after the equals sign
}
}
//fill two vectors with data
__global__ void fill_array1D(float *a,float*b, int N, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int row = idx / N;
int col = idx % N;
if (row < N && col < M)
{
a[row*N + col] = powf(sinf(2 * PI*row/N), 2) + powf(cosf(2 * PI*col/N), 2);
b[row*N + col] = powf(cosf(2 * PI*row/M), 2) + powf(sinf(2 * PI*col/M), 2);
}
}
//sum of two vectors into vector c
__global__ void sum_vectors1D(float *a, float *b, float *c, int N, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int row = idx/N;
int col = idx%N;
if (row < N && col < M)
{
c[row*N + col] = a[row*N + col] + b[row*N + col];
}
}
//sum of two matrices into matrix c
__global__ void sum_vectors2D(float *a, float *b, float *c, int N,int M)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < N && col < M)
{
c[row*N + col] = a[row*N + col]+ b[row*N + col];
}
}
int main()
{
float *a_h, *a_d, *b_h, *b_d, *c_h, *c_d;
const int N = 512;
const int M = 512;
size_t size = N * M * sizeof(float);
//host allocation
a_h = (float*)malloc(size);
b_h = (float*)malloc(size);
c_h = (float*)malloc(size);
//device allocation
hipMalloc((void**)&a_d, size);
hipMalloc((void**)&b_d, size);
hipMalloc((void**)&c_d, size);
//grid and thread dimensions
dim3 grid2D(16,16,1);
dim3 threads2D(32,32,1);
dim3 grid1D(512, 1, 1);
dim3 threads1D(512, 1, 1);
//fill arrays
hipLaunchKernelGGL(( fill_array2D) , dim3(grid2D), dim3(threads2D) , 0, 0, a_d, b_d,N, M);
hipLaunchKernelGGL(( sum_vectors2D) , dim3(grid2D), dim3(threads2D) , 0, 0, a_d, b_d, c_d, N, M);
hipLaunchKernelGGL(( fill_array1D) , dim3(grid1D), dim3(threads1D) , 0, 0, a_d, b_d, N, M);
hipLaunchKernelGGL(( sum_vectors1D) , dim3(grid1D), dim3(threads1D) , 0, 0, a_d, b_d, c_d, N, M);
//copy device data to host
hipMemcpy(a_h, a_d, size, hipMemcpyDeviceToHost);
hipMemcpy(b_h, b_d, size, hipMemcpyDeviceToHost);
hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost);
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < M; ++j)
{
std::cout << c_h[i*N + j]<<" ";
}
std::cout << std::endl;
}
//cuda cleanup
free(a_h);
free(b_h);
free(c_h);
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
return 0;
}
|
16ecb0e41e92e4d1c3bf927e148bf6ef3a34066a.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <device_launch_parameters.h>
constexpr auto PI = 3.14f;
//fill two matrices with data
__global__ void fill_array2D(float *a, float *b, int N, int M)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < N && col < M)
{
a[row*N + col] = powf(sinf(2 * PI * row / N), 2) + powf(cosf(2 * PI * col / M), 2); //any expression we need can go after the equals sign
b[row*N + col] = powf(cosf(2 * PI * row / N), 2) + powf(sinf(2 * PI * col / M), 2); //any expression we need can go after the equals sign
}
}
//fill two vectors with data
__global__ void fill_array1D(float *a,float*b, int N, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int row = idx / N;
int col = idx % N;
if (row < N && col < M)
{
a[row*N + col] = powf(sinf(2 * PI*row/N), 2) + powf(cosf(2 * PI*col/N), 2);
b[row*N + col] = powf(cosf(2 * PI*row/M), 2) + powf(sinf(2 * PI*col/M), 2);
}
}
//sum of two vectors into vector c
__global__ void sum_vectors1D(float *a, float *b, float *c, int N, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int row = idx/N;
int col = idx%N;
if (row < N && col < M)
{
c[row*N + col] = a[row*N + col] + b[row*N + col];
}
}
//sum of two matrices into matrix c
__global__ void sum_vectors2D(float *a, float *b, float *c, int N,int M)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < N && col < M)
{
c[row*N + col] = a[row*N + col]+ b[row*N + col];
}
}
int main()
{
float *a_h, *a_d, *b_h, *b_d, *c_h, *c_d;
const int N = 512;
const int M = 512;
size_t size = N * M * sizeof(float);
//host allocation
a_h = (float*)malloc(size);
b_h = (float*)malloc(size);
c_h = (float*)malloc(size);
//device allocation
cudaMalloc((void**)&a_d, size);
cudaMalloc((void**)&b_d, size);
cudaMalloc((void**)&c_d, size);
//grid and thread dimensions
dim3 grid2D(16,16,1);
dim3 threads2D(32,32,1);
dim3 grid1D(512, 1, 1);
dim3 threads1D(512, 1, 1);
//fill arrays
fill_array2D <<< grid2D, threads2D >>> (a_d, b_d,N, M);
sum_vectors2D <<< grid2D, threads2D >>> (a_d, b_d, c_d, N, M);
fill_array1D <<< grid1D, threads1D >>> (a_d, b_d, N, M);
sum_vectors1D <<< grid1D, threads1D >>> (a_d, b_d, c_d, N, M);
//copy device data to host
cudaMemcpy(a_h, a_d, size, cudaMemcpyDeviceToHost);
cudaMemcpy(b_h, b_d, size, cudaMemcpyDeviceToHost);
cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < M; ++j)
{
std::cout << c_h[i*N + j]<<" ";
}
std::cout << std::endl;
}
//cuda cleanup
free(a_h);
free(b_h);
free(c_h);
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
return 0;
}
|
c5c944c353305a2288ec372b8bc7644b7e288584.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <c10/hip/HIPException.h>
#include <ATen/ATen.h>
__global__ void tanh_add_kernel(
const float* __restrict__ x,
const float* __restrict__ y,
float* __restrict__ output,
const int size) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
const float tanh_x = 2.0f / (1.0f + __expf(-2.0f * x[index])) - 1;
const float tanh_y = 2.0f / (1.0f + __expf(-2.0f * y[index])) - 1;
output[index] = tanh_x + tanh_y;
}
}
void tanh_add_cuda(const float* x, const float* y, float* output, int size) {
const int threads = 1024;
const int blocks = (size + threads - 1) / threads;
hipLaunchKernelGGL(( tanh_add_kernel), dim3(blocks), dim3(threads), 0, 0, x, y, output, size);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
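//Hedged usage sketch (names are assumptions; this file only exposes the launcher): the
//three pointers are expected to be device pointers of length `size`, e.g. taken from
//contiguous float32 CUDA tensors:
//
//  at::Tensor out = at::empty_like(x);   // x, y: float32, CUDA, contiguous
//  tanh_add_cuda(x.data_ptr<float>(), y.data_ptr<float>(),
//                out.data_ptr<float>(), static_cast<int>(x.numel()));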
|
c5c944c353305a2288ec372b8bc7644b7e288584.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <c10/cuda/CUDAException.h>
#include <ATen/ATen.h>
__global__ void tanh_add_kernel(
const float* __restrict__ x,
const float* __restrict__ y,
float* __restrict__ output,
const int size) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
const float tanh_x = 2.0f / (1.0f + __expf(-2.0f * x[index])) - 1;
const float tanh_y = 2.0f / (1.0f + __expf(-2.0f * y[index])) - 1;
output[index] = tanh_x + tanh_y;
}
}
void tanh_add_cuda(const float* x, const float* y, float* output, int size) {
const int threads = 1024;
const int blocks = (size + threads - 1) / threads;
tanh_add_kernel<<<blocks, threads>>>(x, y, output, size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
|
9201790119c030519a8f7c70ca559fc9ba83f1e7.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cmath>
#include "paddle/phi/kernels/funcs/fft.h"
#include "paddle/phi/kernels/funcs/fft_cache.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/kernels/assign_kernel.h"
#include "paddle/phi/kernels/complex_kernel.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/scale_kernel.h"
#include "paddle/phi/kernels/transpose_kernel.h"
namespace phi {
namespace funcs {
namespace detail {
// Use the optimized path to perform single R2C or C2R if transformation dim is
// supported by cuFFT
static bool use_optimized_fft_path(const std::vector<int64_t>& axes) {
// For performance reason, when axes starts with (0, 1), do not use the
// optimized path.
if (axes.size() > kMaxFFTNdim ||
(axes.size() >= 2 && axes[0] == 0 && axes[1] == 1)) {
return false;
} else {
return true;
}
}
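// Hedged worked example (assuming kMaxFFTNdim is 3, the usual cuFFT/hipFFT limit):
// axes = {2} or {1, 2} take the optimized single-transform path, while axes = {0, 1}
// or any request with more than kMaxFFTNdim axes falls back to the generic path.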
static double fft_normalization_scale(FFTNormMode normalization,
const std::vector<int64_t>& sizes,
const std::vector<int64_t>& dims) {
// auto norm = static_cast<fft_norm_mode>(normalization);
if (normalization == FFTNormMode::none) {
return static_cast<double>(1.0);
}
int64_t signal_numel = 1;
for (auto dim : dims) {
signal_numel *= sizes[dim];
}
const double scale_denom = (normalization == FFTNormMode::by_sqrt_n)
? std::sqrt(signal_numel)
: static_cast<double>(signal_numel);
return static_cast<double>(1.0 / scale_denom);
}
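// Hedged worked example: for dims = {1, 2} of a tensor with sizes = {8, 4, 16},
// signal_numel = 4 * 16 = 64, so the scale is 1 for FFTNormMode::none,
// 1 / sqrt(64) = 1/8 for FFTNormMode::by_sqrt_n, and 1/64 for the remaining
// (by-n) mode handled by the final branch.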
template <typename T>
void exec_normalization(const phi::GPUContext& ctx,
const DenseTensor& in,
DenseTensor* out,
FFTNormMode normalization,
const std::vector<int64_t>& sizes,
const std::vector<int64_t>& axes) {
const double scale = fft_normalization_scale(normalization, sizes, axes);
if (scale != 1.0) {
ScaleKernel<T, phi::GPUContext>(ctx, in, scale, 0, true, out);
} else {
AssignKernel<phi::GPUContext>(ctx, in, out);
}
}
bool has_large_prime_factor(int64_t n) {
constexpr int64_t first_large_prime = 11;
const std::array<int64_t, 4> prime_radices{{2, 3, 5, 7}};
for (auto prime : prime_radices) {
if (n < first_large_prime) {
return false;
}
while (n % prime == 0) {
n /= prime;
}
}
return n != 1;
}
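// Hedged worked examples: 120 = 2^3 * 3 * 5 has no prime factor >= 11, so the function
// returns false; 66 = 2 * 3 * 11 leaves 11 after dividing out 2 and 3, so it returns
// true; any n below first_large_prime (11) returns false from the early-out.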
#if defined(PADDLE_WITH_CUDA)
inline bool use_cache(const int64_t* signal_size) {
bool using_cache = true;
int cufft_version;
phi::dynload::hipfftGetVersion(&cufft_version);
if (10300 <= cufft_version && cufft_version <= 10400) {
using_cache = std::none_of(
signal_size + 1, signal_size + kMaxDataNdim, [](int64_t dim_size) {
return has_large_prime_factor(dim_size);
});
}
return using_cache;
}
#elif defined(PADDLE_WITH_HIP)
inline bool use_cache(const int64_t* signal_size) { return true; }
#endif
// up to 3d unnormalized fft transform (c2r, r2c, c2c)
template <typename Ti, typename To>
void exec_fft(const phi::GPUContext& ctx,
const DenseTensor& x,
DenseTensor* out,
const std::vector<int64_t>& axes,
bool forward) {
const phi::DDim& in_sizes = x.dims();
const int ndim = in_sizes.size();
const int signal_ndim = axes.size();
const int batch_ndim = ndim - signal_ndim;
const phi::DDim& out_sizes = out->dims();
// make a dim permutation
std::vector<int> dim_permute(ndim);
std::iota(dim_permute.begin(), dim_permute.end(), 0);
std::vector<bool> is_transformed_dim(ndim, false);
for (const auto& d : axes) {
is_transformed_dim[d] = true;
}
const auto batch_end =
std::partition(dim_permute.begin(), dim_permute.end(), [&](size_t axis) {
return !is_transformed_dim[axis];
});
std::copy(axes.cbegin(), axes.cend(), batch_end);
// transpose input according to the permutation
DenseTensor transposed_input =
Transpose<Ti, phi::GPUContext>(ctx, x, dim_permute);
const phi::DDim transposed_input_shape = transposed_input.dims();
// batch size
int64_t batch_size = 1L;
for (int i = 0; i < batch_ndim; i++) {
batch_size *= transposed_input_shape[i];
}
// make a collapsed input: collapse batch axes for input
std::vector<int64_t> collapsed_input_shape_;
collapsed_input_shape_.reserve(1 + signal_ndim);
collapsed_input_shape_.emplace_back(batch_size);
for (int i = 0; i < signal_ndim; i++) {
collapsed_input_shape_.push_back(in_sizes[axes[i]]);
}
phi::DDim collapsed_input_shape = phi::make_ddim(collapsed_input_shape_);
transposed_input.Resize(collapsed_input_shape);
DenseTensor& collapsed_input = transposed_input;
// make a collapsed output
phi::DDim transposed_output_shape = out_sizes.transpose(dim_permute);
std::vector<int64_t> collapsed_output_shape_;
collapsed_output_shape_.reserve(1 + signal_ndim);
collapsed_output_shape_.emplace_back(batch_size);
for (int i = 0; i < signal_ndim; i++) {
collapsed_output_shape_.push_back(out_sizes[axes[i]]);
}
phi::DDim collapsed_output_shape = phi::make_ddim(collapsed_output_shape_);
DenseTensor collapsed_output;
collapsed_output.Resize(collapsed_output_shape);
ctx.Alloc<To>(&collapsed_output);
FFTConfigKey key =
create_fft_configkey(collapsed_input, collapsed_output, signal_ndim);
int64_t device_id = ctx.GetPlace().GetDeviceId();
FFTConfig* config = nullptr;
std::unique_ptr<FFTConfig> config_ = nullptr;
bool using_cache = use_cache(key.sizes_);
if (using_cache) {
FFTConfigCache& plan_cache = get_fft_plan_cache(device_id);
std::unique_lock<std::mutex> guard(plan_cache.mutex, std::defer_lock);
guard.lock();
config = &(plan_cache.lookup(key));
} else {
config_ = std::make_unique<FFTConfig>(key);
config = config_.get();
}
const int64_t workspace_size = static_cast<int64_t>(config->workspace_size());
DenseTensor workspace_tensor = Empty<uint8_t>(ctx, {workspace_size});
// prepare cufft for execution
#if defined(PADDLE_WITH_CUDA)
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::hipfftSetStream(config->plan(), ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::hipfftSetWorkArea(config->plan(), workspace_tensor.data()));
#elif defined(PADDLE_WITH_HIP)
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::hipfftSetStream(config->plan(), ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::hipfftSetWorkArea(config->plan(), workspace_tensor.data()));
#endif
// execution of fft plan
const FFTTransformType fft_type = config->transform_type();
if (fft_type == FFTTransformType::C2R && forward) {
ConjKernel<Ti, phi::GPUContext>(ctx, collapsed_input, &collapsed_input);
exec_plan(*config, collapsed_input.data(), collapsed_output.data(), false);
} else if (fft_type == FFTTransformType::R2C && !forward) {
exec_plan(*config, collapsed_input.data(), collapsed_output.data(), true);
ConjKernel<To, phi::GPUContext>(ctx, collapsed_output, &collapsed_output);
} else {
exec_plan(
*config, collapsed_input.data(), collapsed_output.data(), forward);
}
// resize for the collapsed output
collapsed_output.Resize(transposed_output_shape);
phi::DenseTensor& transposed_output = collapsed_output;
// reverse the transposition
std::vector<int> reverse_dim_permute(ndim);
for (int i = 0; i < ndim; i++) {
reverse_dim_permute[dim_permute[i]] = i;
}
TransposeKernel<To, phi::GPUContext>(
ctx, transposed_output, reverse_dim_permute, out);
}
} // namespace detail
template <typename Ti, typename To>
struct FFTC2CFunctor<phi::GPUContext, Ti, To> {
void operator()(const phi::GPUContext& ctx,
const DenseTensor& x,
DenseTensor* out,
const std::vector<int64_t>& axes,
FFTNormMode normalization,
bool forward) {
if (axes.empty()) {
AssignKernel<phi::GPUContext>(ctx, x, out);
return;
}
std::vector<int64_t> working_axes = axes;
std::sort(working_axes.begin(), working_axes.end());
std::vector<int64_t> first_dims;
size_t max_dims;
DenseTensor working_tensor = x; // shallow copy
while (true) {
max_dims = ::min(static_cast<size_t>(detail::kMaxFFTNdim),
working_axes.size());
first_dims.assign(working_axes.end() - max_dims, working_axes.end());
detail::exec_fft<Ti, To>(ctx, working_tensor, out, first_dims, forward);
working_axes.resize(working_axes.size() - max_dims);
first_dims.clear();
if (working_axes.empty()) {
break;
}
if (working_tensor.IsSharedWith(x)) {
working_tensor = std::move(*out);
*out = EmptyLike<Ti>(ctx, x);
} else {
std::swap(*out, working_tensor);
}
}
std::vector<int64_t> out_dims = phi::vectorize(x.dims());
detail::exec_normalization<To>(
ctx, *out, out, normalization, out_dims, axes);
}
};
template <typename Ti, typename To>
struct FFTC2RFunctor<phi::GPUContext, Ti, To> {
void operator()(const phi::GPUContext& ctx,
const DenseTensor& x,
DenseTensor* out,
const std::vector<int64_t>& axes,
FFTNormMode normalization,
bool forward) {
std::vector<int64_t> out_dims = phi::vectorize(out->dims());
if (detail::use_optimized_fft_path(axes)) {
DenseTensor x_copy = Assign(ctx, x);
detail::exec_fft<Ti, To>(ctx, x_copy, out, axes, forward);
} else {
DenseTensor c2c_result = EmptyLike<Ti, phi::GPUContext>(ctx, x);
FFTC2CFunctor<phi::GPUContext, Ti, Ti> c2c_functor;
c2c_functor(ctx,
x,
&c2c_result,
{axes.begin(), axes.end() - 1},
FFTNormMode::none,
forward);
detail::exec_fft<Ti, To>(ctx, c2c_result, out, {axes.back()}, forward);
}
detail::exec_normalization<To>(
ctx, *out, out, normalization, out_dims, axes);
}
};
template <typename Ti, typename To>
struct FFTR2CFunctor<phi::GPUContext, Ti, To> {
void operator()(const phi::GPUContext& ctx,
const DenseTensor& x,
DenseTensor* out,
const std::vector<int64_t>& axes,
FFTNormMode normalization,
bool forward) {
if (detail::use_optimized_fft_path(axes)) {
detail::exec_fft<Ti, To>(ctx, x, out, axes, forward);
} else {
DenseTensor r2c_result = EmptyLike<To, phi::GPUContext>(ctx, *out);
detail::exec_fft<Ti, To>(ctx, x, &r2c_result, {axes.back()}, forward);
FFTC2CFunctor<phi::GPUContext, To, To> fft_c2c_func;
fft_c2c_func(ctx,
r2c_result,
out,
{axes.begin(), axes.end() - 1},
FFTNormMode::none,
forward);
}
const auto in_dims = phi::vectorize(x.dims());
detail::exec_normalization<To>(
ctx, *out, out, normalization, in_dims, axes);
}
};
using complex64_t = phi::dtype::complex<float>;
using complex128_t = phi::dtype::complex<double>;
template struct FFTC2CFunctor<phi::GPUContext, complex64_t, complex64_t>;
template struct FFTC2CFunctor<phi::GPUContext, complex128_t, complex128_t>;
template struct FFTC2RFunctor<phi::GPUContext, complex64_t, float>;
template struct FFTC2RFunctor<phi::GPUContext, complex128_t, double>;
template struct FFTR2CFunctor<phi::GPUContext, float, complex64_t>;
template struct FFTR2CFunctor<phi::GPUContext, double, complex128_t>;
} // namespace funcs
} // namespace phi
|
9201790119c030519a8f7c70ca559fc9ba83f1e7.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cmath>
#include "paddle/phi/kernels/funcs/fft.h"
#include "paddle/phi/kernels/funcs/fft_cache.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/kernels/assign_kernel.h"
#include "paddle/phi/kernels/complex_kernel.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/scale_kernel.h"
#include "paddle/phi/kernels/transpose_kernel.h"
namespace phi {
namespace funcs {
namespace detail {
// Use the optimized path to perform single R2C or C2R if transformation dim is
// supported by cuFFT
static bool use_optimized_fft_path(const std::vector<int64_t>& axes) {
// For performance reasons, when the axes start with (0, 1), do not use the
// optimized path.
if (axes.size() > kMaxFFTNdim ||
(axes.size() >= 2 && axes[0] == 0 && axes[1] == 1)) {
return false;
} else {
return true;
}
}
static double fft_normalization_scale(FFTNormMode normalization,
const std::vector<int64_t>& sizes,
const std::vector<int64_t>& dims) {
// auto norm = static_cast<fft_norm_mode>(normalization);
if (normalization == FFTNormMode::none) {
return static_cast<double>(1.0);
}
int64_t signal_numel = 1;
for (auto dim : dims) {
signal_numel *= sizes[dim];
}
const double scale_denom = (normalization == FFTNormMode::by_sqrt_n)
? std::sqrt(signal_numel)
: static_cast<double>(signal_numel);
return static_cast<double>(1.0 / scale_denom);
}
template <typename T>
void exec_normalization(const phi::GPUContext& ctx,
const DenseTensor& in,
DenseTensor* out,
FFTNormMode normalization,
const std::vector<int64_t>& sizes,
const std::vector<int64_t>& axes) {
const double scale = fft_normalization_scale(normalization, sizes, axes);
if (scale != 1.0) {
ScaleKernel<T, phi::GPUContext>(ctx, in, scale, 0, true, out);
} else {
AssignKernel<phi::GPUContext>(ctx, in, out);
}
}
bool has_large_prime_factor(int64_t n) {
constexpr int64_t first_large_prime = 11;
const std::array<int64_t, 4> prime_radices{{2, 3, 5, 7}};
for (auto prime : prime_radices) {
if (n < first_large_prime) {
return false;
}
while (n % prime == 0) {
n /= prime;
}
}
return n != 1;
}
#if defined(PADDLE_WITH_CUDA)
inline bool use_cache(const int64_t* signal_size) {
bool using_cache = true;
int cufft_version;
phi::dynload::cufftGetVersion(&cufft_version);
if (10300 <= cufft_version && cufft_version <= 10400) {
using_cache = std::none_of(
signal_size + 1, signal_size + kMaxDataNdim, [](int64_t dim_size) {
return has_large_prime_factor(dim_size);
});
}
return using_cache;
}
#elif defined(PADDLE_WITH_HIP)
inline bool use_cache(const int64_t* signal_size) { return true; }
#endif
// up to 3d unnormalized fft transform (c2r, r2c, c2c)
template <typename Ti, typename To>
void exec_fft(const phi::GPUContext& ctx,
const DenseTensor& x,
DenseTensor* out,
const std::vector<int64_t>& axes,
bool forward) {
const phi::DDim& in_sizes = x.dims();
const int ndim = in_sizes.size();
const int signal_ndim = axes.size();
const int batch_ndim = ndim - signal_ndim;
const phi::DDim& out_sizes = out->dims();
// make a dim permutation
std::vector<int> dim_permute(ndim);
std::iota(dim_permute.begin(), dim_permute.end(), 0);
std::vector<bool> is_transformed_dim(ndim, false);
for (const auto& d : axes) {
is_transformed_dim[d] = true;
}
const auto batch_end =
std::partition(dim_permute.begin(), dim_permute.end(), [&](size_t axis) {
return !is_transformed_dim[axis];
});
std::copy(axes.cbegin(), axes.cend(), batch_end);
// transpose input according to the permutation
DenseTensor transposed_input =
Transpose<Ti, phi::GPUContext>(ctx, x, dim_permute);
const phi::DDim transposed_input_shape = transposed_input.dims();
// batch size
int64_t batch_size = 1L;
for (int i = 0; i < batch_ndim; i++) {
batch_size *= transposed_input_shape[i];
}
// make a collapsed input: collapse batch axes for input
std::vector<int64_t> collapsed_input_shape_;
collapsed_input_shape_.reserve(1 + signal_ndim);
collapsed_input_shape_.emplace_back(batch_size);
for (int i = 0; i < signal_ndim; i++) {
collapsed_input_shape_.push_back(in_sizes[axes[i]]);
}
phi::DDim collapsed_input_shape = phi::make_ddim(collapsed_input_shape_);
transposed_input.Resize(collapsed_input_shape);
DenseTensor& collapsed_input = transposed_input;
// make a collapsed output
phi::DDim transposed_output_shape = out_sizes.transpose(dim_permute);
std::vector<int64_t> collapsed_output_shape_;
collapsed_output_shape_.reserve(1 + signal_ndim);
collapsed_output_shape_.emplace_back(batch_size);
for (int i = 0; i < signal_ndim; i++) {
collapsed_output_shape_.push_back(out_sizes[axes[i]]);
}
phi::DDim collapsed_output_shape = phi::make_ddim(collapsed_output_shape_);
DenseTensor collapsed_output;
collapsed_output.Resize(collapsed_output_shape);
ctx.Alloc<To>(&collapsed_output);
FFTConfigKey key =
create_fft_configkey(collapsed_input, collapsed_output, signal_ndim);
int64_t device_id = ctx.GetPlace().GetDeviceId();
FFTConfig* config = nullptr;
std::unique_ptr<FFTConfig> config_ = nullptr;
bool using_cache = use_cache(key.sizes_);
if (using_cache) {
FFTConfigCache& plan_cache = get_fft_plan_cache(device_id);
std::unique_lock<std::mutex> guard(plan_cache.mutex, std::defer_lock);
guard.lock();
config = &(plan_cache.lookup(key));
} else {
config_ = std::make_unique<FFTConfig>(key);
config = config_.get();
}
const int64_t workspace_size = static_cast<int64_t>(config->workspace_size());
DenseTensor workspace_tensor = Empty<uint8_t>(ctx, {workspace_size});
// prepare cufft for execution
#if defined(PADDLE_WITH_CUDA)
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cufftSetStream(config->plan(), ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cufftSetWorkArea(config->plan(), workspace_tensor.data()));
#elif defined(PADDLE_WITH_HIP)
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::hipfftSetStream(config->plan(), ctx.stream()));
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::hipfftSetWorkArea(config->plan(), workspace_tensor.data()));
#endif
// execution of fft plan
const FFTTransformType fft_type = config->transform_type();
if (fft_type == FFTTransformType::C2R && forward) {
ConjKernel<Ti, phi::GPUContext>(ctx, collapsed_input, &collapsed_input);
exec_plan(*config, collapsed_input.data(), collapsed_output.data(), false);
} else if (fft_type == FFTTransformType::R2C && !forward) {
exec_plan(*config, collapsed_input.data(), collapsed_output.data(), true);
ConjKernel<To, phi::GPUContext>(ctx, collapsed_output, &collapsed_output);
} else {
exec_plan(
*config, collapsed_input.data(), collapsed_output.data(), forward);
}
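// cuFFT/hipFFT only provide R2C as a forward transform and C2R as an inverse
// one, so the remaining directions are synthesized through conjugate symmetry:
// a forward C2R conjugates the input before running the plan, and an inverse
// R2C runs the plan and then conjugates the output.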
// resize for the collapsed output
collapsed_output.Resize(transposed_output_shape);
phi::DenseTensor& transposed_output = collapsed_output;
// reverse the transposition
std::vector<int> reverse_dim_permute(ndim);
for (int i = 0; i < ndim; i++) {
reverse_dim_permute[dim_permute[i]] = i;
}
TransposeKernel<To, phi::GPUContext>(
ctx, transposed_output, reverse_dim_permute, out);
}
} // namespace detail
template <typename Ti, typename To>
struct FFTC2CFunctor<phi::GPUContext, Ti, To> {
void operator()(const phi::GPUContext& ctx,
const DenseTensor& x,
DenseTensor* out,
const std::vector<int64_t>& axes,
FFTNormMode normalization,
bool forward) {
if (axes.empty()) {
AssignKernel<phi::GPUContext>(ctx, x, out);
return;
}
std::vector<int64_t> working_axes = axes;
std::sort(working_axes.begin(), working_axes.end());
std::vector<int64_t> first_dims;
size_t max_dims;
DenseTensor working_tensor = x; // shallow copy
while (true) {
max_dims = std::min(static_cast<size_t>(detail::kMaxFFTNdim),
working_axes.size());
first_dims.assign(working_axes.end() - max_dims, working_axes.end());
detail::exec_fft<Ti, To>(ctx, working_tensor, out, first_dims, forward);
working_axes.resize(working_axes.size() - max_dims);
first_dims.clear();
if (working_axes.empty()) {
break;
}
if (working_tensor.IsSharedWith(x)) {
working_tensor = std::move(*out);
*out = EmptyLike<Ti>(ctx, x);
} else {
std::swap(*out, working_tensor);
}
}
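// A single plan handles at most kMaxFFTNdim signal axes (3 for cuFFT/hipFFT),
// so higher-dimensional transforms are applied to the trailing axes in chunks,
// ping-ponging between *out and a scratch tensor between passes.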
std::vector<int64_t> out_dims = phi::vectorize(x.dims());
detail::exec_normalization<To>(
ctx, *out, out, normalization, out_dims, axes);
}
};
template <typename Ti, typename To>
struct FFTC2RFunctor<phi::GPUContext, Ti, To> {
void operator()(const phi::GPUContext& ctx,
const DenseTensor& x,
DenseTensor* out,
const std::vector<int64_t>& axes,
FFTNormMode normalization,
bool forward) {
std::vector<int64_t> out_dims = phi::vectorize(out->dims());
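// Optimized path: a single C2R plan covers all axes (x is copied first,
// presumably because the forward C2R branch of exec_fft conjugates its input
// in place). Fallback: an unnormalized C2C transform over the leading axes
// followed by C2R over the last axis only.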
if (detail::use_optimized_fft_path(axes)) {
DenseTensor x_copy = Assign(ctx, x);
detail::exec_fft<Ti, To>(ctx, x_copy, out, axes, forward);
} else {
DenseTensor c2c_result = EmptyLike<Ti, phi::GPUContext>(ctx, x);
FFTC2CFunctor<phi::GPUContext, Ti, Ti> c2c_functor;
c2c_functor(ctx,
x,
&c2c_result,
{axes.begin(), axes.end() - 1},
FFTNormMode::none,
forward);
detail::exec_fft<Ti, To>(ctx, c2c_result, out, {axes.back()}, forward);
}
detail::exec_normalization<To>(
ctx, *out, out, normalization, out_dims, axes);
}
};
template <typename Ti, typename To>
struct FFTR2CFunctor<phi::GPUContext, Ti, To> {
void operator()(const phi::GPUContext& ctx,
const DenseTensor& x,
DenseTensor* out,
const std::vector<int64_t>& axes,
FFTNormMode normalization,
bool forward) {
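// Optimized path: one R2C plan over all axes. Fallback: R2C over just the last
// axis into an intermediate complex tensor, then an unnormalized C2C transform
// over the remaining axes; normalization always uses the input dimensions.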
if (detail::use_optimized_fft_path(axes)) {
detail::exec_fft<Ti, To>(ctx, x, out, axes, forward);
} else {
DenseTensor r2c_result = EmptyLike<To, phi::GPUContext>(ctx, *out);
detail::exec_fft<Ti, To>(ctx, x, &r2c_result, {axes.back()}, forward);
FFTC2CFunctor<phi::GPUContext, To, To> fft_c2c_func;
fft_c2c_func(ctx,
r2c_result,
out,
{axes.begin(), axes.end() - 1},
FFTNormMode::none,
forward);
}
const auto in_dims = phi::vectorize(x.dims());
detail::exec_normalization<To>(
ctx, *out, out, normalization, in_dims, axes);
}
};
using complex64_t = phi::dtype::complex<float>;
using complex128_t = phi::dtype::complex<double>;
template struct FFTC2CFunctor<phi::GPUContext, complex64_t, complex64_t>;
template struct FFTC2CFunctor<phi::GPUContext, complex128_t, complex128_t>;
template struct FFTC2RFunctor<phi::GPUContext, complex64_t, float>;
template struct FFTC2RFunctor<phi::GPUContext, complex128_t, double>;
template struct FFTR2CFunctor<phi::GPUContext, float, complex64_t>;
template struct FFTR2CFunctor<phi::GPUContext, double, complex128_t>;
} // namespace funcs
} // namespace phi
|
dd6c7afc00ea2aaec731a48dbecdac4a402dd16b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_bits.h"
#include "cuda_mfields.h"
#include "fields.hxx"
#include <psc.h>
// the loops include 2 levels of ghost cells
// they really only need -1:2 and -1:1, respectively (for 1st order)
// but always doing 2:2 seems cheap enough
#define BND 2
// OPT: precalc offset
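// blockIdx.z encodes both the patch and the z tile: the launchers below set
// gridDim.z = gridz * n_patches, so p = blockIdx.z / gridz selects the patch
// and blockIdx.z % gridz the z block within it. Indices are then shifted by
// -BND so that 0 is the first interior cell (the callers assert ib(i) == -BND).
// The E update is the usual FDTD leapfrog step: curl(H) scaled by cn* = dt/dx*
// minus dt times the current density J.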
__global__ static void
push_fields_E_xyz(DMFields dmflds, float dt, float cnx, float cny, float cnz, int gridz)
{
int bidx_z = blockIdx.z % gridz;
int p = blockIdx.z / gridz;
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int iz = bidx_z * blockDim.z + threadIdx.z;
if (!(ix < dmflds.im(0) - 2 * (2-BND) - 1 &&
iy < dmflds.im(1) - 2 * (2-BND) - 1 &&
iz < dmflds.im(2) - 2 * (2-BND) - 1))
return;
ix -= BND;
iy -= BND;
iz -= BND;
DFields F = dmflds[p];
F(EX, ix,iy,iz) +=
cny * (F(HZ, ix,iy,iz) - F(HZ, ix,iy-1,iz)) -
cnz * (F(HY, ix,iy,iz) - F(HY, ix,iy,iz-1)) -
dt * F(JXI, ix,iy,iz);
F(EY, ix,iy,iz) +=
cnz * (F(HX, ix,iy,iz) - F(HX, ix,iy,iz-1)) -
cnx * (F(HZ, ix,iy,iz) - F(HZ, ix-1,iy,iz)) -
dt * F(JYI, ix,iy,iz);
F(EZ, ix,iy,iz) +=
cnx * (F(HY, ix,iy,iz) - F(HY, ix-1,iy,iz)) -
cny * (F(HX, ix,iy,iz) - F(HX, ix,iy-1,iz)) -
dt * F(JZI, ix,iy,iz);
}
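// push_fields_H_xyz is the matching half of the leapfrog: H is advanced from
// the curl of E with the same cn* = dt/dx* factors, and carries no source term.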
__global__ static void
push_fields_H_xyz(DMFields dmflds, float cnx, float cny, float cnz, int gridz)
{
int bidx_z = blockIdx.z % gridz;
int p = blockIdx.z / gridz;
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int iz = bidx_z * blockDim.z + threadIdx.z;
if (!(ix < dmflds.im(0) - 2 * (2-BND) - 1 &&
iy < dmflds.im(1) - 2 * (2-BND) - 1 &&
iz < dmflds.im(2) - 2 * (2-BND) - 1))
return;
ix -= BND;
iy -= BND;
iz -= BND;
DFields F = dmflds[p];
F(HX, ix,iy,iz) -=
cny * (F(EZ, ix,iy+1,iz) - F(EZ, ix,iy,iz)) -
cnz * (F(EY, ix,iy,iz+1) - F(EY, ix,iy,iz));
F(HY, ix,iy,iz) -=
cnz * (F(EX, ix,iy,iz+1) - F(EX, ix,iy,iz)) -
cnx * (F(EZ, ix+1,iy,iz) - F(EZ, ix,iy,iz));
F(HZ, ix,iy,iz) -=
cnx * (F(EY, ix+1,iy,iz) - F(EY, ix,iy,iz)) -
cny * (F(EX, ix,iy+1,iz) - F(EX, ix,iy,iz));
}
#define BLOCKSIZE_X 8
#define BLOCKSIZE_Y 8
#define BLOCKSIZE_Z 8
void
cuda_push_fields_E_xyz(struct cuda_mfields *cmflds, float dt)
{
if (cmflds->n_patches() == 0) {
return;
}
assert(cmflds->n_comps() == NR_FIELDS);
assert(cmflds->ib(0) == -BND);
assert(cmflds->ib(1) == -BND);
assert(cmflds->ib(2) == -BND);
float cnx = dt / cmflds->grid().domain.dx[0];
float cny = dt / cmflds->grid().domain.dx[1];
float cnz = dt / cmflds->grid().domain.dx[2];
int grid[3] = { (cmflds->im(0) + BLOCKSIZE_X - 1) / BLOCKSIZE_X,
(cmflds->im(1) + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(cmflds->im(2) + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
dim3 dimBlock(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z);
dim3 dimGrid(grid[0], grid[1], grid[2] * cmflds->n_patches());
hipLaunchKernelGGL(( push_fields_E_xyz), dim3(dimGrid), dim3(dimBlock), 0, 0, *cmflds, dt, cnx, cny, cnz, grid[2]);
cuda_sync_if_enabled();
}
void
cuda_push_fields_H_xyz(struct cuda_mfields *cmflds, float dt)
{
if (cmflds->n_patches() == 0) {
return;
}
float cnx = dt / cmflds->grid().domain.dx[0];
float cny = dt / cmflds->grid().domain.dx[1];
float cnz = dt / cmflds->grid().domain.dx[2];
int grid[3] = { (cmflds->im(0) + BLOCKSIZE_X - 1) / BLOCKSIZE_X,
(cmflds->im(1) + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(cmflds->im(2) + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
dim3 dimBlock(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z);
dim3 dimGrid(grid[0], grid[1], grid[2] * cmflds->n_patches());
hipLaunchKernelGGL(( push_fields_H_xyz), dim3(dimGrid), dim3(dimBlock), 0, 0, *cmflds, cnx, cny, cnz, grid[2]);
cuda_sync_if_enabled();
}
|
dd6c7afc00ea2aaec731a48dbecdac4a402dd16b.cu
|
#include "cuda_bits.h"
#include "cuda_mfields.h"
#include "fields.hxx"
#include <psc.h>
// the loops include 2 levels of ghost cells
// they really only need -1:2 and -1:1, respectively (for 1st order)
// but always doing 2:2 seems cheap enough
#define BND 2
// OPT: precalc offset
__global__ static void
push_fields_E_xyz(DMFields dmflds, float dt, float cnx, float cny, float cnz, int gridz)
{
int bidx_z = blockIdx.z % gridz;
int p = blockIdx.z / gridz;
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int iz = bidx_z * blockDim.z + threadIdx.z;
if (!(ix < dmflds.im(0) - 2 * (2-BND) - 1 &&
iy < dmflds.im(1) - 2 * (2-BND) - 1 &&
iz < dmflds.im(2) - 2 * (2-BND) - 1))
return;
ix -= BND;
iy -= BND;
iz -= BND;
DFields F = dmflds[p];
F(EX, ix,iy,iz) +=
cny * (F(HZ, ix,iy,iz) - F(HZ, ix,iy-1,iz)) -
cnz * (F(HY, ix,iy,iz) - F(HY, ix,iy,iz-1)) -
dt * F(JXI, ix,iy,iz);
F(EY, ix,iy,iz) +=
cnz * (F(HX, ix,iy,iz) - F(HX, ix,iy,iz-1)) -
cnx * (F(HZ, ix,iy,iz) - F(HZ, ix-1,iy,iz)) -
dt * F(JYI, ix,iy,iz);
F(EZ, ix,iy,iz) +=
cnx * (F(HY, ix,iy,iz) - F(HY, ix-1,iy,iz)) -
cny * (F(HX, ix,iy,iz) - F(HX, ix,iy-1,iz)) -
dt * F(JZI, ix,iy,iz);
}
__global__ static void
push_fields_H_xyz(DMFields dmflds, float cnx, float cny, float cnz, int gridz)
{
int bidx_z = blockIdx.z % gridz;
int p = blockIdx.z / gridz;
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int iz = bidx_z * blockDim.z + threadIdx.z;
if (!(ix < dmflds.im(0) - 2 * (2-BND) - 1 &&
iy < dmflds.im(1) - 2 * (2-BND) - 1 &&
iz < dmflds.im(2) - 2 * (2-BND) - 1))
return;
ix -= BND;
iy -= BND;
iz -= BND;
DFields F = dmflds[p];
F(HX, ix,iy,iz) -=
cny * (F(EZ, ix,iy+1,iz) - F(EZ, ix,iy,iz)) -
cnz * (F(EY, ix,iy,iz+1) - F(EY, ix,iy,iz));
F(HY, ix,iy,iz) -=
cnz * (F(EX, ix,iy,iz+1) - F(EX, ix,iy,iz)) -
cnx * (F(EZ, ix+1,iy,iz) - F(EZ, ix,iy,iz));
F(HZ, ix,iy,iz) -=
cnx * (F(EY, ix+1,iy,iz) - F(EY, ix,iy,iz)) -
cny * (F(EX, ix,iy+1,iz) - F(EX, ix,iy,iz));
}
#define BLOCKSIZE_X 8
#define BLOCKSIZE_Y 8
#define BLOCKSIZE_Z 8
void
cuda_push_fields_E_xyz(struct cuda_mfields *cmflds, float dt)
{
if (cmflds->n_patches() == 0) {
return;
}
assert(cmflds->n_comps() == NR_FIELDS);
assert(cmflds->ib(0) == -BND);
assert(cmflds->ib(1) == -BND);
assert(cmflds->ib(2) == -BND);
float cnx = dt / cmflds->grid().domain.dx[0];
float cny = dt / cmflds->grid().domain.dx[1];
float cnz = dt / cmflds->grid().domain.dx[2];
int grid[3] = { (cmflds->im(0) + BLOCKSIZE_X - 1) / BLOCKSIZE_X,
(cmflds->im(1) + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(cmflds->im(2) + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
dim3 dimBlock(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z);
dim3 dimGrid(grid[0], grid[1], grid[2] * cmflds->n_patches());
push_fields_E_xyz<<<dimGrid, dimBlock>>>(*cmflds, dt, cnx, cny, cnz, grid[2]);
cuda_sync_if_enabled();
}
void
cuda_push_fields_H_xyz(struct cuda_mfields *cmflds, float dt)
{
if (cmflds->n_patches() == 0) {
return;
}
float cnx = dt / cmflds->grid().domain.dx[0];
float cny = dt / cmflds->grid().domain.dx[1];
float cnz = dt / cmflds->grid().domain.dx[2];
int grid[3] = { (cmflds->im(0) + BLOCKSIZE_X - 1) / BLOCKSIZE_X,
(cmflds->im(1) + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(cmflds->im(2) + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
dim3 dimBlock(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z);
dim3 dimGrid(grid[0], grid[1], grid[2] * cmflds->n_patches());
push_fields_H_xyz<<<dimGrid, dimBlock>>>(*cmflds, cnx, cny, cnz, grid[2]);
cuda_sync_if_enabled();
}
|
b940f07b232d746be18b33719dd08f10f908f0ca.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/datamov_utils.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace cv { namespace gpu { namespace bfmatcher
{
///////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// General funcs //////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// Mask strategy
struct SingleMask
{
explicit SingleMask(const PtrStep& mask_) : mask(mask_) {}
__device__ __forceinline__ bool operator()(int queryIdx, int trainIdx) const
{
return mask.ptr(queryIdx)[trainIdx] != 0;
}
const PtrStep mask;
};
struct MaskCollection
{
explicit MaskCollection(PtrStep* maskCollection_) : maskCollection(maskCollection_) {}
__device__ __forceinline__ void nextMask()
{
curMask = *maskCollection++;
}
__device__ __forceinline__ bool operator()(int queryIdx, int trainIdx) const
{
uchar val;
return curMask.data == 0 || (ForceGlob<uchar>::Load(curMask.ptr(queryIdx), trainIdx, val), (val != 0));
}
const PtrStep* maskCollection;
PtrStep curMask;
};
class WithOutMask
{
public:
__device__ __forceinline__ void nextMask() const
{
}
__device__ __forceinline__ bool operator()(int queryIdx, int trainIdx) const
{
return true;
}
};
///////////////////////////////////////////////////////////////////////////////
// Reduce Sum
template <int BLOCK_DIM_X> struct SumReductor;
template <> struct SumReductor<16>
{
template <typename T> static __device__ void reduce(T* sdiff_row, T& mySum)
{
volatile T* smem = sdiff_row;
smem[threadIdx.x] = mySum;
if (threadIdx.x < 8)
{
smem[threadIdx.x] = mySum += smem[threadIdx.x + 8];
smem[threadIdx.x] = mySum += smem[threadIdx.x + 4];
smem[threadIdx.x] = mySum += smem[threadIdx.x + 2];
smem[threadIdx.x] = mySum += smem[threadIdx.x + 1];
}
}
};
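// Warp-synchronous tree reduction over the 16 threads of a row: the row lives
// in volatile shared memory and no __syncthreads() is needed because 16 threads
// always fall within a single warp on the pre-Volta GPUs this module targets.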
///////////////////////////////////////////////////////////////////////////////
// Distance
template <typename T> struct L1Dist
{
typedef int ResultType;
typedef int ValueType;
__device__ __forceinline__ L1Dist() : mySum(0) {}
__device__ __forceinline__ void reduceIter(int val1, int val2)
{
mySum = __sad(val1, val2, mySum);
}
template <int BLOCK_DIM_X> __device__ __forceinline__ void reduceAll(int* sdiff_row)
{
SumReductor<BLOCK_DIM_X>::reduce(sdiff_row, mySum);
}
__device__ __forceinline__ operator int() const
{
return mySum;
}
int mySum;
};
template <> struct L1Dist<float>
{
typedef float ResultType;
typedef float ValueType;
__device__ __forceinline__ L1Dist() : mySum(0.0f) {}
__device__ __forceinline__ void reduceIter(float val1, float val2)
{
mySum += fabs(val1 - val2);
}
template <int BLOCK_DIM_X> __device__ __forceinline__ void reduceAll(float* sdiff_row)
{
SumReductor<BLOCK_DIM_X>::reduce(sdiff_row, mySum);
}
__device__ __forceinline__ operator float() const
{
return mySum;
}
float mySum;
};
struct L2Dist
{
typedef float ResultType;
typedef float ValueType;
__device__ __forceinline__ L2Dist() : mySum(0.0f) {}
__device__ __forceinline__ void reduceIter(float val1, float val2)
{
float reg = val1 - val2;
mySum += reg * reg;
}
template <int BLOCK_DIM_X> __device__ __forceinline__ void reduceAll(float* sdiff_row)
{
SumReductor<BLOCK_DIM_X>::reduce(sdiff_row, mySum);
}
__device__ __forceinline__ operator float() const
{
return sqrtf(mySum);
}
float mySum;
};
struct HammingDist
{
typedef int ResultType;
typedef int ValueType;
__device__ __forceinline__ HammingDist() : mySum(0) {}
__device__ __forceinline__ void reduceIter(int val1, int val2)
{
mySum += __popc(val1 ^ val2);
}
template <int BLOCK_DIM_X> __device__ __forceinline__ void reduceAll(int* sdiff_row)
{
SumReductor<BLOCK_DIM_X>::reduce(sdiff_row, mySum);
}
__device__ __forceinline__ operator int() const
{
return mySum;
}
int mySum;
};
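// All distance functors share the same protocol: reduceIter() accumulates a
// per-thread partial over the descriptor elements assigned to that lane,
// reduceAll() combines the partials across the row via SumReductor, and the
// conversion operator yields the final value (L2Dist applies sqrtf only there).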
///////////////////////////////////////////////////////////////////////////////
// reduceDescDiff
template <int BLOCK_DIM_X, typename Dist, typename T>
__device__ void reduceDescDiff(const T* queryDescs, const T* trainDescs, int desc_len, Dist& dist, typename Dist::ResultType* sdiff_row)
{
for (int i = threadIdx.x; i < desc_len; i += BLOCK_DIM_X)
{
T trainVal;
ForceGlob<T>::Load(trainDescs, i, trainVal);
dist.reduceIter(queryDescs[i], trainVal);
}
dist.reduceAll<BLOCK_DIM_X>(sdiff_row);
}
///////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////// Match //////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// loadDescsVals
template <int BLOCK_DIM_X, int MAX_DESCRIPTORS_LEN, typename T, typename U>
__device__ void loadDescsVals(const T* descs, int desc_len, U* queryVals, U* smem)
{
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if (tid < desc_len)
{
smem[tid] = descs[tid];
}
__syncthreads();
#pragma unroll
for (int i = threadIdx.x; i < MAX_DESCRIPTORS_LEN; i += BLOCK_DIM_X)
{
*queryVals = smem[i];
++queryVals;
}
}
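// loadDescsVals stages the query descriptor through shared memory once and
// copies each thread's strided slice into the queryVals register array, so the
// cached matchers never re-read the query descriptor from global memory.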
///////////////////////////////////////////////////////////////////////////////
// reduceDescDiffCached
template <int N> struct UnrollDescDiff
{
template <typename Dist, typename T>
static __device__ void calcCheck(const typename Dist::ValueType* queryVals, const T* trainDescs, int desc_len, Dist& dist, int ind)
{
if (ind < desc_len)
{
T trainVal;
ForceGlob<T>::Load(trainDescs, ind, trainVal);
dist.reduceIter(*queryVals, trainVal);
++queryVals;
UnrollDescDiff<N - 1>::calcCheck(queryVals, trainDescs, desc_len, dist, ind + blockDim.x);
}
}
template <typename Dist, typename T>
static __device__ void calcWithoutCheck(const typename Dist::ValueType* queryVals, const T* trainDescs, Dist& dist)
{
T trainVal;
ForceGlob<T>::Load(trainDescs, 0, trainVal);
dist.reduceIter(*queryVals, trainVal);
++queryVals;
trainDescs += blockDim.x;
UnrollDescDiff<N - 1>::calcWithoutCheck(queryVals, trainDescs, dist);
}
};
template <> struct UnrollDescDiff<0>
{
template <typename Dist, typename T>
static __device__ __forceinline__ void calcCheck(const typename Dist::ValueType* queryVals, const T* trainDescs, int desc_len,
Dist& dist, int ind)
{
}
template <typename Dist, typename T>
static __device__ __forceinline__ void calcWithoutCheck(const typename Dist::ValueType* queryVals, const T* trainDescs, Dist& dist)
{
}
};
template <int BLOCK_DIM_X, int MAX_DESCRIPTORS_LEN, bool WITH_OUT_CHECK> struct DescDiffCalculator;
template <int BLOCK_DIM_X, int MAX_DESCRIPTORS_LEN>
struct DescDiffCalculator<BLOCK_DIM_X, MAX_DESCRIPTORS_LEN, false>
{
template <typename Dist, typename T>
static __device__ __forceinline__ void calc(const typename Dist::ValueType* queryVals, const T* trainDescs, int desc_len, Dist& dist)
{
UnrollDescDiff<MAX_DESCRIPTORS_LEN / BLOCK_DIM_X>::calcCheck(queryVals, trainDescs, desc_len, dist, threadIdx.x);
}
};
template <int BLOCK_DIM_X, int MAX_DESCRIPTORS_LEN>
struct DescDiffCalculator<BLOCK_DIM_X, MAX_DESCRIPTORS_LEN, true>
{
template <typename Dist, typename T>
static __device__ __forceinline__ void calc(const typename Dist::ValueType* queryVals, const T* trainDescs, int desc_len, Dist& dist)
{
UnrollDescDiff<MAX_DESCRIPTORS_LEN / BLOCK_DIM_X>::calcWithoutCheck(queryVals, trainDescs + threadIdx.x, dist);
}
};
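// DescDiffCalculator chooses between the two unrolled variants: when the
// descriptor length is known to equal MAX_DESCRIPTORS_LEN the per-step bounds
// check is dropped entirely, otherwise every unrolled step still verifies
// ind < desc_len. MAX_DESCRIPTORS_LEN / BLOCK_DIM_X is the unroll count per thread.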
template <int BLOCK_DIM_X, int MAX_DESCRIPTORS_LEN, bool DESC_LEN_EQ_MAX_LEN, typename Dist, typename T>
__device__ __forceinline__ void reduceDescDiffCached(const typename Dist::ValueType* queryVals, const T* trainDescs, int desc_len, Dist& dist, typename Dist::ResultType* sdiff_row)
{
DescDiffCalculator<BLOCK_DIM_X, MAX_DESCRIPTORS_LEN, DESC_LEN_EQ_MAX_LEN>::calc(queryVals, trainDescs, desc_len, dist);
dist.reduceAll<BLOCK_DIM_X>(sdiff_row);
}
///////////////////////////////////////////////////////////////////////////////
// warpReduceMinIdxIdx
template <int BLOCK_DIM_Y> struct MinIdxIdxWarpReductor;
template <> struct MinIdxIdxWarpReductor<16>
{
template <typename T>
static __device__ void reduce(T& myMin, int& myBestTrainIdx, int& myBestImgIdx, volatile T* smin, volatile int* strainIdx, volatile int* simgIdx)
{
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if (tid < 8)
{
myMin = smin[tid];
myBestTrainIdx = strainIdx[tid];
myBestImgIdx = simgIdx[tid];
float reg = smin[tid + 8];
if (reg < myMin)
{
smin[tid] = myMin = reg;
strainIdx[tid] = myBestTrainIdx = strainIdx[tid + 8];
simgIdx[tid] = myBestImgIdx = simgIdx[tid + 8];
}
reg = smin[tid + 4];
if (reg < myMin)
{
smin[tid] = myMin = reg;
strainIdx[tid] = myBestTrainIdx = strainIdx[tid + 4];
simgIdx[tid] = myBestImgIdx = simgIdx[tid + 4];
}
reg = smin[tid + 2];
if (reg < myMin)
{
smin[tid] = myMin = reg;
strainIdx[tid] = myBestTrainIdx = strainIdx[tid + 2];
simgIdx[tid] = myBestImgIdx = simgIdx[tid + 2];
}
reg = smin[tid + 1];
if (reg < myMin)
{
smin[tid] = myMin = reg;
strainIdx[tid] = myBestTrainIdx = strainIdx[tid + 1];
simgIdx[tid] = myBestImgIdx = simgIdx[tid + 1];
}
}
}
};
///////////////////////////////////////////////////////////////////////////////
// findBestMatch
template <int BLOCK_DIM_Y, typename T>
__device__ void findBestMatch(T& myMin, int& myBestTrainIdx, int& myBestImgIdx, T* smin, int* strainIdx, int* simgIdx)
{
if (threadIdx.x == 0)
{
smin[threadIdx.y] = myMin;
strainIdx[threadIdx.y] = myBestTrainIdx;
simgIdx[threadIdx.y] = myBestImgIdx;
}
__syncthreads();
MinIdxIdxWarpReductor<BLOCK_DIM_Y>::reduce(myMin, myBestTrainIdx, myBestImgIdx, smin, strainIdx, simgIdx);
}
///////////////////////////////////////////////////////////////////////////////
// ReduceDescCalculator
template <int BLOCK_DIM_X, typename T> struct ReduceDescCalculatorSimple
{
__device__ __forceinline__ void prepare(const T* queryDescs_, int, void*)
{
queryDescs = queryDescs_;
}
template <typename Dist>
__device__ __forceinline__ void calc(const T* trainDescs, int desc_len, Dist& dist, typename Dist::ResultType* sdiff_row) const
{
reduceDescDiff<BLOCK_DIM_X>(queryDescs, trainDescs, desc_len, dist, sdiff_row);
}
const T* queryDescs;
};
template <int BLOCK_DIM_X, int MAX_DESCRIPTORS_LEN, bool DESC_LEN_EQ_MAX_LEN, typename T, typename U>
struct ReduceDescCalculatorCached
{
__device__ __forceinline__ void prepare(const T* queryDescs, int desc_len, U* smem)
{
loadDescsVals<BLOCK_DIM_X, MAX_DESCRIPTORS_LEN>(queryDescs, desc_len, queryVals, smem);
}
template <typename Dist>
__device__ __forceinline__ void calc(const T* trainDescs, int desc_len, Dist& dist, typename Dist::ResultType* sdiff_row) const
{
reduceDescDiffCached<BLOCK_DIM_X, MAX_DESCRIPTORS_LEN, DESC_LEN_EQ_MAX_LEN>(queryVals, trainDescs, desc_len, dist, sdiff_row);
}
U queryVals[MAX_DESCRIPTORS_LEN / BLOCK_DIM_X];
};
///////////////////////////////////////////////////////////////////////////////
// matchDescs loop
template <typename Dist, typename ReduceDescCalculator, typename T, typename Mask>
__device__ void matchDescs(int queryIdx, int imgIdx, const DevMem2D_<T>& trainDescs_,
const Mask& m, const ReduceDescCalculator& reduceDescCalc,
typename Dist::ResultType& myMin, int& myBestTrainIdx, int& myBestImgIdx, typename Dist::ResultType* sdiff_row)
{
for (int trainIdx = threadIdx.y; trainIdx < trainDescs_.rows; trainIdx += blockDim.y)
{
if (m(queryIdx, trainIdx))
{
const T* trainDescs = trainDescs_.ptr(trainIdx);
Dist dist;
reduceDescCalc.calc(trainDescs, trainDescs_.cols, dist, sdiff_row);
if (threadIdx.x == 0)
{
if (dist < myMin)
{
myMin = dist;
myBestTrainIdx = trainIdx;
myBestImgIdx = imgIdx;
}
}
}
}
}
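// matchDescs strides the train descriptors across the block rows (threadIdx.y);
// after each row-wide reduction only lane 0 of the row compares the distance
// against its running best and updates myMin / myBestTrainIdx / myBestImgIdx.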
///////////////////////////////////////////////////////////////////////////////
// Train collection loop strategy
template <typename T> struct SingleTrain
{
explicit SingleTrain(const DevMem2D_<T>& trainDescs_) : trainDescs(trainDescs_)
{
}
template <typename Dist, typename ReduceDescCalculator, typename Mask>
__device__ __forceinline__ void loop(int queryIdx, Mask& m, const ReduceDescCalculator& reduceDescCalc,
typename Dist::ResultType& myMin, int& myBestTrainIdx, int& myBestImgIdx, typename Dist::ResultType* sdiff_row) const
{
matchDescs<Dist>(queryIdx, 0, trainDescs, m, reduceDescCalc, myMin, myBestTrainIdx, myBestImgIdx, sdiff_row);
}
__device__ __forceinline__ int desc_len() const
{
return trainDescs.cols;
}
const DevMem2D_<T> trainDescs;
};
template <typename T> struct TrainCollection
{
TrainCollection(const DevMem2D_<T>* trainCollection_, int nImg_, int desclen_) :
trainCollection(trainCollection_), nImg(nImg_), desclen(desclen_)
{
}
template <typename Dist, typename ReduceDescCalculator, typename Mask>
__device__ void loop(int queryIdx, Mask& m, const ReduceDescCalculator& reduceDescCalc,
typename Dist::ResultType& myMin, int& myBestTrainIdx, int& myBestImgIdx, typename Dist::ResultType* sdiff_row) const
{
for (int imgIdx = 0; imgIdx < nImg; ++imgIdx)
{
const DevMem2D_<T> trainDescs = trainCollection[imgIdx];
m.nextMask();
matchDescs<Dist>(queryIdx, imgIdx, trainDescs, m, reduceDescCalc, myMin, myBestTrainIdx, myBestImgIdx, sdiff_row);
}
}
__device__ __forceinline__ int desc_len() const
{
return desclen;
}
const DevMem2D_<T>* trainCollection;
int nImg;
int desclen;
};
///////////////////////////////////////////////////////////////////////////////
// Match kernel
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename ReduceDescCalculator, typename Dist, typename T, typename Train, typename Mask>
__global__ void match(const PtrStep_<T> queryDescs_, const Train train, const Mask mask, int* trainIdx, int* imgIdx, float* distance)
{
__shared__ typename Dist::ResultType smem[BLOCK_DIM_X * BLOCK_DIM_Y];
const int queryIdx = blockIdx.x;
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
typename Dist::ResultType myMin = numeric_limits<typename Dist::ResultType>::max();
{
typename Dist::ResultType* sdiff_row = smem + BLOCK_DIM_X * threadIdx.y;
Mask m = mask;
ReduceDescCalculator reduceDescCalc;
reduceDescCalc.prepare(queryDescs_.ptr(queryIdx), train.desc_len(), (typename Dist::ValueType*)smem);
train.template loop<Dist>(queryIdx, m, reduceDescCalc, myMin, myBestTrainIdx, myBestImgIdx, sdiff_row);
}
__syncthreads();
typename Dist::ResultType* smin = smem;
int* strainIdx = (int*)(smin + BLOCK_DIM_Y);
int* simgIdx = strainIdx + BLOCK_DIM_Y;
findBestMatch<BLOCK_DIM_Y>(myMin, myBestTrainIdx, myBestImgIdx, smin, strainIdx, simgIdx);
if (threadIdx.x == 0 && threadIdx.y == 0)
{
imgIdx[queryIdx] = myBestImgIdx;
trainIdx[queryIdx] = myBestTrainIdx;
distance[queryIdx] = myMin;
}
}
///////////////////////////////////////////////////////////////////////////////
// Match kernel callers
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename Dist, typename T, typename Train, typename Mask>
void matchSimple_caller(const DevMem2D_<T>& queryDescs, const Train& train,
const Mask& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, hipStream_t stream)
{
StaticAssert<BLOCK_DIM_Y <= 64>::check(); // BLOCK_DIM_Y must be reducible within a single warp
dim3 grid(queryDescs.rows, 1, 1);
dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
hipLaunchKernelGGL(( match<BLOCK_DIM_X, BLOCK_DIM_Y, ReduceDescCalculatorSimple<BLOCK_DIM_X, T>, Dist, T>)
, dim3(grid), dim3(threads), 0, stream, queryDescs, train, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int MAX_DESCRIPTORS_LEN, bool DESC_LEN_EQ_MAX_LEN, typename Dist, typename T, typename Train, typename Mask>
void matchCached_caller(const DevMem2D_<T>& queryDescs, const Train& train,
const Mask& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, hipStream_t stream)
{
StaticAssert<BLOCK_DIM_Y <= 64>::check(); // BLOCK_DIM_Y must be reducible within a single warp
StaticAssert<BLOCK_DIM_X * BLOCK_DIM_Y >= MAX_DESCRIPTORS_LEN>::check(); // block size must be greater than or equal to the descriptor length
StaticAssert<MAX_DESCRIPTORS_LEN % BLOCK_DIM_X == 0>::check(); // max descriptor length must be divisible by BLOCK_DIM_X
dim3 grid(queryDescs.rows, 1, 1);
dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
hipLaunchKernelGGL(( match<BLOCK_DIM_X, BLOCK_DIM_Y, ReduceDescCalculatorCached<BLOCK_DIM_X, MAX_DESCRIPTORS_LEN, DESC_LEN_EQ_MAX_LEN, T, typename Dist::ValueType>, Dist, T>)
, dim3(grid), dim3(threads), 0, stream, queryDescs, train, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match caller
template <typename Dist, typename T, typename Train, typename Mask>
void matchDispatcher(const DevMem2D_<T>& queryDescs, const Train& train,
const Mask& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
bool cc_12, hipStream_t stream)
{
if (queryDescs.cols < 64)
matchCached_caller<16, 16, 64, false, Dist>(queryDescs, train, mask, trainIdx, imgIdx, distance, stream);
else if (queryDescs.cols == 64)
matchCached_caller<16, 16, 64, true, Dist>(queryDescs, train, mask, trainIdx, imgIdx, distance, stream);
else if (queryDescs.cols < 128)
matchCached_caller<16, 16, 128, false, Dist>(queryDescs, train, mask, trainIdx, imgIdx, distance, stream);
else if (queryDescs.cols == 128 && cc_12)
matchCached_caller<16, 16, 128, true, Dist>(queryDescs, train, mask, trainIdx, imgIdx, distance, stream);
else if (queryDescs.cols < 256 && cc_12)
matchCached_caller<16, 16, 256, false, Dist>(queryDescs, train, mask, trainIdx, imgIdx, distance, stream);
else if (queryDescs.cols == 256 && cc_12)
matchCached_caller<16, 16, 256, true, Dist>(queryDescs, train, mask, trainIdx, imgIdx, distance, stream);
else
matchSimple_caller<16, 16, Dist>(queryDescs, train, mask, trainIdx, imgIdx, distance, stream);
}
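// Dispatch by descriptor length: the cached-query kernels are instantiated for
// 64/128/256-element descriptors, with everything else falling back to the
// simple kernel. cc_12 presumably flags devices of compute capability >= 1.2,
// whose larger register file makes the 128/256-element cached variants viable.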
template <typename T>
void matchSingleL1_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs,
const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
bool cc_12, hipStream_t stream)
{
SingleTrain<T> train((DevMem2D_<T>)trainDescs);
if (mask.data)
{
SingleMask m(mask);
matchDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, train, m, trainIdx, imgIdx, distance, cc_12, stream);
}
else
{
matchDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, train, WithOutMask(), trainIdx, imgIdx, distance, cc_12, stream);
}
}
template void matchSingleL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchSingleL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchSingleL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchSingleL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchSingleL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchSingleL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template <typename T>
void matchSingleL2_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs,
const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
bool cc_12, hipStream_t stream)
{
SingleTrain<T> train((DevMem2D_<T>)trainDescs);
if (mask.data)
{
SingleMask m(mask);
matchDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, train, m, trainIdx, imgIdx, distance, cc_12, stream);
}
else
{
matchDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, train, WithOutMask(), trainIdx, imgIdx, distance, cc_12, stream);
}
}
template void matchSingleL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchSingleL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchSingleL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchSingleL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchSingleL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchSingleL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template <typename T>
void matchSingleHamming_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs,
const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
bool cc_12, hipStream_t stream)
{
SingleTrain<T> train((DevMem2D_<T>)trainDescs);
if (mask.data)
{
SingleMask m(mask);
matchDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, train, m, trainIdx, imgIdx, distance, cc_12, stream);
}
else
{
matchDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, train, WithOutMask(), trainIdx, imgIdx, distance, cc_12, stream);
}
}
template void matchSingleHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchSingleHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchSingleHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchSingleHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchSingleHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template <typename T>
void matchCollectionL1_gpu(const DevMem2D& queryDescs, const DevMem2D& trainCollection,
const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx,
const DevMem2Df& distance, bool cc_12, hipStream_t stream)
{
TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, queryDescs.cols);
if (maskCollection.data)
{
MaskCollection mask(maskCollection.data);
matchDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, train, mask, trainIdx, imgIdx, distance, cc_12, stream);
}
else
{
matchDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, train, WithOutMask(), trainIdx, imgIdx, distance, cc_12, stream);
}
}
template void matchCollectionL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchCollectionL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchCollectionL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchCollectionL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchCollectionL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchCollectionL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template <typename T>
void matchCollectionL2_gpu(const DevMem2D& queryDescs, const DevMem2D& trainCollection,
const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx,
const DevMem2Df& distance, bool cc_12, hipStream_t stream)
{
TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, queryDescs.cols);
if (maskCollection.data)
{
MaskCollection mask(maskCollection.data);
matchDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, train, mask, trainIdx, imgIdx, distance, cc_12, stream);
}
else
{
matchDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, train, WithOutMask(), trainIdx, imgIdx, distance, cc_12, stream);
}
}
template void matchCollectionL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchCollectionL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchCollectionL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchCollectionL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchCollectionL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchCollectionL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template <typename T>
void matchCollectionHamming_gpu(const DevMem2D& queryDescs, const DevMem2D& trainCollection,
const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx,
const DevMem2Df& distance, bool cc_12, hipStream_t stream)
{
TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, queryDescs.cols);
if (maskCollection.data)
{
MaskCollection mask(maskCollection.data);
matchDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, train, mask, trainIdx, imgIdx, distance, cc_12, stream);
}
else
{
matchDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, train, WithOutMask(), trainIdx, imgIdx, distance, cc_12, stream);
}
}
template void matchCollectionHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchCollectionHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchCollectionHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchCollectionHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
template void matchCollectionHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, hipStream_t stream);
///////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////// Knn Match ////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// Calc distance kernel
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename Dist, typename T, typename Mask>
__global__ void calcDistance(const PtrStep_<T> queryDescs_, const DevMem2D_<T> trainDescs_, const Mask mask, PtrStepf distance)
{
__shared__ typename Dist::ResultType sdiff[BLOCK_DIM_X * BLOCK_DIM_Y];
typename Dist::ResultType* sdiff_row = sdiff + BLOCK_DIM_X * threadIdx.y;
const int queryIdx = blockIdx.x;
const T* queryDescs = queryDescs_.ptr(queryIdx);
const int trainIdx = blockIdx.y * BLOCK_DIM_Y + threadIdx.y;
if (trainIdx < trainDescs_.rows)
{
const T* trainDescs = trainDescs_.ptr(trainIdx);
typename Dist::ResultType myDist = numeric_limits<typename Dist::ResultType>::max();
if (mask(queryIdx, trainIdx))
{
Dist dist;
reduceDescDiff<BLOCK_DIM_X>(queryDescs, trainDescs, trainDescs_.cols, dist, sdiff_row);
if (threadIdx.x == 0)
myDist = dist;
}
if (threadIdx.x == 0)
distance.ptr(queryIdx)[trainIdx] = myDist;
}
}
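// calcDistance fills one row of the query-by-train distance matrix per thread
// row; masked-out pairs keep numeric_limits<ResultType>::max() so that the
// k-NN selection below ignores them.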
///////////////////////////////////////////////////////////////////////////////
// Calc distance kernel caller
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename Dist, typename T, typename Mask>
void calcDistance_caller(const DevMem2D_<T>& queryDescs, const DevMem2D_<T>& trainDescs,
const Mask& mask, const DevMem2Df& distance, hipStream_t stream)
{
dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
dim3 grid(queryDescs.rows, divUp(trainDescs.rows, BLOCK_DIM_Y), 1);
hipLaunchKernelGGL(( calcDistance<BLOCK_DIM_X, BLOCK_DIM_Y, Dist, T>), dim3(grid), dim3(threads), 0, stream,
queryDescs, trainDescs, mask, distance);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// warpReduceMinIdx
template <int BLOCK_SIZE, typename T>
__device__ void warpReduceMinIdx(volatile T* sdist, volatile int* strainIdx, T& myMin, int tid)
{
if (tid < 32)
{
if (BLOCK_SIZE >= 64)
{
T reg = sdist[tid + 32];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 32];
}
}
if (BLOCK_SIZE >= 32)
{
T reg = sdist[tid + 16];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 16];
}
}
if (BLOCK_SIZE >= 16)
{
T reg = sdist[tid + 8];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 8];
}
}
if (BLOCK_SIZE >= 8)
{
T reg = sdist[tid + 4];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 4];
}
}
if (BLOCK_SIZE >= 4)
{
T reg = sdist[tid + 2];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 2];
}
}
if (BLOCK_SIZE >= 2)
{
T reg = sdist[tid + 1];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 1];
}
}
}
}
template <int BLOCK_SIZE, typename T>
__device__ void reduceMinIdx(const T* dist, int n, T* sdist, int* strainIdx)
{
const int tid = threadIdx.x;
T myMin = numeric_limits<T>::max();
int myMinIdx = -1;
for (int i = tid; i < n; i += BLOCK_SIZE)
{
T reg = dist[i];
if (reg < myMin)
{
myMin = reg;
myMinIdx = i;
}
}
sdist[tid] = myMin;
strainIdx[tid] = myMinIdx;
__syncthreads();
if (BLOCK_SIZE >= 512 && tid < 256)
{
T reg = sdist[tid + 256];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 256];
}
__syncthreads();
}
if (BLOCK_SIZE >= 256 && tid < 128)
{
T reg = sdist[tid + 128];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 128];
}
__syncthreads();
}
if (BLOCK_SIZE >= 128 && tid < 64)
{
T reg = sdist[tid + 64];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 64];
}
__syncthreads();
}
warpReduceMinIdx<BLOCK_SIZE>(sdist, strainIdx, myMin, tid);
}
///////////////////////////////////////////////////////////////////////////////
// find knn match kernel
template <int BLOCK_SIZE> __global__ void findBestMatch(DevMem2Df allDist_, int i, PtrStepi trainIdx_, PtrStepf distance_)
{
const int SMEM_SIZE = BLOCK_SIZE > 64 ? BLOCK_SIZE : 64;
__shared__ float sdist[SMEM_SIZE];
__shared__ int strainIdx[SMEM_SIZE];
const int queryIdx = blockIdx.x;
float* allDist = allDist_.ptr(queryIdx);
int* trainIdx = trainIdx_.ptr(queryIdx);
float* distance = distance_.ptr(queryIdx);
reduceMinIdx<BLOCK_SIZE>(allDist, allDist_.cols, sdist, strainIdx);
if (threadIdx.x == 0)
{
float dist = sdist[0];
if (dist < numeric_limits<float>::max())
{
int bestIdx = strainIdx[0];
allDist[bestIdx] = numeric_limits<float>::max();
trainIdx[i] = bestIdx;
distance[i] = dist;
}
}
}
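// k-NN by repeated selection: call i extracts the current minimum of the
// query's allDist row, records it in trainIdx[i] / distance[i], and overwrites
// that slot with numeric_limits<float>::max() so the next call yields the
// (i+1)-th nearest neighbour.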
///////////////////////////////////////////////////////////////////////////////
// find knn match kernel caller
template <int BLOCK_SIZE>
void findKnnMatch_caller(int knn, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream)
{
dim3 threads(BLOCK_SIZE, 1, 1);
dim3 grid(trainIdx.rows, 1, 1);
for (int i = 0; i < knn; ++i)
{
hipLaunchKernelGGL(( findBestMatch<BLOCK_SIZE>), dim3(grid), dim3(threads), 0, stream, allDist, i, trainIdx, distance);
cudaSafeCall( hipGetLastError() );
}
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// knn match caller
template <typename Dist, typename T, typename Mask>
void calcDistanceDispatcher(const DevMem2D_<T>& queryDescs, const DevMem2D_<T>& trainDescs, const Mask& mask, const DevMem2Df& allDist, hipStream_t stream)
{
calcDistance_caller<16, 16, Dist>(queryDescs, trainDescs, mask, allDist, stream);
}
void findKnnMatchDispatcher(int knn, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream)
{
findKnnMatch_caller<256>(knn, trainIdx, distance, allDist, stream);
}
template <typename T>
void knnMatchL1_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn,
const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream)
{
if (mask.data)
{
calcDistanceDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs, SingleMask(mask), allDist, stream);
}
else
{
calcDistanceDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs, WithOutMask(), allDist, stream);
}
findKnnMatchDispatcher(knn, trainIdx, distance, allDist, stream);
}
template void knnMatchL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template void knnMatchL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template void knnMatchL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template void knnMatchL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template void knnMatchL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template void knnMatchL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template <typename T>
void knnMatchL2_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn,
const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream)
{
if (mask.data)
{
calcDistanceDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
SingleMask(mask), allDist, stream);
}
else
{
calcDistanceDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
WithOutMask(), allDist, stream);
}
findKnnMatchDispatcher(knn, trainIdx, distance, allDist, stream);
}
template void knnMatchL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template void knnMatchL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template void knnMatchL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template void knnMatchL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template void knnMatchL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template void knnMatchL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template <typename T>
void knnMatchHamming_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn,
const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream)
{
if (mask.data)
{
calcDistanceDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
SingleMask(mask), allDist, stream);
}
else
{
calcDistanceDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
WithOutMask(), allDist, stream);
}
findKnnMatchDispatcher(knn, trainIdx, distance, allDist, stream);
}
template void knnMatchHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template void knnMatchHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template void knnMatchHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template void knnMatchHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
template void knnMatchHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, hipStream_t stream);
///////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////// Radius Match //////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// Radius Match kernel
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename Dist, typename T, typename Mask>
__global__ void radiusMatch(const PtrStep_<T> queryDescs_, const DevMem2D_<T> trainDescs_,
float maxDistance, const Mask mask, DevMem2Di trainIdx_, unsigned int* nMatches, PtrStepf distance)
{
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ typename Dist::ResultType smem[BLOCK_DIM_X * BLOCK_DIM_Y];
typename Dist::ResultType* sdiff_row = smem + BLOCK_DIM_X * threadIdx.y;
const int queryIdx = blockIdx.x;
const T* queryDescs = queryDescs_.ptr(queryIdx);
const int trainIdx = blockIdx.y * BLOCK_DIM_Y + threadIdx.y;
if (trainIdx < trainDescs_.rows)
{
const T* trainDescs = trainDescs_.ptr(trainIdx);
if (mask(queryIdx, trainIdx))
{
Dist dist;
reduceDescDiff<BLOCK_DIM_X>(queryDescs, trainDescs, trainDescs_.cols, dist, sdiff_row);
if (threadIdx.x == 0)
{
if (dist < maxDistance)
{
unsigned int i = atomicInc(nMatches + queryIdx, (unsigned int) -1);
if (i < trainIdx_.cols)
{
distance.ptr(queryIdx)[i] = dist;
trainIdx_.ptr(queryIdx)[i] = trainIdx;
}
}
}
}
}
#endif
}
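// Matches within maxDistance are appended with atomicInc on a per-query
// counter (nMatches); hits beyond the trainIdx_ row capacity are counted but
// not stored. The body is compiled only for compute capability >= 1.1 because
// the global atomics it relies on are unavailable below that.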
///////////////////////////////////////////////////////////////////////////////
// Radius Match kernel caller
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename Dist, typename T, typename Mask>
void radiusMatch_caller(const DevMem2D_<T>& queryDescs, const DevMem2D_<T>& trainDescs,
float maxDistance, const Mask& mask, const DevMem2Di& trainIdx, unsigned int* nMatches,
const DevMem2Df& distance, hipStream_t stream)
{
dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
dim3 grid(queryDescs.rows, divUp(trainDescs.rows, BLOCK_DIM_Y), 1);
hipLaunchKernelGGL(( radiusMatch<BLOCK_DIM_X, BLOCK_DIM_Y, Dist, T>), dim3(grid), dim3(threads), 0, stream,
queryDescs, trainDescs, maxDistance, mask, trainIdx, nMatches, distance);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Radius Match caller
template <typename Dist, typename T, typename Mask>
void radiusMatchDispatcher(const DevMem2D_<T>& queryDescs, const DevMem2D_<T>& trainDescs,
float maxDistance, const Mask& mask, const DevMem2Di& trainIdx, unsigned int* nMatches,
const DevMem2Df& distance, hipStream_t stream)
{
radiusMatch_caller<16, 16, Dist>(queryDescs, trainDescs, maxDistance, mask,
trainIdx, nMatches, distance, stream);
}
template <typename T>
void radiusMatchL1_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance,
const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream)
{
if (mask.data)
{
radiusMatchDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
maxDistance, SingleMask(mask), trainIdx, nMatches, distance, stream);
}
else
{
radiusMatchDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
maxDistance, WithOutMask(), trainIdx, nMatches, distance, stream);
}
}
template void radiusMatchL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template void radiusMatchL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template void radiusMatchL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template void radiusMatchL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template void radiusMatchL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template void radiusMatchL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template <typename T>
void radiusMatchL2_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance,
const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream)
{
if (mask.data)
{
radiusMatchDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
maxDistance, SingleMask(mask), trainIdx, nMatches, distance, stream);
}
else
{
radiusMatchDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
maxDistance, WithOutMask(), trainIdx, nMatches, distance, stream);
}
}
template void radiusMatchL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template void radiusMatchL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template void radiusMatchL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template void radiusMatchL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template void radiusMatchL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template void radiusMatchL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template <typename T>
void radiusMatchHamming_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance,
const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream)
{
if (mask.data)
{
radiusMatchDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
maxDistance, SingleMask(mask), trainIdx, nMatches, distance, stream);
}
else
{
radiusMatchDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
maxDistance, WithOutMask(), trainIdx, nMatches, distance, stream);
}
}
template void radiusMatchHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template void radiusMatchHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template void radiusMatchHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template void radiusMatchHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
template void radiusMatchHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, hipStream_t stream);
}}}
|
b940f07b232d746be18b33719dd08f10f908f0ca.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/datamov_utils.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace cv { namespace gpu { namespace bfmatcher
{
///////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// General funcs //////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// Mask strategy
struct SingleMask
{
explicit SingleMask(const PtrStep& mask_) : mask(mask_) {}
__device__ __forceinline__ bool operator()(int queryIdx, int trainIdx) const
{
return mask.ptr(queryIdx)[trainIdx] != 0;
}
const PtrStep mask;
};
struct MaskCollection
{
explicit MaskCollection(PtrStep* maskCollection_) : maskCollection(maskCollection_) {}
__device__ __forceinline__ void nextMask()
{
curMask = *maskCollection++;
}
__device__ __forceinline__ bool operator()(int queryIdx, int trainIdx) const
{
uchar val;
return curMask.data == 0 || (ForceGlob<uchar>::Load(curMask.ptr(queryIdx), trainIdx, val), (val != 0));
}
const PtrStep* maskCollection;
PtrStep curMask;
};
class WithOutMask
{
public:
__device__ __forceinline__ void nextMask() const
{
}
__device__ __forceinline__ bool operator()(int queryIdx, int trainIdx) const
{
return true;
}
};
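// SingleMask, MaskCollection and WithOutMask form a small compile-time strategy set:
// the kernels below take the mask as a template parameter and only ever call
// operator()(queryIdx, trainIdx) (plus nextMask() when iterating a collection), so the
// unmasked case collapses to "return true" with no memory traffic. The host wrappers
// further down (e.g. matchSingleL1_gpu) choose SingleMask when mask.data is non-null
// and WithOutMask otherwise.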
///////////////////////////////////////////////////////////////////////////////
// Reduce Sum
template <int BLOCK_DIM_X> struct SumReductor;
template <> struct SumReductor<16>
{
template <typename T> static __device__ void reduce(T* sdiff_row, T& mySum)
{
volatile T* smem = sdiff_row;
smem[threadIdx.x] = mySum;
if (threadIdx.x < 8)
{
smem[threadIdx.x] = mySum += smem[threadIdx.x + 8];
smem[threadIdx.x] = mySum += smem[threadIdx.x + 4];
smem[threadIdx.x] = mySum += smem[threadIdx.x + 2];
smem[threadIdx.x] = mySum += smem[threadIdx.x + 1];
}
}
};
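// SumReductor<16> is a warp-synchronous tree reduction over one 16-wide shared-memory
// row: lanes 0..7 fold in the values 8, 4, 2 and 1 slots away, so after four steps
// sdiff_row[0] holds the row total. The volatile pointer keeps the compiler from
// caching the shared-memory reads in registers between steps; the code relies on the
// 16 lanes of a half-warp running in lock-step, which is why no __syncthreads() is
// issued here.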
///////////////////////////////////////////////////////////////////////////////
// Distance
template <typename T> struct L1Dist
{
typedef int ResultType;
typedef int ValueType;
__device__ __forceinline__ L1Dist() : mySum(0) {}
__device__ __forceinline__ void reduceIter(int val1, int val2)
{
mySum = __sad(val1, val2, mySum);
}
template <int BLOCK_DIM_X> __device__ __forceinline__ void reduceAll(int* sdiff_row)
{
SumReductor<BLOCK_DIM_X>::reduce(sdiff_row, mySum);
}
__device__ __forceinline__ operator int() const
{
return mySum;
}
int mySum;
};
template <> struct L1Dist<float>
{
typedef float ResultType;
typedef float ValueType;
__device__ __forceinline__ L1Dist() : mySum(0.0f) {}
__device__ __forceinline__ void reduceIter(float val1, float val2)
{
mySum += fabs(val1 - val2);
}
template <int BLOCK_DIM_X> __device__ __forceinline__ void reduceAll(float* sdiff_row)
{
SumReductor<BLOCK_DIM_X>::reduce(sdiff_row, mySum);
}
__device__ __forceinline__ operator float() const
{
return mySum;
}
float mySum;
};
struct L2Dist
{
typedef float ResultType;
typedef float ValueType;
__device__ __forceinline__ L2Dist() : mySum(0.0f) {}
__device__ __forceinline__ void reduceIter(float val1, float val2)
{
float reg = val1 - val2;
mySum += reg * reg;
}
template <int BLOCK_DIM_X> __device__ __forceinline__ void reduceAll(float* sdiff_row)
{
SumReductor<BLOCK_DIM_X>::reduce(sdiff_row, mySum);
}
__device__ __forceinline__ operator float() const
{
return sqrtf(mySum);
}
float mySum;
};
struct HammingDist
{
typedef int ResultType;
typedef int ValueType;
__device__ __forceinline__ HammingDist() : mySum(0) {}
__device__ __forceinline__ void reduceIter(int val1, int val2)
{
mySum += __popc(val1 ^ val2);
}
template <int BLOCK_DIM_X> __device__ __forceinline__ void reduceAll(int* sdiff_row)
{
SumReductor<BLOCK_DIM_X>::reduce(sdiff_row, mySum);
}
__device__ __forceinline__ operator int() const
{
return mySum;
}
int mySum;
};
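// The four distance functors share the interface the kernels rely on: reduceIter(a, b)
// accumulates one element pair, reduceAll<BLOCK_DIM_X>() folds the per-thread partials
// through SumReductor, and the conversion operator yields the final value (L2Dist only
// applies sqrtf at that last step). HammingDist assumes bit-packed descriptors read as
// ints, so __popc(val1 ^ val2) counts the differing bits 32 at a time, while
// __sad(a, b, acc) in the integer L1Dist is the "sum of absolute difference"
// intrinsic, i.e. acc + |a - b|.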
///////////////////////////////////////////////////////////////////////////////
// reduceDescDiff
template <int BLOCK_DIM_X, typename Dist, typename T>
__device__ void reduceDescDiff(const T* queryDescs, const T* trainDescs, int desc_len, Dist& dist, typename Dist::ResultType* sdiff_row)
{
for (int i = threadIdx.x; i < desc_len; i += BLOCK_DIM_X)
{
T trainVal;
ForceGlob<T>::Load(trainDescs, i, trainVal);
dist.reduceIter(queryDescs[i], trainVal);
}
dist.reduceAll<BLOCK_DIM_X>(sdiff_row);
}
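// reduceDescDiff is the uncached inner loop: the BLOCK_DIM_X lanes of one row stride
// through the descriptor (lane x handles elements x, x + BLOCK_DIM_X, ...), each
// accumulating a private partial distance, and reduceAll then collapses the partials
// so that lane 0 ends up with the full distance. ForceGlob<T>::Load appears to be the
// project's thin wrapper around a global-memory load of the train element.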
///////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////// Match //////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// loadDescsVals
template <int BLOCK_DIM_X, int MAX_DESCRIPTORS_LEN, typename T, typename U>
__device__ void loadDescsVals(const T* descs, int desc_len, U* queryVals, U* smem)
{
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if (tid < desc_len)
{
smem[tid] = descs[tid];
}
__syncthreads();
#pragma unroll
for (int i = threadIdx.x; i < MAX_DESCRIPTORS_LEN; i += BLOCK_DIM_X)
{
*queryVals = smem[i];
++queryVals;
}
}
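// loadDescsVals stages one query descriptor through shared memory into registers: the
// whole block first copies descs[0..desc_len) into smem, then every lane keeps the
// elements threadIdx.x, threadIdx.x + BLOCK_DIM_X, ... in its private queryVals array.
// The cached distance path below reads those registers instead of re-fetching the
// query from global memory for every train descriptor.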
///////////////////////////////////////////////////////////////////////////////
// reduceDescDiffCached
template <int N> struct UnrollDescDiff
{
template <typename Dist, typename T>
static __device__ void calcCheck(const typename Dist::ValueType* queryVals, const T* trainDescs, int desc_len, Dist& dist, int ind)
{
if (ind < desc_len)
{
T trainVal;
ForceGlob<T>::Load(trainDescs, ind, trainVal);
dist.reduceIter(*queryVals, trainVal);
++queryVals;
UnrollDescDiff<N - 1>::calcCheck(queryVals, trainDescs, desc_len, dist, ind + blockDim.x);
}
}
template <typename Dist, typename T>
static __device__ void calcWithoutCheck(const typename Dist::ValueType* queryVals, const T* trainDescs, Dist& dist)
{
T trainVal;
ForceGlob<T>::Load(trainDescs, 0, trainVal);
dist.reduceIter(*queryVals, trainVal);
++queryVals;
trainDescs += blockDim.x;
UnrollDescDiff<N - 1>::calcWithoutCheck(queryVals, trainDescs, dist);
}
};
template <> struct UnrollDescDiff<0>
{
template <typename Dist, typename T>
static __device__ __forceinline__ void calcCheck(const typename Dist::ValueType* queryVals, const T* trainDescs, int desc_len,
Dist& dist, int ind)
{
}
template <typename Dist, typename T>
static __device__ __forceinline__ void calcWithoutCheck(const typename Dist::ValueType* queryVals, const T* trainDescs, Dist& dist)
{
}
};
template <int BLOCK_DIM_X, int MAX_DESCRIPTORS_LEN, bool WITH_OUT_CHECK> struct DescDiffCalculator;
template <int BLOCK_DIM_X, int MAX_DESCRIPTORS_LEN>
struct DescDiffCalculator<BLOCK_DIM_X, MAX_DESCRIPTORS_LEN, false>
{
template <typename Dist, typename T>
static __device__ __forceinline__ void calc(const typename Dist::ValueType* queryVals, const T* trainDescs, int desc_len, Dist& dist)
{
UnrollDescDiff<MAX_DESCRIPTORS_LEN / BLOCK_DIM_X>::calcCheck(queryVals, trainDescs, desc_len, dist, threadIdx.x);
}
};
template <int BLOCK_DIM_X, int MAX_DESCRIPTORS_LEN>
struct DescDiffCalculator<BLOCK_DIM_X, MAX_DESCRIPTORS_LEN, true>
{
template <typename Dist, typename T>
static __device__ __forceinline__ void calc(const typename Dist::ValueType* queryVals, const T* trainDescs, int desc_len, Dist& dist)
{
UnrollDescDiff<MAX_DESCRIPTORS_LEN / BLOCK_DIM_X>::calcWithoutCheck(queryVals, trainDescs + threadIdx.x, dist);
}
};
template <int BLOCK_DIM_X, int MAX_DESCRIPTORS_LEN, bool DESC_LEN_EQ_MAX_LEN, typename Dist, typename T>
__device__ __forceinline__ void reduceDescDiffCached(const typename Dist::ValueType* queryVals, const T* trainDescs, int desc_len, Dist& dist, typename Dist::ResultType* sdiff_row)
{
DescDiffCalculator<BLOCK_DIM_X, MAX_DESCRIPTORS_LEN, DESC_LEN_EQ_MAX_LEN>::calc(queryVals, trainDescs, desc_len, dist);
dist.reduceAll<BLOCK_DIM_X>(sdiff_row);
}
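// reduceDescDiffCached is the register-cached variant of reduceDescDiff: the query has
// already been staged into queryVals by loadDescsVals, and UnrollDescDiff<N> is a
// recursive template that emits MAX_DESCRIPTORS_LEN / BLOCK_DIM_X fully unrolled
// iterations. When DESC_LEN_EQ_MAX_LEN is true the unchecked recursion
// (calcWithoutCheck) is used, otherwise the bounds-checked calcCheck handles
// descriptors shorter than the template maximum.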
///////////////////////////////////////////////////////////////////////////////
// warpReduceMinIdxIdx
template <int BLOCK_DIM_Y> struct MinIdxIdxWarpReductor;
template <> struct MinIdxIdxWarpReductor<16>
{
template <typename T>
static __device__ void reduce(T& myMin, int& myBestTrainIdx, int& myBestImgIdx, volatile T* smin, volatile int* strainIdx, volatile int* simgIdx)
{
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if (tid < 8)
{
myMin = smin[tid];
myBestTrainIdx = strainIdx[tid];
myBestImgIdx = simgIdx[tid];
float reg = smin[tid + 8];
if (reg < myMin)
{
smin[tid] = myMin = reg;
strainIdx[tid] = myBestTrainIdx = strainIdx[tid + 8];
simgIdx[tid] = myBestImgIdx = simgIdx[tid + 8];
}
reg = smin[tid + 4];
if (reg < myMin)
{
smin[tid] = myMin = reg;
strainIdx[tid] = myBestTrainIdx = strainIdx[tid + 4];
simgIdx[tid] = myBestImgIdx = simgIdx[tid + 4];
}
reg = smin[tid + 2];
if (reg < myMin)
{
smin[tid] = myMin = reg;
strainIdx[tid] = myBestTrainIdx = strainIdx[tid + 2];
simgIdx[tid] = myBestImgIdx = simgIdx[tid + 2];
}
reg = smin[tid + 1];
if (reg < myMin)
{
smin[tid] = myMin = reg;
strainIdx[tid] = myBestTrainIdx = strainIdx[tid + 1];
simgIdx[tid] = myBestImgIdx = simgIdx[tid + 1];
}
}
}
};
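// MinIdxIdxWarpReductor<16> is the (min distance, train index, image index)
// counterpart of SumReductor: the first eight threads repeatedly compare against the
// entries 8, 4, 2 and 1 slots away and copy all three arrays together whenever a
// smaller distance is found, so smin[0], strainIdx[0] and simgIdx[0] end up describing
// the best of the 16 per-row candidates.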
///////////////////////////////////////////////////////////////////////////////
// findBestMatch
template <int BLOCK_DIM_Y, typename T>
__device__ void findBestMatch(T& myMin, int& myBestTrainIdx, int& myBestImgIdx, T* smin, int* strainIdx, int* simgIdx)
{
if (threadIdx.x == 0)
{
smin[threadIdx.y] = myMin;
strainIdx[threadIdx.y] = myBestTrainIdx;
simgIdx[threadIdx.y] = myBestImgIdx;
}
__syncthreads();
MinIdxIdxWarpReductor<BLOCK_DIM_Y>::reduce(myMin, myBestTrainIdx, myBestImgIdx, smin, strainIdx, simgIdx);
}
///////////////////////////////////////////////////////////////////////////////
// ReduceDescCalculator
template <int BLOCK_DIM_X, typename T> struct ReduceDescCalculatorSimple
{
__device__ __forceinline__ void prepare(const T* queryDescs_, int, void*)
{
queryDescs = queryDescs_;
}
template <typename Dist>
__device__ __forceinline__ void calc(const T* trainDescs, int desc_len, Dist& dist, typename Dist::ResultType* sdiff_row) const
{
reduceDescDiff<BLOCK_DIM_X>(queryDescs, trainDescs, desc_len, dist, sdiff_row);
}
const T* queryDescs;
};
template <int BLOCK_DIM_X, int MAX_DESCRIPTORS_LEN, bool DESC_LEN_EQ_MAX_LEN, typename T, typename U>
struct ReduceDescCalculatorCached
{
__device__ __forceinline__ void prepare(const T* queryDescs, int desc_len, U* smem)
{
loadDescsVals<BLOCK_DIM_X, MAX_DESCRIPTORS_LEN>(queryDescs, desc_len, queryVals, smem);
}
template <typename Dist>
__device__ __forceinline__ void calc(const T* trainDescs, int desc_len, Dist& dist, typename Dist::ResultType* sdiff_row) const
{
reduceDescDiffCached<BLOCK_DIM_X, MAX_DESCRIPTORS_LEN, DESC_LEN_EQ_MAX_LEN>(queryVals, trainDescs, desc_len, dist, sdiff_row);
}
U queryVals[MAX_DESCRIPTORS_LEN / BLOCK_DIM_X];
};
///////////////////////////////////////////////////////////////////////////////
// matchDescs loop
template <typename Dist, typename ReduceDescCalculator, typename T, typename Mask>
__device__ void matchDescs(int queryIdx, int imgIdx, const DevMem2D_<T>& trainDescs_,
const Mask& m, const ReduceDescCalculator& reduceDescCalc,
typename Dist::ResultType& myMin, int& myBestTrainIdx, int& myBestImgIdx, typename Dist::ResultType* sdiff_row)
{
for (int trainIdx = threadIdx.y; trainIdx < trainDescs_.rows; trainIdx += blockDim.y)
{
if (m(queryIdx, trainIdx))
{
const T* trainDescs = trainDescs_.ptr(trainIdx);
Dist dist;
reduceDescCalc.calc(trainDescs, trainDescs_.cols, dist, sdiff_row);
if (threadIdx.x == 0)
{
if (dist < myMin)
{
myMin = dist;
myBestTrainIdx = trainIdx;
myBestImgIdx = imgIdx;
}
}
}
}
}
///////////////////////////////////////////////////////////////////////////////
// Train collection loop strategy
template <typename T> struct SingleTrain
{
explicit SingleTrain(const DevMem2D_<T>& trainDescs_) : trainDescs(trainDescs_)
{
}
template <typename Dist, typename ReduceDescCalculator, typename Mask>
__device__ __forceinline__ void loop(int queryIdx, Mask& m, const ReduceDescCalculator& reduceDescCalc,
typename Dist::ResultType& myMin, int& myBestTrainIdx, int& myBestImgIdx, typename Dist::ResultType* sdiff_row) const
{
matchDescs<Dist>(queryIdx, 0, trainDescs, m, reduceDescCalc, myMin, myBestTrainIdx, myBestImgIdx, sdiff_row);
}
__device__ __forceinline__ int desc_len() const
{
return trainDescs.cols;
}
const DevMem2D_<T> trainDescs;
};
template <typename T> struct TrainCollection
{
TrainCollection(const DevMem2D_<T>* trainCollection_, int nImg_, int desclen_) :
trainCollection(trainCollection_), nImg(nImg_), desclen(desclen_)
{
}
template <typename Dist, typename ReduceDescCalculator, typename Mask>
__device__ void loop(int queryIdx, Mask& m, const ReduceDescCalculator& reduceDescCalc,
typename Dist::ResultType& myMin, int& myBestTrainIdx, int& myBestImgIdx, typename Dist::ResultType* sdiff_row) const
{
for (int imgIdx = 0; imgIdx < nImg; ++imgIdx)
{
const DevMem2D_<T> trainDescs = trainCollection[imgIdx];
m.nextMask();
matchDescs<Dist>(queryIdx, imgIdx, trainDescs, m, reduceDescCalc, myMin, myBestTrainIdx, myBestImgIdx, sdiff_row);
}
}
__device__ __forceinline__ int desc_len() const
{
return desclen;
}
const DevMem2D_<T>* trainCollection;
int nImg;
int desclen;
};
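// SingleTrain and TrainCollection are the two "train set" strategies consumed by the
// match kernel: SingleTrain runs matchDescs once over a single descriptor matrix with
// imgIdx fixed to 0, while TrainCollection walks an array of DevMem2D_<T> headers and
// advances the mask cursor with nextMask() before each image so that masks and images
// stay in step.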
///////////////////////////////////////////////////////////////////////////////
// Match kernel
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename ReduceDescCalculator, typename Dist, typename T, typename Train, typename Mask>
__global__ void match(const PtrStep_<T> queryDescs_, const Train train, const Mask mask, int* trainIdx, int* imgIdx, float* distance)
{
__shared__ typename Dist::ResultType smem[BLOCK_DIM_X * BLOCK_DIM_Y];
const int queryIdx = blockIdx.x;
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
typename Dist::ResultType myMin = numeric_limits<typename Dist::ResultType>::max();
{
typename Dist::ResultType* sdiff_row = smem + BLOCK_DIM_X * threadIdx.y;
Mask m = mask;
ReduceDescCalculator reduceDescCalc;
reduceDescCalc.prepare(queryDescs_.ptr(queryIdx), train.desc_len(), (typename Dist::ValueType*)smem);
train.template loop<Dist>(queryIdx, m, reduceDescCalc, myMin, myBestTrainIdx, myBestImgIdx, sdiff_row);
}
__syncthreads();
typename Dist::ResultType* smin = smem;
int* strainIdx = (int*)(smin + BLOCK_DIM_Y);
int* simgIdx = strainIdx + BLOCK_DIM_Y;
findBestMatch<BLOCK_DIM_Y>(myMin, myBestTrainIdx, myBestImgIdx, smin, strainIdx, simgIdx);
if (threadIdx.x == 0 && threadIdx.y == 0)
{
imgIdx[queryIdx] = myBestImgIdx;
trainIdx[queryIdx] = myBestTrainIdx;
distance[queryIdx] = myMin;
}
}
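// Layout of the one-nearest-neighbour kernel: one block per query descriptor,
// BLOCK_DIM_Y rows each scanning a strided subset of the train descriptors, and
// BLOCK_DIM_X lanes per row reducing a single distance. The shared buffer is reused
// three times: as scratch for the cached calculator's prepare(), as the per-row
// distance reduction rows, and (after the __syncthreads) as smin / strainIdx / simgIdx
// for the cross-row minimum search in findBestMatch.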
///////////////////////////////////////////////////////////////////////////////
// Match kernel callers
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename Dist, typename T, typename Train, typename Mask>
void matchSimple_caller(const DevMem2D_<T>& queryDescs, const Train& train,
const Mask& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, cudaStream_t stream)
{
StaticAssert<BLOCK_DIM_Y <= 64>::check(); // the BLOCK_DIM_Y partial results must be reducible within a single warp
dim3 grid(queryDescs.rows, 1, 1);
dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
match<BLOCK_DIM_X, BLOCK_DIM_Y, ReduceDescCalculatorSimple<BLOCK_DIM_X, T>, Dist, T>
<<<grid, threads, 0, stream>>>(queryDescs, train, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int MAX_DESCRIPTORS_LEN, bool DESC_LEN_EQ_MAX_LEN, typename Dist, typename T, typename Train, typename Mask>
void matchCached_caller(const DevMem2D_<T>& queryDescs, const Train& train,
const Mask& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, cudaStream_t stream)
{
StaticAssert<BLOCK_DIM_Y <= 64>::check(); // the BLOCK_DIM_Y partial results must be reducible within a single warp
StaticAssert<BLOCK_DIM_X * BLOCK_DIM_Y >= MAX_DESCRIPTORS_LEN>::check(); // the block must have at least MAX_DESCRIPTORS_LEN threads
StaticAssert<MAX_DESCRIPTORS_LEN % BLOCK_DIM_X == 0>::check(); // MAX_DESCRIPTORS_LEN must be a multiple of BLOCK_DIM_X
dim3 grid(queryDescs.rows, 1, 1);
dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
match<BLOCK_DIM_X, BLOCK_DIM_Y, ReduceDescCalculatorCached<BLOCK_DIM_X, MAX_DESCRIPTORS_LEN, DESC_LEN_EQ_MAX_LEN, T, typename Dist::ValueType>, Dist, T>
<<<grid, threads, 0, stream>>>(queryDescs, train, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match caller
template <typename Dist, typename T, typename Train, typename Mask>
void matchDispatcher(const DevMem2D_<T>& queryDescs, const Train& train,
const Mask& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
bool cc_12, cudaStream_t stream)
{
if (queryDescs.cols < 64)
matchCached_caller<16, 16, 64, false, Dist>(queryDescs, train, mask, trainIdx, imgIdx, distance, stream);
else if (queryDescs.cols == 64)
matchCached_caller<16, 16, 64, true, Dist>(queryDescs, train, mask, trainIdx, imgIdx, distance, stream);
else if (queryDescs.cols < 128)
matchCached_caller<16, 16, 128, false, Dist>(queryDescs, train, mask, trainIdx, imgIdx, distance, stream);
else if (queryDescs.cols == 128 && cc_12)
matchCached_caller<16, 16, 128, true, Dist>(queryDescs, train, mask, trainIdx, imgIdx, distance, stream);
else if (queryDescs.cols < 256 && cc_12)
matchCached_caller<16, 16, 256, false, Dist>(queryDescs, train, mask, trainIdx, imgIdx, distance, stream);
else if (queryDescs.cols == 256 && cc_12)
matchCached_caller<16, 16, 256, true, Dist>(queryDescs, train, mask, trainIdx, imgIdx, distance, stream);
else
matchSimple_caller<16, 16, Dist>(queryDescs, train, mask, trainIdx, imgIdx, distance, stream);
}
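// matchDispatcher picks a kernel flavour from the descriptor width: widths below 128
// always use the register-cached kernel (with an exact-length fast path at 64), widths
// of exactly 128 and up to 256 use it only when cc_12 is set (which, judging by the
// name, flags a device of compute capability >= 1.2 and hence more resources per
// block), and everything else falls back to matchSimple_caller. A rough host-side
// usage sketch (illustrative only; buffer shapes are inferred from the kernel writing
// one trainIdx/imgIdx/distance entry per queryIdx):
//
//   matchSingleL1_gpu<uchar>(queryDescs, trainDescs, /*mask=*/DevMem2D(),
//                            trainIdx, imgIdx, distance, /*cc_12=*/true, /*stream=*/0);
//
// where trainIdx / imgIdx hold one int per query row, distance one float per query
// row, and an empty mask (data == 0) selects the WithOutMask path.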
template <typename T>
void matchSingleL1_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs,
const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
bool cc_12, cudaStream_t stream)
{
SingleTrain<T> train((DevMem2D_<T>)trainDescs);
if (mask.data)
{
SingleMask m(mask);
matchDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, train, m, trainIdx, imgIdx, distance, cc_12, stream);
}
else
{
matchDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, train, WithOutMask(), trainIdx, imgIdx, distance, cc_12, stream);
}
}
template void matchSingleL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchSingleL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchSingleL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchSingleL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchSingleL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchSingleL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template <typename T>
void matchSingleL2_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs,
const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
bool cc_12, cudaStream_t stream)
{
SingleTrain<T> train((DevMem2D_<T>)trainDescs);
if (mask.data)
{
SingleMask m(mask);
matchDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, train, m, trainIdx, imgIdx, distance, cc_12, stream);
}
else
{
matchDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, train, WithOutMask(), trainIdx, imgIdx, distance, cc_12, stream);
}
}
template void matchSingleL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchSingleL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchSingleL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchSingleL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchSingleL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchSingleL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template <typename T>
void matchSingleHamming_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs,
const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
bool cc_12, cudaStream_t stream)
{
SingleTrain<T> train((DevMem2D_<T>)trainDescs);
if (mask.data)
{
SingleMask m(mask);
matchDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, train, m, trainIdx, imgIdx, distance, cc_12, stream);
}
else
{
matchDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, train, WithOutMask(), trainIdx, imgIdx, distance, cc_12, stream);
}
}
template void matchSingleHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchSingleHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchSingleHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchSingleHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchSingleHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template <typename T>
void matchCollectionL1_gpu(const DevMem2D& queryDescs, const DevMem2D& trainCollection,
const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx,
const DevMem2Df& distance, bool cc_12, cudaStream_t stream)
{
TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, queryDescs.cols);
if (maskCollection.data)
{
MaskCollection mask(maskCollection.data);
matchDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, train, mask, trainIdx, imgIdx, distance, cc_12, stream);
}
else
{
matchDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, train, WithOutMask(), trainIdx, imgIdx, distance, cc_12, stream);
}
}
template void matchCollectionL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchCollectionL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchCollectionL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchCollectionL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchCollectionL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchCollectionL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template <typename T>
void matchCollectionL2_gpu(const DevMem2D& queryDescs, const DevMem2D& trainCollection,
const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx,
const DevMem2Df& distance, bool cc_12, cudaStream_t stream)
{
TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, queryDescs.cols);
if (maskCollection.data)
{
MaskCollection mask(maskCollection.data);
matchDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, train, mask, trainIdx, imgIdx, distance, cc_12, stream);
}
else
{
matchDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, train, WithOutMask(), trainIdx, imgIdx, distance, cc_12, stream);
}
}
template void matchCollectionL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchCollectionL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchCollectionL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchCollectionL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchCollectionL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchCollectionL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template <typename T>
void matchCollectionHamming_gpu(const DevMem2D& queryDescs, const DevMem2D& trainCollection,
const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx,
const DevMem2Df& distance, bool cc_12, cudaStream_t stream)
{
TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, queryDescs.cols);
if (maskCollection.data)
{
MaskCollection mask(maskCollection.data);
matchDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, train, mask, trainIdx, imgIdx, distance, cc_12, stream);
}
else
{
matchDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, train, WithOutMask(), trainIdx, imgIdx, distance, cc_12, stream);
}
}
template void matchCollectionHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchCollectionHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchCollectionHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchCollectionHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
template void matchCollectionHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainCollection, const DevMem2D_<PtrStep>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, bool cc_12, cudaStream_t stream);
///////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////// Knn Match ////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// Calc distance kernel
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename Dist, typename T, typename Mask>
__global__ void calcDistance(const PtrStep_<T> queryDescs_, const DevMem2D_<T> trainDescs_, const Mask mask, PtrStepf distance)
{
__shared__ typename Dist::ResultType sdiff[BLOCK_DIM_X * BLOCK_DIM_Y];
typename Dist::ResultType* sdiff_row = sdiff + BLOCK_DIM_X * threadIdx.y;
const int queryIdx = blockIdx.x;
const T* queryDescs = queryDescs_.ptr(queryIdx);
const int trainIdx = blockIdx.y * BLOCK_DIM_Y + threadIdx.y;
if (trainIdx < trainDescs_.rows)
{
const T* trainDescs = trainDescs_.ptr(trainIdx);
typename Dist::ResultType myDist = numeric_limits<typename Dist::ResultType>::max();
if (mask(queryIdx, trainIdx))
{
Dist dist;
reduceDescDiff<BLOCK_DIM_X>(queryDescs, trainDescs, trainDescs_.cols, dist, sdiff_row);
if (threadIdx.x == 0)
myDist = dist;
}
if (threadIdx.x == 0)
distance.ptr(queryIdx)[trainIdx] = myDist;
}
}
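// calcDistance fills one row of the query-by-train distance matrix per query: every
// pair that survives the mask gets its reduced distance, while masked-out pairs are
// written as numeric_limits<ResultType>::max(), which is what lets the later k-NN
// selection treat "max" as "no candidate". The grid shape is the same as in the
// radius-match kernel further down: blockIdx.x is the query, blockIdx.y a tile of
// BLOCK_DIM_Y train rows.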
///////////////////////////////////////////////////////////////////////////////
// Calc distance kernel caller
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename Dist, typename T, typename Mask>
void calcDistance_caller(const DevMem2D_<T>& queryDescs, const DevMem2D_<T>& trainDescs,
const Mask& mask, const DevMem2Df& distance, cudaStream_t stream)
{
dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
dim3 grid(queryDescs.rows, divUp(trainDescs.rows, BLOCK_DIM_Y), 1);
calcDistance<BLOCK_DIM_X, BLOCK_DIM_Y, Dist, T><<<grid, threads, 0, stream>>>(
queryDescs, trainDescs, mask, distance);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// warpReduceMinIdx
template <int BLOCK_SIZE, typename T>
__device__ void warpReduceMinIdx(volatile T* sdist, volatile int* strainIdx, T& myMin, int tid)
{
if (tid < 32)
{
if (BLOCK_SIZE >= 64)
{
T reg = sdist[tid + 32];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 32];
}
}
if (BLOCK_SIZE >= 32)
{
T reg = sdist[tid + 16];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 16];
}
}
if (BLOCK_SIZE >= 16)
{
T reg = sdist[tid + 8];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 8];
}
}
if (BLOCK_SIZE >= 8)
{
T reg = sdist[tid + 4];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 4];
}
}
if (BLOCK_SIZE >= 4)
{
T reg = sdist[tid + 2];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 2];
}
}
if (BLOCK_SIZE >= 2)
{
T reg = sdist[tid + 1];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 1];
}
}
}
}
template <int BLOCK_SIZE, typename T>
__device__ void reduceMinIdx(const T* dist, int n, T* sdist, int* strainIdx)
{
const int tid = threadIdx.x;
T myMin = numeric_limits<T>::max();
int myMinIdx = -1;
for (int i = tid; i < n; i += BLOCK_SIZE)
{
T reg = dist[i];
if (reg < myMin)
{
myMin = reg;
myMinIdx = i;
}
}
sdist[tid] = myMin;
strainIdx[tid] = myMinIdx;
__syncthreads();
if (BLOCK_SIZE >= 512 && tid < 256)
{
T reg = sdist[tid + 256];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 256];
}
__syncthreads();
}
if (BLOCK_SIZE >= 256 && tid < 128)
{
T reg = sdist[tid + 128];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 128];
}
__syncthreads();
}
if (BLOCK_SIZE >= 128 && tid < 64)
{
T reg = sdist[tid + 64];
if (reg < myMin)
{
sdist[tid] = myMin = reg;
strainIdx[tid] = strainIdx[tid + 64];
}
__syncthreads();
}
warpReduceMinIdx<BLOCK_SIZE>(sdist, strainIdx, myMin, tid);
}
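// reduceMinIdx is a block-wide argmin: each thread strides through the row collecting
// a private (min, index) pair, the pairs go to shared memory, and the tree reduction
// halves the active range down to 64 entries with __syncthreads() between steps, after
// which warpReduceMinIdx finishes the last six comparisons warp-synchronously.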
///////////////////////////////////////////////////////////////////////////////
// find knn match kernel
template <int BLOCK_SIZE> __global__ void findBestMatch(DevMem2Df allDist_, int i, PtrStepi trainIdx_, PtrStepf distance_)
{
const int SMEM_SIZE = BLOCK_SIZE > 64 ? BLOCK_SIZE : 64;
__shared__ float sdist[SMEM_SIZE];
__shared__ int strainIdx[SMEM_SIZE];
const int queryIdx = blockIdx.x;
float* allDist = allDist_.ptr(queryIdx);
int* trainIdx = trainIdx_.ptr(queryIdx);
float* distance = distance_.ptr(queryIdx);
reduceMinIdx<BLOCK_SIZE>(allDist, allDist_.cols, sdist, strainIdx);
if (threadIdx.x == 0)
{
float dist = sdist[0];
if (dist < numeric_limits<float>::max())
{
int bestIdx = strainIdx[0];
allDist[bestIdx] = numeric_limits<float>::max();
trainIdx[i] = bestIdx;
distance[i] = dist;
}
}
}
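// The k nearest neighbours are extracted by repeated selection: each launch of this
// kernel finds the current minimum of a query's allDist row, records it as the i-th
// match, and overwrites that slot with numeric_limits<float>::max() so the next launch
// finds the next-best candidate. findKnnMatch_caller below simply launches the kernel
// knn times in sequence on the same stream.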
///////////////////////////////////////////////////////////////////////////////
// find knn match kernel caller
template <int BLOCK_SIZE>
void findKnnMatch_caller(int knn, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream)
{
dim3 threads(BLOCK_SIZE, 1, 1);
dim3 grid(trainIdx.rows, 1, 1);
for (int i = 0; i < knn; ++i)
{
findBestMatch<BLOCK_SIZE><<<grid, threads, 0, stream>>>(allDist, i, trainIdx, distance);
cudaSafeCall( cudaGetLastError() );
}
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// knn match caller
template <typename Dist, typename T, typename Mask>
void calcDistanceDispatcher(const DevMem2D_<T>& queryDescs, const DevMem2D_<T>& trainDescs, const Mask& mask, const DevMem2Df& allDist, cudaStream_t stream)
{
calcDistance_caller<16, 16, Dist>(queryDescs, trainDescs, mask, allDist, stream);
}
void findKnnMatchDispatcher(int knn, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream)
{
findKnnMatch_caller<256>(knn, trainIdx, distance, allDist, stream);
}
template <typename T>
void knnMatchL1_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn,
const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream)
{
if (mask.data)
{
calcDistanceDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs, SingleMask(mask), allDist, stream);
}
else
{
calcDistanceDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs, WithOutMask(), allDist, stream);
}
findKnnMatchDispatcher(knn, trainIdx, distance, allDist, stream);
}
template void knnMatchL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template void knnMatchL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template void knnMatchL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template void knnMatchL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template void knnMatchL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template void knnMatchL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template <typename T>
void knnMatchL2_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn,
const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream)
{
if (mask.data)
{
calcDistanceDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
SingleMask(mask), allDist, stream);
}
else
{
calcDistanceDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
WithOutMask(), allDist, stream);
}
findKnnMatchDispatcher(knn, trainIdx, distance, allDist, stream);
}
template void knnMatchL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template void knnMatchL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template void knnMatchL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template void knnMatchL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template void knnMatchL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template void knnMatchL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template <typename T>
void knnMatchHamming_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn,
const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream)
{
if (mask.data)
{
calcDistanceDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
SingleMask(mask), allDist, stream);
}
else
{
calcDistanceDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
WithOutMask(), allDist, stream);
}
findKnnMatchDispatcher(knn, trainIdx, distance, allDist, stream);
}
template void knnMatchHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template void knnMatchHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template void knnMatchHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template void knnMatchHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
template void knnMatchHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, int knn, const DevMem2D& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream);
///////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////// Radius Match //////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// Radius Match kernel
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename Dist, typename T, typename Mask>
__global__ void radiusMatch(const PtrStep_<T> queryDescs_, const DevMem2D_<T> trainDescs_,
float maxDistance, const Mask mask, DevMem2Di trainIdx_, unsigned int* nMatches, PtrStepf distance)
{
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ typename Dist::ResultType smem[BLOCK_DIM_X * BLOCK_DIM_Y];
typename Dist::ResultType* sdiff_row = smem + BLOCK_DIM_X * threadIdx.y;
const int queryIdx = blockIdx.x;
const T* queryDescs = queryDescs_.ptr(queryIdx);
const int trainIdx = blockIdx.y * BLOCK_DIM_Y + threadIdx.y;
if (trainIdx < trainDescs_.rows)
{
const T* trainDescs = trainDescs_.ptr(trainIdx);
if (mask(queryIdx, trainIdx))
{
Dist dist;
reduceDescDiff<BLOCK_DIM_X>(queryDescs, trainDescs, trainDescs_.cols, dist, sdiff_row);
if (threadIdx.x == 0)
{
if (dist < maxDistance)
{
unsigned int i = atomicInc(nMatches + queryIdx, (unsigned int) -1);
if (i < trainIdx_.cols)
{
distance.ptr(queryIdx)[i] = dist;
trainIdx_.ptr(queryIdx)[i] = trainIdx;
}
}
}
}
}
#endif
}
///////////////////////////////////////////////////////////////////////////////
// Radius Match kernel caller
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, typename Dist, typename T, typename Mask>
void radiusMatch_caller(const DevMem2D_<T>& queryDescs, const DevMem2D_<T>& trainDescs,
float maxDistance, const Mask& mask, const DevMem2Di& trainIdx, unsigned int* nMatches,
const DevMem2Df& distance, cudaStream_t stream)
{
dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
dim3 grid(queryDescs.rows, divUp(trainDescs.rows, BLOCK_DIM_Y), 1);
radiusMatch<BLOCK_DIM_X, BLOCK_DIM_Y, Dist, T><<<grid, threads, 0, stream>>>(
queryDescs, trainDescs, maxDistance, mask, trainIdx, nMatches, distance);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Radius Match caller
template <typename Dist, typename T, typename Mask>
void radiusMatchDispatcher(const DevMem2D_<T>& queryDescs, const DevMem2D_<T>& trainDescs,
float maxDistance, const Mask& mask, const DevMem2Di& trainIdx, unsigned int* nMatches,
const DevMem2Df& distance, cudaStream_t stream)
{
radiusMatch_caller<16, 16, Dist>(queryDescs, trainDescs, maxDistance, mask,
trainIdx, nMatches, distance, stream);
}
template <typename T>
void radiusMatchL1_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance,
const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream)
{
if (mask.data)
{
radiusMatchDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
maxDistance, SingleMask(mask), trainIdx, nMatches, distance, stream);
}
else
{
radiusMatchDispatcher< L1Dist<T> >((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
maxDistance, WithOutMask(), trainIdx, nMatches, distance, stream);
}
}
template void radiusMatchL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template void radiusMatchL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template void radiusMatchL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template void radiusMatchL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template void radiusMatchL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template void radiusMatchL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template <typename T>
void radiusMatchL2_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance,
const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream)
{
if (mask.data)
{
radiusMatchDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
maxDistance, SingleMask(mask), trainIdx, nMatches, distance, stream);
}
else
{
radiusMatchDispatcher<L2Dist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
maxDistance, WithOutMask(), trainIdx, nMatches, distance, stream);
}
}
template void radiusMatchL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template void radiusMatchL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template void radiusMatchL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template void radiusMatchL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template void radiusMatchL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template void radiusMatchL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template <typename T>
void radiusMatchHamming_gpu(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance,
const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream)
{
if (mask.data)
{
radiusMatchDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
maxDistance, SingleMask(mask), trainIdx, nMatches, distance, stream);
}
else
{
radiusMatchDispatcher<HammingDist>((DevMem2D_<T>)queryDescs, (DevMem2D_<T>)trainDescs,
maxDistance, WithOutMask(), trainIdx, nMatches, distance, stream);
}
}
template void radiusMatchHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template void radiusMatchHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template void radiusMatchHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template void radiusMatchHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
template void radiusMatchHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2Di& trainIdx, unsigned int* nMatches, const DevMem2Df& distance, cudaStream_t stream);
}}}
|
83805b5934bcc9b05453151c1322b8527bf6baeb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHDeviceUtils.cuh>
#include <vector>
#include <iostream>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
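// threadsPerBlock is 64: one bit per box in each unsigned long long word of the overlap mask.
// devIoU: intersection-over-union of two boxes given as (x1, y1, x2, y2); the +1 terms treat
// the coordinates as inclusive pixel indices.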
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
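  // Each block compares a tile of up to 64 "row" boxes against a tile of up to 64 "column"
  // boxes: the column tile is staged in shared memory, and every thread owns one row box and
  // writes one 64-bit word whose bit i is set when column box i overlaps it above the threshold.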
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// boxes is a N x 5 tensor
at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {
using scalar_t = float;
AT_ASSERTM(boxes.is_cuda(), "boxes must be a CUDA tensor");
auto scores = boxes.select(1, 4);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data_ptr<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data_ptr<int64_t>();
int num_to_keep = 0;
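  // Greedy suppression on the host: boxes are visited in descending score order; a box is
  // kept only if no previously kept box has flagged it in remv, and its own mask row then
  // marks every box it suppresses.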
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
|
83805b5934bcc9b05453151c1322b8527bf6baeb.cu
|
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>
#include <vector>
#include <iostream>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// boxes is a N x 5 tensor
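// columns are (x1, y1, x2, y2, score); the boxes are sorted by descending score before the kernel runs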
at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {
using scalar_t = float;
AT_ASSERTM(boxes.is_cuda(), "boxes must be a CUDA tensor");
auto scores = boxes.select(1, 4);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data_ptr<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data_ptr<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
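// A minimal call sketch (illustrative only, not part of the original source; the box values
// and the 0.7 threshold are assumptions):
//   at::Tensor boxes = ...;                    // N x 5 float CUDA tensor: (x1, y1, x2, y2, score)
//   at::Tensor keep  = nms_cuda(boxes, 0.7f);  // long tensor of indices (in the original order) of kept boxes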
|