hip_filename (string, length 5-84) | hip_content (string, length 79-9.69M) | cuda_filename (string, length 4-83) | cuda_content (string, length 19-9.69M)
---|---|---|---|
85a6a66f1232e242956965f26e967fd2ba959e78.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void total(float *input, float *output, int len){
__shared__ float partialSum[2*BLOCK_SIZE];
unsigned int t=threadIdx.x,start=2*blockIdx.x*BLOCK_SIZE;
if(start+t<len) partialSum[t] = input[start+t];
else partialSum[t]=0;
__syncthreads();
if(start+BLOCK_SIZE+t<len)partialSum[BLOCK_SIZE+t]=input[start+BLOCK_SIZE+t];
else partialSum[BLOCK_SIZE+t]=0;
__syncthreads();
for(unsigned int stride=BLOCK_SIZE;stride>=1; stride>>=1){
__syncthreads();
if (t<stride) partialSum[t]+=partialSum[t+stride];
__syncthreads();
}
if(t==0) output[blockIdx.x]=partialSum[0];
} | 85a6a66f1232e242956965f26e967fd2ba959e78.cu | #include "includes.h"
__global__ void total(float *input, float *output, int len){
__shared__ float partialSum[2*BLOCK_SIZE];
unsigned int t=threadIdx.x,start=2*blockIdx.x*BLOCK_SIZE;
if(start+t<len) partialSum[t] = input[start+t];
else partialSum[t]=0;
__syncthreads();
if(start+BLOCK_SIZE+t<len)partialSum[BLOCK_SIZE+t]=input[start+BLOCK_SIZE+t];
else partialSum[BLOCK_SIZE+t]=0;
__syncthreads();
for(unsigned int stride=BLOCK_SIZE;stride>=1; stride>>=1){
__syncthreads();
if (t<stride) partialSum[t]+=partialSum[t+stride];
__syncthreads();
}
if(t==0) output[blockIdx.x]=partialSum[0];
} |
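The kernel in the row above is a standard two-elements-per-thread tree reduction: each block collapses 2*BLOCK_SIZE inputs into one partial sum in output[blockIdx.x]. A minimal host-side launch might look like the sketch below; the BLOCK_SIZE value, the pointer names, and the final summation step are assumptions, since the stored file defines none of them.
#define BLOCK_SIZE 256 // assumed: the stored file expects BLOCK_SIZE from a header or build flag
// Minimal host-side sketch: one block reduces 2*BLOCK_SIZE elements into a single partial sum.
void reduce_partial(float *d_in, float *d_out, int len) {
    int blocks = (len + 2 * BLOCK_SIZE - 1) / (2 * BLOCK_SIZE); // ceil(len / (2*BLOCK_SIZE))
    total<<<blocks, BLOCK_SIZE>>>(d_in, d_out, len);            // d_out holds one partial sum per block
    // The per-block partial sums still need a final pass (a second launch or a host-side sum).
}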
45977b329f8cb3d1b4c1923c2354a0701510dd65.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdafx.h" //
#include "CUDAdll.cuh" //
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
// Initialize CUDA
extern int count = 0;
bool InitCUDA(void)// CUDA initialization routine
{
printf("Start to detecte devices.........\n");// show the number of detected devices
hipGetDeviceCount(&count);// count devices with compute capability >= 1.0
if (count == 0){
fprintf(stderr, "There is no device.\n");
return false;
}
printf("%d device/s detected.\n", count);//
int i;
for (i = 0; i < count; i++){//CUDA
hipDeviceProp_t prop;
if (hipGetDeviceProperties(&prop, i) == hipSuccess) {//
if (prop.major >= 1)//1
{
printf("Device %d: %s supports CUDA %d.%d.\n", i + 1, prop.name, prop.major, prop.minor);//CUDA
break;
}
}
}
if (i == count) {// no device supports CUDA 1.x
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
hipSetDevice(i);// set this device as the calling thread's current device
return true;
}
void showHelloCuda(void)// test the CUDA initialization routine
{
if (!InitCUDA()) // initialization failed
{
printf("Sorry,CUDA has not been initialized.\n");
return;
}
printf("Hello GPU! CUDA has been initialized.\n");
}
void Allocate_Memory(int n)
{
size_t size = n*sizeof(float);
h_Z = (float*)malloc(size);
hipError_t error = hipMalloc((void**)&d_Z, size);
printf("Allocate mem : %s\n", hipGetErrorString(error));
}
void Free_Memory()
{
if (h_Z)
free(h_Z);
hipError_t error = hipFree(d_Z);
printf("Free mem : %s\n", hipGetErrorString(error));
}
void CopyMemToDevice(float *data,int n)
{
for (int i = 0; i < n; i++)
{
h_Z[i] = data[i];
}
size_t size = n*sizeof(float);
hipError_t error = hipMemcpy(d_Z, h_Z, size, hipMemcpyHostToDevice);
printf("Memcpy Host to Device : %s\n", hipGetErrorString(error));
}
void CopyMemToHost(float *data,int n)
{
hipError_t error = hipMemcpy(h_Z, d_Z, n*sizeof(float), hipMemcpyDeviceToHost);
printf("Memcpy Device to Host : %s\n", hipGetErrorString(error));
for (int i = 0; i < n; i++)
{
data[i] = h_Z[i];
}
}
void Call_cuda_CalZ( float toolx, float tooly, float toolz, float toolr, float dx, float dy, int max_ix, int max_iy, int n)
{
int ix = toolx / dx - toolr / dx;
int iy = tooly / dy - toolr / dy;
if (ix < 0)
ix = 0;
if (iy < 0)
iy = 0;
int xcount = ix + 2 * toolr / dx;
int ycount = iy + 2 * toolr / dy;
if (xcount > max_ix)
xcount = max_ix;
if (ycount > max_iy)
ycount = max_iy;
int num = xcount*ycount;
int TPB = 256;
int BPG = (num + TPB - 1) / TPB;
int init_index = iy*max_iy + ix;
printf("cuda node num = %d\n", num);
Cal_Z << <BPG, TPB >> >(d_Z, toolx, tooly, toolz, toolr, dx, dy, max_ix, max_iy, init_index, num);
}
__global__ void Cal_Z(float*Z_data, float toolx, float tooly,float toolz, float toolr, float dx, float dy, int max_ix, int max_iy,int init_index,int n)
{
int I = blockDim.x*blockIdx.x + threadIdx.x + init_index;
int i, j;
if (I < n)
{
i = I % max_ix;
j = I / max_ix;
if (pow((i*dx - toolx), 2) + pow((j*dy - tooly), 2) <= pow(toolr, 2)&& Z_data[i*max_iy + j] >= toolz)
{
float z_ball = -pow(pow(toolr, 2) - pow((i*dx - toolx), 2) - pow((j*dy - tooly), 2), float(0.5)) + toolr + toolz;
if (Z_data[i*max_iy + j] > z_ball&&z_ball >= 0){
Z_data[i*max_iy + j] = z_ball;
}
if (z_ball < 0){
Z_data[i*max_iy + j] = 0;
}
}
}
} | 45977b329f8cb3d1b4c1923c2354a0701510dd65.cu |
#include "stdafx.h" //引入预编译头文件
#include "CUDAdll.cuh" //引入导出函数声明头文件
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
// Initialize CUDA
extern int count = 0;
bool InitCUDA(void)// CUDA initialization routine
{
printf("Start to detecte devices.........\n");// show the number of detected devices
cudaGetDeviceCount(&count);// count devices with compute capability >= 1.0
if (count == 0){
fprintf(stderr, "There is no device.\n");
return false;
}
printf("%d device/s detected.\n", count);//显示检测到的设备数
int i;
for (i = 0; i < count; i++){//依次验证检测到的设备是否支持CUDA
cudaDeviceProp prop;
if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {//获得设备属性并验证是否正确
if (prop.major >= 1)//验证主计算能力,即计算能力的第一位数是否大于1
{
printf("Device %d: %s supports CUDA %d.%d.\n", i + 1, prop.name, prop.major, prop.minor);//显示检测到的设备支持的CUDA版本
break;
}
}
}
if (i == count) {// no device supports CUDA 1.x
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
cudaSetDevice(i);// set this device as the calling thread's current device
return true;
}
void showHelloCuda(void)// test the CUDA initialization routine
{
if (!InitCUDA()) // initialization failed
{
printf("Sorry,CUDA has not been initialized.\n");
return;
}
printf("Hello GPU! CUDA has been initialized.\n");
}
void Allocate_Memory(int n)
{
size_t size = n*sizeof(float);
h_Z = (float*)malloc(size);
cudaError_t error = cudaMalloc((void**)&d_Z, size);
printf("Allocate mem : %s\n", cudaGetErrorString(error));
}
void Free_Memory()
{
if (h_Z)
free(h_Z);
cudaError_t error = cudaFree(d_Z);
printf("Free mem : %s\n", cudaGetErrorString(error));
}
void CopyMemToDevice(float *data,int n)
{
for (int i = 0; i < n; i++)
{
h_Z[i] = data[i];
}
size_t size = n*sizeof(float);
cudaError_t error = cudaMemcpy(d_Z, h_Z, size, cudaMemcpyHostToDevice);
printf("Memcpy Host to Device : %s\n", cudaGetErrorString(error));
}
void CopyMemToHost(float *data,int n)
{
cudaError_t error = cudaMemcpy(h_Z, d_Z, n*sizeof(float), cudaMemcpyDeviceToHost);
printf("Memcpy Device to Host : %s\n", cudaGetErrorString(error));
for (int i = 0; i < n; i++)
{
data[i] = h_Z[i];
}
}
void Call_cuda_CalZ( float toolx, float tooly, float toolz, float toolr, float dx, float dy, int max_ix, int max_iy, int n)
{
int ix = toolx / dx - toolr / dx;
int iy = tooly / dy - toolr / dy;
if (ix < 0)
ix = 0;
if (iy < 0)
iy = 0;
int xcount = ix + 2 * toolr / dx;
int ycount = iy + 2 * toolr / dy;
if (xcount > max_ix)
xcount = max_ix;
if (ycount > max_iy)
ycount = max_iy;
int num = xcount*ycount;
int TPB = 256;
int BPG = (num + TPB - 1) / TPB;
int init_index = iy*max_iy + ix;
printf("cuda node num = %d\n", num);
Cal_Z << <BPG, TPB >> >(d_Z, toolx, tooly, toolz, toolr, dx, dy, max_ix, max_iy, init_index, num);
}
__global__ void Cal_Z(float*Z_data, float toolx, float tooly,float toolz, float toolr, float dx, float dy, int max_ix, int max_iy,int init_index,int n)
{
int I = blockDim.x*blockIdx.x + threadIdx.x + init_index;
int i, j;
if (I < n)
{
i = I % max_ix;
j = I / max_ix;
if (pow((i*dx - toolx), 2) + pow((j*dy - tooly), 2) <= pow(toolr, 2)&& Z_data[i*max_iy + j] >= toolz)
{
float z_ball = -pow(pow(toolr, 2) - pow((i*dx - toolx), 2) - pow((j*dy - tooly), 2), float(0.5)) + toolr + toolz;
if (Z_data[i*max_iy + j] > z_ball&&z_ball >= 0){
Z_data[i*max_iy + j] = z_ball;
}
if (z_ball < 0){
Z_data[i*max_iy + j] = 0;
}
}
}
} |
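The pair above wraps a small exported API (InitCUDA, Allocate_Memory, CopyMemToDevice, Call_cuda_CalZ, CopyMemToHost, Free_Memory) around the Cal_Z kernel, which appears to lower a height map wherever a ball-end tool of radius toolr dips below it. A plausible call order from the host is sketched below; the driver function name, the array size, and every tool/grid value are placeholder assumptions, not values taken from the file.
// Hypothetical driver showing one plausible call order for the exported functions above.
void run_toolpath_step(float *height_map, int n) {
    if (!InitCUDA()) return;            // pick a CUDA-capable device
    Allocate_Memory(n);                 // allocates h_Z on the host and d_Z on the device
    CopyMemToDevice(height_map, n);     // stage the current height map in d_Z
    // Placeholder tool and grid parameters; real values would come from the calling application.
    Call_cuda_CalZ(/*toolx*/1.0f, /*tooly*/1.0f, /*toolz*/0.5f, /*toolr*/0.25f,
                   /*dx*/0.01f, /*dy*/0.01f, /*max_ix*/1000, /*max_iy*/1000, n);
    CopyMemToHost(height_map, n);       // read the updated heights back
    Free_Memory();                      // release h_Z and d_Z
}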
2ec860bfc1d299f44d7319b12c791ecdddca603f.hip | // !!! This is a file automatically generated by hipify!!!
#include <unistd.h>
#include <string.h>
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h>
#include "elastic_kernel.h"
#include "BS/BS.h"
#include "VA/VA.h"
#include "MM/MM.h"
//#include "RSC/RSC.h"
#include "SPMV/SPMV.h"
#include "PF/PF.h"
#include "Reduction/reduction.h"
#include "FDTD3d/FDTD3dGPU.h"
//#include "Dummy/Dummy.h"
#include "CONV/CONV.h"
#include "CEDD/CEDD.h"
#include "HST/HST256.h"
//#define DATA_SET_1
int create_stubinfo(t_kernel_stub **stub, int deviceId, t_Kernel id, hipStream_t *transfer_s, hipStream_t *preemp_s)
{
hipError_t err;
t_kernel_stub *k_stub = (t_kernel_stub *)calloc(1, sizeof(t_kernel_stub)); // Create kernel stub
k_stub->deviceId = deviceId;
k_stub->id = id;
k_stub->kernel_finished = 0;
k_stub->HtD_tranfers_finished = 0;
k_stub->DtH_tranfers_finished = 0;
// Streams
hipStream_t *kernel_s, *m_transfer_s;
kernel_s = (hipStream_t *)malloc(sizeof(hipStream_t));
err = hipStreamCreate(kernel_s);
checkCudaErrors(err);
m_transfer_s = (hipStream_t *)malloc(2*sizeof(hipStream_t));
err = hipStreamCreate(&m_transfer_s[0]);
err = hipStreamCreate(&m_transfer_s[1]);
checkCudaErrors(err);
k_stub->execution_s = kernel_s;
k_stub->transfer_s = m_transfer_s;
k_stub->preemp_s = preemp_s;
/** Get device name*/
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, deviceId);
char *device_name = deviceProp.name;
// Updating kernel info
switch (id) {
case BS:
t_BS_params *BS_params;
BS_params = (t_BS_params *)calloc(1, sizeof(t_BS_params));
k_stub->params = (void *)BS_params;
k_stub->launchCKEkernel = launch_preemp_BS;
k_stub->launchORIkernel = launch_orig_BS;
k_stub->startKernel = BS_start_kernel_dummy;
k_stub->startMallocs = BS_start_mallocs;
k_stub->startTransfers = BS_start_transfers;
k_stub->endKernel = BS_end_kernel_dummy;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 128,1,1;
k_stub->kconf.gridsize.x = 50 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 40;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 256;
k_stub->kconf.gridsize.x = 25 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 40;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
#ifdef DATA_SET_1
k_stub->kconf.gridsize.x = 25 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
#else
k_stub->kconf.gridsize.x = 50 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
#endif
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 40;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case VA:
t_VA_params *VA_params;
VA_params = (t_VA_params *)calloc(1, sizeof(t_VA_params));
k_stub->params = (void *)VA_params;
k_stub->launchCKEkernel = launch_preemp_VA;
k_stub->launchORIkernel = launch_orig_VA;
k_stub->startKernel = VA_start_kernel_dummy;
k_stub->startMallocs = VA_start_mallocs;
k_stub->startTransfers = VA_start_transfers;
k_stub->endKernel = VA_end_kernel_dummy;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 128;
k_stub->kconf.gridsize.x = 50 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 40;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 256;
k_stub->kconf.gridsize.x = 50 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 40;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
#ifdef DATA_SET_1
k_stub->kconf.gridsize.x = 50 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
#else
k_stub->kconf.gridsize.x = 100 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
#endif
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 40;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case MM:
t_MM_params *MM_params;
MM_params = (t_MM_params *)calloc(1, sizeof(t_MM_params));
#ifdef DATA_SET_1
MM_params->Asize.x=4096;MM_params->Asize.y=4096;
MM_params->Bsize.x=4096;MM_params->Bsize.y=4096;
#else
MM_params->Asize.x=2048;MM_params->Asize.y=2048;
MM_params->Bsize.x=2048;MM_params->Bsize.y=2048;
#endif
k_stub->params = (void *)MM_params;
k_stub->launchCKEkernel = launch_preemp_MM;
k_stub->launchORIkernel = launch_orig_MM;
k_stub->startKernel = MM_start_kernel;
k_stub->startMallocs = MM_start_mallocs;
k_stub->startTransfers = MM_start_transfers;
k_stub->endKernel = MM_end_kernel;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
MM_params->gridDimX = MM_params->Bsize.x/k_stub->kconf.blocksize.x; // Add information loss during linearization
k_stub->kconf.gridsize.x = MM_params->Bsize.x/k_stub->kconf.blocksize.x * MM_params->Asize.y/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
MM_params->gridDimX = MM_params->Bsize.x/k_stub->kconf.blocksize.x; // Add information loss during linearization
k_stub->kconf.gridsize.x = MM_params->Bsize.x/k_stub->kconf.blocksize.x * MM_params->Asize.y/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
MM_params->gridDimX = MM_params->Bsize.x/k_stub->kconf.blocksize.x; // Add information loss during linearization
k_stub->kconf.gridsize.x = MM_params->Bsize.x/k_stub->kconf.blocksize.x * MM_params->Asize.y/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
/*
case RSC_MODEL:
t_RSC_params *RSC_params;
RSC_params = (t_RSC_params *)calloc(1, sizeof(t_RSC_params));
k_stub->params = (void *)RSC_params;
k_stub->launchCKEkernel = launch_preemp_RSC_model;
k_stub->launchORIkernel = launch_orig_RSC_model;
k_stub->startKernel = RSC_model_start_kernel;
k_stub->endKernel = RSC_model_end_kernel;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 25; //6;
k_stub->kconf.blocksize.x = 64 ;//256;
k_stub->kconf.blocksize.y = 1;
k_stub->kconf.gridsize.x = 104;
k_stub->kconf.gridsize.y = 1;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "Gefore GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 6;
k_stub->kconf.blocksize.x = 256;
k_stub->kconf.blocksize.y = 1;
k_stub->kconf.gridsize.x = 104; // 6 persistent blocks * 13 SMs
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
break;
case RSC_EVALUATE:
t_RSC_params *RSCE_params;
//RSCE_params = (t_RSC_params *)calloc(1, sizeof(t_RSC_params));
//k_stub->params = (void *)RSCE_params;
k_stub->launchCKEkernel = launch_preemp_RSC_evaluate;
k_stub->launchORIkernel = launch_orig_RSC_evaluate;
k_stub->startKernel = RSC_evaluate_start_kernel;
k_stub->endKernel = RSC_evaluate_end_kernel;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
k_stub->kconf.blocksize.y = 1;
k_stub->kconf.gridsize.x = 104;
k_stub->kconf.gridsize.y = 1;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "Gefore GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
k_stub->kconf.blocksize.y = 1;
k_stub->kconf.gridsize.x = 104;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
break;
*/
case SPMV_CSRscalar:
t_SPMV_params *SPMV_params;
SPMV_params = (t_SPMV_params *)calloc(1, sizeof(t_SPMV_params));
k_stub->params = (void *)SPMV_params;
k_stub->launchCKEkernel = launch_preemp_SPMVcsr;
k_stub->launchORIkernel = launch_orig_SPMVcsr;
k_stub->startKernel = SPMVcsr_start_kernel;
k_stub->startMallocs = SPMVcsr_start_mallocs;
k_stub->startTransfers = SPMVcsr_start_transfers;
k_stub->endKernel = SPMVcsr_end_kernel;
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 16;
//k_stub->kconf.blocksize.x = 32;
k_stub->kconf.blocksize.x = 128;
k_stub->kconf.blocksize.y = 1;
//-> this was here before: k_stub->kconf.gridsize.x = k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks * k_stub->kconf.blocksize.x / 2;
k_stub->kconf.gridsize.x = k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks * k_stub->kconf.blocksize.x ;// one row per thread, as in the original kernel version
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->kconf.coarsening = 10;
k_stub->total_tasks = k_stub->kconf.gridsize.x * k_stub->kconf.coarsening;
}
else{
printf("Error: Unknown device\n");
return -1;
}
// This was here before --> SPMV_params->numRows = k_stub->kconf.gridsize.x * k_stub->kconf.blocksize.x * k_stub->kconf.coarsening;
SPMV_params->numRows = k_stub->total_tasks;
#ifdef DATA_SET_1
SPMV_params->nItems = (long int)SPMV_params->numRows * (long int)SPMV_params->numRows * 0.05; // 5% of entries will be non-zero
#else
SPMV_params->nItems = (long int)SPMV_params->numRows * (long int)SPMV_params->numRows * 0.00017; //
//--> This was here before: SPMV_params->nItems = SPMV_params->numRows * SPMV_params->numRows / 20;
#endif
SPMV_params->numNonZeroes = SPMV_params->nItems;
break;
case Reduction:
t_reduction_params *reduction_params;
reduction_params = (t_reduction_params *)calloc(1, sizeof(t_reduction_params));
k_stub->params = (void *)reduction_params;
k_stub->launchCKEkernel = launch_preemp_reduce;
k_stub->launchORIkernel = launch_orig_reduce;
k_stub->startKernel = reduce_start_kernel;
k_stub->startMallocs = reduce_start_mallocs;
k_stub->startTransfers = reduce_start_transfers;
k_stub->endKernel = reduce_end_kernel;
// reduction_params->size = 1<<24;
// reduction_params->size *= 50;
reduction_params->size = 802816000 / 2;
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
k_stub->kconf.blocksize.y = 1;
k_stub->kconf.gridsize.x = reduction_params->size / (2*k_stub->kconf.blocksize.x);
//#ifdef DATA_SET_1
//k_stub->kconf.gridsize.x = 64*28*8;
//#else
//k_stub->kconf.gridsize.x = 640*28*8; // 64 * number_of_permanent_blocks, let's see how it goes
//k_stub->kconf.gridsize.x = 64 * 7;
//#endif
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->kconf.coarsening = 2;
// k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->total_tasks = reduction_params->size / (k_stub->kconf.blocksize.x * 2 * k_stub->kconf.coarsening);
}
else{
printf("Error: Unknown device\n");
return -1;
}
break;
/*case FDTD3d:
k_stub->launchCKEkernel = launch_preemp_FDTD3d;
k_stub->launchORIkernel = launch_orig_FDTD3d;
k_stub->startKernel = FDTD3d_start_kernel;
k_stub->startMallocs = FDTD3d_start_mallocs;
k_stub->startTransfers = FDTD3d_start_transfers;
k_stub->endKernel = FDTD3d_end_kernel;
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 1; // Needs many registers by thread
k_stub->kconf.blocksize.x = 32;
k_stub->kconf.blocksize.y = 16;
k_stub->kconf.gridsize.x = 12; // Original is 12
k_stub->kconf.gridsize.y = 24; // Original is 24
k_stub->total_tasks = k_stub->kconf.gridsize.x*k_stub->kconf.gridsize.y;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
break;
*/
case PF:
t_PF_params *PF_params;
PF_params = (t_PF_params *)calloc(1, sizeof(t_PF_params));
#ifdef DATA_SET_1
PF_params->nRows = 500;
PF_params->nCols = 6000;
#else
PF_params->nRows = 500;
PF_params->nCols = 30000;
#endif
PF_params->param_pyramid_height = 126;
k_stub->params = (void *)PF_params;
k_stub->launchCKEkernel = launch_preemp_PF;
k_stub->launchORIkernel = launch_orig_PF;
k_stub->startKernel = PF_start_kernel;
k_stub->startMallocs = PF_start_mallocs;
k_stub->startTransfers = PF_start_transfers;
k_stub->endKernel = PF_end_kernel;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
int smallBlockCol = k_stub->kconf.blocksize.x-(PF_params->param_pyramid_height)*2;
k_stub->kconf.gridsize.x = PF_params->nCols/smallBlockCol+((PF_params->nCols%smallBlockCol==0)?0:1);
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
int smallBlockCol = k_stub->kconf.blocksize.x-(PF_params->param_pyramid_height)*2;
k_stub->kconf.gridsize.x = PF_params->nCols/smallBlockCol+((PF_params->nCols%smallBlockCol==0)?0:1);
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
int smallBlockCol = k_stub->kconf.blocksize.x-(PF_params->param_pyramid_height)*2;
k_stub->kconf.gridsize.x = PF_params->nCols/smallBlockCol+((PF_params->nCols%smallBlockCol==0)?0:1);
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case RCONV:
t_CONV_params *CONV_params;
CONV_params = (t_CONV_params *)calloc(1, sizeof(t_CONV_params));
#ifdef DATA_SET_1
CONV_params->conv_rows=6144;
CONV_params->conv_cols=6144;
#else
CONV_params->conv_rows=8192;//16384;
CONV_params->conv_cols=8192; //16384;
#endif
k_stub->params = (void *)CONV_params;
k_stub->launchCKEkernel = launch_preemp_RCONV;
k_stub->launchORIkernel = launch_orig_RCONV;
k_stub->startKernel = RCONV_start_kernel;
k_stub->startMallocs = RCONV_start_mallocs;
k_stub->startTransfers = RCONV_start_transfers;
k_stub->endKernel = RCONV_end_kernel;
if (strcmp(device_name, "Tesla K20c") == 0) {
//RCONV
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 4;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / (8 * 16)) * (CONV_params->conv_cols / 4);
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
//RCONV
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 32;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 4;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / (8 * 16)) * (CONV_params->conv_cols / 4);
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
//RCONV
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 32;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 4;
/*
CONV_params->gridDimX[0] = CONV_params->conv_cols / k_stub->kconf.blocksize.x;
CONV_params->gridDimY[0] = CONV_params->conv_cols / k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / (8 * 16)) * (CONV_params->conv_cols / 4);
k_stub->kconf.gridsize.y = 1; //Grid Linearization*/
k_stub->kconf.gridsize.x = CONV_params->conv_cols / (8 * k_stub->kconf.blocksize.x );
k_stub->kconf.gridsize.y = CONV_params->conv_rows / k_stub->kconf.blocksize.y;
k_stub->kconf.coarsening = 1;
k_stub->total_tasks = (k_stub->kconf.gridsize.x * k_stub->kconf.gridsize.y)/k_stub->kconf.coarsening;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case CCONV:
/*t_CONV_params *CONV_params;
CONV_params = (t_CONV_params *)calloc(1, sizeof(t_CONV_params));
#ifdef DATA_SEnT_1
CONV_params->conv_rows=6144;
CONV_params->conv_cols=6144;
#else
CONV_params->conv_rows=18048;
CONV_params->conv_cols=18048;
#endif*/
CONV_params->gridDimY[1] = CONV_params->conv_cols / (8 * 8);
k_stub->params = (void *)CONV_params;
k_stub->launchCKEkernel = launch_preemp_CCONV;
k_stub->launchORIkernel = launch_orig_CCONV;
k_stub->startKernel = CCONV_start_kernel;
k_stub->startMallocs = CCONV_start_mallocs;
k_stub->startTransfers = CCONV_start_transfers;
k_stub->endKernel = CCONV_end_kernel;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 9;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 8;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / 16) * (CONV_params->conv_cols / (8 * 8));
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 8;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / 16) * (CONV_params->conv_cols / (8 * 8));
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 8;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / 16) * (CONV_params->conv_cols / (8 * 8));
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
/*case Dummy:
k_stub->launchCKEkernel = launch_preemp_dummy;
k_stub->launchORIkernel = launch_orig_dummy;
k_stub->startKernel = dummy_start_kernel;
k_stub->startMallocs = dummy_start_mallocs;
k_stub->startTransfers = dummy_start_transfers;
k_stub->endKernel = dummy_end_kernel;
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
k_stub->kconf.gridsize.x = 64;
k_stub->kconf.gridsize.y = 64;
k_stub->total_tasks = k_stub->kconf.gridsize.x*k_stub->kconf.gridsize.y;
k_stub->kconf.coarsening = 5000;
}
else{
printf("Error: Unknown device\n");
return -1;
}
break;
*/
case GCEDD:
t_CEDD_params *CEDD_params;
CEDD_params = (t_CEDD_params *)calloc(1, sizeof(t_CEDD_params));
#ifdef DATA_SET_1
CEDD_params->nRows=3072 * 2;
CEDD_params->nCols=4608 * 2;
#else
CEDD_params->nRows=4608 * 2.6;
CEDD_params->nCols=4608 * 2.6;
#endif
k_stub->params = (void *)CEDD_params;
k_stub->launchCKEkernel = launch_preemp_GCEDD;
k_stub->launchORIkernel = launch_orig_GCEDD;
k_stub->startKernel = GCEDD_start_kernel;
k_stub->endKernel = GCEDD_end_kernel;
k_stub->startMallocs = GCEDD_start_mallocs;
k_stub->startTransfers = GCEDD_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->kconf.coarsening = 16;
k_stub->total_tasks = k_stub->kconf.gridsize.x/k_stub->kconf.coarsening ;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case SCEDD:
// t_CEDD_params *SCEDD_params;
// SCEDD_params = (t_CEDD_params *)calloc(1, sizeof(t_CEDD_params));
// #ifdef DATA_SET_1
// SCEDD_params->nRows=3072 * 2;
// SCEDD_params->nCols=4608 * 2;
// #else
// SCEDD_params->nRows=4608 * 2.6;
// SCEDD_params->nCols=4608 * 2.6;
// #endif
// *SCEDD_params->h_in_out = *GCEDD_params->h_in_out;
// SCEDD_params->data_CEDD = GCEDD_params->data_CEDD;
// SCEDD_params->out_CEDD = GCEDD_params->out_CEDD;
// SCEDD_params->theta_CEDD = GCEDD_params->theta_CEDD;
k_stub->params = (void *)CEDD_params;
k_stub->launchCKEkernel = launch_preemp_SCEDD;
k_stub->launchORIkernel = launch_orig_SCEDD;
k_stub->startKernel = SCEDD_start_kernel;
k_stub->endKernel = SCEDD_end_kernel;
k_stub->startMallocs = SCEDD_start_mallocs;
k_stub->startTransfers = SCEDD_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = (CEDD_params->gridDimX * CEDD_params->gridDimY) / 1;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case NCEDD:
// t_CEDD_params *NCEDD_params;
// NCEDD_params = (t_CEDD_params *)calloc(1, sizeof(t_CEDD_params));
// #ifdef DATA_SET_1
// NCEDD_params->nRows=3072 * 2;
// NCEDD_params->nCols=4608 * 2;
// #else
// NCEDD_params->nRows=4608 * 2.6;
// NCEDD_params->nCols=4608 * 2.6;
// #endif
// *NCEDD_params->h_in_out = *GCEDD_params->h_in_out;
// NCEDD_params->data_CEDD = GCEDD_params->data_CEDD;
// NCEDD_params->out_CEDD = GCEDD_params->out_CEDD;
// NCEDD_params->theta_CEDD = GCEDD_params->theta_CEDD;
k_stub->params = (void *)CEDD_params;
k_stub->launchCKEkernel = launch_preemp_NCEDD;
k_stub->launchORIkernel = launch_orig_NCEDD;
k_stub->startKernel = NCEDD_start_kernel;
k_stub->endKernel = NCEDD_end_kernel;
k_stub->startMallocs = NCEDD_start_mallocs;
k_stub->startTransfers = NCEDD_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case HCEDD:
// t_CEDD_params *HCEDD_params;
// HCEDD_params = (t_CEDD_params *)calloc(1, sizeof(t_CEDD_params));
// #ifdef DATA_SET_1
// HCEDD_params->nRows=3072 * 2;
// HCEDD_params->nCols=4608 * 2;
// #else
// HCEDD_params->nRows=4608 * 2.6;
// HCEDD_params->nCols=4608 * 2.6;
// #endif
// *HCEDD_params->h_in_out = *GCEDD_params->h_in_out;
// HCEDD_params->data_CEDD = GCEDD_params->data_CEDD;
// HCEDD_params->out_CEDD = GCEDD_params->out_CEDD;
// HCEDD_params->theta_CEDD = GCEDD_params->theta_CEDD;
k_stub->params = (void *)CEDD_params;
k_stub->launchCKEkernel = launch_preemp_HCEDD;
k_stub->launchORIkernel = launch_orig_HCEDD;
k_stub->startKernel = HCEDD_start_kernel;
k_stub->endKernel = HCEDD_end_kernel;
k_stub->startMallocs = HCEDD_start_mallocs;
k_stub->startTransfers = HCEDD_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case HST256:
t_HST256_params *HST256_params;
HST256_params = (t_HST256_params *)calloc(1, sizeof(t_HST256_params));
k_stub->params = (void *)HST256_params;
k_stub->launchCKEkernel = launch_preemp_HST256;
k_stub->launchORIkernel = launch_orig_HST256;
k_stub->startKernel = HST256_start_kernel;
k_stub->endKernel = HST256_end_kernel;
k_stub->startMallocs = HST256_start_mallocs;
k_stub->startTransfers = HST256_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
HST256_params->warp_count = 6;
HST256_params->histogram256_threadblock_size = HST256_params->warp_count * WARP_SIZE;
HST256_params->histogram256_threadblock_memory = HST256_params->warp_count * HISTOGRAM256_BIN_COUNT;
#ifdef DATA_SET_1
HST256_params->byteCount256 = 64 * 1048576 * HST256_params->warp_count;
#else
HST256_params->byteCount256 = 64 * 1048576 * HST256_params->warp_count;
#endif
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 192;
k_stub->kconf.gridsize.x = 240;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
//k_stub->total_tasks = (64 * 1048576)/k_stub->kconf.blocksize.x + (((64 * 1048576)%k_stub->kconf.blocksize.x==0)?0:1);
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
HST256_params->warp_count = 6;
HST256_params->histogram256_threadblock_size = HST256_params->warp_count * WARP_SIZE;
HST256_params->histogram256_threadblock_memory = HST256_params->warp_count * HISTOGRAM256_BIN_COUNT;
#ifdef DATA_SET_1
HST256_params->byteCount256 = 64 * 1048576 * HST256_params->warp_count;
#else
HST256_params->byteCount256 = 64 * 1048576 * HST256_params->warp_count;
#endif
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 10;
k_stub->kconf.blocksize.x = 192;
k_stub->kconf.gridsize.x = 240;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
//k_stub->total_tasks = (64 * 1048576)/k_stub->kconf.blocksize.x + (((64 * 1048576)%k_stub->kconf.blocksize.x==0)?0:1);
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
HST256_params->warp_count = 8;
HST256_params->histogram256_threadblock_size = HST256_params->warp_count * WARP_SIZE;
HST256_params->histogram256_threadblock_memory = HST256_params->warp_count * HISTOGRAM256_BIN_COUNT;
#ifdef DATA_SET_1
HST256_params->byteCount256 = 64 * 1089536 * 8;
#else
HST256_params->byteCount256 = 64 * 1089536 * 8 *2;
#endif
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
k_stub->kconf.coarsening = 8;
//k_stub->kconf.gridsize.x = HST256_params->byteCount256 / (sizeof(uint) * k_stub->kconf.coarsening * k_stub->kconf.blocksize.x);
k_stub->kconf.gridsize.x = k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
//k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->total_tasks = HST256_params->byteCount256 / (sizeof(uint) * k_stub->kconf.blocksize.x * k_stub->kconf.coarsening);
// k_stub->total_tasks = (k_stub->kconf.gridsize.x * ((HST256_params->byteCount256 / sizeof(uint)) / (k_stub->kconf.blocksize.x * k_stub->kconf.gridsize.x))) / k_stub->kconf.coarsening;
//k_stub->total_tasks = (64 * 1048576)/k_stub->kconf.blocksize.x + (((64 * 1048576)%k_stub->kconf.blocksize.x==0)?0:1);
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
default:
printf("Unknown kernel\n");
return -1;
}
// Allocate task support on CPU memory (pinned memory)
checkCudaErrors(hipHostMalloc((void **)&(k_stub->h_state), sizeof(State) * MAX_STREAMS_PER_KERNEL, hipHostMallocDefault)); // In Pinned memory
for (int i=0; i<MAX_STREAMS_PER_KERNEL; i++)
k_stub->h_state[i] = PREP;
checkCudaErrors(hipHostMalloc((void **)&(k_stub->h_executed_tasks), sizeof(int), hipHostMallocDefault)); // In Pinned memory
checkCudaErrors(hipHostMalloc((void **)&(k_stub->h_SMs_cont), sizeof(int)*k_stub->kconf.numSMs, hipHostMallocDefault)); // In Pinned memory
// Proxy support for zero-copy
#ifdef ZEROCOPY
// Zero-copy eviction state (host process indicates eviction to proxy)
checkCudaErrors(hipHostMalloc((void **)&(k_stub->h_proxy_eviction), sizeof(int), hipHostMallocMapped));
checkCudaErrors(hipHostGetDevicePointer((void **)&(k_stub->d_proxy_eviction), (void *)(k_stub->h_proxy_eviction) , 0));
// Zero-copy: when the kernel finishes, the proxy sends the number of executed kernel tasks to the host
checkCudaErrors(hipHostMalloc((void **)&(k_stub->h_exec_tasks_proxy), sizeof(int), hipHostMallocMapped));
checkCudaErrors(hipHostGetDevicePointer((void **)&(k_stub->d_exec_tasks_proxy), (void *)(k_stub->h_exec_tasks_proxy) , 0));
// Stream to launch proxy
k_stub->proxy_s = (hipStream_t *)malloc(sizeof(hipStream_t));
err = hipStreamCreate(k_stub->proxy_s);
checkCudaErrors(err);
#endif
// Allocate and initialize task support in device memory
checkCudaErrors(hipMalloc((void **)&k_stub->d_executed_tasks, sizeof(int))); // Subtask counter: the kernel uses it to obtain the subtask id
hipMemset(k_stub->d_executed_tasks, 0, sizeof(int));
checkCudaErrors(hipMalloc((void **)&k_stub->gm_state, sizeof(State) * MAX_STREAMS_PER_KERNEL)); // pointer to a global memory position used to communicate the eviction state from the CPU
hipMemcpy(k_stub->gm_state, k_stub->h_state, sizeof(State) * MAX_STREAMS_PER_KERNEL, hipMemcpyHostToDevice);
checkCudaErrors(hipMalloc((void **)&k_stub->d_SMs_cont, sizeof(int)*k_stub->kconf.numSMs)); // create an array (one position per SM) for SMK specific support
hipMemset(k_stub->d_SMs_cont, 0, sizeof(int)*k_stub->kconf.numSMs);
*stub = k_stub;
return 0;
}
// Create stub info, but also pass a t_params structure pointer from a previous kstub: it is used for kstubs from applications with several kernels
int create_stubinfo_with_params(t_kernel_stub **stub, int deviceId, t_Kernel id, hipStream_t *transfer_s, hipStream_t *preemp_s, void *params)
{
hipError_t err;
t_kernel_stub *k_stub = (t_kernel_stub *)calloc(1, sizeof(t_kernel_stub)); // Create kernel stub
k_stub->deviceId = deviceId;
k_stub->id = id;
k_stub->kernel_finished = 0;
k_stub->HtD_tranfers_finished = 0;
k_stub->DtH_tranfers_finished = 0;
// Streams
hipStream_t *kernel_s, *m_transfer_s;
kernel_s = (hipStream_t *)malloc(sizeof(hipStream_t));
err = hipStreamCreate(kernel_s);
checkCudaErrors(err);
m_transfer_s = (hipStream_t *)malloc(2*sizeof(hipStream_t));
err = hipStreamCreate(&m_transfer_s[0]);
err = hipStreamCreate(&m_transfer_s[1]);
checkCudaErrors(err);
k_stub->execution_s = kernel_s;
k_stub->transfer_s = m_transfer_s;
k_stub->preemp_s = preemp_s;
/** Get device name*/
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, deviceId);
char *device_name = deviceProp.name;
// Updating kernel info
switch (id) {
case CCONV:
{
t_CONV_params *CONV_params = (t_CONV_params *)params;
CONV_params->gridDimY[1] = CONV_params->conv_cols / (8 * 8);
k_stub->params = (void *)CONV_params;
k_stub->launchCKEkernel = launch_preemp_CCONV;
k_stub->launchORIkernel = launch_orig_CCONV;
k_stub->startKernel = CCONV_start_kernel;
k_stub->startMallocs = CCONV_start_mallocs;
k_stub->startTransfers = CCONV_start_transfers;
k_stub->endKernel = CCONV_end_kernel;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 12;
k_stub->kconf.max_persistent_blocks = 9;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 8;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / 16) * (CONV_params->conv_cols / (8 * 8));
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 8;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / 16) * (CONV_params->conv_cols / (8 * 8));
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 8;
//k_stub->kconf.gridsize.x = (CONV_params->conv_rows / 16) * (CONV_params->conv_cols / (8 * 8));
k_stub->kconf.gridsize.x = CONV_params->conv_cols /16;
k_stub->kconf.gridsize.y = CONV_params->conv_rows / ( 8 * 8);
k_stub->kconf.coarsening = 1;
k_stub->total_tasks = k_stub->kconf.gridsize.x * k_stub->kconf.gridsize.y / k_stub->kconf.coarsening ;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
}
break;
case SCEDD:
// t_CEDD_params *SCEDD_params;
// SCEDD_params = (t_CEDD_params *)calloc(1, sizeof(t_CEDD_params));
// #ifdef DATA_SET_1
// SCEDD_params->nRows=3072 * 2;
// SCEDD_params->nCols=4608 * 2;
// #else
// SCEDD_params->nRows=4608 * 2.6;
// SCEDD_params->nCols=4608 * 2.6;
// #endif
// *SCEDD_params->h_in_out = *GCEDD_params->h_in_out;
// SCEDD_params->data_CEDD = GCEDD_params->data_CEDD;
// SCEDD_params->out_CEDD = GCEDD_params->out_CEDD;
// SCEDD_params->theta_CEDD = GCEDD_params->theta_CEDD;
{
t_CEDD_params *CEDD_params = (t_CEDD_params *)params;
k_stub->params = params;
k_stub->launchCKEkernel = launch_preemp_SCEDD;
k_stub->launchORIkernel = launch_orig_SCEDD;
k_stub->startKernel = SCEDD_start_kernel;
k_stub->endKernel = SCEDD_end_kernel;
k_stub->startMallocs = SCEDD_start_mallocs;
k_stub->startTransfers = SCEDD_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = (CEDD_params->gridDimX * CEDD_params->gridDimY) / 1;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
}
break;
case NCEDD:
// t_CEDD_params *NCEDD_params;
// NCEDD_params = (t_CEDD_params *)calloc(1, sizeof(t_CEDD_params));
// #ifdef DATA_SET_1
// NCEDD_params->nRows=3072 * 2;
// NCEDD_params->nCols=4608 * 2;
// #else
// NCEDD_params->nRows=4608 * 2.6;
// NCEDD_params->nCols=4608 * 2.6;
// #endif
// *NCEDD_params->h_in_out = *GCEDD_params->h_in_out;
// NCEDD_params->data_CEDD = GCEDD_params->data_CEDD;
// NCEDD_params->out_CEDD = GCEDD_params->out_CEDD;
// NCEDD_params->theta_CEDD = GCEDD_params->theta_CEDD;
{
t_CEDD_params *CEDD_params = (t_CEDD_params *)params;
k_stub->params = params;
k_stub->launchCKEkernel = launch_preemp_NCEDD;
k_stub->launchORIkernel = launch_orig_NCEDD;
k_stub->startKernel = NCEDD_start_kernel;
k_stub->endKernel = NCEDD_end_kernel;
k_stub->startMallocs = NCEDD_start_mallocs;
k_stub->startTransfers = NCEDD_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->kconf.coarsening = 2;
k_stub->total_tasks = k_stub->kconf.gridsize.x/k_stub->kconf.coarsening;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
}
break;
case HCEDD:
// t_CEDD_params *HCEDD_params;
// HCEDD_params = (t_CEDD_params *)calloc(1, sizeof(t_CEDD_params));
// #ifdef DATA_SET_1
// HCEDD_params->nRows=3072 * 2;
// HCEDD_params->nCols=4608 * 2;
// #else
// HCEDD_params->nRows=4608 * 2.6;
// HCEDD_params->nCols=4608 * 2.6;
// #endif
// *HCEDD_params->h_in_out = *GCEDD_params->h_in_out;
// HCEDD_params->data_CEDD = GCEDD_params->data_CEDD;
// HCEDD_params->out_CEDD = GCEDD_params->out_CEDD;
// HCEDD_params->theta_CEDD = GCEDD_params->theta_CEDD;
{
t_CEDD_params *CEDD_params = (t_CEDD_params *)params;
k_stub->params = params;
k_stub->launchCKEkernel = launch_preemp_HCEDD;
k_stub->launchORIkernel = launch_orig_HCEDD;
k_stub->startKernel = HCEDD_start_kernel;
k_stub->endKernel = HCEDD_end_kernel;
k_stub->startMallocs = HCEDD_start_mallocs;
k_stub->startTransfers = HCEDD_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->kconf.coarsening = 4;
k_stub->total_tasks = k_stub->kconf.gridsize.x/k_stub->kconf.coarsening;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
}
break;
default:
printf("Unknown kernel\n");
return -1;
}
// Allocate task support on CPU memory (pinned memory)
checkCudaErrors(hipHostMalloc((void **)&(k_stub->h_state), sizeof(State) * MAX_STREAMS_PER_KERNEL, hipHostMallocDefault)); // In Pinned memory
for (int i=0; i<MAX_STREAMS_PER_KERNEL; i++)
k_stub->h_state[i] = PREP;
checkCudaErrors(hipHostMalloc((void **)&(k_stub->h_executed_tasks), sizeof(int), hipHostMallocDefault)); // In Pinned memory
checkCudaErrors(hipHostMalloc((void **)&(k_stub->h_SMs_cont), sizeof(int)*k_stub->kconf.numSMs, hipHostMallocDefault)); // In Pinned memory
// Allocate and initialize memory address calculation support in CPU memory
k_stub->num_addr_counters = 2;
checkCudaErrors(hipHostMalloc((void **)&(k_stub->h_numUniqueAddr), k_stub->num_addr_counters * sizeof(int), hipHostMallocDefault)); // In pinned memory
// Proxy support for zero-copy
#ifdef ZEROCOPY
// Zero-copy eviction state (host process indicates eviction to proxy)
checkCudaErrors(hipHostMalloc((void **)&(k_stub->h_proxy_eviction), sizeof(int), hipHostMallocMapped));
checkCudaErrors(hipHostGetDevicePointer((void **)&(k_stub->d_proxy_eviction), (void *)(k_stub->h_proxy_eviction) , 0));
// Zero-copy: when the kernel finishes, the proxy sends the number of executed kernel tasks to the host
checkCudaErrors(hipHostMalloc((void **)&(k_stub->h_exec_tasks_proxy), sizeof(int), hipHostMallocMapped));
checkCudaErrors(hipHostGetDevicePointer((void **)&(k_stub->d_exec_tasks_proxy), (void *)(k_stub->h_exec_tasks_proxy) , 0));
// Stream to launch proxy
k_stub->proxy_s = (hipStream_t *)malloc(sizeof(hipStream_t));
err = hipStreamCreate(k_stub->proxy_s);
checkCudaErrors(err);
#endif
// Allocate and initialize task support in device memory
checkCudaErrors(hipMalloc((void **)&k_stub->d_executed_tasks, sizeof(int))); // Subtask counter: the kernel uses it to obtain the subtask id
hipMemset(k_stub->d_executed_tasks, 0, sizeof(int));
checkCudaErrors(hipMalloc((void **)&k_stub->gm_state, sizeof(State) * MAX_STREAMS_PER_KERNEL)); // pointer to a global memory position used to communicate the eviction state from the CPU
hipMemcpy(k_stub->gm_state, k_stub->h_state, sizeof(State) * MAX_STREAMS_PER_KERNEL, hipMemcpyHostToDevice);
checkCudaErrors(hipMalloc((void **)&k_stub->d_SMs_cont, sizeof(int)*k_stub->kconf.numSMs)); // create an array (one position per SM) for SMK specific support
hipMemset(k_stub->d_SMs_cont, 0, sizeof(int)*k_stub->kconf.numSMs);
*stub = k_stub;
return 0;
}
| 2ec860bfc1d299f44d7319b12c791ecdddca603f.cu | #include <unistd.h>
#include <string.h>
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h>
#include "elastic_kernel.h"
#include "BS/BS.h"
#include "VA/VA.h"
#include "MM/MM.h"
//#include "RSC/RSC.h"
#include "SPMV/SPMV.h"
#include "PF/PF.h"
#include "Reduction/reduction.h"
#include "FDTD3d/FDTD3dGPU.h"
//#include "Dummy/Dummy.h"
#include "CONV/CONV.h"
#include "CEDD/CEDD.h"
#include "HST/HST256.h"
//#define DATA_SET_1
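// create_stubinfo: builds a t_kernel_stub for the benchmark selected by id. It creates the
// execution and transfer streams, wires in the per-benchmark launch/malloc/transfer callbacks,
// and chooses the launch geometry (number of SMs, persistent blocks per SM, block size,
// linearized grid size, coarsening factor) according to the detected device. Returns 0 on
// success and -1 for an unknown device or kernel.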
int create_stubinfo(t_kernel_stub **stub, int deviceId, t_Kernel id, cudaStream_t *transfer_s, cudaStream_t *preemp_s)
{
cudaError_t err;
t_kernel_stub *k_stub = (t_kernel_stub *)calloc(1, sizeof(t_kernel_stub)); // Create kernel stub
k_stub->deviceId = deviceId;
k_stub->id = id;
k_stub->kernel_finished = 0;
k_stub->HtD_tranfers_finished = 0;
k_stub->DtH_tranfers_finished = 0;
// Streams
cudaStream_t *kernel_s, *m_transfer_s;
kernel_s = (cudaStream_t *)malloc(sizeof(cudaStream_t));
err = cudaStreamCreate(kernel_s);
checkCudaErrors(err);
m_transfer_s = (cudaStream_t *)malloc(2*sizeof(cudaStream_t));
err = cudaStreamCreate(&m_transfer_s[0]);
err = cudaStreamCreate(&m_transfer_s[1]);
checkCudaErrors(err);
k_stub->execution_s = kernel_s;
k_stub->transfer_s = m_transfer_s;
k_stub->preemp_s = preemp_s;
/** Get device name*/
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, deviceId);
char *device_name = deviceProp.name;
// Updating kernel info
switch (id) {
case BS:
t_BS_params *BS_params;
BS_params = (t_BS_params *)calloc(1, sizeof(t_BS_params));
k_stub->params = (void *)BS_params;
k_stub->launchCKEkernel = launch_preemp_BS;
k_stub->launchORIkernel = launch_orig_BS;
k_stub->startKernel = BS_start_kernel_dummy;
k_stub->startMallocs = BS_start_mallocs;
k_stub->startTransfers = BS_start_transfers;
k_stub->endKernel = BS_end_kernel_dummy;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 128;
k_stub->kconf.gridsize.x = 50 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 40;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 256;
k_stub->kconf.gridsize.x = 25 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 40;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
#ifdef DATA_SET_1
k_stub->kconf.gridsize.x = 25 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
#else
k_stub->kconf.gridsize.x = 50 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
#endif
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 40;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case VA:
t_VA_params *VA_params;
VA_params = (t_VA_params *)calloc(1, sizeof(t_VA_params));
k_stub->params = (void *)VA_params;
k_stub->launchCKEkernel = launch_preemp_VA;
k_stub->launchORIkernel = launch_orig_VA;
k_stub->startKernel = VA_start_kernel_dummy;
k_stub->startMallocs = VA_start_mallocs;
k_stub->startTransfers = VA_start_transfers;
k_stub->endKernel = VA_end_kernel_dummy;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 128;
k_stub->kconf.gridsize.x = 50 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 40;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 256;
k_stub->kconf.gridsize.x = 50 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 40;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
#ifdef DATA_SET_1
k_stub->kconf.gridsize.x = 50 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
#else
k_stub->kconf.gridsize.x = 100 * k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
#endif
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 40;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case MM:
t_MM_params *MM_params;
MM_params = (t_MM_params *)calloc(1, sizeof(t_MM_params));
#ifdef DATA_SET_1
MM_params->Asize.x=4096;MM_params->Asize.y=4096;
MM_params->Bsize.x=4096;MM_params->Bsize.y=4096;
#else
MM_params->Asize.x=2048;MM_params->Asize.y=2048;
MM_params->Bsize.x=2048;MM_params->Bsize.y=2048;
#endif
k_stub->params = (void *)MM_params;
k_stub->launchCKEkernel = launch_preemp_MM;
k_stub->launchORIkernel = launch_orig_MM;
k_stub->startKernel = MM_start_kernel;
k_stub->startMallocs = MM_start_mallocs;
k_stub->startTransfers = MM_start_transfers;
k_stub->endKernel = MM_end_kernel;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
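// The 2D grid is linearized (gridsize.y is forced to 1 below); gridDimX is stored in the
// params so the kernel can rebuild the original 2D block coordinates from the linear block index.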
MM_params->gridDimX = MM_params->Bsize.x/k_stub->kconf.blocksize.x; // Add information loss during linearization
k_stub->kconf.gridsize.x = MM_params->Bsize.x/k_stub->kconf.blocksize.x * MM_params->Asize.y/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
MM_params->gridDimX = MM_params->Bsize.x/k_stub->kconf.blocksize.x; // Add information loss during linearization
k_stub->kconf.gridsize.x = MM_params->Bsize.x/k_stub->kconf.blocksize.x * MM_params->Asize.y/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
MM_params->gridDimX = MM_params->Bsize.x/k_stub->kconf.blocksize.x; // Add information loss during linearization
k_stub->kconf.gridsize.x = MM_params->Bsize.x/k_stub->kconf.blocksize.x * MM_params->Asize.y/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
/*
case RSC_MODEL:
t_RSC_params *RSC_params;
RSC_params = (t_RSC_params *)calloc(1, sizeof(t_RSC_params));
k_stub->params = (void *)RSC_params;
k_stub->launchCKEkernel = launch_preemp_RSC_model;
k_stub->launchORIkernel = launch_orig_RSC_model;
k_stub->startKernel = RSC_model_start_kernel;
k_stub->endKernel = RSC_model_end_kernel;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 25; //6;
k_stub->kconf.blocksize.x = 64 ;//256;
k_stub->kconf.blocksize.y = 1;
k_stub->kconf.gridsize.x = 104;
k_stub->kconf.gridsize.y = 1;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "Gefore GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 6;
k_stub->kconf.blocksize.x = 256;
k_stub->kconf.blocksize.y = 1;
k_stub->kconf.gridsize.x = 104; //6 persistent blocks * 13 SMs
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
break;
case RSC_EVALUATE:
t_RSC_params *RSCE_params;
//RSCE_params = (t_RSC_params *)calloc(1, sizeof(t_RSC_params));
//k_stub->params = (void *)RSCE_params;
k_stub->launchCKEkernel = launch_preemp_RSC_evaluate;
k_stub->launchORIkernel = launch_orig_RSC_evaluate;
k_stub->startKernel = RSC_evaluate_start_kernel;
k_stub->endKernel = RSC_evaluate_end_kernel;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
k_stub->kconf.blocksize.y = 1;
k_stub->kconf.gridsize.x = 104;
k_stub->kconf.gridsize.y = 1;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "Gefore GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
k_stub->kconf.blocksize.y = 1;
k_stub->kconf.gridsize.x = 104;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
break;
*/
case SPMV_CSRscalar:
t_SPMV_params *SPMV_params;
SPMV_params = (t_SPMV_params *)calloc(1, sizeof(t_SPMV_params));
k_stub->params = (void *)SPMV_params;
k_stub->launchCKEkernel = launch_preemp_SPMVcsr;
k_stub->launchORIkernel = launch_orig_SPMVcsr;
k_stub->startKernel = SPMVcsr_start_kernel;
k_stub->startMallocs = SPMVcsr_start_mallocs;
k_stub->startTransfers = SPMVcsr_start_transfers;
k_stub->endKernel = SPMVcsr_end_kernel;
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 16;
//k_stub->kconf.blocksize.x = 32;
k_stub->kconf.blocksize.x = 128;
k_stub->kconf.blocksize.y = 1;
//-> this was here before: k_stub->kconf.gridsize.x = k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks * k_stub->kconf.blocksize.x / 2;
k_stub->kconf.gridsize.x = k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks * k_stub->kconf.blocksize.x; // One row per thread when all threads are used, as in the original version
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->kconf.coarsening = 10;
k_stub->total_tasks = k_stub->kconf.gridsize.x * k_stub->kconf.coarsening;
}
else{
printf("Error: Unknown device\n");
return -1;
}
// This was here before --> SPMV_params->numRows = k_stub->kconf.gridsize.x * k_stub->kconf.blocksize.x * k_stub->kconf.coarsening;
SPMV_params->numRows = k_stub->total_tasks;
#ifdef DATA_SET_1
SPMV_params->nItems = (long int)SPMV_params->numRows * (long int)SPMV_params->numRows * 0.05; // 5% of entries will be non-zero
#else
SPMV_params->nItems = (long int)SPMV_params->numRows * (long int)SPMV_params->numRows * 0.00017; //
//--> This was here before: SPMV_params->nItems = SPMV_params->numRows * SPMV_params->numRows / 20;
#endif
SPMV_params->numNonZeroes = SPMV_params->nItems;
break;
case Reduction:
t_reduction_params *reduction_params;
reduction_params = (t_reduction_params *)calloc(1, sizeof(t_reduction_params));
k_stub->params = (void *)reduction_params;
k_stub->launchCKEkernel = launch_preemp_reduce;
k_stub->launchORIkernel = launch_orig_reduce;
k_stub->startKernel = reduce_start_kernel;
k_stub->startMallocs = reduce_start_mallocs;
k_stub->startTransfers = reduce_start_transfers;
k_stub->endKernel = reduce_end_kernel;
// reduction_params->size = 1<<24;
// reduction_params->size *= 50;
reduction_params->size = 802816000 / 2;
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
k_stub->kconf.blocksize.y = 1;
k_stub->kconf.gridsize.x = reduction_params->size / (2*k_stub->kconf.blocksize.x);
//#ifdef DATA_SET_1
//k_stub->kconf.gridsize.x = 64*28*8;
//#else
//k_stub->kconf.gridsize.x = 640*28*8; // 64 * number_of_permanent_blocks, let's see how it goes
//k_stub->kconf.gridsize.x = 64 * 7;
//#endif
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->kconf.coarsening = 2;
// k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->total_tasks = reduction_params->size / (k_stub->kconf.blocksize.x * 2 * k_stub->kconf.coarsening);
}
else{
printf("Error: Unknown device\n");
return -1;
}
break;
/*case FDTD3d:
k_stub->launchCKEkernel = launch_preemp_FDTD3d;
k_stub->launchORIkernel = launch_orig_FDTD3d;
k_stub->startKernel = FDTD3d_start_kernel;
k_stub->startMallocs = FDTD3d_start_mallocs;
k_stub->startTransfers = FDTD3d_start_transfers;
k_stub->endKernel = FDTD3d_end_kernel;
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 1; // Needs many registers by thread
k_stub->kconf.blocksize.x = 32;
k_stub->kconf.blocksize.y = 16;
k_stub->kconf.gridsize.x = 12; // Original is 12
k_stub->kconf.gridsize.y = 24; // Original is 24
k_stub->total_tasks = k_stub->kconf.gridsize.x*k_stub->kconf.gridsize.y;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
break;
*/
case PF:
t_PF_params *PF_params;
PF_params = (t_PF_params *)calloc(1, sizeof(t_PF_params));
#ifdef DATA_SET_1
PF_params->nRows = 500;
PF_params->nCols = 6000;
#else
PF_params->nRows = 500;
PF_params->nCols = 30000;
#endif
PF_params->param_pyramid_height = 126;
k_stub->params = (void *)PF_params;
k_stub->launchCKEkernel = launch_preemp_PF;
k_stub->launchORIkernel = launch_orig_PF;
k_stub->startKernel = PF_start_kernel;
k_stub->startMallocs = PF_start_mallocs;
k_stub->startTransfers = PF_start_transfers;
k_stub->endKernel = PF_end_kernel;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
int smallBlockCol = k_stub->kconf.blocksize.x-(PF_params->param_pyramid_height)*2;
k_stub->kconf.gridsize.x = PF_params->nCols/smallBlockCol+((PF_params->nCols%smallBlockCol==0)?0:1);
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
int smallBlockCol = k_stub->kconf.blocksize.x-(PF_params->param_pyramid_height)*2;
k_stub->kconf.gridsize.x = PF_params->nCols/smallBlockCol+((PF_params->nCols%smallBlockCol==0)?0:1);
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
int smallBlockCol = k_stub->kconf.blocksize.x-(PF_params->param_pyramid_height)*2;
k_stub->kconf.gridsize.x = PF_params->nCols/smallBlockCol+((PF_params->nCols%smallBlockCol==0)?0:1);
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case RCONV:
t_CONV_params *CONV_params;
CONV_params = (t_CONV_params *)calloc(1, sizeof(t_CONV_params));
#ifdef DATA_SET_1
CONV_params->conv_rows=6144;
CONV_params->conv_cols=6144;
#else
CONV_params->conv_rows=8192;//16384;
CONV_params->conv_cols=8192; //16384;
#endif
k_stub->params = (void *)CONV_params;
k_stub->launchCKEkernel = launch_preemp_RCONV;
k_stub->launchORIkernel = launch_orig_RCONV;
k_stub->startKernel = RCONV_start_kernel;
k_stub->startMallocs = RCONV_start_mallocs;
k_stub->startTransfers = RCONV_start_transfers;
k_stub->endKernel = RCONV_end_kernel;
if (strcmp(device_name, "Tesla K20c") == 0) {
//RCONV
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 4;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / (8 * 16)) * (CONV_params->conv_cols / 4);
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
//RCONV
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 32;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 4;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / (8 * 16)) * (CONV_params->conv_cols / 4);
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
//RCONV
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 32;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 4;
/*
CONV_params->gridDimX[0] = CONV_params->conv_cols / k_stub->kconf.blocksize.x;
CONV_params->gridDimY[0] = CONV_params->conv_cols / k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / (8 * 16)) * (CONV_params->conv_cols / 4);
k_stub->kconf.gridsize.y = 1; //Grid Linearization*/
k_stub->kconf.gridsize.x = CONV_params->conv_cols / (8 * k_stub->kconf.blocksize.x );
k_stub->kconf.gridsize.y = CONV_params->conv_rows / k_stub->kconf.blocksize.y;
k_stub->kconf.coarsening = 1;
k_stub->total_tasks = (k_stub->kconf.gridsize.x * k_stub->kconf.gridsize.y)/k_stub->kconf.coarsening;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case CCONV:
/*t_CONV_params *CONV_params;
CONV_params = (t_CONV_params *)calloc(1, sizeof(t_CONV_params));
#ifdef DATA_SET_1
CONV_params->conv_rows=6144;
CONV_params->conv_cols=6144;
#else
CONV_params->conv_rows=18048;
CONV_params->conv_cols=18048;
#endif*/
CONV_params->gridDimY[1] = CONV_params->conv_cols / (8 * 8);
k_stub->params = (void *)CONV_params;
k_stub->launchCKEkernel = launch_preemp_CCONV;
k_stub->launchORIkernel = launch_orig_CCONV;
k_stub->startKernel = CCONV_start_kernel;
k_stub->startMallocs = CCONV_start_mallocs;
k_stub->startTransfers = CCONV_start_transfers;
k_stub->endKernel = CCONV_end_kernel;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 9;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 8;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / 16) * (CONV_params->conv_cols / (8 * 8));
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 8;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / 16) * (CONV_params->conv_cols / (8 * 8));
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 8;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / 16) * (CONV_params->conv_cols / (8 * 8));
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
/*case Dummy:
k_stub->launchCKEkernel = launch_preemp_dummy;
k_stub->launchORIkernel = launch_orig_dummy;
k_stub->startKernel = dummy_start_kernel;
k_stub->startMallocs = dummy_start_mallocs;
k_stub->startTransfers = dummy_start_transfers;
k_stub->endKernel = dummy_end_kernel;
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
k_stub->kconf.gridsize.x = 64;
k_stub->kconf.gridsize.y = 64;
k_stub->total_tasks = k_stub->kconf.gridsize.x*k_stub->kconf.gridsize.y;
k_stub->kconf.coarsening = 5000;
}
else{
printf("Error: Unknown device\n");
return -1;
}
break;
*/
case GCEDD:
t_CEDD_params *CEDD_params;
CEDD_params = (t_CEDD_params *)calloc(1, sizeof(t_CEDD_params));
#ifdef DATA_SET_1
CEDD_params->nRows=3072 * 2;
CEDD_params->nCols=4608 * 2;
#else
CEDD_params->nRows=4608 * 2.6;
CEDD_params->nCols=4608 * 2.6;
#endif
k_stub->params = (void *)CEDD_params;
k_stub->launchCKEkernel = launch_preemp_GCEDD;
k_stub->launchORIkernel = launch_orig_GCEDD;
k_stub->startKernel = GCEDD_start_kernel;
k_stub->endKernel = GCEDD_end_kernel;
k_stub->startMallocs = GCEDD_start_mallocs;
k_stub->startTransfers = GCEDD_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->kconf.coarsening = 16;
k_stub->total_tasks = k_stub->kconf.gridsize.x/k_stub->kconf.coarsening ;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case SCEDD:
// t_CEDD_params *SCEDD_params;
// SCEDD_params = (t_CEDD_params *)calloc(1, sizeof(t_CEDD_params));
// #ifdef DATA_SET_1
// SCEDD_params->nRows=3072 * 2;
// SCEDD_params->nCols=4608 * 2;
// #else
// SCEDD_params->nRows=4608 * 2.6;
// SCEDD_params->nCols=4608 * 2.6;
// #endif
// *SCEDD_params->h_in_out = *GCEDD_params->h_in_out;
// SCEDD_params->data_CEDD = GCEDD_params->data_CEDD;
// SCEDD_params->out_CEDD = GCEDD_params->out_CEDD;
// SCEDD_params->theta_CEDD = GCEDD_params->theta_CEDD;
k_stub->params = (void *)CEDD_params;
k_stub->launchCKEkernel = launch_preemp_SCEDD;
k_stub->launchORIkernel = launch_orig_SCEDD;
k_stub->startKernel = SCEDD_start_kernel;
k_stub->endKernel = SCEDD_end_kernel;
k_stub->startMallocs = SCEDD_start_mallocs;
k_stub->startTransfers = SCEDD_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = (CEDD_params->gridDimX * CEDD_params->gridDimY) / 1;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case NCEDD:
// t_CEDD_params *NCEDD_params;
// NCEDD_params = (t_CEDD_params *)calloc(1, sizeof(t_CEDD_params));
// #ifdef DATA_SET_1
// NCEDD_params->nRows=3072 * 2;
// NCEDD_params->nCols=4608 * 2;
// #else
// NCEDD_params->nRows=4608 * 2.6;
// NCEDD_params->nCols=4608 * 2.6;
// #endif
// *NCEDD_params->h_in_out = *GCEDD_params->h_in_out;
// NCEDD_params->data_CEDD = GCEDD_params->data_CEDD;
// NCEDD_params->out_CEDD = GCEDD_params->out_CEDD;
// NCEDD_params->theta_CEDD = GCEDD_params->theta_CEDD;
k_stub->params = (void *)CEDD_params;
k_stub->launchCKEkernel = launch_preemp_NCEDD;
k_stub->launchORIkernel = launch_orig_NCEDD;
k_stub->startKernel = NCEDD_start_kernel;
k_stub->endKernel = NCEDD_end_kernel;
k_stub->startMallocs = NCEDD_start_mallocs;
k_stub->startTransfers = NCEDD_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case HCEDD:
// t_CEDD_params *HCEDD_params;
// HCEDD_params = (t_CEDD_params *)calloc(1, sizeof(t_CEDD_params));
// #ifdef DATA_SET_1
// HCEDD_params->nRows=3072 * 2;
// HCEDD_params->nCols=4608 * 2;
// #else
// HCEDD_params->nRows=4608 * 2.6;
// HCEDD_params->nCols=4608 * 2.6;
// #endif
// *HCEDD_params->h_in_out = *GCEDD_params->h_in_out;
// HCEDD_params->data_CEDD = GCEDD_params->data_CEDD;
// HCEDD_params->out_CEDD = GCEDD_params->out_CEDD;
// HCEDD_params->theta_CEDD = GCEDD_params->theta_CEDD;
k_stub->params = (void *)CEDD_params;
k_stub->launchCKEkernel = launch_preemp_HCEDD;
k_stub->launchORIkernel = launch_orig_HCEDD;
k_stub->startKernel = HCEDD_start_kernel;
k_stub->endKernel = HCEDD_end_kernel;
k_stub->startMallocs = HCEDD_start_mallocs;
k_stub->startTransfers = HCEDD_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
case HST256:
t_HST256_params *HST256_params;
HST256_params = (t_HST256_params *)calloc(1, sizeof(t_HST256_params));
k_stub->params = (void *)HST256_params;
k_stub->launchCKEkernel = launch_preemp_HST256;
k_stub->launchORIkernel = launch_orig_HST256;
k_stub->startKernel = HST256_start_kernel;
k_stub->endKernel = HST256_end_kernel;
k_stub->startMallocs = HST256_start_mallocs;
k_stub->startTransfers = HST256_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
HST256_params->warp_count = 6;
HST256_params->histogram256_threadblock_size = HST256_params->warp_count * WARP_SIZE;
HST256_params->histogram256_threadblock_memory = HST256_params->warp_count * HISTOGRAM256_BIN_COUNT;
#ifdef DATA_SET_1
HST256_params->byteCount256 = 64 * 1048576 * HST256_params->warp_count;
#else
HST256_params->byteCount256 = 64 * 1048576 * HST256_params->warp_count;
#endif
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 192;
k_stub->kconf.gridsize.x = 240;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
//k_stub->total_tasks = (64 * 1048576)/k_stub->kconf.blocksize.x + (((64 * 1048576)%k_stub->kconf.blocksize.x==0)?0:1);
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
HST256_params->warp_count = 6;
HST256_params->histogram256_threadblock_size = HST256_params->warp_count * WARP_SIZE;
HST256_params->histogram256_threadblock_memory = HST256_params->warp_count * HISTOGRAM256_BIN_COUNT;
#ifdef DATA_SET_1
HST256_params->byteCount256 = 64 * 1048576 * HST256_params->warp_count;
#else
HST256_params->byteCount256 = 64 * 1048576 * HST256_params->warp_count;
#endif
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 10;
k_stub->kconf.blocksize.x = 192;
k_stub->kconf.gridsize.x = 240;
k_stub->total_tasks = k_stub->kconf.gridsize.x;
//k_stub->total_tasks = (64 * 1048576)/k_stub->kconf.blocksize.x + (((64 * 1048576)%k_stub->kconf.blocksize.x==0)?0:1);
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
HST256_params->warp_count = 8;
HST256_params->histogram256_threadblock_size = HST256_params->warp_count * WARP_SIZE;
HST256_params->histogram256_threadblock_memory = HST256_params->warp_count * HISTOGRAM256_BIN_COUNT;
#ifdef DATA_SET_1
HST256_params->byteCount256 = 64 * 1089536 * 8;
#else
HST256_params->byteCount256 = 64 * 1089536 * 8 *2;
#endif
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 256;
k_stub->kconf.coarsening = 8;
//k_stub->kconf.gridsize.x = HST256_params->byteCount256 / (sizeof(uint) * k_stub->kconf.coarsening * k_stub->kconf.blocksize.x);
k_stub->kconf.gridsize.x = k_stub->kconf.numSMs * k_stub->kconf.max_persistent_blocks;
//k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->total_tasks = HST256_params->byteCount256 / (sizeof(uint) * k_stub->kconf.blocksize.x * k_stub->kconf.coarsening);
// k_stub->total_tasks = (k_stub->kconf.gridsize.x * ((HST256_params->byteCount256 / sizeof(uint)) / (k_stub->kconf.blocksize.x * k_stub->kconf.gridsize.x))) / k_stub->kconf.coarsening;
//k_stub->total_tasks = (64 * 1048576)/k_stub->kconf.blocksize.x + (((64 * 1048576)%k_stub->kconf.blocksize.x==0)?0:1);
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
break;
default:
printf("Unknown kernel\n");
return -1;
}
// Allocate task support on CPU memory (pinned memory)
checkCudaErrors(cudaHostAlloc((void **)&(k_stub->h_state), sizeof(State) * MAX_STREAMS_PER_KERNEL, cudaHostAllocDefault)); // In Pinned memory
for (int i=0; i<MAX_STREAMS_PER_KERNEL; i++)
k_stub->h_state[i] = PREP;
checkCudaErrors(cudaHostAlloc((void **)&(k_stub->h_executed_tasks), sizeof(int), cudaHostAllocDefault)); // In Pinned memory
checkCudaErrors(cudaHostAlloc((void **)&(k_stub->h_SMs_cont), sizeof(int)*k_stub->kconf.numSMs, cudaHostAllocDefault)); // In Pinned memory
// Proxy support for zero-copy
#ifdef ZEROCOPY
// Zero-copy eviction state (host process indicates eviction to proxy)
checkCudaErrors(cudaHostAlloc((void **)&(k_stub->h_proxy_eviction), sizeof(int), cudaHostAllocMapped));
checkCudaErrors(cudaHostGetDevicePointer((void **)&(k_stub->d_proxy_eviction), (void *)(k_stub->h_proxy_eviction) , 0));
// Zero-copy: the proxy sends to the host, when the kernel finishes, the number of executed kernel tasks
checkCudaErrors(cudaHostAlloc((void **)&(k_stub->h_exec_tasks_proxy), sizeof(int), cudaHostAllocMapped));
checkCudaErrors(cudaHostGetDevicePointer((void **)&(k_stub->d_exec_tasks_proxy), (void *)(k_stub->h_exec_tasks_proxy) , 0));
// Stream to launch proxy
k_stub->proxy_s = (cudaStream_t *)malloc(sizeof(cudaStream_t));
err = cudaStreamCreate(k_stub->proxy_s);
checkCudaErrors(err);
#endif
// Allocate and initialize task support in device memory
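// d_executed_tasks is a global subtask counter from which the kernel obtains the id of the next
// subtask to execute; gm_state is the device-side copy of h_state that the host updates to signal
// an eviction request to the running blocks.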
checkCudaErrors(cudaMalloc((void **)&k_stub->d_executed_tasks, sizeof(int))); // Subtask counter: the kernel uses it to obtain the subtask id
cudaMemset(k_stub->d_executed_tasks, 0, sizeof(int));
checkCudaErrors(cudaMalloc((void **)&k_stub->gm_state, sizeof(State) * MAX_STREAMS_PER_KERNEL)); // pointer to the global memory position used to communicate the evicted state from the CPU
cudaMemcpy(k_stub->gm_state, k_stub->h_state, sizeof(State) * MAX_STREAMS_PER_KERNEL, cudaMemcpyHostToDevice);
checkCudaErrors(cudaMalloc((void **)&k_stub->d_SMs_cont, sizeof(int)*k_stub->kconf.numSMs)); // create an array (one position per SM) for SMK specific support
cudaMemset(k_stub->d_SMs_cont, 0, sizeof(int)*k_stub->kconf.numSMs);
*stub = k_stub;
return 0;
}
// Create stub info but also pass a t_params structure pointer from a previous kstub: it is used for kstubs from an application with several kernels
int create_stubinfo_with_params(t_kernel_stub **stub, int deviceId, t_Kernel id, cudaStream_t *transfer_s, cudaStream_t *preemp_s, void *params)
{
cudaError_t err;
t_kernel_stub *k_stub = (t_kernel_stub *)calloc(1, sizeof(t_kernel_stub)); // Create kernel stub
k_stub->deviceId = deviceId;
k_stub->id = id;
k_stub->kernel_finished = 0;
k_stub->HtD_tranfers_finished = 0;
k_stub->DtH_tranfers_finished = 0;
// Streams
cudaStream_t *kernel_s, *m_transfer_s;
kernel_s = (cudaStream_t *)malloc(sizeof(cudaStream_t));
err = cudaStreamCreate(kernel_s);
checkCudaErrors(err);
m_transfer_s = (cudaStream_t *)malloc(2*sizeof(cudaStream_t));
err = cudaStreamCreate(&m_transfer_s[0]);
err = cudaStreamCreate(&m_transfer_s[1]);
checkCudaErrors(err);
k_stub->execution_s = kernel_s;
k_stub->transfer_s = m_transfer_s;
k_stub->preemp_s = preemp_s;
/** Get device name*/
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, deviceId);
char *device_name = deviceProp.name;
// Updating kernel info
switch (id) {
case CCONV:
{
t_CONV_params *CONV_params = (t_CONV_params *)params;
CONV_params->gridDimY[1] = CONV_params->conv_cols / (8 * 8);
k_stub->params = (void *)CONV_params;
k_stub->launchCKEkernel = launch_preemp_CCONV;
k_stub->launchORIkernel = launch_orig_CCONV;
k_stub->startKernel = CCONV_start_kernel;
k_stub->startMallocs = CCONV_start_mallocs;
k_stub->startTransfers = CCONV_start_transfers;
k_stub->endKernel = CCONV_end_kernel;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 12;
k_stub->kconf.max_persistent_blocks = 9;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 8;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / 16) * (CONV_params->conv_cols / (8 * 8));
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 8;
k_stub->kconf.gridsize.x = (CONV_params->conv_rows / 16) * (CONV_params->conv_cols / (8 * 8));
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 16;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 8;
//k_stub->kconf.gridsize.x = (CONV_params->conv_rows / 16) * (CONV_params->conv_cols / (8 * 8));
k_stub->kconf.gridsize.x = CONV_params->conv_cols /16;
k_stub->kconf.gridsize.y = CONV_params->conv_rows / ( 8 * 8);
k_stub->kconf.coarsening = 1;
k_stub->total_tasks = k_stub->kconf.gridsize.x * k_stub->kconf.gridsize.y / k_stub->kconf.coarsening ;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
}
break;
case SCEDD:
// t_CEDD_params *SCEDD_params;
// SCEDD_params = (t_CEDD_params *)calloc(1, sizeof(t_CEDD_params));
// #ifdef DATA_SET_1
// SCEDD_params->nRows=3072 * 2;
// SCEDD_params->nCols=4608 * 2;
// #else
// SCEDD_params->nRows=4608 * 2.6;
// SCEDD_params->nCols=4608 * 2.6;
// #endif
// *SCEDD_params->h_in_out = *GCEDD_params->h_in_out;
// SCEDD_params->data_CEDD = GCEDD_params->data_CEDD;
// SCEDD_params->out_CEDD = GCEDD_params->out_CEDD;
// SCEDD_params->theta_CEDD = GCEDD_params->theta_CEDD;
{
t_CEDD_params *CEDD_params = (t_CEDD_params *)params;
k_stub->params = params;
k_stub->launchCKEkernel = launch_preemp_SCEDD;
k_stub->launchORIkernel = launch_orig_SCEDD;
k_stub->startKernel = SCEDD_start_kernel;
k_stub->endKernel = SCEDD_end_kernel;
k_stub->startMallocs = SCEDD_start_mallocs;
k_stub->startTransfers = SCEDD_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = (CEDD_params->gridDimX * CEDD_params->gridDimY) / 1;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
}
break;
case NCEDD:
// t_CEDD_params *NCEDD_params;
// NCEDD_params = (t_CEDD_params *)calloc(1, sizeof(t_CEDD_params));
// #ifdef DATA_SET_1
// NCEDD_params->nRows=3072 * 2;
// NCEDD_params->nCols=4608 * 2;
// #else
// NCEDD_params->nRows=4608 * 2.6;
// NCEDD_params->nCols=4608 * 2.6;
// #endif
// *NCEDD_params->h_in_out = *GCEDD_params->h_in_out;
// NCEDD_params->data_CEDD = GCEDD_params->data_CEDD;
// NCEDD_params->out_CEDD = GCEDD_params->out_CEDD;
// NCEDD_params->theta_CEDD = GCEDD_params->theta_CEDD;
{
t_CEDD_params *CEDD_params = (t_CEDD_params *)params;
k_stub->params = params;
k_stub->launchCKEkernel = launch_preemp_NCEDD;
k_stub->launchORIkernel = launch_orig_NCEDD;
k_stub->startKernel = NCEDD_start_kernel;
k_stub->endKernel = NCEDD_end_kernel;
k_stub->startMallocs = NCEDD_start_mallocs;
k_stub->startTransfers = NCEDD_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->kconf.coarsening = 2;
k_stub->total_tasks = k_stub->kconf.gridsize.x/k_stub->kconf.coarsening;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
}
break;
case HCEDD:
// t_CEDD_params *HCEDD_params;
// HCEDD_params = (t_CEDD_params *)calloc(1, sizeof(t_CEDD_params));
// #ifdef DATA_SET_1
// HCEDD_params->nRows=3072 * 2;
// HCEDD_params->nCols=4608 * 2;
// #else
// HCEDD_params->nRows=4608 * 2.6;
// HCEDD_params->nCols=4608 * 2.6;
// #endif
// *HCEDD_params->h_in_out = *GCEDD_params->h_in_out;
// HCEDD_params->data_CEDD = GCEDD_params->data_CEDD;
// HCEDD_params->out_CEDD = GCEDD_params->out_CEDD;
// HCEDD_params->theta_CEDD = GCEDD_params->theta_CEDD;
{
t_CEDD_params *CEDD_params = (t_CEDD_params *)params;
k_stub->params = params;
k_stub->launchCKEkernel = launch_preemp_HCEDD;
k_stub->launchORIkernel = launch_orig_HCEDD;
k_stub->startKernel = HCEDD_start_kernel;
k_stub->endKernel = HCEDD_end_kernel;
k_stub->startMallocs = HCEDD_start_mallocs;
k_stub->startTransfers = HCEDD_start_transfers;
if (strcmp(device_name, "Tesla K20c") == 0) {
k_stub->kconf.numSMs = 13;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else {
if (strcmp(device_name, "GeForce GTX 980") == 0) {
k_stub->kconf.numSMs = 16;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->total_tasks = k_stub->kconf.gridsize.x;
k_stub->kconf.coarsening = 1;
}
else{
if (strcmp(device_name, "TITAN X (Pascal)") == 0) {
k_stub->kconf.numSMs = 28;
k_stub->kconf.max_persistent_blocks = 8;
k_stub->kconf.blocksize.x = 16;
k_stub->kconf.blocksize.y = 16;
CEDD_params->gridDimX = (CEDD_params->nCols - 2)/k_stub->kconf.blocksize.x; // Add information loss during linearization
CEDD_params->gridDimY = (CEDD_params->nRows - 2)/k_stub->kconf.blocksize.y;
k_stub->kconf.gridsize.x = CEDD_params->gridDimX * CEDD_params->gridDimY;
k_stub->kconf.gridsize.y = 1; //Grid Linearization
k_stub->kconf.coarsening = 4;
k_stub->total_tasks = k_stub->kconf.gridsize.x/k_stub->kconf.coarsening;
}
else{
printf("Error: Unknown device\n");
return -1;
}
}
}
}
break;
default:
printf("Unknown kernel\n");
return -1;
}
// Allocate task support on CPU memory (pinned memory)
checkCudaErrors(cudaHostAlloc((void **)&(k_stub->h_state), sizeof(State) * MAX_STREAMS_PER_KERNEL, cudaHostAllocDefault)); // In Pinned memory
for (int i=0; i<MAX_STREAMS_PER_KERNEL; i++)
k_stub->h_state[i] = PREP;
checkCudaErrors(cudaHostAlloc((void **)&(k_stub->h_executed_tasks), sizeof(int), cudaHostAllocDefault)); // In Pinned memory
checkCudaErrors(cudaHostAlloc((void **)&(k_stub->h_SMs_cont), sizeof(int)*k_stub->kconf.numSMs, cudaHostAllocDefault)); // In Pinned memory
// Allocate and initialize memory address calculation support in CPU memory
k_stub->num_addr_counters = 2;
checkCudaErrors(cudaHostAlloc((void **)&(k_stub->h_numUniqueAddr), k_stub->num_addr_counters * sizeof(int), cudaHostAllocDefault)); // In Pinned memory
// Proxy support for zero-copy
#ifdef ZEROCOPY
// Zero-copy eviction state (host process indicates eviction to proxy)
checkCudaErrors(cudaHostAlloc((void **)&(k_stub->h_proxy_eviction), sizeof(int), cudaHostAllocMapped));
checkCudaErrors(cudaHostGetDevicePointer((void **)&(k_stub->d_proxy_eviction), (void *)(k_stub->h_proxy_eviction) , 0));
// Zero-copy: the proxy sends to the host, when the kernel finishes, the number of executed kernel tasks
checkCudaErrors(cudaHostAlloc((void **)&(k_stub->h_exec_tasks_proxy), sizeof(int), cudaHostAllocMapped));
checkCudaErrors(cudaHostGetDevicePointer((void **)&(k_stub->d_exec_tasks_proxy), (void *)(k_stub->h_exec_tasks_proxy) , 0));
// Stream to launch proxy
k_stub->proxy_s = (cudaStream_t *)malloc(sizeof(cudaStream_t));
err = cudaStreamCreate(k_stub->proxy_s);
checkCudaErrors(err);
#endif
// Allocate and initialize task support in device memory
checkCudaErrors(cudaMalloc((void **)&k_stub->d_executed_tasks, sizeof(int))); // Subtask counter: the kernel uses it to obtain the subtask id
cudaMemset(k_stub->d_executed_tasks, 0, sizeof(int));
checkCudaErrors(cudaMalloc((void **)&k_stub->gm_state, sizeof(State) * MAX_STREAMS_PER_KERNEL)); // pointer to the global memory position used to communicate the evicted state from the CPU
cudaMemcpy(k_stub->gm_state, k_stub->h_state, sizeof(State) * MAX_STREAMS_PER_KERNEL, cudaMemcpyHostToDevice);
checkCudaErrors(cudaMalloc((void **)&k_stub->d_SMs_cont, sizeof(int)*k_stub->kconf.numSMs)); // create an array (one position per SM) for SMK specific support
cudaMemset(k_stub->d_SMs_cont, 0, sizeof(int)*k_stub->kconf.numSMs);
*stub = k_stub;
return 0;
}
|
050de765558c26b3add4ad766225579b8ef5b316.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "psamask_collect_backward_cuda.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
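// Benchmark harness: for each of the first argv[1] matrix sizes above, sweep the 20 block
// configurations, round XSIZE/YSIZE up to multiples of the block dimensions, run a 10-launch
// warm-up, then time 1000 kernel launches (launch time only; the clock is stopped without a
// device synchronization) and print [time_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].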
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int nthreads = 1;
const int feature_H_ = 1;
const int feature_W_ = 1;
const int mask_H_ = 1;
const int mask_W_ = 1;
const int half_mask_H_ = 1;
const int half_mask_W_ = 1;
const float *buffer_diff = NULL;
hipMalloc(&buffer_diff, XSIZE*YSIZE);
float *mask_diff = NULL;
hipMalloc(&mask_diff, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(psamask_collect_backward_cuda, dim3(gridBlock), dim3(threadBlock), 0, 0, nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, buffer_diff, mask_diff);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(psamask_collect_backward_cuda, dim3(gridBlock), dim3(threadBlock), 0, 0, nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, buffer_diff, mask_diff);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(psamask_collect_backward_cuda, dim3(gridBlock), dim3(threadBlock), 0, 0, nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, buffer_diff, mask_diff);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 050de765558c26b3add4ad766225579b8ef5b316.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "psamask_collect_backward_cuda.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
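// Benchmark harness: for each of the first argv[1] matrix sizes above, sweep the 20 block
// configurations, round XSIZE/YSIZE up to multiples of the block dimensions, run a 10-launch
// warm-up, then time 1000 kernel launches (launch time only; the clock is stopped without a
// device synchronization) and print [time_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].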
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int nthreads = 1;
const int feature_H_ = 1;
const int feature_W_ = 1;
const int mask_H_ = 1;
const int mask_W_ = 1;
const int half_mask_H_ = 1;
const int half_mask_W_ = 1;
const float *buffer_diff = NULL;
cudaMalloc(&buffer_diff, XSIZE*YSIZE);
float *mask_diff = NULL;
cudaMalloc(&mask_diff, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
psamask_collect_backward_cuda<<<gridBlock,threadBlock>>>(nthreads,feature_H_,feature_W_,mask_H_,mask_W_,half_mask_H_,half_mask_W_,buffer_diff,mask_diff);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
psamask_collect_backward_cuda<<<gridBlock,threadBlock>>>(nthreads,feature_H_,feature_W_,mask_H_,mask_W_,half_mask_H_,half_mask_W_,buffer_diff,mask_diff);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
psamask_collect_backward_cuda<<<gridBlock,threadBlock>>>(nthreads,feature_H_,feature_W_,mask_H_,mask_W_,half_mask_H_,half_mask_W_,buffer_diff,mask_diff);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7f4ec9475160ffef37b61cc296b738de840e4906.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include<iostream>
#include <hip/hip_runtime.h>
#include <time.h>
#include <sys/time.h>
using namespace std;
//## KERNEL FOR MATRIX-VECTOR PRODUCT IN 1 THREAD ##//
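// The matvec kernel is not defined anywhere in this file, so the launch below will not compile
// as-is. A minimal single-thread sketch, consistent with the launch arguments
// (dev_a, dev_b, dev_c, N, M) and the host-side verification loop, might look like this
// (an illustrative sketch, not part of the original source):
//
// __global__ void matvec(float *a, float *b, float *c, int N, int M) {
//     for (int row = 0; row < N; row++) {
//         float sum = 0.0f;
//         for (int col = 0; col < M; col++)
//             sum += b[row * M + col] * a[col];
//         c[row] = sum;
//     }
// }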
extern double mysecond();
void init_array(float *a, const int N);
void init_mat(float *a, const int N, const int M);
void print_array(float *a, const int N, char *d);
void print_mat(float *a, const int N, const int M, char *d);
int main (void) {
float *a, *b, *c, *d;
float *dev_a, *dev_b, *dev_c;
double t;
int N = 32768;
int M = N;
// Allocate host memory
a = (float*)malloc(sizeof(float)*N);
b = (float*)malloc(sizeof(float)*N*M);
c = (float*)malloc(sizeof(float)*M);
d = (float*)malloc(sizeof(float)*M);
// Initialize matrices
init_array(a, N);
init_mat(b, N, M);
init_array(c, M);
//## ALLOCATE MEMORY FOR VARIABLES IN DEVICE ##//
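// This section is left empty in the original file (an exercise-style placeholder). A minimal
// sketch of the device allocations it would typically contain (not part of the original code):
// hipMalloc((void**)&dev_a, sizeof(float)*N);
// hipMalloc((void**)&dev_b, sizeof(float)*N*M);
// hipMalloc((void**)&dev_c, sizeof(float)*M);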
t = mysecond();
//## TRANSFER DATA FROM HOST TO DEVICE ##//
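// Also an empty placeholder in the original. A minimal sketch of the host-to-device copies it
// would typically contain (not part of the original code):
// hipMemcpy(dev_a, a, sizeof(float)*N, hipMemcpyHostToDevice);
// hipMemcpy(dev_b, b, sizeof(float)*N*M, hipMemcpyHostToDevice);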
t = (mysecond() - t);
printf ("\nElapsed time for copy from host to device = %g\n", t );
t = mysecond();
// matrix vector product
hipLaunchKernelGGL(( matvec), dim3(1), dim3(1), 0, 0, dev_a, dev_b, dev_c, N, M);
hipDeviceSynchronize();
t = (mysecond() - t);
printf ("\nElapsed time for matrix vector product in 1 thread = %g\n", t );
t = mysecond();
// Transfer data from device to host memory
hipMemcpy(c, dev_c, sizeof(float)*M, hipMemcpyDeviceToHost);
t = (mysecond() - t);
printf ("\nElapsed time for copy from device to host = %g\n", t );
// verify the kernel implementation
float sum = 0;
for(int row = 0; row < N; row++)
{
sum = 0;
for(int col = 0; col < N; col++)
{
sum = sum + b[row*N + col]*a[col];
}
d[row] = sum;
}
float error = 0;
for(int i = 0; i < N; i++)
error += d[i] - c[i];
printf ("\nError = %g\n", error );
//## DEALLOCATE HOST AND DEVICE MEMORY ##//
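// Another empty placeholder. A minimal sketch of the cleanup it would typically contain
// (not part of the original code):
// hipFree(dev_a); hipFree(dev_b); hipFree(dev_c);
// free(a); free(b); free(c); free(d);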
return 0;
};
void init_array(float *a, const int N) {
int i;
for(i = 0; i < N; i++)
a[i] = 1.0;
}
void init_mat(float *a, const int N, const int M) {
int i, j;
for(i = 0; i < N; i++)
for(j = 0; j < M; j++)
a[i*M + j] = 2.0;
}
void print_array(float *a, const int N, char *d) {
int i;
for(i = 0; i < N; i++)
printf("\n%s[%d]: %f",d, i, a[i]);
printf("\n");
}
void print_mat(float *a, const int N, const int M, char *d) {
int i, j;
for(i = 0; i < N; i++){
printf("\n%s[%d]:", d, i);
for (j = 0; j < M; j++)
printf("\t%6.4f", a[i*M + j]);
}
printf("\n");
}
double mysecond()
{
struct timeval tp;
struct timezone tzp;
gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6);
}
| 7f4ec9475160ffef37b61cc296b738de840e4906.cu | #include <stdio.h>
#include<iostream>
#include <cuda.h>
#include <time.h>
#include <sys/time.h>
using namespace std;
//## KERNEL FOR MATRIX-VECTOR PRODUCT IN 1 THREAD ##//
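// The matvec kernel launched below is not defined in this file; a minimal single-thread
// sketch is assumed here, matching the CPU verification loop (c = B * a, row-major B).
__global__ void matvec(float *a, float *b, float *c, int N, int M) {
for (int row = 0; row < N; row++) {
float sum = 0.0f;
for (int col = 0; col < M; col++)
sum += b[row * M + col] * a[col];
c[row] = sum;
}
}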
extern double mysecond();
void init_array(float *a, const int N);
void init_mat(float *a, const int N, const int M);
void print_array(float *a, const int N, char *d);
void print_mat(float *a, const int N, const int M, char *d);
int main (void) {
float *a, *b, *c, *d;
float *dev_a, *dev_b, *dev_c;
double t;
int N = 32768;
int M = N;
// Allocate host memory
a = (float*)malloc(sizeof(float)*N);
b = (float*)malloc(sizeof(float)*N*M);
c = (float*)malloc(sizeof(float)*M);
d = (float*)malloc(sizeof(float)*M);
// Initialize matrices
init_array(a, N);
init_mat(b, N, M);
init_array(c, M);
//## ALLOCATE MEMORY FOR VARIABLES IN DEVICE ##//
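// Assumed device allocations for this placeholder section (sizes mirror the host buffers above).
cudaMalloc((void**)&dev_a, sizeof(float)*N);
cudaMalloc((void**)&dev_b, sizeof(float)*N*M);
cudaMalloc((void**)&dev_c, sizeof(float)*M);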
t = mysecond();
//## TRANSFER DATA FROM HOST TO DEVICE ##//
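// Assumed host-to-device copies of the input vector and matrix for this placeholder section.
cudaMemcpy(dev_a, a, sizeof(float)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, sizeof(float)*N*M, cudaMemcpyHostToDevice);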
t = (mysecond() - t);
printf ("\nElapsed time for copy from host to device = %g\n", t );
t = mysecond();
// matrix vector product
matvec<<<1, 1>>>(dev_a, dev_b, dev_c, N, M);
cudaDeviceSynchronize();
t = (mysecond() - t);
printf ("\nElapsed time for matrix vector product in 1 thread = %g\n", t );
t = mysecond();
// Transfer data from device to host memory
cudaMemcpy(c, dev_c, sizeof(float)*M, cudaMemcpyDeviceToHost);
t = (mysecond() - t);
printf ("\nElapsed time for copy from device to host = %g\n", t );
// verify the kernel implementation
float sum = 0;
for(int row = 0; row < N; row++)
{
sum = 0;
for(int col = 0; col < N; col++)
{
sum = sum + b[row*N + col]*a[col];
}
d[row] = sum;
}
float error = 0;
for(int i = 0; i < N; i++)
error += d[i] - c[i];
printf ("\nError = %g\n", error );
//## DEALLOCATE HOST AND DEVICE MEMORY ##//
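// Assumed cleanup for this placeholder section: release the device and host buffers.
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
free(a); free(b); free(c); free(d);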
return 0;
};
void init_array(float *a, const int N) {
int i;
for(i = 0; i < N; i++)
a[i] = 1.0;
}
void init_mat(float *a, const int N, const int M) {
int i, j;
for(i = 0; i < N; i++)
for(j = 0; j < M; j++)
a[i*M + j] = 2.0;
}
void print_array(float *a, const int N, char *d) {
int i;
for(i = 0; i < N; i++)
printf("\n%s[%d]: %f",d, i, a[i]);
printf("\n");
}
void print_mat(float *a, const int N, const int M, char *d) {
int i, j;
for(i = 0; i < N; i++){
printf("\n%s[%d]:", d, i);
for (j = 0; j < M; j++)
printf("\t%6.4f", a[i*M + j]);
}
printf("\n");
}
double mysecond()
{
struct timeval tp;
struct timezone tzp;
gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6);
}
|
b481764df3ca5cdd30e75a7ecb769db82671481b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
using namespace std;
__global__ void add(int n, float* x, float* y){ //__global__ establishes device to managed by CUDA
for (int i = 0; i < n; i++) {
y[i] = x[i] + y[i];
}
}
int main()
{
int N = 1 << 20;
float*x, *y;
hipMallocManaged(&x, N * sizeof(float)); //allocate unified (managed) memory accessible from host and device
hipMallocManaged(&y, N * sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, x, y);
hipDeviceSynchronize();
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i] - 3.0f));
cout << "Max error:" << maxError << endl;
hipFree(x); // memory from hipMallocManaged is released with hipFree, not delete[]
hipFree(y);
return 0;
}
| b481764df3ca5cdd30e75a7ecb769db82671481b.cu | #include <iostream>
#include <math.h>
using namespace std;
__global__ void add(int n, float* x, float* y){ //__global__ establishes device to managed by CUDA
for (int i = 0; i < n; i++) {
y[i] = x[i] + y[i];
}
}
int main()
{
int N = 1 << 20;
float*x, *y;
cudaMallocManaged(&x, N * sizeof(float)); //allocate unified (managed) memory accessible from host and device
cudaMallocManaged(&y, N * sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
add<<<1, 1>>>(N, x, y);
cudaDeviceSynchronize();
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i] - 3.0f));
cout << "Max error:" << maxError << endl;
cudaFree(x); // memory from cudaMallocManaged is released with cudaFree, not delete[]
cudaFree(y);
return 0;
}
|
d0cc7ce5d748205041042f23afe8b3e09dd24238.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define SIZE (100*1024*1024)
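// big_random_block() is used in main() but not declared in this file; it appears to come from
// the "CUDA by Example" book.h helpers, so a matching declaration is assumed here.
unsigned char* big_random_block( int size );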
__global__ void histo_kernel( unsigned char *buffer,
long size,
unsigned int *histo ) {
// each thread zeroes one bin of the shared histogram
__shared__ unsigned int temp[256];
temp[threadIdx.x] = 0;
__syncthreads();
// calculate the starting index and the offset to the next
// block that each thread will be processing
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while (i < size) {
atomicAdd( &temp[buffer[i]], 1 );
i += stride;
}
// once all threads are done, each one merges its bin of the shared histogram into global memory
__syncthreads();
atomicAdd( &(histo[threadIdx.x]), temp[threadIdx.x] );
}
int main( void ) {
unsigned char *buffer =
(unsigned char*)big_random_block( SIZE );
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start, 0 );
// allocate memory on the GPU for the file's data
unsigned char *dev_buffer;
unsigned int *dev_histo;
hipMalloc( (void**)&dev_buffer, SIZE );
hipMemcpy( dev_buffer, buffer, SIZE,
hipMemcpyHostToDevice );
hipMalloc( (void**)&dev_histo,
256 * sizeof( int ) );
hipMemset( dev_histo, 0,
256 * sizeof( int ) );
// kernel launch - 2x the number of mps gave best timing
hipDeviceProp_t prop;
hipGetDeviceProperties( &prop, 0 );
int blocks = prop.multiProcessorCount;
hipLaunchKernelGGL(( histo_kernel), dim3(blocks*2),dim3(256), 0, 0, dev_buffer,
SIZE, dev_histo );
unsigned int histo[256];
hipMemcpy( histo, dev_histo,
256 * sizeof( int ),
hipMemcpyDeviceToHost );
// get stop time, and display the timing results
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
float elapsedTime;
hipEventElapsedTime( &elapsedTime,
start, stop );
printf( "Time to generate: %3.1f ms\n", elapsedTime );
long histoCount = 0;
for (int i=0; i<256; i++) {
histoCount += histo[i];
}
printf( "Histogram Sum: %ld\n", histoCount );
// verify that we have the same counts via CPU
for (int i=0; i<SIZE; i++)
histo[buffer[i]]--;
for (int i=0; i<256; i++) {
if (histo[i] != 0)
printf( "Failure at %d!\n", i );
}
hipEventDestroy( start );
hipEventDestroy( stop );
hipFree( dev_histo );
hipFree( dev_buffer );
free( buffer );
return 0;
}
| d0cc7ce5d748205041042f23afe8b3e09dd24238.cu | #include <stdio.h>
#define SIZE (100*1024*1024)
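// big_random_block() is used in main() but not declared in this file; it appears to come from
// the "CUDA by Example" book.h helpers, so a matching declaration is assumed here.
unsigned char* big_random_block( int size );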
__global__ void histo_kernel( unsigned char *buffer,
long size,
unsigned int *histo ) {
// each thread zeroes one bin of the shared histogram
__shared__ unsigned int temp[256];
temp[threadIdx.x] = 0;
__syncthreads();
// calculate the starting index and the offset to the next
// block that each thread will be processing
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while (i < size) {
atomicAdd( &temp[buffer[i]], 1 );
i += stride;
}
// once all threads are done, each one merges its bin of the shared histogram into global memory
__syncthreads();
atomicAdd( &(histo[threadIdx.x]), temp[threadIdx.x] );
}
int main( void ) {
unsigned char *buffer =
(unsigned char*)big_random_block( SIZE );
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
// allocate memory on the GPU for the file's data
unsigned char *dev_buffer;
unsigned int *dev_histo;
cudaMalloc( (void**)&dev_buffer, SIZE );
cudaMemcpy( dev_buffer, buffer, SIZE,
cudaMemcpyHostToDevice );
cudaMalloc( (void**)&dev_histo,
256 * sizeof( int ) );
cudaMemset( dev_histo, 0,
256 * sizeof( int ) );
// kernel launch - 2x the number of mps gave best timing
cudaDeviceProp prop;
cudaGetDeviceProperties( &prop, 0 );
int blocks = prop.multiProcessorCount;
histo_kernel<<<blocks*2,256>>>( dev_buffer,
SIZE, dev_histo );
unsigned int histo[256];
cudaMemcpy( histo, dev_histo,
256 * sizeof( int ),
cudaMemcpyDeviceToHost );
// get stop time, and display the timing results
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
float elapsedTime;
cudaEventElapsedTime( &elapsedTime,
start, stop );
printf( "Time to generate: %3.1f ms\n", elapsedTime );
long histoCount = 0;
for (int i=0; i<256; i++) {
histoCount += histo[i];
}
printf( "Histogram Sum: %ld\n", histoCount );
// verify that we have the same counts via CPU
for (int i=0; i<SIZE; i++)
histo[buffer[i]]--;
for (int i=0; i<256; i++) {
if (histo[i] != 0)
printf( "Failure at %d!\n", i );
}
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaFree( dev_histo );
cudaFree( dev_buffer );
free( buffer );
return 0;
}
|
517dda175630cdeee1b3097c81963c51d8f0a7d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include "stdio.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
__forceinline__ __device__ float clipp(float in, float low, float high)
{
return (in < low) ? low : (in > high ? high : in);
}
__global__ void copyKernel(unsigned char* input, unsigned char* output, int index, int size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size)
{
return;
}
//output[i+index*size] = input[i + index * size];
output[i + index * size * 3] = input[i];
output[i + size + index * size * 3] = input[i + size];
output[i + 2 * size + index * size * 3] = input[i + 2 * size];
}
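// resizKernel: one thread per destination pixel (across the whole batch); bilinearly resizes
// interleaved HWC uint8 images into outputGpu and writes a mean-subtracted planar CHW copy to
// normGpu (the per-channel offsets 123.68/116.779/103.939 look like ImageNet channel means).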
__global__ void resizKernel(unsigned char *inputGpu, float *outputGpu, float* normGpu, int dstW, int dstH, int srcW, int srcH)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int k = i % (dstW*dstH);
int l = i / (dstW*dstH);
const int x = k % dstW;
const int y = k / dstW;
if (x >= dstW || y >= dstH)
return;
float ratio_h = float(srcH) / float(dstH);
float ratio_w = float(srcW) / float(dstW);
float x0 = float(x) * ratio_w;
float y0 = float(y) * ratio_h;
// clamp to the last valid source index so the bilinear taps never read past the image border
int left = int(clipp((float)floor(x0), 0.0f, float(srcW - 1)));
int top = int(clipp((float)floor(y0), 0.0f, float(srcH - 1)));
int right = int(clipp((float)ceil(x0), 0.0f, float(srcW - 1)));
int bottom = int(clipp((float)ceil(y0), 0.0f, float(srcH - 1)));
for (int c = 0; c < 3; ++c)
{
unsigned char left_top_val = inputGpu[l*srcW*srcH * 3 + top * (srcW * 3) + left * (3) + c];
unsigned char right_top_val = inputGpu[l*srcW*srcH * 3 + top * (srcW * 3) + right * (3) + c];
unsigned char left_bottom_val = inputGpu[l*srcW*srcH * 3 + bottom * (srcW * 3) + left * (3) + c];
unsigned char right_bottom_val = inputGpu[l*srcW*srcH * 3 + bottom * (srcW * 3) + right * (3) + c];
float top_lerp = left_top_val + (right_top_val - left_top_val) * (x0 - left);
float bottom_lerp = left_bottom_val + (right_bottom_val - left_bottom_val) * (x0 - left);
float lerp = clipp((top_lerp + (bottom_lerp - top_lerp) * (y0 - top)), 0.0f, 255.0f);
outputGpu[i * 3 + c] = lerp;
//float pixelMean[3]{ 123.68, 116.779, 103.939 };
if (c == 0)
{
normGpu[l*dstW*dstH * 3 + k] = float(outputGpu[i * 3 + c]) - 123.68;
}
if (c == 1)
{
normGpu[l*dstW*dstH * 3 + c * dstW*dstH + k] = float(outputGpu[i * 3 + c]) - 116.779;
}
if (c == 2)
{
normGpu[l*dstW*dstH * 3 + c * dstW*dstH + k] = float(outputGpu[i * 3 + c]) - 103.939;
}
}
}
extern "C" void copyImg(void* input, void* output, int index, int k)
{
const int dim = k;
const int BS = 512;
const int GS = (dim + BS - 1) / BS;
copyKernel << <GS, BS, 0>> > ((unsigned char *)input, (unsigned char *)output, index, dim);
}
extern "C" void resizeAndNorm(void* inputGpu, void* resizedOutputGpu, void* normGpu, int size, int dstW, int dstH, int srcW, int srcH)
{
int dim = size;
const int BS = 1024;
const int GS = (dim + BS - 1) / BS;
resizKernel << <GS, BS, 0 >> > ((unsigned char *)inputGpu, (float *)resizedOutputGpu, (float*)normGpu, dstW, dstH, srcW, srcH);
} | 517dda175630cdeee1b3097c81963c51d8f0a7d6.cu | #include "device_launch_parameters.h"
#include "device_functions.h"
#include "stdio.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
__forceinline__ __device__ float clipp(float in, float low, float high)
{
return (in < low) ? low : (in > high ? high : in);
}
__global__ void copyKernel(unsigned char* input, unsigned char* output, int index, int size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size)
{
return;
}
//output[i+index*size] = input[i + index * size];
output[i + index * size * 3] = input[i];
output[i + size + index * size * 3] = input[i + size];
output[i + 2 * size + index * size * 3] = input[i + 2 * size];
}
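// resizKernel: one thread per destination pixel (across the whole batch); bilinearly resizes
// interleaved HWC uint8 images into outputGpu and writes a mean-subtracted planar CHW copy to
// normGpu (the per-channel offsets 123.68/116.779/103.939 look like ImageNet channel means).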
__global__ void resizKernel(unsigned char *inputGpu, float *outputGpu, float* normGpu, int dstW, int dstH, int srcW, int srcH)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int k = i % (dstW*dstH);
int l = i / (dstW*dstH);
const int x = k % dstW;
const int y = k / dstW;
if (x >= dstW || y >= dstH)
return;
float ratio_h = float(srcH) / float(dstH);
float ratio_w = float(srcW) / float(dstW);
float x0 = float(x) * ratio_w;
float y0 = float(y) * ratio_h;
// clamp to the last valid source index so the bilinear taps never read past the image border
int left = int(clipp((float)floor(x0), 0.0f, float(srcW - 1)));
int top = int(clipp((float)floor(y0), 0.0f, float(srcH - 1)));
int right = int(clipp((float)ceil(x0), 0.0f, float(srcW - 1)));
int bottom = int(clipp((float)ceil(y0), 0.0f, float(srcH - 1)));
for (int c = 0; c < 3; ++c)
{
unsigned char left_top_val = inputGpu[l*srcW*srcH * 3 + top * (srcW * 3) + left * (3) + c];
unsigned char right_top_val = inputGpu[l*srcW*srcH * 3 + top * (srcW * 3) + right * (3) + c];
unsigned char left_bottom_val = inputGpu[l*srcW*srcH * 3 + bottom * (srcW * 3) + left * (3) + c];
unsigned char right_bottom_val = inputGpu[l*srcW*srcH * 3 + bottom * (srcW * 3) + right * (3) + c];
float top_lerp = left_top_val + (right_top_val - left_top_val) * (x0 - left);
float bottom_lerp = left_bottom_val + (right_bottom_val - left_bottom_val) * (x0 - left);
float lerp = clipp((top_lerp + (bottom_lerp - top_lerp) * (y0 - top)), 0.0f, 255.0f);
outputGpu[i * 3 + c] = lerp;
//float pixelMean[3]{ 123.68, 116.779, 103.939 };
if (c == 0)
{
normGpu[l*dstW*dstH * 3 + k] = float(outputGpu[i * 3 + c]) - 123.68;
}
if (c == 1)
{
normGpu[l*dstW*dstH * 3 + c * dstW*dstH + k] = float(outputGpu[i * 3 + c]) - 116.779;
}
if (c == 2)
{
normGpu[l*dstW*dstH * 3 + c * dstW*dstH + k] = float(outputGpu[i * 3 + c]) - 103.939;
}
}
}
extern "C" void copyImg(void* input, void* output, int index, int k)
{
const int dim = k;
const int BS = 512;
const int GS = (dim + BS - 1) / BS;
copyKernel << <GS, BS, 0>> > ((unsigned char *)input, (unsigned char *)output, index, dim);
}
extern "C" void resizeAndNorm(void* inputGpu, void* resizedOutputGpu, void* normGpu, int size, int dstW, int dstH, int srcW, int srcH)
{
int dim = size;
const int BS = 1024;
const int GS = (dim + BS - 1) / BS;
resizKernel << <GS, BS, 0 >> > ((unsigned char *)inputGpu, (float *)resizedOutputGpu, (float*)normGpu, dstW, dstH, srcW, srcH);
} |
782b9be9d6e54cfe5f73ee1d5cb9963e3bc85f3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// /usr/local/cuda/bin/nvcc task1.cu -o task1
// nvcc task1.cu -o task1
//./task1
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
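// Jacobi relaxation step: every interior cell of T_new becomes the average of its four
// neighbours in T_old; boundary rows and columns are left untouched.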
__global__ void solver(double *T_new, const double *T_old, int cols, int rows)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int idx = (row*cols + col);
if (row < rows-1 && col < cols-1 && row > 0 && col > 0)
{
T_new[idx] = 0.25*(T_old[idx+cols] + T_old[idx-cols] + T_old[idx-1] + T_old[idx+1]);
}
}
int main()
{
int M = 200; //rows
int N = 200; //columns
int arrSize = M * N * sizeof(double);
int maxiter = 100;
double err;
int iter;
double *T_new = (double *) malloc(arrSize);
double *T_old = (double *) malloc(arrSize);
int i, j;
double temp = 1.0;
for(i = 0; i < M; i++)
{
for (j = 0; j < N; j++)
{
T_old[i*N + j] = 0.0;
T_new[i*N + j] = 0.0;
}
}
for (j = 0; j < N; j++)
{
T_old[0*N + j] = temp;
T_new[0*N + j] = temp;
}
for (j = 0; j < N; j++)
{
T_old[(M-1)*N + j] = 0.0; // bottom boundary row indexed by the row count M
T_new[(M-1)*N + j] = 0.0;
}
for (i = 0; i < M; i++)
{
T_old[i*N + 0] = 0.0;
T_new[i*N + 0] = 0.0;
}
for (i = 0; i < M; i++)
{
T_old[i*N + N-1] = 0.0;
T_new[i*N + N-1] = 0.0;
}
double *T_new_d, *T_old_d;
hipMalloc(&T_new_d, arrSize);
hipMalloc(&T_old_d, arrSize);
printf("Allocate Device memory for matrices\n");
hipMemcpy(T_new_d, T_new, arrSize, hipMemcpyHostToDevice);
hipMemcpy(T_old_d, T_old, arrSize, hipMemcpyHostToDevice);
printf("Copy matrices from the host memory to the CUDA device\n");
const dim3 BLOCK_DIM(32, 32); // 1024 threads
const dim3 GRID_DIM( (N-1)/BLOCK_DIM.x+1, (M-1)/BLOCK_DIM.y+1);
printf("CUDA kernel launch with BLOCK_DIM[%d %d] GRID_DIM[%d %d]\n", BLOCK_DIM.x, BLOCK_DIM.y, GRID_DIM.x, GRID_DIM.y);
for (iter = 0; iter < maxiter; iter++)
{
hipLaunchKernelGGL(( solver), dim3(GRID_DIM), dim3(BLOCK_DIM), 0, 0, T_new_d, T_old_d, N, M);
hipLaunchKernelGGL(( solver), dim3(GRID_DIM), dim3(BLOCK_DIM), 0, 0, T_old_d, T_new_d, N, M);
if (iter%10 == 0)
{
hipMemcpy(T_new, T_new_d, arrSize, hipMemcpyDeviceToHost);
hipMemcpy(T_old, T_old_d, arrSize, hipMemcpyDeviceToHost);
err = 0.0;
for(i = 1; i < (M-1); i++)
{
for(j = 1; j < (N-1); j++)
{
if (fabs(T_old[i*N + j]-T_new[i*N + j]) > err) err = fabs(T_old[i*N + j]-T_new[i*N + j]);
}
}
printf("|%d| %f\n", iter, err);
}
}
hipDeviceSynchronize();
printf("Done solving\n");
hipMemcpy(T_new, T_new_d, arrSize, hipMemcpyDeviceToHost);
hipMemcpy(T_old, T_old_d, arrSize, hipMemcpyDeviceToHost);
printf("Copy matrices from the CUDA device to the host memory\n");
hipFree(T_new_d);
hipFree(T_old_d);
free(T_new);
free(T_old);
printf("Free device and device memory\n");
}
| 782b9be9d6e54cfe5f73ee1d5cb9963e3bc85f3e.cu | // /usr/local/cuda/bin/nvcc task1.cu -o task1
// nvcc task1.cu -o task1
//./task1
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
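// Jacobi relaxation step: every interior cell of T_new becomes the average of its four
// neighbours in T_old; boundary rows and columns are left untouched.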
__global__ void solver(double *T_new, const double *T_old, int cols, int rows)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int idx = (row*cols + col);
if (row < rows-1 && col < cols-1 && row > 0 && col > 0)
{
T_new[idx] = 0.25*(T_old[idx+cols] + T_old[idx-cols] + T_old[idx-1] + T_old[idx+1]);
}
}
int main()
{
int M = 200; //rows
int N = 200; //columns
int arrSize = M * N * sizeof(double);
int maxiter = 100;
double err;
int iter;
double *T_new = (double *) malloc(arrSize);
double *T_old = (double *) malloc(arrSize);
int i, j;
double temp = 1.0;
for(i = 0; i < M; i++)
{
for (j = 0; j < N; j++)
{
T_old[i*N + j] = 0.0;
T_new[i*N + j] = 0.0;
}
}
for (j = 0; j < N; j++)
{
T_old[0*N + j] = temp;
T_new[0*N + j] = temp;
}
for (j = 0; j < N; j++)
{
T_old[(M-1)*N + j] = 0.0; // bottom boundary row indexed by the row count M
T_new[(M-1)*N + j] = 0.0;
}
for (i = 0; i < M; i++)
{
T_old[i*N + 0] = 0.0;
T_new[i*N + 0] = 0.0;
}
for (i = 0; i < M; i++)
{
T_old[i*N + N-1] = 0.0;
T_new[i*N + N-1] = 0.0;
}
double *T_new_d, *T_old_d;
cudaMalloc(&T_new_d, arrSize);
cudaMalloc(&T_old_d, arrSize);
printf("Allocate Device memory for matrices\n");
cudaMemcpy(T_new_d, T_new, arrSize, cudaMemcpyHostToDevice);
cudaMemcpy(T_old_d, T_old, arrSize, cudaMemcpyHostToDevice);
printf("Copy matrices from the host memory to the CUDA device\n");
const dim3 BLOCK_DIM(32, 32); // 1024 threads
const dim3 GRID_DIM( (N-1)/BLOCK_DIM.x+1, (M-1)/BLOCK_DIM.y+1);
printf("CUDA kernel launch with BLOCK_DIM[%d %d] GRID_DIM[%d %d]\n", BLOCK_DIM.x, BLOCK_DIM.y, GRID_DIM.x, GRID_DIM.y);
for (iter = 0; iter < maxiter; iter++)
{
solver<<<GRID_DIM, BLOCK_DIM>>>(T_new_d, T_old_d, N, M);
solver<<<GRID_DIM, BLOCK_DIM>>>(T_old_d, T_new_d, N, M);
if (iter%10 == 0)
{
cudaMemcpy(T_new, T_new_d, arrSize, cudaMemcpyDeviceToHost);
cudaMemcpy(T_old, T_old_d, arrSize, cudaMemcpyDeviceToHost);
err = 0.0;
for(i = 1; i < (M-1); i++)
{
for(j = 1; j < (N-1); j++)
{
if (fabs(T_old[i*N + j]-T_new[i*N + j]) > err) err = fabs(T_old[i*N + j]-T_new[i*N + j]);
}
}
printf("|%d| %f\n", iter, err);
}
}
cudaDeviceSynchronize();
printf("Done solving\n");
cudaMemcpy(T_new, T_new_d, arrSize, cudaMemcpyDeviceToHost);
cudaMemcpy(T_old, T_old_d, arrSize, cudaMemcpyDeviceToHost);
printf("Copy matrices from the CUDA device to the host memory\n");
cudaFree(T_new_d);
cudaFree(T_old_d);
free(T_new);
free(T_old);
printf("Free device and device memory\n");
}
|
efa1104d43c22162de93de809432880547ea9f97.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <time.h>
#include <hiprand/hiprand_kernel.h>
#define TRIALS_PER_THREAD 1024
#define BLOCKS 256
#define THREADS 256
__global__ void gpuPiCalculate(float *estimate, hiprandState_t *states) {
unsigned long id = threadIdx.x + blockDim.x * blockIdx.x;
int pointsInCircle = 0;
float x, y;
hiprand_init(id, id, 0, &states[id]); //initialize hiprand
for (int i = 0; i < TRIALS_PER_THREAD; i++) {
x = hiprand_uniform(&states[id]);
y = hiprand_uniform(&states[id]);
pointsInCircle += (x*x + y * y <= 1.0f);
}
estimate[id] = 4.0f * pointsInCircle / (float)TRIALS_PER_THREAD;
}
float cpuPiCalculate(long trials) {
float x, y;
long pointsInCircle = 0;
for (long i = 0; i < trials; i++) {
x = rand() / (float)RAND_MAX;
y = rand() / (float)RAND_MAX;
pointsInCircle += (x * x + y * y <= 1.0f);
}
return 4.0f * pointsInCircle / trials;
}
int main(int argc, char *argv[]) {
clock_t start, stop;
float host[BLOCKS * THREADS];
float *dev;
hiprandState_t *devStates;
start = clock();
hipMalloc((void **)&dev, BLOCKS * THREADS * sizeof(float));
hipMalloc((void **)&devStates, THREADS * BLOCKS * sizeof(hiprandState_t));
gpuPiCalculate << <BLOCKS, THREADS >> > (dev, devStates);
hipMemcpy(host, dev, BLOCKS * THREADS * sizeof(float), hipMemcpyDeviceToHost);
float gpuPI = 0;
for (int i = 0; i < BLOCKS * THREADS; i++) {
gpuPI += host[i];
}
gpuPI /= (BLOCKS * THREADS);
stop = clock();
printf("GPU PI= %f\n", gpuPI);
printf("GPU PI calculate time %f s.\n", (stop - start) / (float)CLOCKS_PER_SEC);
start = clock();
float cpuPI = cpuPiCalculate(BLOCKS * THREADS * TRIALS_PER_THREAD);
stop = clock();
printf("CPU PI= %f\n", cpuPI);
printf("CPU PI calculate time %f s.\n", (stop - start) / (float)CLOCKS_PER_SEC);
return 0;
} | efa1104d43c22162de93de809432880547ea9f97.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <time.h>
#include <curand_kernel.h>
#define TRIALS_PER_THREAD 1024
#define BLOCKS 256
#define THREADS 256
__global__ void gpuPiCalculate(float *estimate, curandState *states) {
unsigned long id = threadIdx.x + blockDim.x * blockIdx.x;
int pointsInCircle = 0;
float x, y;
curand_init(id, id, 0, &states[id]); //initialize curand
for (int i = 0; i < TRIALS_PER_THREAD; i++) {
x = curand_uniform(&states[id]);
y = curand_uniform(&states[id]);
pointsInCircle += (x*x + y * y <= 1.0f);
}
estimate[id] = 4.0f * pointsInCircle / (float)TRIALS_PER_THREAD;
}
float cpuPiCalculate(long trials) {
float x, y;
long pointsInCircle = 0;
for (long i = 0; i < trials; i++) {
x = rand() / (float)RAND_MAX;
y = rand() / (float)RAND_MAX;
pointsInCircle += (x * x + y * y <= 1.0f);
}
return 4.0f * pointsInCircle / trials;
}
int main(int argc, char *argv[]) {
clock_t start, stop;
float host[BLOCKS * THREADS];
float *dev;
curandState *devStates;
start = clock();
cudaMalloc((void **)&dev, BLOCKS * THREADS * sizeof(float));
cudaMalloc((void **)&devStates, THREADS * BLOCKS * sizeof(curandState));
gpuPiCalculate << <BLOCKS, THREADS >> > (dev, devStates);
cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(float), cudaMemcpyDeviceToHost);
float gpuPI = 0;
for (int i = 0; i < BLOCKS * THREADS; i++) {
gpuPI += host[i];
}
gpuPI /= (BLOCKS * THREADS);
stop = clock();
printf("GPU PI= %f\n", gpuPI);
printf("GPU PI calculate time %f s.\n", (stop - start) / (float)CLOCKS_PER_SEC);
start = clock();
float cpuPI = cpuPiCalculate(BLOCKS * THREADS * TRIALS_PER_THREAD);
stop = clock();
printf("CPU PI= %f\n", cpuPI);
printf("CPU PI calculate time %f s.\n", (stop - start) / (float)CLOCKS_PER_SEC);
return 0;
} |
ee52b32da40fa870988a19de4a34b4772c79e0ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
Jacob Sword
Parallelized multiplication of matrix and matrix of random values given fixed matrix dimensions
Comparisons in speed between regular block by block mult, using padding to combat unalignment,
and using mtrix transpose to combat strided memory access.
Matrix dimensions are (256 rows by 240 cols) x (240 rows by 512 cols)
**/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cassert>
#include <cstdlib>
#include "wtime.h"
#include "./error_handler.h"
using std::cout;
using std::endl;
const int A_ROWS = 256;
const int A_COLS = 240;
const int B_ROWS = 240;
const int B_COLS = 512;
const int C_ROWS = A_ROWS;
const int C_COLS = B_COLS;
//Sequential mat_mult for testing
void mat_mult(int *mat_a, int *mat_b, int *result, int a_rows, int a_cols, int b_cols, int padding = 0)
{
for (int i = 0; i < a_rows; i++) {
for (int j = 0; j < b_cols; j++) {
int temp_res = 0;
for (int k = 0; k < a_cols; k++) {
temp_res += mat_a[i * (a_cols + padding) + k] * mat_b[k * b_cols + j];
}
result[i * b_cols + j] = temp_res;
}
}
}
/*Parallel implementation of matrix a x matrix b
* one row per block at a time; blocks stride over additional rows when the row count exceeds the grid size
* matrix A is 256 x 240, matrix b is 240 * 512
* resultant matrix is 256 rows x 512 cols
* Supports optional padding of matrix a
*/
__global__ void mat_mult_kernel(int *mat_a, int *mat_b, int *res,
int a_rows, int a_cols, int b_cols, int padding = 0) {
// El for each thread, shared per block
__shared__ int smem[128];
for (int row_block = 0; row_block * gridDim.x < a_rows; row_block++) {
int a_row = blockIdx.x + (row_block * gridDim.x);
for (int b_col = 0; b_col < b_cols; b_col++) {
int total = 0;
for (int thread_i = 0; thread_i * blockDim.x < a_cols; thread_i++) {
int thread_col = threadIdx.x + (thread_i * blockDim.x);
// Need to check because 240 not even multiple of 128
if (thread_col >= a_cols)
smem[threadIdx.x] = 0;
else
smem[threadIdx.x] = mat_a[a_row * (a_cols + padding) + thread_col] * mat_b[thread_col * b_cols + b_col];
__syncthreads();
//Parallel reduction
for (int i = blockDim.x / 2; i > 0; i /= 2) {
if (threadIdx.x < i) {
int temp = smem[threadIdx.x] + smem[threadIdx.x + i];
smem[threadIdx.x] = temp;
}
__syncthreads();
}
if (threadIdx.x == 0) {
total += smem[threadIdx.x];
}
}
if (threadIdx.x == 0) {
res[a_row * b_cols + b_col] = total;
}
}
}
}
__global__ void mat_mult_transposed_kernel(int *mat_a, int *mat_b, int *res) {
int B_TRANS_ROWS = B_COLS;
int B_TRANS_COLS = B_ROWS;
// El for each thread, shared per block
__shared__ int smem[128];
for (int row_block = 0; row_block * gridDim.x < A_ROWS; row_block++) {
int a_row = blockIdx.x + (row_block * gridDim.x);
for (int b_row = 0; b_row < B_TRANS_ROWS; b_row++) {
int total = 0;
for (int thread_i = 0; thread_i * blockDim.x < A_COLS; thread_i++) {
int thread_col = threadIdx.x + (thread_i * blockDim.x);
// Need to check because 240 not even multiple of 128
if (thread_col >= A_COLS)
smem[threadIdx.x] = 0;
else
smem[threadIdx.x] = mat_a[a_row * A_COLS + thread_col] * mat_b[b_row * B_TRANS_COLS + thread_col];
__syncthreads();
//Parallel reduction
for (int i = blockDim.x / 2; i > 0; i /= 2) {
if (threadIdx.x < i) {
int temp = smem[threadIdx.x] + smem[threadIdx.x + i];
smem[threadIdx.x] = temp;
}
__syncthreads();
}
if (threadIdx.x == 0) {
total += smem[threadIdx.x];
}
}
if (threadIdx.x == 0) {
res[a_row * C_COLS + b_row] = total;
}
}
}
}
void basic_multiplication() {
int *a = (int *) malloc(sizeof(int) * A_ROWS * A_COLS);
int *b = (int *) malloc(sizeof(int) * B_ROWS * B_COLS);
int *c = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
srand(time(NULL));
// Initialize matrix a
for (int i = 0; i < A_ROWS; i++) {
for (int j = 0; j < A_COLS; j++) {
int el = rand() % 10;
a[i * A_COLS + j] = el;
}
}
// Initialize matrix b
for (int i = 0; i < B_ROWS; i++) {
for (int j = 0; j < B_COLS; j++) {
int el = rand() % 5;
b[i * B_COLS + j] = el;
}
}
int *a_d, *b_d, *c_d;
HANDLE_ERR(hipMalloc((void **) &a_d, sizeof (int) * A_ROWS * A_COLS));
HANDLE_ERR(hipMalloc((void **) &b_d, sizeof (int) * B_ROWS * B_COLS));
HANDLE_ERR(hipMalloc((void **) &c_d, sizeof (int) * C_ROWS * C_COLS));
HANDLE_ERR(hipMemcpy (a_d, a, sizeof (int) * A_ROWS * A_COLS, hipMemcpyHostToDevice));
HANDLE_ERR(hipMemcpy (b_d, b, sizeof (int) * B_ROWS * B_COLS, hipMemcpyHostToDevice));
double starttime = wtime();
hipLaunchKernelGGL(( mat_mult_kernel) , dim3(128), dim3(128) , 0, 0, a_d, b_d, c_d, A_ROWS, A_COLS, B_COLS);
hipDeviceSynchronize();
double algotime = wtime() - starttime;
cout << "Base multiplication: " << algotime << endl;
HANDLE_ERR(hipMemcpy (c, c_d, sizeof (int) * C_ROWS * C_COLS, hipMemcpyDeviceToHost));
//Make sure parallel work is equal to sequential work (for testing)
int *test_res = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
mat_mult(a, b, test_res, A_ROWS, A_COLS, B_COLS);
for (int i = 0; i < C_ROWS; i++) {
for (int j = 0; j < C_COLS; j++){
int idx = i * C_COLS + j;
if (c[idx] != test_res[idx]) {
cout << "Not Equal at idx: " << i << ", " << j
<< " Parallel work " << c[idx] << ", Sequential Work: " << test_res[idx] << endl;
}
assert(c[idx] == test_res[idx]);
}
}
}
void padded_multiplication() {
int padding = 16;
int A_COLS_PADDED = A_COLS + padding;
int *a = (int *) malloc(sizeof(int) * A_ROWS * A_COLS_PADDED);
int *b = (int *) malloc(sizeof(int) * B_ROWS * B_COLS);
int *c = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
srand(time(NULL));
// Initialize matrix
for (int i = 0; i < A_ROWS; i++) {
for (int j = 0; j < A_COLS; j++) {
int el = rand() % 10;
a[i * A_COLS_PADDED + j] = el;
}
}
// Initialize vector
for (int i = 0; i < B_ROWS; i++) {
for (int j = 0; j < B_COLS; j++) {
int el = rand() % 5;
b[i * B_COLS + j] = el;
}
}
int *a_d, *b_d, *c_d;
HANDLE_ERR(hipMalloc((void **) &a_d, sizeof (int) * A_ROWS * A_COLS_PADDED));
HANDLE_ERR(hipMalloc((void **) &b_d, sizeof (int) * B_ROWS * B_COLS));
HANDLE_ERR(hipMalloc((void **) &c_d, sizeof (int) * C_ROWS * C_COLS));
HANDLE_ERR(hipMemcpy (a_d, a, sizeof (int) * A_ROWS * A_COLS_PADDED, hipMemcpyHostToDevice));
HANDLE_ERR(hipMemcpy (b_d, b, sizeof (int) * B_ROWS * B_COLS, hipMemcpyHostToDevice));
double starttime = wtime();
hipLaunchKernelGGL(( mat_mult_kernel) , dim3(128), dim3(128) , 0, 0, a_d, b_d, c_d, A_ROWS, A_COLS, B_COLS, padding);
hipDeviceSynchronize();
double algotime = wtime() - starttime;
cout << "Padded multiplication time: " << algotime << endl;
HANDLE_ERR(hipMemcpy (c, c_d, sizeof (int) * C_ROWS * C_COLS, hipMemcpyDeviceToHost));
//Make sure parallel work is equal to sequential work (for testing)
int *test_res = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
mat_mult(a, b, test_res, A_ROWS, A_COLS, B_COLS, padding);
for (int i = 0; i < C_ROWS; i++) {
for (int j = 0; j < C_COLS; j++){
int idx = i * C_COLS + j;
if (c[idx] != test_res[idx]) {
cout << "Not Equal at idx: " << i << ", " << j
<< " Parallel work " << c[idx] << ", Sequential Work: " << test_res[idx] << endl;
}
assert(c[idx] == test_res[idx]);
}
}
}
void transpose_multiplication() {
int *a = (int *) malloc(sizeof(int) * A_ROWS * A_COLS);
int *b = (int *) malloc(sizeof(int) * B_ROWS * B_COLS);
int *trans = (int *) malloc(sizeof(int) * B_ROWS * B_COLS);
int *c = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
srand(time(NULL));
// Initialize matrix a
for (int i = 0; i < A_ROWS; i++) {
for (int j = 0; j < A_COLS; j++) {
int el = rand() % 10;
a[i * A_COLS + j] = el;
}
}
// Initialize matrix b
for (int i = 0; i < B_ROWS; i++) {
for (int j = 0; j < B_COLS; j++) {
int el = rand() % 5;
b[i * B_COLS + j] = el;
}
}
// Transpose matrix b
for (int i = 0; i < B_ROWS; i++) {
for (int j = 0; j < B_COLS; j++) {
trans[j * B_ROWS + i] = b[i * B_COLS + j];
}
}
int *a_d, *b_d, *c_d;
HANDLE_ERR(hipMalloc((void **) &a_d, sizeof (int) * A_ROWS * A_COLS));
HANDLE_ERR(hipMalloc((void **) &b_d, sizeof (int) * B_ROWS * B_COLS));
HANDLE_ERR(hipMalloc((void **) &c_d, sizeof (int) * C_ROWS * C_COLS));
HANDLE_ERR(hipMemcpy (a_d, a, sizeof (int) * A_ROWS * A_COLS, hipMemcpyHostToDevice));
HANDLE_ERR(hipMemcpy (b_d, trans, sizeof (int) * B_ROWS * B_COLS, hipMemcpyHostToDevice));
double starttime = wtime();
hipLaunchKernelGGL(( mat_mult_transposed_kernel) , dim3(128), dim3(128) , 0, 0, a_d, b_d, c_d);
hipDeviceSynchronize();
double algotime = wtime() - starttime;
cout << "Transposed multiplication time: " << algotime << endl;
HANDLE_ERR(hipMemcpy (c, c_d, sizeof (int) * C_ROWS * C_COLS, hipMemcpyDeviceToHost));
//Make sure parallel work is equal to sequential work (for testing)
int *test_res = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
mat_mult(a, b, test_res, A_ROWS, A_COLS, B_COLS);
for (int i = 0; i < C_ROWS; i++) {
for (int j = 0; j < C_COLS; j++){
int idx = i * C_COLS + j;
if (c[idx] != test_res[idx]) {
cout << "Not Equal at idx: " << i << ", " << j
<< " Parallel work " << c[idx] << ", Sequential Work: " << test_res[idx] << endl;
}
assert(c[idx] == test_res[idx]);
}
}
}
int main (int args, char **argv) {
basic_multiplication();
padded_multiplication();
transpose_multiplication();
}
| ee52b32da40fa870988a19de4a34b4772c79e0ed.cu | /**
Jacob Sword
Parallelized multiplication of matrix and matrix of random values given fixed matrix dimensions
Comparisons in speed between regular block by block mult, using padding to combat unalignment,
and using mtrix transpose to combat strided memory access.
Matrix dimensions are (256 rows by 240 cols) x (240 rows by 512 cols)
**/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cassert>
#include <cstdlib>
#include "wtime.h"
#include "./error_handler.h"
using std::cout;
using std::endl;
const int A_ROWS = 256;
const int A_COLS = 240;
const int B_ROWS = 240;
const int B_COLS = 512;
const int C_ROWS = A_ROWS;
const int C_COLS = B_COLS;
//Sequential mat_mult for testing
void mat_mult(int *mat_a, int *mat_b, int *result, int a_rows, int a_cols, int b_cols, int padding = 0)
{
for (int i = 0; i < a_rows; i++) {
for (int j = 0; j < b_cols; j++) {
int temp_res = 0;
for (int k = 0; k < a_cols; k++) {
temp_res += mat_a[i * (a_cols + padding) + k] * mat_b[k * b_cols + j];
}
result[i * b_cols + j] = temp_res;
}
}
}
/*Parallel implementation of matrix a x matrix b
* one row per block at a time; blocks stride over additional rows when the row count exceeds the grid size
* matrix A is 256 x 240, matrix b is 240 * 512
* resultant matrix is 256 rows x 512 cols
* Supports optional padding of matrix a
*/
__global__ void mat_mult_kernel(int *mat_a, int *mat_b, int *res,
int a_rows, int a_cols, int b_cols, int padding = 0) {
// El for each thread, shared per block
__shared__ int smem[128];
for (int row_block = 0; row_block * gridDim.x < a_rows; row_block++) {
int a_row = blockIdx.x + (row_block * gridDim.x);
for (int b_col = 0; b_col < b_cols; b_col++) {
int total = 0;
for (int thread_i = 0; thread_i * blockDim.x < a_cols; thread_i++) {
int thread_col = threadIdx.x + (thread_i * blockDim.x);
// Need to check because 240 not even multiple of 128
if (thread_col >= a_cols)
smem[threadIdx.x] = 0;
else
smem[threadIdx.x] = mat_a[a_row * (a_cols + padding) + thread_col] * mat_b[thread_col * b_cols + b_col];
__syncthreads();
//Parallel reduction
for (int i = blockDim.x / 2; i > 0; i /= 2) {
if (threadIdx.x < i) {
int temp = smem[threadIdx.x] + smem[threadIdx.x + i];
smem[threadIdx.x] = temp;
}
__syncthreads();
}
if (threadIdx.x == 0) {
total += smem[threadIdx.x];
}
}
if (threadIdx.x == 0) {
res[a_row * b_cols + b_col] = total;
}
}
}
}
__global__ void mat_mult_transposed_kernel(int *mat_a, int *mat_b, int *res) {
int B_TRANS_ROWS = B_COLS;
int B_TRANS_COLS = B_ROWS;
// El for each thread, shared per block
__shared__ int smem[128];
for (int row_block = 0; row_block * gridDim.x < A_ROWS; row_block++) {
int a_row = blockIdx.x + (row_block * gridDim.x);
for (int b_row = 0; b_row < B_TRANS_ROWS; b_row++) {
int total = 0;
for (int thread_i = 0; thread_i * blockDim.x < A_COLS; thread_i++) {
int thread_col = threadIdx.x + (thread_i * blockDim.x);
// Need to check because 240 not even multiple of 128
if (thread_col >= A_COLS)
smem[threadIdx.x] = 0;
else
smem[threadIdx.x] = mat_a[a_row * A_COLS + thread_col] * mat_b[b_row * B_TRANS_COLS + thread_col];
__syncthreads();
//Parallel reduction
for (int i = blockDim.x / 2; i > 0; i /= 2) {
if (threadIdx.x < i) {
int temp = smem[threadIdx.x] + smem[threadIdx.x + i];
smem[threadIdx.x] = temp;
}
__syncthreads();
}
if (threadIdx.x == 0) {
total += smem[threadIdx.x];
}
}
if (threadIdx.x == 0) {
res[a_row * C_COLS + b_row] = total;
}
}
}
}
void basic_multiplication() {
int *a = (int *) malloc(sizeof(int) * A_ROWS * A_COLS);
int *b = (int *) malloc(sizeof(int) * B_ROWS * B_COLS);
int *c = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
srand(time(NULL));
// Initialize matrix a
for (int i = 0; i < A_ROWS; i++) {
for (int j = 0; j < A_COLS; j++) {
int el = rand() % 10;
a[i * A_COLS + j] = el;
}
}
// Initialize matrix b
for (int i = 0; i < B_ROWS; i++) {
for (int j = 0; j < B_COLS; j++) {
int el = rand() % 5;
b[i * B_COLS + j] = el;
}
}
int *a_d, *b_d, *c_d;
HANDLE_ERR(cudaMalloc((void **) &a_d, sizeof (int) * A_ROWS * A_COLS));
HANDLE_ERR(cudaMalloc((void **) &b_d, sizeof (int) * B_ROWS * B_COLS));
HANDLE_ERR(cudaMalloc((void **) &c_d, sizeof (int) * C_ROWS * C_COLS));
HANDLE_ERR(cudaMemcpy (a_d, a, sizeof (int) * A_ROWS * A_COLS, cudaMemcpyHostToDevice));
HANDLE_ERR(cudaMemcpy (b_d, b, sizeof (int) * B_ROWS * B_COLS, cudaMemcpyHostToDevice));
double starttime = wtime();
mat_mult_kernel <<< 128, 128 >>> (a_d, b_d, c_d, A_ROWS, A_COLS, B_COLS);
cudaDeviceSynchronize();
double algotime = wtime() - starttime;
cout << "Base multiplication: " << algotime << endl;
HANDLE_ERR(cudaMemcpy (c, c_d, sizeof (int) * C_ROWS * C_COLS, cudaMemcpyDeviceToHost));
//Make sure parallel work is equal to sequential work (for testing)
int *test_res = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
mat_mult(a, b, test_res, A_ROWS, A_COLS, B_COLS);
for (int i = 0; i < C_ROWS; i++) {
for (int j = 0; j < C_COLS; j++){
int idx = i * C_COLS + j;
if (c[idx] != test_res[idx]) {
cout << "Not Equal at idx: " << i << ", " << j
<< " Parallel work " << c[idx] << ", Sequential Work: " << test_res[idx] << endl;
}
assert(c[idx] == test_res[idx]);
}
}
}
void padded_multiplication() {
int padding = 16;
int A_COLS_PADDED = A_COLS + padding;
int *a = (int *) malloc(sizeof(int) * A_ROWS * A_COLS_PADDED);
int *b = (int *) malloc(sizeof(int) * B_ROWS * B_COLS);
int *c = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
srand(time(NULL));
// Initialize matrix
for (int i = 0; i < A_ROWS; i++) {
for (int j = 0; j < A_COLS; j++) {
int el = rand() % 10;
a[i * A_COLS_PADDED + j] = el;
}
}
// Initialize vector
for (int i = 0; i < B_ROWS; i++) {
for (int j = 0; j < B_COLS; j++) {
int el = rand() % 5;
b[i * B_COLS + j] = el;
}
}
int *a_d, *b_d, *c_d;
HANDLE_ERR(cudaMalloc((void **) &a_d, sizeof (int) * A_ROWS * A_COLS_PADDED));
HANDLE_ERR(cudaMalloc((void **) &b_d, sizeof (int) * B_ROWS * B_COLS));
HANDLE_ERR(cudaMalloc((void **) &c_d, sizeof (int) * C_ROWS * C_COLS));
HANDLE_ERR(cudaMemcpy (a_d, a, sizeof (int) * A_ROWS * A_COLS_PADDED, cudaMemcpyHostToDevice));
HANDLE_ERR(cudaMemcpy (b_d, b, sizeof (int) * B_ROWS * B_COLS, cudaMemcpyHostToDevice));
double starttime = wtime();
mat_mult_kernel <<< 128, 128 >>> (a_d, b_d, c_d, A_ROWS, A_COLS, B_COLS, padding);
cudaDeviceSynchronize();
double algotime = wtime() - starttime;
cout << "Padded multiplication time: " << algotime << endl;
HANDLE_ERR(cudaMemcpy (c, c_d, sizeof (int) * C_ROWS * C_COLS, cudaMemcpyDeviceToHost));
//Make sure parallel work is equal to sequential work (for testing)
int *test_res = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
mat_mult(a, b, test_res, A_ROWS, A_COLS, B_COLS, padding);
for (int i = 0; i < C_ROWS; i++) {
for (int j = 0; j < C_COLS; j++){
int idx = i * C_COLS + j;
if (c[idx] != test_res[idx]) {
cout << "Not Equal at idx: " << i << ", " << j
<< " Parallel work " << c[idx] << ", Sequential Work: " << test_res[idx] << endl;
}
assert(c[idx] == test_res[idx]);
}
}
}
void transpose_multiplication() {
int *a = (int *) malloc(sizeof(int) * A_ROWS * A_COLS);
int *b = (int *) malloc(sizeof(int) * B_ROWS * B_COLS);
int *trans = (int *) malloc(sizeof(int) * B_ROWS * B_COLS);
int *c = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
srand(time(NULL));
// Initialize matrix a
for (int i = 0; i < A_ROWS; i++) {
for (int j = 0; j < A_COLS; j++) {
int el = rand() % 10;
a[i * A_COLS + j] = el;
}
}
// Initialize matrix b
for (int i = 0; i < B_ROWS; i++) {
for (int j = 0; j < B_COLS; j++) {
int el = rand() % 5;
b[i * B_COLS + j] = el;
}
}
// Transpose matrix b
for (int i = 0; i < B_ROWS; i++) {
for (int j = 0; j < B_COLS; j++) {
trans[j * B_ROWS + i] = b[i * B_COLS + j];
}
}
int *a_d, *b_d, *c_d;
HANDLE_ERR(cudaMalloc((void **) &a_d, sizeof (int) * A_ROWS * A_COLS));
HANDLE_ERR(cudaMalloc((void **) &b_d, sizeof (int) * B_ROWS * B_COLS));
HANDLE_ERR(cudaMalloc((void **) &c_d, sizeof (int) * C_ROWS * C_COLS));
HANDLE_ERR(cudaMemcpy (a_d, a, sizeof (int) * A_ROWS * A_COLS, cudaMemcpyHostToDevice));
HANDLE_ERR(cudaMemcpy (b_d, trans, sizeof (int) * B_ROWS * B_COLS, cudaMemcpyHostToDevice));
double starttime = wtime();
mat_mult_transposed_kernel <<< 128, 128 >>> (a_d, b_d, c_d);
cudaDeviceSynchronize();
double algotime = wtime() - starttime;
cout << "Transposed multiplication time: " << algotime << endl;
HANDLE_ERR(cudaMemcpy (c, c_d, sizeof (int) * C_ROWS * C_COLS, cudaMemcpyDeviceToHost));
//Make sure parallel work is equal to sequential work (for testing)
int *test_res = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
mat_mult(a, b, test_res, A_ROWS, A_COLS, B_COLS);
for (int i = 0; i < C_ROWS; i++) {
for (int j = 0; j < C_COLS; j++){
int idx = i * C_COLS + j;
if (c[idx] != test_res[idx]) {
cout << "Not Equal at idx: " << i << ", " << j
<< " Parallel work " << c[idx] << ", Sequential Work: " << test_res[idx] << endl;
}
assert(c[idx] == test_res[idx]);
}
}
}
int main (int args, char **argv) {
basic_multiplication();
padded_multiplication();
transpose_multiplication();
}
|
dc93fc9c327cf6364586d2351fcd0115c6129cf0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <limits>
#include <tuple>
#include <iostream>
#include <cmath>
#include "libalg/basic_operations.hpp"
#include "libalg/alg.hpp"
#include "libalg/CPUMatrix.hpp"
#include "libalg/CPUView.hpp"
#include "libalg/broadcasting.hpp"
#include "error.hpp"
#include "cpu/tuple.hpp"
#include "gpu/icp.cuh"
#include "libgpualg/mult.cuh"
#include "libgpualg/euclidist.cuh"
#include "error.cuh"
#include "libgpualg/mean.cuh"
#include "libgpualg/ope.cuh"
#include "libgpualg/svd.cuh"
#include "libgpuicp/corresp_optimized.cuh"
#include "libgpuicp/dist.cuh"
#include "libgpuicp/batchcovs.cuh"
#define Tile_size 2
/* --------- CPU Version Calling GPU Kernel ------------ */
__host__ std::vector<std::tuple<size_t, int>> get_correspondence_indices(double *P, double *Q,
size_t P_r, size_t P_c, size_t Q_r, size_t Q_c)
{
std::vector<std::tuple<size_t, int>> correspondances = {};
for (size_t i = 0; i < P_r; i++)
{
double *p_point = P + i * P_c;
double min_dist = std::numeric_limits<double>::max();
int chosen_idx = -1;
for (size_t j = 0; j < Q_r; j++)
{
double *q_point = Q + j * Q_c;
double dist = std::sqrt(element_wise_reduce(p_point, q_point, 1, P_c, 1, Q_c,
squared_norm_2, add, add)); //norm 2 between 2 vectors
if (dist < min_dist)
{
min_dist = dist;
chosen_idx = j;
}
}
correspondances.push_back(std::make_tuple(i, chosen_idx));
}
return correspondances;
}
// Intermediation function to be replaced with element_wise_op
__host__ void increment_cov(double *P, double *Q)
{
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
{
P[i*3 + j] = P[i*3 + j] + Q[i*3 + j];
}
}
}
__host__ double* calling_transpose_kernel(double *A, size_t row, size_t column)
{
// Calling transpose kernel
size_t size = sizeof(double) * row * column;
// Allocations
double *d_source_transpose, *d_dest_transpose;
hipMalloc((void **)&d_source_transpose, size);
hipMalloc((void **)&d_dest_transpose, size);
double *transposed_Q = (double *)calloc(size, sizeof(double));
// Copy mem and exec
hipMemcpy(d_source_transpose, A, size, hipMemcpyHostToDevice);
gpuTranspose(d_source_transpose, d_dest_transpose, row, column);
hipDeviceSynchronize();
hipMemcpy(transposed_Q, d_dest_transpose, size, hipMemcpyDeviceToHost);
// Free cuda mem
hipFree(d_source_transpose);
hipFree(d_dest_transpose);
// End of transpose call
return transposed_Q;
}
__host__ double *calling_dot_kernel(double *A, double *B, size_t A_row, size_t A_col, size_t B_row, size_t B_col)
{
size_t sizeA = A_row * A_col * sizeof(double);
size_t sizeB = B_row * B_col * sizeof(double);
size_t sizeC = A_row * B_col * sizeof(double);
double *h_C = (double *)calloc(sizeC, sizeof(double));
double *d_A;
double *d_B;
double *d_C;
hipMalloc(&d_A, sizeA);
hipMalloc(&d_B, sizeB);
hipMalloc(&d_C, sizeC);
hipMemcpy(d_A, A, sizeA, hipMemcpyHostToDevice);
hipMemcpy(d_B, B, sizeB, hipMemcpyHostToDevice);
matrixMultiplication(d_A, d_B, d_C, A_row, A_col, B_row, B_col, A_row, B_col);
hipDeviceSynchronize();
hipMemcpy(h_C, d_C, sizeC, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return h_C;
}
__host__ double *compute_cross_variance_cpu_call_gpu(double *P, double *Q, std::vector<std::tuple<size_t, int>> correspondences, size_t P_r, size_t P_c,
size_t Q_r, size_t Q_c) //set default function to lambda function??
{
UNUSED(Q_r);
double *cov = (double *)calloc(9, sizeof(double));
for (auto tup : correspondences)
{
auto i = std::get<0>(tup);
auto j = std::get<1>(tup);
double *q_point = Q + j * Q_c;
double *p_point = P + i * P_c;
double *doted_points = nullptr;
double *transposed_Q = calling_transpose_kernel(q_point, 1, Q_c);
dot_product(&doted_points, transposed_Q, p_point, Q_c, 1, 1, P_c); //dim of Q_r * P_r
free (transposed_Q);
increment_cov(cov, doted_points); //need to set element_wise_op but too complicated, doesn't work for some reason.
free(doted_points);
}
return cov;
}
/* -------------- Version GPU Kernel -----------*/
// Implementation with double arrays and no vector for full GPU usage
__global__ void get_correspondence_indices_array_gpu(tuple **correspondances, double *P, double *Q, size_t P_r, size_t P_c, size_t Q_r, size_t Q_c)
{
int push_index = 0;
for (size_t i = 0; i < P_r; i++)
{
double *p_point = P + i * P_c;
double min_dist = std::numeric_limits<double>::max();
int chosen_idx = -1;
for (size_t j = 0; j < Q_r; j++)
{
double *q_point = Q + j * Q_c;
// Euclidean distance over the point coordinates (P_c == Q_c assumed, mirroring the CPU version)
double dist = 0.;
for (size_t k = 0; k < P_c; k++)
{
double diff = p_point[k] - q_point[k];
dist += diff * diff;
}
dist = sqrt(dist);
if (dist < min_dist)
{
min_dist = dist;
chosen_idx = j;
}
}
// hipMalloc is host-only; in-kernel allocation has to go through device-side malloc
tuple *new_tup = (tuple *)malloc(sizeof(tuple));
new_tup->index = i;
new_tup->value = chosen_idx;
correspondances[push_index] = new_tup;
push_index++;
}
}
// Array implementation for GPU
void compute_cross_variance_array(double * cov, double *P, double *Q, std::tuple<size_t, int> *correspondences, size_t P_r, size_t P_c,
size_t Q_r, size_t Q_c) //set default function to lambda function??
{
UNUSED(Q_r);
for (size_t index = 0; index < P_r; index ++)
{
auto i = std::get<0>(correspondences[index]);
auto j = std::get<1>(correspondences[index]);
double *q_point = Q + j * Q_c;
double *p_point = P + i * P_c;
double *transposed_Q = transpose(q_point, 1, Q_c);
double *doted_points = nullptr;
dot_product(&doted_points, transposed_Q, p_point, Q_c, 1, 1, P_c); //dim of Q_r * P_r
free (transposed_Q);
increment_cov(cov, doted_points); //need to set element_wise_op but too complicated, doesn't work for some reason.
free(doted_points);
}
}
dim3 get_gridsize(size_t a_0, size_t a_1, size_t b_0, size_t b_1, dim3 blocksize)
{
size_t r_0, r_1;
get_broadcastable_size(a_0, a_1, b_0, b_1, &r_0, &r_1);
int nbblocksx = ::ceil((float)r_1 / blocksize.x);
int nbblocksy = ::ceil((float)r_0 / blocksize.y);
return dim3(nbblocksx, nbblocksy);
}
// TODO: REMOVE ME since useless
__global__ void print_matrix_kern(char* d_A, int pitch, int nbvals)
{
int j;
int idx = threadIdx.x;
double* line = (double*)(d_A + idx * pitch);
printf("Line %d:\n", idx);
__syncthreads();
for (j = 0; j < nbvals; ++j) {
//printf("%6.2f\t", (double)(d_A[idx * pitch + j * sizeof(double)]));
printf("%6.2f\t", line[j]);
__syncthreads();
}
printf("\n");
__syncthreads();
}
void print_Mat_gpu(double *dmat, int m, int n, const char* name)
{
double* Mat = (double*)malloc(m * n * sizeof(double));
hipMemcpy(Mat, dmat, m * n * sizeof(double), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
for (int row = 0; row < m; row++) {
for (int col = 0; col < n; col++) {
double Areg = Mat[col + row * n];
printf("%s(%d,%d) = %f ", name, row, col, Areg);
}
printf("\n");
}
free(Mat);
}
void print_Mat_gpu(unsigned int* dmat, int m, int n, const char* name)
{
unsigned int* Mat = (unsigned int*)malloc(m * n * sizeof(unsigned int));
hipMemcpy(Mat, dmat, m * n * sizeof(unsigned int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
for (int row = 0; row < m; row++) {
for (int col = 0; col < n; col++) {
unsigned int Areg = Mat[col + row * n];
printf("%s(%d,%d) = %u ", name, row, col, Areg);
}
printf("\n");
}
free(Mat);
}
void print_corresp_gpu(ICPCorresp* dmat, int m, int n, const char* name)
{
ICPCorresp* Mat = (ICPCorresp*)malloc(m * n * sizeof(ICPCorresp));
hipMemcpy(Mat, dmat, m * n * sizeof(ICPCorresp), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
for (int row = 0; row < m; row++) {
for (int col = 0; col < n; col++) {
ICPCorresp Areg = Mat[col + row * n];
printf("%s(%d,%d) = (%f,%d) ", name, row, col, Areg.dist, Areg.id);
}
printf("\n");
}
free(Mat);
}
CPUMatrix icp_gpu(CPUMatrix& P, CPUMatrix& Q, unsigned iterations)
{
// Assuming most of the time P.getdim1() == Q.getdim1()
//----- MALLOC -----/
/*
hipMalloc(Q_center) dim(Q.dim1)
hipMalloc(Q_centered) dim(Q.dim0 * Q.dim1)
hipMalloc(P_copy) // the size won't change
hipMalloc(P_centered) dim(P.dim0 * P.dim1)
hipMalloc(P_center) (axis = 0) (sizeof * dim1)?
hipMalloc(cross_var) (3*3) aka (dim1 * dim1)
hipMalloc(U) and V_T ? S is not used
// U dim(cov.dim0 * cov.dim0) and V (cov.dim1 * cov.dim1)
hipMalloc(R) rotation matrix dim(U.dim0 * VT.dim1)
hipMalloc(t) translation matrix dim(Qcenter.Dim0 * Qcenter.dim1)
hipMalloc(corresps) dim(P
*/
// Device pointers
double* dQ_center, *dQ_centered,
*dP_copy, *dP_centered,*dP_center,
*dDot_temp,
*dU, *dS, *dV_T,
*dR, *dR_transpose, *dt;
// Corresps device pointers
ICPCorresp* dcorresps;
double* dcross_var;
double* d_R;
size_t dcorresps_pitch;
size_t cross_var_pitch = P.getDim1() * Q.getDim1() * sizeof(double);
size_t reducepitch = Q.getDim1() * sizeof(double);
size_t r_pitch = P.getDim1() * Q.getDim1() * sizeof(double);
size_t cov_pitch = P.getDim1() * Q.getDim1() * sizeof(double);
size_t threads_num = 1024;
size_t batchsize = 16;
//==== Init ====
dQ_center = nullptr; // reduce_0 function does the allocation if nullptr
hipMalloc(&dQ_centered, Q.getDim0() * Q.getDim1() * sizeof(double));
hipMalloc(&dP_copy, P.getDim0() * P.getDim1() * sizeof(double));
hipMalloc(&dP_centered, P.getDim0() * P.getDim1() * sizeof(double));
dP_center = nullptr; // reduce_0 function does the allocation if nullptr
hipMalloc(&dDot_temp, P.getDim1() * P.getDim1() * sizeof(double));
hipMalloc(&dU, P.getDim1() * P.getDim1() * sizeof(double));
hipMalloc(&dS, P.getDim1() * P.getDim1() * sizeof(double)); // FIXME shape?
hipMalloc(&dV_T, P.getDim1() * P.getDim1() * sizeof(double));
hipMalloc(&dR, P.getDim1() * P.getDim1() * sizeof(double));
hipMalloc(&dR_transpose, P.getDim1() * P.getDim1() * sizeof(double));
hipMalloc(&dt, Q.getDim1() * sizeof(double));
hipMallocPitch((void**)&dcorresps, &dcorresps_pitch, Q.getDim0() * sizeof(ICPCorresp), batchsize);
cudaCheckError();
hipMalloc((void**)&d_R, batchsize * r_pitch);
cudaCheckError();
hipMalloc((void**)&dcross_var, 1 * cov_pitch);
cudaCheckError();
//----- MEMCPY -----/
hipMemcpy(dQ_centered, Q.getArray(), Q.getDim0() * Q.getDim1() * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dP_copy, P.getArray(), P.getDim0() * P.getDim1() * sizeof(double), hipMemcpyHostToDevice);
// Center data P and Q
// Q_center cuda malloc and mean
// Move Q to device and call it Q_centered, apply Q_centered = Q_centered - Q_center
//------COMPUTATION------/
// pitch = dim1 * sizeof()
// Mean Q_center = Q.mean(0)
reduce_0(MatrixReduceOP::MEAN, dQ_centered, &dQ_center, Q.getDim1(), Q.getDim0(), Q.getDim1() * sizeof(double), &reducepitch, threads_num);
// Subtract Q -= Q_center
dim3 blocksize(32, 32);
auto gridsize = get_gridsize(Q.getDim0(), Q.getDim1(), 1, Q.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dQ_centered, dQ_center, dQ_centered, MatrixOP::SUBTRACT,
Q.getDim0(), Q.getDim1(), Q.getDim1() * sizeof(double),
1, Q.getDim1(), Q.getDim1() * sizeof(double),
Q.getDim0(), Q.getDim1(), Q.getDim1() * sizeof(double));
// cuda memcpy device to device to put equal P_centered and P_copy
for (unsigned i = 0; i < iterations; ++i)
{
// Mean calculation, pass P_center pointer directly as result
mean_0(dP_copy, &dP_center, P.getDim1(), P.getDim0(), P.getDim1() * sizeof(double), &reducepitch, threads_num);
// Center P
// Substract and put result in P_centered
// but first compute new gridsize
gridsize = get_gridsize(P.getDim0(), P.getDim1(), 1, P.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dP_copy, dP_center, dP_centered, MatrixOP::SUBTRACT,
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double),
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double));
// Compute correspondences indices
// Call correspondence indices gpu with (P_centered, Q_centered)
// Compute cross var GPU, call with (P_centered, Q_centered, corresps, default_kernel)
get_batch_cov(dP_centered, P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double),
dQ_centered, Q.getDim0(), Q.getDim1(), Q.getDim1() * sizeof(double),
dcorresps, batchsize, Q.getDim0(), dcorresps_pitch,
d_R, batchsize, P.getDim1() * Q.getDim1(), r_pitch,
dcross_var, P.getDim1(), Q.getDim1(), cov_pitch,
batchsize
);
// cross_var is here 3*3 mat
svd_gpu(dcross_var, P.getDim1(), P.getDim1(), dV_T, dS, dU);
// Rotation matrix
matrixMultiplication(dU, dV_T, dR,
P.getDim1(), P.getDim1(),
P.getDim1(), P.getDim1(),
P.getDim1(), P.getDim1());
// Translation Matrix
// 3 different calculations
// transpose
gpuTranspose(dR, dR_transpose, P.getDim1(), P.getDim1());
// dot product
// Normally dt should fit the right dimension
matrixMultiplication(dP_center, dR_transpose, dt,
1, P.getDim1(),
P.getDim1(), P.getDim1(),
1, P.getDim1());
// subtract
gridsize = get_gridsize(1, Q.getDim1(), 1, P.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dQ_center, dt, dt, MatrixOP::SUBTRACT,
1, Q.getDim1(), Q.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double));
// Update P
// use same device pointer for the dot product both dimensions being the same
// first transpose - already done with R transpose
// dot product / use P_centered to store the result bc no need of the data anymore
matrixMultiplication(dP_copy, dR_transpose, dP_centered,
P.getDim0(), P.getDim1(),
P.getDim1(), P.getDim1(),
P.getDim0(), P.getDim1());
// plus
gridsize = get_gridsize(P.getDim0(), P.getDim1(), 1, P.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dP_centered, dt, dP_copy, MatrixOP::ADD,
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double),
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double));
}
hipDeviceSynchronize();
double* P_result = (double*)malloc(P.getDim0() * P.getDim1() * sizeof(double));
hipMemcpy(P_result, dP_copy, P.getDim0() * P.getDim1() * sizeof(double), hipMemcpyDeviceToHost);
hipFree(dQ_center);
hipFree(dQ_centered);
hipFree(dP_copy);
hipFree(dP_centered);
hipFree(dP_center);
hipFree(dDot_temp);
hipFree(dcross_var);
hipFree(dcorresps);
hipFree(d_R);
hipFree(dU);
hipFree(dV_T);
hipFree(dR);
hipFree(dR_transpose);
hipFree(dt);
hipDeviceReset();
cudaCheckError();
return CPUMatrix(P_result, P.getDim0(), P.getDim1());
}
__global__ void naiveGPUTranspose(const double *d_a, double *d_b, const int rows, const int cols)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int index_in = i * cols + j;
int index_out = j * rows + i;
if (i < rows && j < cols)
d_b[index_out] = d_a[index_in];
}
void gpuTranspose(double* A, double* B, int numRows, int numColumns) {
// declare the number of blocks per grid and the number of threads per block
dim3 threadPerBlock(Tile_size, Tile_size);//Number of threads in each block
dim3 numBlocks((numColumns/ Tile_size) + 1, (numRows/ Tile_size) + 1);//Number of Blocks required
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( naiveGPUTranspose), dim3(numBlocks), dim3(threadPerBlock), 0, 0, A, B, numRows, numColumns);
}
CPUMatrix icp_gpu_optimized(CPUMatrix& P, CPUMatrix& Q, unsigned iterations, std::string &method) {
// Assuming most of the time P.getdim1() == Q.getdim1()
//----- MALLOC -----/
/*
hipMalloc(Q_center) dim(Q.dim1)
hipMalloc(Q_centered) dim(Q.dim0 * Q.dim1)
hipMalloc(P_copy) // the size won't change
hipMalloc(P_centered) dim(P.dim0 * P.dim1)
hipMalloc(P_center) (axis = 0) (sizeof * dim1)?
hipMalloc(cross_var) (3*3) aka (dim1 * dim1)
hipMalloc(U) and V_T ? S is not used
// U dim(cov.dim0 * cov.dim0) and V (cov.dim1 * cov.dim1)
hipMalloc(R) rotation matrix dim(U.dim0 * VT.dim1)
hipMalloc(t) translation matrix dim(Qcenter.Dim0 * Qcenter.dim1)
hipMalloc(corresps) dim(P
*/
// Device pointers
double* dQ_center, * dQ_centered,
* dP_copy, * dP_centered, * dP_center,
* dDot_temp,
* dU, * dS, * dV_T,
* dR, * dR_transpose, * dt;
// Corresps device pointers
unsigned int* dcorresps;
double* dcross_var = nullptr;
//double* d_R = nullptr;
unsigned int d_r0 = P.getDim0(), d_r1 = P.getDim1() * Q.getDim1();
size_t cross_var_pitch = P.getDim1() * Q.getDim1() * sizeof(double);
size_t reducepitch = Q.getDim1() * sizeof(double);
size_t r_pitch;
size_t cov_pitch = P.getDim1() * Q.getDim1() * sizeof(double);
// for new optimized version private use
unsigned int dist_1;
ICPCorresp *d_dist = nullptr;
size_t threads_num = 1024;
//==== Init ====
dQ_center = nullptr; // reduce_0 function does the allocation if nullptr
hipMalloc(&dQ_centered, Q.getDim0() * Q.getDim1() * sizeof(double));
hipMalloc(&dP_copy, P.getDim0() * P.getDim1() * sizeof(double));
hipMalloc(&dP_centered, P.getDim0() * P.getDim1() * sizeof(double));
dP_center = nullptr; // reduce_0 function does the allocation if nullptr
hipMalloc(&dDot_temp, P.getDim1() * P.getDim1() * sizeof(double));
hipMalloc(&dU, P.getDim1() * P.getDim1() * sizeof(double));
hipMalloc(&dS, P.getDim1() * P.getDim1() * sizeof(double));
hipMalloc(&dV_T, P.getDim1() * P.getDim1() * sizeof(double));
hipMalloc(&dR, P.getDim1() * P.getDim1() * sizeof(double));
hipMalloc(&dR_transpose, P.getDim1() * P.getDim1() * sizeof(double));
hipMalloc(&dt, Q.getDim1() * sizeof(double));
hipMalloc((void**)&dcorresps, P.getDim0() * sizeof(unsigned int));
cudaCheckError();
//----- MEMCPY -----/
hipMemcpy(dQ_centered, Q.getArray(), Q.getDim0() * Q.getDim1() * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dP_copy, P.getArray(), P.getDim0() * P.getDim1() * sizeof(double), hipMemcpyHostToDevice);
// Center data P and Q
// Q_center cuda malloc and mean
// Move Q to device and call it Q_centered, apply Q_centered = Q_centered - Q_center
//------COMPUTATION------/
// pitch = dim1 * sizeof()
// Mean Q_center = Q.mean(0)
reduce_0(MatrixReduceOP::MEAN, dQ_centered, &dQ_center, Q.getDim1(), Q.getDim0(), Q.getDim1() * sizeof(double), &reducepitch, threads_num);
// Subtract Q -= Q_center
dim3 blocksize(32, 32);
auto gridsize = get_gridsize(Q.getDim0(), Q.getDim1(), 1, Q.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dQ_centered, dQ_center, dQ_centered, MatrixOP::SUBTRACT,
Q.getDim0(), Q.getDim1(), Q.getDim1() * sizeof(double),
1, Q.getDim1(), Q.getDim1() * sizeof(double),
Q.getDim0(), Q.getDim1(), Q.getDim1() * sizeof(double));
// cuda memcpy device to device to make P_centered equal to P_copy
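// Each iteration: (1) re-center P around its mean, (2) find for every point of P a
// corresponding point in the centered Q, (3) sum the per-point cross-covariances into a
// dim1 x dim1 matrix (3x3 for 3-D points), (4) SVD it and form R = U * V^T,
// (5) t = Q_center - P_center * R^T, (6) update P <- P * R^T + t.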
for (unsigned i = 0; i < iterations; ++i)
{
// Mean calculation, pass P_center pointer directly as result
mean_0(dP_copy, &dP_center, P.getDim1(), P.getDim0(), P.getDim1() * sizeof(double), &reducepitch, threads_num);
// Center P
// Subtract and put result in P_centered
// but first compute new gridsize
gridsize = get_gridsize(P.getDim0(), P.getDim1(), 1, P.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dP_copy, dP_center, dP_centered, MatrixOP::SUBTRACT,
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double),
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double));
// Compute correspondences indices
// Call correspondence indices gpu with (P_centered, Q_centered)
// Compute cross var GPU, call with (P_centered, Q_centered, corresps, default_kernel)
// TODO: have method option support
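// "-loop" selects the plain correspondence kernel, while the "-shared" flags pick the
// optimized kernels from libgpuicp; the single-iteration variant keeps an explicit
// distance buffer (d_dist) that is freed after the loop.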
if (method == "-loop")
{
get_array_correspondences(dcorresps, dP_centered, dQ_centered,
P.getDim0(), P.getDim1(),
Q.getDim0(), Q.getDim1());
} else if (method == "-shared") {
get_array_correspondences_optimized_one_iter(dcorresps, &d_dist, &dist_1, dP_centered, dQ_centered, P.getDim0(), P.getDim1(), Q.getDim0(), Q.getDim1());
} else if (method == "-shared-loop") {
get_array_correspondences_optimized(dcorresps, dP_centered, dQ_centered,
P.getDim0(), P.getDim1(),
Q.getDim0(), Q.getDim1());
}
//print_Mat_gpu(dcorresps, 1, P.getDim0(), "Csp");
get_array_cross_covs_flattened(dP_centered, dQ_centered, &dcross_var, dcorresps,
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double),
Q.getDim0(), Q.getDim1(), Q.getDim1() * sizeof(double),
d_r0, d_r1, &r_pitch,
P.getDim0(), true);
reduce_0(MatrixReduceOP::SUM, dcross_var, &dcross_var, (size_t) d_r1, (size_t) d_r0, r_pitch, &r_pitch, threads_num);
//print_Mat_gpu(dcross_var, P.getDim1(), P.getDim1(), "cov");
// cross_var is here 3*3 mat
svd_gpu(dcross_var, P.getDim1(), P.getDim1(), dV_T, dS, dU);
// Rotation matrix
matrixMultiplication(dU, dV_T, dR,
P.getDim1(), P.getDim1(),
P.getDim1(), P.getDim1(),
P.getDim1(), P.getDim1());
// Translation Matrix
// 3 different calculations
// transpose
gpuTranspose(dR, dR_transpose, P.getDim1(), P.getDim1());
// dot product
// Normally dt should fit the right dimension
matrixMultiplication(dP_center, dR_transpose, dt,
1, P.getDim1(),
P.getDim1(), P.getDim1(),
1, P.getDim1());
// subtract
gridsize = get_gridsize(1, Q.getDim1(), 1, P.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dQ_center, dt, dt, MatrixOP::SUBTRACT,
1, Q.getDim1(), Q.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double));
// Update P
// use the same device pointer for the dot product since both dimensions are the same
// first transpose - already done with R transpose
// dot product / reuse P_centered to store the result since its data is no longer needed
matrixMultiplication(dP_copy, dR_transpose, dP_centered,
P.getDim0(), P.getDim1(),
P.getDim1(), P.getDim1(),
P.getDim0(), P.getDim1());
// plus
gridsize = get_gridsize(P.getDim0(), P.getDim1(), 1, P.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dP_centered, dt, dP_copy, MatrixOP::ADD,
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double),
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double));
}
hipDeviceSynchronize();
double* P_result = (double*)malloc(P.getDim0() * P.getDim1() * sizeof(double));
hipMemcpy(P_result, dP_copy, P.getDim0() * P.getDim1() * sizeof(double), hipMemcpyDeviceToHost);
hipFree(dQ_center);
hipFree(dQ_centered);
hipFree(dP_copy);
hipFree(dP_centered);
hipFree(dP_center);
hipFree(dDot_temp);
hipFree(dcross_var);
hipFree(dcorresps);
hipFree(dU);
hipFree(dV_T);
hipFree(dR);
if (d_dist != nullptr)
hipFree(d_dist);
hipFree(dR_transpose);
hipFree(dt);
hipDeviceReset();
cudaCheckError();
return CPUMatrix(P_result, P.getDim0(), P.getDim1());
}
| dc93fc9c327cf6364586d2351fcd0115c6129cf0.cu | #include <vector>
#include <limits>
#include <tuple>
#include <iostream>
#include <cmath>
#include "libalg/basic_operations.hpp"
#include "libalg/alg.hpp"
#include "libalg/CPUMatrix.hpp"
#include "libalg/CPUView.hpp"
#include "libalg/broadcasting.hpp"
#include "error.hpp"
#include "cpu/tuple.hpp"
#include "gpu/icp.cuh"
#include "libgpualg/mult.cuh"
#include "libgpualg/euclidist.cuh"
#include "error.cuh"
#include "libgpualg/mean.cuh"
#include "libgpualg/ope.cuh"
#include "libgpualg/svd.cuh"
#include "libgpuicp/corresp_optimized.cuh"
#include "libgpuicp/dist.cuh"
#include "libgpuicp/batchcovs.cuh"
#define Tile_size 2
/* --------- CPU Version Calling GPU Kernel ------------ */
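// Brute-force nearest neighbours on the host: for every row of P, scan all rows of Q and
// keep the index of the closest one under the L2 norm.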
__host__ std::vector<std::tuple<size_t, int>> get_correspondence_indices(double *P, double *Q,
size_t P_r, size_t P_c, size_t Q_r, size_t Q_c)
{
std::vector<std::tuple<size_t, int>> correspondances = {};
for (size_t i = 0; i < P_r; i++)
{
double *p_point = P + i * P_c;
double min_dist = std::numeric_limits<double>::max();
int chosen_idx = -1;
for (size_t j = 0; j < Q_r; j++)
{
double *q_point = Q + j * Q_c;
double dist = std::sqrt(element_wise_reduce(p_point, q_point, 1, P_c, 1, Q_c,
squared_norm_2, add, add)); //norm 2 between 2 vectors
if (dist < min_dist)
{
min_dist = dist;
chosen_idx = j;
}
}
correspondances.push_back(std::make_tuple(i, chosen_idx));
}
return correspondances;
}
// Intermediation function to be replaced with element_wise_op
__host__ void increment_cov(double *P, double *Q)
{
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
{
P[i*3 + j] = P[i*3 + j] + Q[i*3 + j];
}
}
}
__host__ double* calling_transpose_kernel(double *A, size_t row, size_t column)
{
// Calling transpose kernel
size_t size = sizeof(double) * row * column;
// Allocations
double *d_source_transpose, *d_dest_transpose;
cudaMalloc((void **)&d_source_transpose, size);
cudaMalloc((void **)&d_dest_transpose, size);
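// NOTE: `size` is already a byte count, so the calloc below reserves sizeof(double) times
// more memory than the transposed matrix actually needs.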
double *transposed_Q = (double *)calloc(size, sizeof(double));
// Copy mem and exec
cudaMemcpy(d_source_transpose, A, size, cudaMemcpyHostToDevice);
gpuTranspose(d_source_transpose, d_dest_transpose, row, column);
cudaDeviceSynchronize();
cudaMemcpy(transposed_Q, d_dest_transpose, size, cudaMemcpyDeviceToHost);
// Free cuda mem
cudaFree(d_source_transpose);
cudaFree(d_dest_transpose);
// End of transpose call
return transposed_Q;
}
__host__ double *calling_dot_kernel(double *A, double *B, size_t A_row, size_t A_col, size_t B_row, size_t B_col)
{
size_t sizeA = A_row * A_col * sizeof(double);
size_t sizeB = B_row * B_col * sizeof(double);
size_t sizeC = A_row * B_col * sizeof(double);
double *h_C = (double *)calloc(sizeC, sizeof(double));
double *d_A;
double *d_B;
double *d_C;
cudaMalloc(&d_A, sizeA);
cudaMalloc(&d_B, sizeB);
cudaMalloc(&d_C, sizeC);
cudaMemcpy(d_A, A, sizeA, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, sizeB, cudaMemcpyHostToDevice);
matrixMultiplication(d_A, d_B, d_C, A_row, A_col, B_row, B_col, A_row, B_col);
cudaDeviceSynchronize();
cudaMemcpy(h_C, d_C, sizeC, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return h_C;
}
__host__ double *compute_cross_variance_cpu_call_gpu(double *P, double *Q, std::vector<std::tuple<size_t, int>> correspondences, size_t P_r, size_t P_c,
size_t Q_r, size_t Q_c) //set default function to lambda function??
{
UNUSED(Q_r);
double *cov = (double *)calloc(9, sizeof(double));
for (auto tup : correspondences)
{
auto i = std::get<0>(tup);
auto j = std::get<1>(tup);
double *q_point = Q + j * Q_c;
double *p_point = P + i * P_c;
double *doted_points = nullptr;
double *transposed_Q = calling_transpose_kernel(q_point, 1, Q_c);
dot_product(&doted_points, transposed_Q, p_point, Q_c, 1, 1, P_c); //dim of Q_r * P_r
free (transposed_Q);
increment_cov(cov, doted_points); //need to set element_wise_op but too complicated, doesn't work for some reason.
free(doted_points);
}
return cov;
}
/* -------------- Version GPU Kernel -----------*/
// Implementation with double arrays and no vector for full GPU usage
__global__ void get_correspondence_indices_array_gpu(tuple **correspondances, double *P, double *Q, size_t P_r, size_t P_c, size_t Q_r, size_t Q_c)
{
int push_index = 0;
for (size_t i = 0; i < P_r; i++)
{
double *p_point = P + i * P_c;
double min_dist = std::numeric_limits<double>::max();
int chosen_idx = -1;
for (size_t j = 0; j < Q_r; j++)
{
double *q_point = Q + j * Q_c;
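// NOTE: unlike the host get_correspondence_indices above, this only combines the first
// coordinate of each point, so it is a placeholder rather than a true Euclidean distance.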
double dist = std::sqrt(*p_point + *q_point);
if (dist < min_dist)
{
min_dist = dist;
chosen_idx = j;
}
}
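// Device-side cudaMalloc requires the CUDA device runtime (dynamic parallelism support);
// each correspondence tuple is allocated on the device heap and returned via the output array.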
tuple *new_tup = nullptr;
cudaMalloc(&new_tup, sizeof(tuple));
new_tup->index = i;
new_tup->value = chosen_idx;
correspondances[push_index] = new_tup;
push_index++;
}
}
// Array implementation for GPU
void compute_cross_variance_array(double * cov, double *P, double *Q, std::tuple<size_t, int> *correspondences, size_t P_r, size_t P_c,
size_t Q_r, size_t Q_c) //set default function to lambda function??
{
UNUSED(Q_r);
UNUSED(P_r);
for (size_t index = 0; index < P_r; index ++)
{
auto i = std::get<0>(correspondences[index]);
auto j = std::get<1>(correspondences[index]);
double *q_point = Q + j * Q_c;
double *p_point = P + i * P_c;
double *transposed_Q = transpose(q_point, 1, Q_c);
double *doted_points = nullptr;
dot_product(&doted_points, transposed_Q, p_point, Q_c, 1, 1, P_c); //dim of Q_r * P_r
free (transposed_Q);
increment_cov(cov, doted_points); //need to set element_wise_op but too complicated, doesn't work for some reason.
free(doted_points);
}
}
dim3 get_gridsize(size_t a_0, size_t a_1, size_t b_0, size_t b_1, dim3 blocksize)
{
size_t r_0, r_1;
get_broadcastable_size(a_0, a_1, b_0, b_1, &r_0, &r_1);
int nbblocksx = std::ceil((float)r_1 / blocksize.x);
int nbblocksy = std::ceil((float)r_0 / blocksize.y);
return dim3(nbblocksx, nbblocksy);
}
// TODO: REMOVE ME since useless
__global__ void print_matrix_kern(char* d_A, int pitch, int nbvals)
{
int j;
int idx = threadIdx.x;
double* line = (double*)(d_A + idx * pitch);
printf("Line %d:\n", idx);
__syncthreads();
for (j = 0; j < nbvals; ++j) {
//printf("%6.2f\t", (double)(d_A[idx * pitch + j * sizeof(double)]));
printf("%6.2f\t", line[j]);
__syncthreads();
}
printf("\n");
__syncthreads();
}
void print_Mat_gpu(double *dmat, int m, int n, const char* name)
{
double* Mat = (double*)malloc(m * n * sizeof(double));
cudaMemcpy(Mat, dmat, m * n * sizeof(double), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for (int row = 0; row < m; row++) {
for (int col = 0; col < n; col++) {
double Areg = Mat[col + row * n];
printf("%s(%d,%d) = %f ", name, row, col, Areg);
}
printf("\n");
}
free(Mat);
}
void print_Mat_gpu(unsigned int* dmat, int m, int n, const char* name)
{
unsigned int* Mat = (unsigned int*)malloc(m * n * sizeof(unsigned int));
cudaMemcpy(Mat, dmat, m * n * sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for (int row = 0; row < m; row++) {
for (int col = 0; col < n; col++) {
unsigned int Areg = Mat[col + row * n];
printf("%s(%d,%d) = %u ", name, row, col, Areg);
}
printf("\n");
}
free(Mat);
}
void print_corresp_gpu(ICPCorresp* dmat, int m, int n, const char* name)
{
ICPCorresp* Mat = (ICPCorresp*)malloc(m * n * sizeof(ICPCorresp));
cudaMemcpy(Mat, dmat, m * n * sizeof(ICPCorresp), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for (int row = 0; row < m; row++) {
for (int col = 0; col < n; col++) {
ICPCorresp Areg = Mat[col + row * n];
printf("%s(%d,%d) = (%f,%d) ", name, row, col, Areg.dist, Areg.id);
}
printf("\n");
}
free(Mat);
}
CPUMatrix icp_gpu(CPUMatrix& P, CPUMatrix& Q, unsigned iterations)
{
// Assuming most of the time P.getdim1() == Q.getdim1()
//----- MALLOC -----/
/*
cudaMalloc(Q_center) dim(Q.dim1)
cudaMalloc(Q_centered) dim(Q.dim0 * Q.dim1)
cudaMalloc(P_copy) // the size won't change
cudaMalloc(P_centered) dim(P.dim0 * P.dim1)
cudaMalloc(P_center) (axis = 0) (sizeof * dim1)?
cudaMalloc(cross_var) (3*3) aka (dim1 * dim1)
cudaMalloc(U) and V_T ? S is not used
// U dim(cov.dim0 * cov.dim0) and V (cov.dim1 * cov.dim1)
cudaMalloc(R) rotation matrix dim(U.dim0 * VT.dim1)
cudaMalloc(t) translation matrix dim(Qcenter.Dim0 * Qcenter.dim1)
cudaMalloc(corresps) dim(P
*/
// Device pointers
double* dQ_center, *dQ_centered,
*dP_copy, *dP_centered,*dP_center,
*dDot_temp,
*dU, *dS, *dV_T,
*dR, *dR_transpose, *dt;
// Corresps device pointers
ICPCorresp* dcorresps;
double* dcross_var;
double* d_R;
size_t dcorresps_pitch;
size_t cross_var_pitch = P.getDim1() * Q.getDim1() * sizeof(double);
size_t reducepitch = Q.getDim1() * sizeof(double);
size_t r_pitch = P.getDim1() * Q.getDim1() * sizeof(double);
size_t cov_pitch = P.getDim1() * Q.getDim1() * sizeof(double);
size_t threads_num = 1024;
size_t batchsize = 16;
//==== Init ====
dQ_center = nullptr; // reduce_0 function does the allocation if nullptr
cudaMalloc(&dQ_centered, Q.getDim0() * Q.getDim1() * sizeof(double));
cudaMalloc(&dP_copy, P.getDim0() * P.getDim1() * sizeof(double));
cudaMalloc(&dP_centered, P.getDim0() * P.getDim1() * sizeof(double));
dP_center = nullptr; // reduce_0 function does the allocation if nullptr
cudaMalloc(&dDot_temp, P.getDim1() * P.getDim1() * sizeof(double));
cudaMalloc(&dU, P.getDim1() * P.getDim1() * sizeof(double));
cudaMalloc(&dS, P.getDim1() * P.getDim1() * sizeof(double)); // FIXME shape?
cudaMalloc(&dV_T, P.getDim1() * P.getDim1() * sizeof(double));
cudaMalloc(&dR, P.getDim1() * P.getDim1() * sizeof(double));
cudaMalloc(&dR_transpose, P.getDim1() * P.getDim1() * sizeof(double));
cudaMalloc(&dt, Q.getDim1() * sizeof(double));
cudaMallocPitch((void**)&dcorresps, &dcorresps_pitch, Q.getDim0() * sizeof(ICPCorresp), batchsize);
cudaCheckError();
cudaMalloc((void**)&d_R, batchsize * r_pitch);
cudaCheckError();
cudaMalloc((void**)&dcross_var, 1 * cov_pitch);
cudaCheckError();
//----- MEMCPY -----/
cudaMemcpy(dQ_centered, Q.getArray(), Q.getDim0() * Q.getDim1() * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dP_copy, P.getArray(), P.getDim0() * P.getDim1() * sizeof(double), cudaMemcpyHostToDevice);
// Center data P and Q
// Q_center cuda malloc and mean
// Move Q to device and call it Q_centered, apply Q_centered = Q_centered - Q_center
//------COMPUTATION------/
// pitch = dim1 * sizeof()
// Mean Q_center = Q.mean(0)
reduce_0(MatrixReduceOP::MEAN, dQ_centered, &dQ_center, Q.getDim1(), Q.getDim0(), Q.getDim1() * sizeof(double), &reducepitch, threads_num);
// Subtract Q -= Q_center
dim3 blocksize(32, 32);
auto gridsize = get_gridsize(Q.getDim0(), Q.getDim1(), 1, Q.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dQ_centered, dQ_center, dQ_centered, MatrixOP::SUBTRACT,
Q.getDim0(), Q.getDim1(), Q.getDim1() * sizeof(double),
1, Q.getDim1(), Q.getDim1() * sizeof(double),
Q.getDim0(), Q.getDim1(), Q.getDim1() * sizeof(double));
// cuda memcpy device to device to make P_centered equal to P_copy
for (unsigned i = 0; i < iterations; ++i)
{
// Mean calculation, pass P_center pointer directly as result
mean_0(dP_copy, &dP_center, P.getDim1(), P.getDim0(), P.getDim1() * sizeof(double), &reducepitch, threads_num);
// Center P
// Subtract and put result in P_centered
// but first compute new gridsize
gridsize = get_gridsize(P.getDim0(), P.getDim1(), 1, P.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dP_copy, dP_center, dP_centered, MatrixOP::SUBTRACT,
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double),
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double));
// Compute correspondences indices
// Call correspondence indices gpu with (P_centered, Q_centered)
// Compute cross var GPU, call with (P_centered, Q_centered, corresps, default_kernel)
get_batch_cov(dP_centered, P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double),
dQ_centered, Q.getDim0(), Q.getDim1(), Q.getDim1() * sizeof(double),
dcorresps, batchsize, Q.getDim0(), dcorresps_pitch,
d_R, batchsize, P.getDim1() * Q.getDim1(), r_pitch,
dcross_var, P.getDim1(), Q.getDim1(), cov_pitch,
batchsize
);
// cross_var is here 3*3 mat
svd_gpu(dcross_var, P.getDim1(), P.getDim1(), dV_T, dS, dU);
// Rotation matrix
matrixMultiplication(dU, dV_T, dR,
P.getDim1(), P.getDim1(),
P.getDim1(), P.getDim1(),
P.getDim1(), P.getDim1());
// Translation Matrix
// 3 different calculations
// transpose
gpuTranspose(dR, dR_transpose, P.getDim1(), P.getDim1());
// dot product
// Normally dt should fit the right dimension
matrixMultiplication(dP_center, dR_transpose, dt,
1, P.getDim1(),
P.getDim1(), P.getDim1(),
1, P.getDim1());
// subtract
gridsize = get_gridsize(1, Q.getDim1(), 1, P.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dQ_center, dt, dt, MatrixOP::SUBTRACT,
1, Q.getDim1(), Q.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double));
// Update P
// use the same device pointer for the dot product since both dimensions are the same
// first transpose - already done with R transpose
// dot product / reuse P_centered to store the result since its data is no longer needed
matrixMultiplication(dP_copy, dR_transpose, dP_centered,
P.getDim0(), P.getDim1(),
P.getDim1(), P.getDim1(),
P.getDim0(), P.getDim1());
// plus
gridsize = get_gridsize(P.getDim0(), P.getDim1(), 1, P.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dP_centered, dt, dP_copy, MatrixOP::ADD,
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double),
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double));
}
cudaDeviceSynchronize();
double* P_result = (double*)malloc(P.getDim0() * P.getDim1() * sizeof(double));
cudaMemcpy(P_result, dP_copy, P.getDim0() * P.getDim1() * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(dQ_center);
cudaFree(dQ_centered);
cudaFree(dP_copy);
cudaFree(dP_centered);
cudaFree(dP_center);
cudaFree(dDot_temp);
cudaFree(dcross_var);
cudaFree(dcorresps);
cudaFree(d_R);
cudaFree(dU);
cudaFree(dV_T);
cudaFree(dR);
cudaFree(dR_transpose);
cudaFree(dt);
cudaDeviceReset();
cudaCheckError();
return CPUMatrix(P_result, P.getDim0(), P.getDim1());
}
__global__ void naiveGPUTranspose(const double *d_a, double *d_b, const int rows, const int cols)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int index_in = i * cols + j;
int index_out = j * rows + i;
if (i < rows && j < cols)
d_b[index_out] = d_a[index_in];
}
void gpuTranspose(double* A, double* B, int numRows, int numColumns) {
// declare the number of blocks per grid and the number of threads per block
dim3 threadPerBlock(Tile_size, Tile_size);//Number of threads in each block
dim3 numBlocks((numColumns/ Tile_size) + 1, (numRows/ Tile_size) + 1);//Number of Blocks required
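// The +1 over-provisions the grid by up to one block per dimension (Tile_size is 2 here);
// the bounds check inside naiveGPUTranspose masks the surplus threads.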
//@@ Launch the GPU Kernel here
naiveGPUTranspose<<<numBlocks, threadPerBlock>>>(A, B, numRows, numColumns);
}
CPUMatrix icp_gpu_optimized(CPUMatrix& P, CPUMatrix& Q, unsigned iterations, std::string &method) {
// Assuming most of the time P.getdim1() == Q.getdim1()
//----- MALLOC -----/
/*
cudaMalloc(Q_center) dim(Q.dim1)
cudaMalloc(Q_centered) dim(Q.dim0 * Q.dim1)
cudaMalloc(P_copy) // the size won't change
cudaMalloc(P_centered) dim(P.dim0 * P.dim1)
cudaMalloc(P_center) (axis = 0) (sizeof * dim1)?
cudaMalloc(cross_var) (3*3) aka (dim1 * dim1)
cudaMalloc(U) and V_T ? S is not used
// U dim(cov.dim0 * cov.dim0) and V (cov.dim1 * cov.dim1)
cudaMalloc(R) rotation matrix dim(U.dim0 * VT.dim1)
cudaMalloc(t) translation matrix dim(Qcenter.Dim0 * Qcenter.dim1)
cudaMalloc(corresps) dim(P
*/
// Device pointers
double* dQ_center, * dQ_centered,
* dP_copy, * dP_centered, * dP_center,
* dDot_temp,
* dU, * dS, * dV_T,
* dR, * dR_transpose, * dt;
// Corresps device pointers
unsigned int* dcorresps;
double* dcross_var = nullptr;
//double* d_R = nullptr;
unsigned int d_r0 = P.getDim0(), d_r1 = P.getDim1() * Q.getDim1();
size_t cross_var_pitch = P.getDim1() * Q.getDim1() * sizeof(double);
size_t reducepitch = Q.getDim1() * sizeof(double);
size_t r_pitch;
size_t cov_pitch = P.getDim1() * Q.getDim1() * sizeof(double);
// for new optimized version private use
unsigned int dist_1;
ICPCorresp *d_dist = nullptr;
size_t threads_num = 1024;
//==== Init ====
dQ_center = nullptr; // reduce_0 function does the allocation if nullptr
cudaMalloc(&dQ_centered, Q.getDim0() * Q.getDim1() * sizeof(double));
cudaMalloc(&dP_copy, P.getDim0() * P.getDim1() * sizeof(double));
cudaMalloc(&dP_centered, P.getDim0() * P.getDim1() * sizeof(double));
dP_center = nullptr; // reduce_0 function does the allocation if nullptr
cudaMalloc(&dDot_temp, P.getDim1() * P.getDim1() * sizeof(double));
cudaMalloc(&dU, P.getDim1() * P.getDim1() * sizeof(double));
cudaMalloc(&dS, P.getDim1() * P.getDim1() * sizeof(double));
cudaMalloc(&dV_T, P.getDim1() * P.getDim1() * sizeof(double));
cudaMalloc(&dR, P.getDim1() * P.getDim1() * sizeof(double));
cudaMalloc(&dR_transpose, P.getDim1() * P.getDim1() * sizeof(double));
cudaMalloc(&dt, Q.getDim1() * sizeof(double));
cudaMalloc((void**)&dcorresps, P.getDim0() * sizeof(unsigned int));
cudaCheckError();
//----- MEMCPY -----/
cudaMemcpy(dQ_centered, Q.getArray(), Q.getDim0() * Q.getDim1() * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dP_copy, P.getArray(), P.getDim0() * P.getDim1() * sizeof(double), cudaMemcpyHostToDevice);
// Center data P and Q
// Q_center cuda malloc and mean
// Move Q to device and call it Q_centered, apply Q_centered = Q_centered - Q_center
//------COMPUTATION------/
// pitch = dim1 * sizeof()
// Mean Q_center = Q.mean(0)
reduce_0(MatrixReduceOP::MEAN, dQ_centered, &dQ_center, Q.getDim1(), Q.getDim0(), Q.getDim1() * sizeof(double), &reducepitch, threads_num);
// Subtract Q -= Q_center
dim3 blocksize(32, 32);
auto gridsize = get_gridsize(Q.getDim0(), Q.getDim1(), 1, Q.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dQ_centered, dQ_center, dQ_centered, MatrixOP::SUBTRACT,
Q.getDim0(), Q.getDim1(), Q.getDim1() * sizeof(double),
1, Q.getDim1(), Q.getDim1() * sizeof(double),
Q.getDim0(), Q.getDim1(), Q.getDim1() * sizeof(double));
// cuda memcpy device to device to make P_centered equal to P_copy
for (unsigned i = 0; i < iterations; ++i)
{
// Mean calculation, pass P_center pointer directly as result
mean_0(dP_copy, &dP_center, P.getDim1(), P.getDim0(), P.getDim1() * sizeof(double), &reducepitch, threads_num);
// Center P
// Subtract and put result in P_centered
// but first compute new gridsize
gridsize = get_gridsize(P.getDim0(), P.getDim1(), 1, P.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dP_copy, dP_center, dP_centered, MatrixOP::SUBTRACT,
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double),
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double));
// Compute correspondences indices
// Call correspondence indices gpu with (P_centered, Q_centered)
// Compute cross var GPU, call with (P_centered, Q_centered, corresps, default_kernel)
// TODO: have method option support
if (method == "-loop")
{
get_array_correspondences(dcorresps, dP_centered, dQ_centered,
P.getDim0(), P.getDim1(),
Q.getDim0(), Q.getDim1());
} else if (method == "-shared") {
get_array_correspondences_optimized_one_iter(dcorresps, &d_dist, &dist_1, dP_centered, dQ_centered, P.getDim0(), P.getDim1(), Q.getDim0(), Q.getDim1());
} else if (method == "-shared-loop") {
get_array_correspondences_optimized(dcorresps, dP_centered, dQ_centered,
P.getDim0(), P.getDim1(),
Q.getDim0(), Q.getDim1());
}
//print_Mat_gpu(dcorresps, 1, P.getDim0(), "Csp");
get_array_cross_covs_flattened(dP_centered, dQ_centered, &dcross_var, dcorresps,
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double),
Q.getDim0(), Q.getDim1(), Q.getDim1() * sizeof(double),
d_r0, d_r1, &r_pitch,
P.getDim0(), true);
reduce_0(MatrixReduceOP::SUM, dcross_var, &dcross_var, (size_t) d_r1, (size_t) d_r0, r_pitch, &r_pitch, threads_num);
//print_Mat_gpu(dcross_var, P.getDim1(), P.getDim1(), "cov");
// cross_var is here 3*3 mat
svd_gpu(dcross_var, P.getDim1(), P.getDim1(), dV_T, dS, dU);
// Rotation matrix
matrixMultiplication(dU, dV_T, dR,
P.getDim1(), P.getDim1(),
P.getDim1(), P.getDim1(),
P.getDim1(), P.getDim1());
// Translation Matrix
// 3 different calculations
// transpose
gpuTranspose(dR, dR_transpose, P.getDim1(), P.getDim1());
// dot product
// Normally dt should fit the right dimension
matrixMultiplication(dP_center, dR_transpose, dt,
1, P.getDim1(),
P.getDim1(), P.getDim1(),
1, P.getDim1());
// subtract
gridsize = get_gridsize(1, Q.getDim1(), 1, P.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dQ_center, dt, dt, MatrixOP::SUBTRACT,
1, Q.getDim1(), Q.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double));
// Update P
// use the same device pointer for the dot product since both dimensions are the same
// first transpose - already done with R transpose
// dot product / reuse P_centered to store the result since its data is no longer needed
matrixMultiplication(dP_copy, dR_transpose, dP_centered,
P.getDim0(), P.getDim1(),
P.getDim1(), P.getDim1(),
P.getDim0(), P.getDim1());
// plus
gridsize = get_gridsize(P.getDim0(), P.getDim1(), 1, P.getDim1(), blocksize);
matrix_op<double>(gridsize, blocksize, dP_centered, dt, dP_copy, MatrixOP::ADD,
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double),
1, P.getDim1(), P.getDim1() * sizeof(double),
P.getDim0(), P.getDim1(), P.getDim1() * sizeof(double));
}
cudaDeviceSynchronize();
double* P_result = (double*)malloc(P.getDim0() * P.getDim1() * sizeof(double));
cudaMemcpy(P_result, dP_copy, P.getDim0() * P.getDim1() * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(dQ_center);
cudaFree(dQ_centered);
cudaFree(dP_copy);
cudaFree(dP_centered);
cudaFree(dP_center);
cudaFree(dDot_temp);
cudaFree(dcross_var);
cudaFree(dcorresps);
cudaFree(dU);
cudaFree(dV_T);
cudaFree(dR);
if (d_dist != nullptr)
cudaFree(d_dist);
cudaFree(dR_transpose);
cudaFree(dt);
cudaDeviceReset();
cudaCheckError();
return CPUMatrix(P_result, P.getDim0(), P.getDim1());
}
|
b789e9b9350ef889a8c3082034009d4e5e6b69fa.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2011, Tokyo Institute of Technology.
// All rights reserved.
//
// This file is distributed under the license described in
// LICENSE.txt.
//
// Author: Naoya Maruyama ([email protected])
#include "runtime/buffer_cuda.h"
#include <hip/hip_runtime.h>
#include <cutil.h>
#define CUDA_MEMCPY_ASYNC_SIZE (64 << 10) // 64KB
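// Host-to-device copies of less than 64 KB can return before the transfer has completed,
// so transfers at or below this threshold are followed by an explicit device sync.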
namespace physis {
namespace runtime {
BufferCUDAHost::BufferCUDAHost(size_t elm_size)
: Buffer(elm_size) {
mpi_buf_ = new BufferHost(elm_size);
deleter_ = BufferCUDAHost::DeleteChunk;
}
BufferCUDAHost::BufferCUDAHost(int num_dims, size_t elm_size)
: Buffer(num_dims, elm_size) {
mpi_buf_ = new BufferHost(num_dims, elm_size);
deleter_ = BufferCUDAHost::DeleteChunk;
}
BufferCUDAHost::~BufferCUDAHost() {
delete mpi_buf_;
}
void BufferCUDAHost::DeleteChunk(void *ptr) {
CUDA_SAFE_CALL(hipHostFree(ptr));
return;
}
void *BufferCUDAHost::GetChunk(const IndexArray &size) {
void *ptr = NULL;
if (size.accumulate(num_dims_) > 0) {
LOG_INFO() << "Trying to allocate host pinned memory of "
<< GetLinearSize(size) << " bytes.\n";
CUDA_SAFE_CALL(hipHostMalloc(&ptr, GetLinearSize(size)));
}
return ptr;
}
void BufferCUDAHost::Copyin(const void *buf, const IndexArray &offset,
const IndexArray &size) {
EnsureCapacity(offset+size);
// Offset access is not yet supported.
PSAssert(offset == 0);
memcpy(Get(), buf, GetLinearSize(size));
}
void BufferCUDAHost::Copyin(const BufferHost &buf, const IndexArray &offset,
const IndexArray &size) {
Copyin(buf.Get(), offset, size);
}
void BufferCUDAHost::Copyout(void *buf, const IndexArray &offset,
const IndexArray &s) {
PSAssert(offset + s <= size());
// Offset access is not yet supported.
PSAssert(offset == 0);
memcpy(buf, Get(), GetLinearSize(s));
}
void BufferCUDAHost::Copyout(BufferHost &buf,
const IndexArray &offset,
const IndexArray &size) {
buf.EnsureCapacity(num_dims_, elm_size_, size);
Copyout(buf.Get(), offset, size);
}
void BufferCUDAHost::MPIRecv(int src, MPI_Comm comm,
const IndexArray &offset,
const IndexArray &size) {
mpi_buf_->MPIRecv(src, comm, IndexArray(), size);
Copyin(*mpi_buf_, offset, size);
//mpi_buf_->Delete();
}
void BufferCUDAHost::MPISend(int dst, MPI_Comm comm,
const IndexArray &offset,
const IndexArray &size) {
Copyout(*mpi_buf_, offset, size);
mpi_buf_->MPISend(dst, comm, IndexArray(), size);
//mpi_buf_->Delete();
}
//
// BufferCUDAHostMapped
//
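// Pinned host memory allocated with the Mapped flag: kernels can write to it directly
// through the device pointer obtained in Allocate() (see BufferCUDADev3D::Copyout_Opt below).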
BufferCUDAHostMapped::BufferCUDAHostMapped(size_t elm_size)
: Buffer(elm_size), dev_ptr_(NULL) {
mpi_buf_ = new BufferHost(elm_size);
deleter_ = BufferCUDAHostMapped::DeleteChunk;
}
BufferCUDAHostMapped::BufferCUDAHostMapped(int num_dims, size_t elm_size)
: Buffer(num_dims, elm_size), dev_ptr_(NULL) {
mpi_buf_ = new BufferHost(num_dims, elm_size);
deleter_ = BufferCUDAHostMapped::DeleteChunk;
}
BufferCUDAHostMapped::~BufferCUDAHostMapped() {
delete mpi_buf_;
}
void BufferCUDAHostMapped::DeleteChunk(void *ptr) {
CUDA_SAFE_CALL(hipHostFree(ptr));
return;
}
void *BufferCUDAHostMapped::GetChunk(const IndexArray &size) {
void *ptr = NULL;
if (size.accumulate(num_dims_) > 0) {
LOG_INFO() << "Trying to allocate host pinned memory of "
<< GetLinearSize(size) << " bytes.\n";
CUDA_SAFE_CALL(hipHostMalloc(&ptr, GetLinearSize(size),
hipHostMallocMapped));
}
return ptr;
}
void BufferCUDAHostMapped::Allocate(int num_dims, size_t elm_size,
const IndexArray &size) {
Delete();
if (size.accumulate(num_dims)) {
num_dims_ = num_dims;
elm_size_ = elm_size;
buf_ = GetChunk(size);
CUDA_SAFE_CALL(hipHostGetDevicePointer(&dev_ptr_, buf_, 0));
}
size_ = size;
}
void BufferCUDAHostMapped::Copyin(const void *buf, const IndexArray &offset,
const IndexArray &size) {
EnsureCapacity(offset+size);
// Offset access is not yet supported.
PSAssert(offset == 0);
memcpy(Get(), buf, GetLinearSize(size));
}
void BufferCUDAHostMapped::Copyin(const BufferHost &buf, const IndexArray &offset,
const IndexArray &size) {
Copyin(buf.Get(), offset, size);
}
void BufferCUDAHostMapped::Copyout(void *buf, const IndexArray &offset,
const IndexArray &s) {
PSAssert(offset + s <= size());
// Offset access is not yet supported.
PSAssert(offset == 0);
memcpy(buf, Get(), GetLinearSize(s));
}
void BufferCUDAHostMapped::Copyout(BufferHost &buf,
const IndexArray &offset,
const IndexArray &size) {
buf.EnsureCapacity(num_dims_, elm_size_, size);
Copyout(buf.Get(), offset, size);
}
void BufferCUDAHostMapped::MPIRecv(int src, MPI_Comm comm,
const IndexArray &offset,
const IndexArray &size) {
mpi_buf_->MPIRecv(src, comm, IndexArray(), size);
Copyin(*mpi_buf_, offset, size);
//mpi_buf_->Delete();
}
void BufferCUDAHostMapped::MPISend(int dst, MPI_Comm comm,
const IndexArray &offset,
const IndexArray &size) {
Copyout(*mpi_buf_, offset, size);
mpi_buf_->MPISend(dst, comm, IndexArray(), size);
//mpi_buf_->Delete();
}
//
// BufferCUDADev
//
BufferCUDADev::BufferCUDADev(size_t elm_size)
: Buffer(elm_size), strm_(0) {
pinned_buf_ = new BufferCUDAHost(elm_size);
deleter_ = BufferCUDADev::DeleteChunk;
}
BufferCUDADev::BufferCUDADev(int num_dims, size_t elm_size)
: Buffer(num_dims, elm_size), strm_(0) {
pinned_buf_ = new BufferCUDAHost(num_dims, elm_size);
deleter_ = BufferCUDADev::DeleteChunk;
}
BufferCUDADev::~BufferCUDADev() {
delete pinned_buf_;
}
void BufferCUDADev::Copyin(const void *buf, const IndexArray &offset,
const IndexArray &size) {
pinned_buf_->Copyin(buf, size);
Copyin(*pinned_buf_, offset, size);
}
void BufferCUDADev::Copyin(const BufferHost &buf, const IndexArray &offset,
const IndexArray &size) {
Copyin(buf.Get(), offset, size);
}
void BufferCUDADev::Copyin(const BufferCUDAHost &buf,
const IndexArray &offset,
const IndexArray &size) {
PSAssert(offset == 0);
EnsureCapacity(offset+size);
if (strm_) {
CUDA_SAFE_CALL(hipMemcpyAsync(Get(), buf.Get(), GetLinearSize(size),
hipMemcpyHostToDevice, strm_));
CUDA_SAFE_CALL(hipStreamSynchronize(strm_));
} else {
CUDA_SAFE_CALL(hipMemcpy(Get(), buf.Get(), GetLinearSize(size),
hipMemcpyHostToDevice));
if ((size.accumulate(num_dims_) * elm_size_) <=
CUDA_MEMCPY_ASYNC_SIZE) {
CUDA_SAFE_THREAD_SYNC();
}
}
}
void BufferCUDADev::Copyout(void *buf, const IndexArray &offset,
const IndexArray &size) {
Copyout(*pinned_buf_, offset, size);
pinned_buf_->Copyout(buf, size);
}
void BufferCUDADev::Copyout(BufferHost &buf, const IndexArray &offset,
const IndexArray &size) {
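// Note: this forwards to Copyin(), so copying out into a BufferHost actually copies the host
// data onto the device -- it looks like a copy-paste slip carried over from the original source.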
Copyin(buf.Get(), offset, size);
}
void BufferCUDADev::Copyout(BufferCUDAHost &buf, const IndexArray &offset,
const IndexArray &size) {
PSAssert(offset == 0);
PSAssert(offset + size <= this->size());
buf.EnsureCapacity(num_dims_, elm_size_, size);
if (strm_) {
CUDA_SAFE_CALL(hipMemcpyAsync(buf.Get(), Get(), GetLinearSize(size),
hipMemcpyDeviceToHost, strm_));
CUDA_SAFE_CALL(hipStreamSynchronize(strm_));
} else {
CUDA_SAFE_CALL(hipMemcpy(buf.Get(), Get(), GetLinearSize(size),
hipMemcpyDeviceToHost));
if ((size.accumulate(num_dims_) * elm_size_) <=
CUDA_MEMCPY_ASYNC_SIZE) {
CUDA_SAFE_THREAD_SYNC();
}
}
}
void BufferCUDADev::MPIRecv(int src, MPI_Comm comm, const IndexArray &offset,
const IndexArray &size) {
// First, recv with the host pinned buffer (which also performs
// internal copying between MPI and CUDA buffers.
pinned_buf_->Buffer::MPIRecv(src, comm, size);
// Then use hipMemcpy to copy into the device memory
Copyin(*pinned_buf_, offset, size);
}
void BufferCUDADev::MPISend(int dst, MPI_Comm comm, const IndexArray &offset,
const IndexArray &size) {
Copyout(*pinned_buf_, offset, size);
pinned_buf_->Buffer::MPISend(dst, comm, size);
}
void *BufferCUDADev::GetChunk(const IndexArray &size) {
void *p = NULL;
if (size.accumulate(num_dims_) >0)
CUDA_SAFE_CALL(hipMalloc(&p, GetLinearSize(size)));
return p;
}
void BufferCUDADev::DeleteChunk(void *ptr) {
if (ptr) {
CUDA_SAFE_CALL(hipFree(ptr));
}
}
//
// BufferCUDADev3D
//
BufferCUDADev3D::BufferCUDADev3D(int num_dims, size_t elm_size)
: Buffer(num_dims, elm_size), strm_(0) {
pinned_buf_ = new BufferCUDAHost(num_dims, elm_size);
mapped_buf_ = new BufferCUDAHostMapped(num_dims, elm_size);
deleter_ = BufferCUDADev3D::DeleteChunk;
}
BufferCUDADev3D::~BufferCUDADev3D() {
delete pinned_buf_;
delete mapped_buf_;
}
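// hipMalloc3D returns a pitched allocation: pp_.pitch (bytes per row) may be padded beyond
// size[0] * elm_size_ for alignment, and every later 3-D copy and kernel uses that pitch.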
hipPitchedPtr BufferCUDADev3D::GetChunk3D(const IndexArray &size) {
// use hipMalloc3D
hipPitchedPtr pp;
if (size.accumulate(num_dims_)) {
hipExtent ext = make_hipExtent(size[0] * elm_size_,
size[1], size[2]);
CUDA_SAFE_CALL(hipMalloc3D(&pp, ext));
} else {
pp = make_hipPitchedPtr(NULL, 0, 0, 0);
}
return pp;
}
void BufferCUDADev3D::DeleteChunk(void *ptr) {
if (ptr) {
CUDA_SAFE_CALL(hipFree(ptr));
}
}
void BufferCUDADev3D::Allocate(int num_dims, size_t elm_size,
const IndexArray &size) {
Delete();
if (size.accumulate(num_dims)) {
num_dims_ = num_dims;
elm_size_ = elm_size;
pp_ = GetChunk3D(size);
buf_ = pp_.ptr;
LOG_DEBUG() << "Pitch: " << pp_.pitch << "\n";
}
size_ = size;
}
/*void BufferCUDADev3D::Allocate(const IndexArray &size) {
Allocate(num_dims_, elm_size_, size);
}*/
void BufferCUDADev3D::Copyin(const void *buf, const IndexArray &offset,
const IndexArray &size) {
pinned_buf_->Copyin(buf, size);
Copyin(*pinned_buf_, offset, size);
}
void BufferCUDADev3D::Copyin(const BufferHost &buf, const IndexArray &offset,
const IndexArray &size) {
Copyin(buf.Get(), offset, size);
}
void BufferCUDADev3D::Copyin(const BufferCUDAHost &buf,
const IndexArray &offset,
const IndexArray &size) {
EnsureCapacity(offset+size);
hipMemcpy3DParms parms = {0};
parms.srcPtr = make_hipPitchedPtr(
const_cast<void*>(buf.Get()), size[0] * elm_size_,
size[0], size[1]);
parms.dstPtr = pp_;
parms.extent = make_hipExtent(size[0] * elm_size_, size[1], size[2]);
parms.dstPos = make_hipPos(offset[0] * elm_size_, offset[1],
offset[2]);
parms.kind = hipMemcpyHostToDevice;
if (strm_) {
CUDA_SAFE_CALL(hipMemcpy3DAsync(&parms, strm_));
CUDA_SAFE_CALL(hipStreamSynchronize(strm_));
} else {
CUDA_SAFE_CALL(hipMemcpy3D(&parms));
if ((size.accumulate(num_dims_) * elm_size_) <=
CUDA_MEMCPY_ASYNC_SIZE) {
CUDA_SAFE_THREAD_SYNC();
}
}
}
void BufferCUDADev3D::Copyout(void *buf, const IndexArray &offset,
const IndexArray &size) {
Copyout(*pinned_buf_, offset, size);
pinned_buf_->Copyout(buf, size);
}
void BufferCUDADev3D::Copyout(BufferHost &buf, const IndexArray &offset,
const IndexArray &size) {
buf.EnsureCapacity(num_dims_, elm_size_, size);
Copyout(buf.Get(), offset, size);
}
// REMEMBER to call hipSetDeviceFlags(hipDeviceMapHost)
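// Gathers the (xextent, yextent, zextent) sub-box starting at (xpos, ypos, zpos) from the
// pitched source into a densely packed destination (in practice the mapped host buffer);
// the grid strides over z while each block strides over y and x.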
template <typename T>
__global__ void BufferCUDADev3D_copyout_kernel(
const T *src, size_t xpos, size_t ypos, size_t zpos,
size_t xextent, size_t yextent, size_t zextent,
size_t xdim, size_t ydim, T *dst) {
T *dst_ptr = dst;
for (size_t k = blockIdx.x; k < zextent; k += gridDim.x) {
//for (size_t k = blockIdx.x; k == blockIdx.x; k += gridDim.x) {
size_t offset_k = xdim * ydim * (k + zpos);
T *dst_ptr_k = dst_ptr + xextent * yextent * k;
for (size_t j = threadIdx.y; j < yextent; j += blockDim.y) {
//for (size_t j = threadIdx.y; j == threadIdx.y; j += blockDim.y) {
size_t offset_j = offset_k + xdim * (j + ypos);
T *dst_ptr_j = dst_ptr_k + xextent * j;
for (size_t i = threadIdx.x; i < xextent; i += blockDim.x) {
//for (size_t i = threadIdx.x; i == threadIdx.x; i += blockDim.x) {
size_t offset_i = offset_j + (i + xpos);
dst_ptr_j[i] = src[offset_i];
}
}
}
}
void BufferCUDADev3D::Copyout_Opt(BufferCUDAHostMapped &buf,
const IndexArray &offset,
const IndexArray &size) {
// TODO: this must be refined.
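// Heuristic block shape: when the x-extent is very small (e.g. a thin halo slab) spread the
// threads over y instead of x so the block is not mostly idle.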
dim3 bdim;
if (size[0] < 4) {
bdim.x = size[0];
bdim.y = 64;
} else {
bdim.x = 64;
bdim.y = size[1];
}
dim3 gdim(size[2]);
if (elm_size_ == 4) {
hipLaunchKernelGGL(( BufferCUDADev3D_copyout_kernel<float>), dim3(gdim), dim3(bdim), 0, strm_,
(float*)Get(), offset[0], offset[1], offset[2],
size[0], size[1], size[2],
pp_.pitch / elm_size_, pp_.ysize, (float*)buf.GetDevPointer());
} else if (elm_size_ == 8) {
hipLaunchKernelGGL(( BufferCUDADev3D_copyout_kernel<double>), dim3(gdim), dim3(bdim), 0, strm_,
(double*)Get(), offset[0], offset[1], offset[2],
size[0], size[1], size[2],
pp_.pitch / elm_size_, pp_.ysize, (double*)buf.GetDevPointer());
} else {
PSAssert(0);
}
CUDA_SAFE_CALL(hipStreamSynchronize(strm_));
}
void BufferCUDADev3D::Copyout(BufferCUDAHostMapped &buf,
const IndexArray &offset,
const IndexArray &size) {
buf.EnsureCapacity(num_dims_, elm_size_, size);
PSAssert(offset + size <= this->size());
// TODO: this must be refined.
if (size[0] < 4 || size[1] < 4) {
LOG_VERBOSE() << "Copyout to mapped host memory\n";
Copyout_Opt(buf, offset, size);
return;
}
hipMemcpy3DParms parms = {0};
parms.srcPtr = pp_;
parms.dstPtr = make_hipPitchedPtr(
buf.Get(), size[0] * elm_size_, size[0], size[1]);
parms.extent = make_hipExtent(size[0] * elm_size_, size[1], size[2]);
parms.srcPos = make_hipPos(offset[0] * elm_size_, offset[1],
offset[2]);
parms.kind = hipMemcpyDeviceToHost;
if (strm_) {
CUDA_SAFE_CALL(hipMemcpy3DAsync(&parms, strm_));
CUDA_SAFE_CALL(hipStreamSynchronize(strm_));
} else {
CUDA_SAFE_CALL(hipMemcpy3D(&parms));
if ((size.accumulate(num_dims_) * elm_size_) <=
CUDA_MEMCPY_ASYNC_SIZE) {
CUDA_SAFE_THREAD_SYNC();
}
}
}
void BufferCUDADev3D::Copyout(BufferCUDAHost &buf, const IndexArray &offset,
const IndexArray &size) {
buf.EnsureCapacity(num_dims_, elm_size_, size);
PSAssert(offset + size <= this->size());
hipMemcpy3DParms parms = {0};
parms.srcPtr = pp_;
parms.dstPtr = make_hipPitchedPtr(
buf.Get(), size[0] * elm_size_, size[0], size[1]);
parms.extent = make_hipExtent(size[0] * elm_size_, size[1], size[2]);
parms.srcPos = make_hipPos(offset[0] * elm_size_, offset[1],
offset[2]);
parms.kind = hipMemcpyDeviceToHost;
if (strm_) {
CUDA_SAFE_CALL(hipMemcpy3DAsync(&parms, strm_));
CUDA_SAFE_CALL(hipStreamSynchronize(strm_));
} else {
CUDA_SAFE_CALL(hipMemcpy3D(&parms));
if ((size.accumulate(num_dims_) * elm_size_) <=
CUDA_MEMCPY_ASYNC_SIZE) {
CUDA_SAFE_THREAD_SYNC();
}
}
}
void BufferCUDADev3D::MPIRecv(int src, MPI_Comm comm,
const IndexArray &offset,
const IndexArray &size) {
// First, recv with the host pinned buffer (which also performs
// internal copying between MPI and CUDA buffers.
pinned_buf_->Buffer::MPIRecv(src, comm, size);
// Then use hipMemcpy to copy into the device memory
Copyin(*pinned_buf_, offset, size);
}
void BufferCUDADev3D::MPISend(int dst, MPI_Comm comm,
const IndexArray &offset,
const IndexArray &size) {
Copyout(*pinned_buf_, offset, size);
pinned_buf_->Buffer::MPISend(dst, comm, size);
}
} // namespace runtime
} // namespace physis
| b789e9b9350ef889a8c3082034009d4e5e6b69fa.cu | // Copyright 2011, Tokyo Institute of Technology.
// All rights reserved.
//
// This file is distributed under the license described in
// LICENSE.txt.
//
// Author: Naoya Maruyama ([email protected])
#include "runtime/buffer_cuda.h"
#include <cuda_runtime.h>
#include <cutil.h>
#define CUDA_MEMCPY_ASYNC_SIZE (64 << 10) // 64KB
namespace physis {
namespace runtime {
BufferCUDAHost::BufferCUDAHost(size_t elm_size)
: Buffer(elm_size) {
mpi_buf_ = new BufferHost(elm_size);
deleter_ = BufferCUDAHost::DeleteChunk;
}
BufferCUDAHost::BufferCUDAHost(int num_dims, size_t elm_size)
: Buffer(num_dims, elm_size) {
mpi_buf_ = new BufferHost(num_dims, elm_size);
deleter_ = BufferCUDAHost::DeleteChunk;
}
BufferCUDAHost::~BufferCUDAHost() {
delete mpi_buf_;
}
void BufferCUDAHost::DeleteChunk(void *ptr) {
CUDA_SAFE_CALL(cudaFreeHost(ptr));
return;
}
void *BufferCUDAHost::GetChunk(const IndexArray &size) {
void *ptr = NULL;
if (size.accumulate(num_dims_) > 0) {
LOG_INFO() << "Trying to allocate host pinned memory of "
<< GetLinearSize(size) << " bytes.\n";
CUDA_SAFE_CALL(cudaMallocHost(&ptr, GetLinearSize(size)));
}
return ptr;
}
void BufferCUDAHost::Copyin(const void *buf, const IndexArray &offset,
const IndexArray &size) {
EnsureCapacity(offset+size);
// Offset access is not yet supported.
PSAssert(offset == 0);
memcpy(Get(), buf, GetLinearSize(size));
}
void BufferCUDAHost::Copyin(const BufferHost &buf, const IndexArray &offset,
const IndexArray &size) {
Copyin(buf.Get(), offset, size);
}
void BufferCUDAHost::Copyout(void *buf, const IndexArray &offset,
const IndexArray &s) {
PSAssert(offset + s <= size());
// Offset access is not yet supported.
PSAssert(offset == 0);
memcpy(buf, Get(), GetLinearSize(s));
}
void BufferCUDAHost::Copyout(BufferHost &buf,
const IndexArray &offset,
const IndexArray &size) {
buf.EnsureCapacity(num_dims_, elm_size_, size);
Copyout(buf.Get(), offset, size);
}
void BufferCUDAHost::MPIRecv(int src, MPI_Comm comm,
const IndexArray &offset,
const IndexArray &size) {
mpi_buf_->MPIRecv(src, comm, IndexArray(), size);
Copyin(*mpi_buf_, offset, size);
//mpi_buf_->Delete();
}
void BufferCUDAHost::MPISend(int dst, MPI_Comm comm,
const IndexArray &offset,
const IndexArray &size) {
Copyout(*mpi_buf_, offset, size);
mpi_buf_->MPISend(dst, comm, IndexArray(), size);
//mpi_buf_->Delete();
}
//
// BufferCUDAHostMapped
//
BufferCUDAHostMapped::BufferCUDAHostMapped(size_t elm_size)
: Buffer(elm_size), dev_ptr_(NULL) {
mpi_buf_ = new BufferHost(elm_size);
deleter_ = BufferCUDAHostMapped::DeleteChunk;
}
BufferCUDAHostMapped::BufferCUDAHostMapped(int num_dims, size_t elm_size)
: Buffer(num_dims, elm_size), dev_ptr_(NULL) {
mpi_buf_ = new BufferHost(num_dims, elm_size);
deleter_ = BufferCUDAHostMapped::DeleteChunk;
}
BufferCUDAHostMapped::~BufferCUDAHostMapped() {
delete mpi_buf_;
}
void BufferCUDAHostMapped::DeleteChunk(void *ptr) {
CUDA_SAFE_CALL(cudaFreeHost(ptr));
return;
}
void *BufferCUDAHostMapped::GetChunk(const IndexArray &size) {
void *ptr = NULL;
if (size.accumulate(num_dims_) > 0) {
LOG_INFO() << "Trying to allocate host pinned memory of "
<< GetLinearSize(size) << " bytes.\n";
CUDA_SAFE_CALL(cudaHostAlloc(&ptr, GetLinearSize(size),
cudaHostAllocMapped));
}
return ptr;
}
void BufferCUDAHostMapped::Allocate(int num_dims, size_t elm_size,
const IndexArray &size) {
Delete();
if (size.accumulate(num_dims)) {
num_dims_ = num_dims;
elm_size_ = elm_size;
buf_ = GetChunk(size);
CUDA_SAFE_CALL(cudaHostGetDevicePointer(&dev_ptr_, buf_, 0));
}
size_ = size;
}
void BufferCUDAHostMapped::Copyin(const void *buf, const IndexArray &offset,
const IndexArray &size) {
EnsureCapacity(offset+size);
// Offset access is not yet supported.
PSAssert(offset == 0);
memcpy(Get(), buf, GetLinearSize(size));
}
void BufferCUDAHostMapped::Copyin(const BufferHost &buf, const IndexArray &offset,
const IndexArray &size) {
Copyin(buf.Get(), offset, size);
}
void BufferCUDAHostMapped::Copyout(void *buf, const IndexArray &offset,
const IndexArray &s) {
PSAssert(offset + s <= size());
// Offset access is not yet supported.
PSAssert(offset == 0);
memcpy(buf, Get(), GetLinearSize(s));
}
void BufferCUDAHostMapped::Copyout(BufferHost &buf,
const IndexArray &offset,
const IndexArray &size) {
buf.EnsureCapacity(num_dims_, elm_size_, size);
Copyout(buf.Get(), offset, size);
}
void BufferCUDAHostMapped::MPIRecv(int src, MPI_Comm comm,
const IndexArray &offset,
const IndexArray &size) {
mpi_buf_->MPIRecv(src, comm, IndexArray(), size);
Copyin(*mpi_buf_, offset, size);
//mpi_buf_->Delete();
}
void BufferCUDAHostMapped::MPISend(int dst, MPI_Comm comm,
const IndexArray &offset,
const IndexArray &size) {
Copyout(*mpi_buf_, offset, size);
mpi_buf_->MPISend(dst, comm, IndexArray(), size);
//mpi_buf_->Delete();
}
//
// BufferCUDADev
//
BufferCUDADev::BufferCUDADev(size_t elm_size)
: Buffer(elm_size), strm_(0) {
pinned_buf_ = new BufferCUDAHost(elm_size);
deleter_ = BufferCUDADev::DeleteChunk;
}
BufferCUDADev::BufferCUDADev(int num_dims, size_t elm_size)
: Buffer(num_dims, elm_size), strm_(0) {
pinned_buf_ = new BufferCUDAHost(num_dims, elm_size);
deleter_ = BufferCUDADev::DeleteChunk;
}
BufferCUDADev::~BufferCUDADev() {
delete pinned_buf_;
}
void BufferCUDADev::Copyin(const void *buf, const IndexArray &offset,
const IndexArray &size) {
pinned_buf_->Copyin(buf, size);
Copyin(*pinned_buf_, offset, size);
}
void BufferCUDADev::Copyin(const BufferHost &buf, const IndexArray &offset,
const IndexArray &size) {
Copyin(buf.Get(), offset, size);
}
void BufferCUDADev::Copyin(const BufferCUDAHost &buf,
const IndexArray &offset,
const IndexArray &size) {
PSAssert(offset == 0);
EnsureCapacity(offset+size);
if (strm_) {
CUDA_SAFE_CALL(cudaMemcpyAsync(Get(), buf.Get(), GetLinearSize(size),
cudaMemcpyHostToDevice, strm_));
CUDA_SAFE_CALL(cudaStreamSynchronize(strm_));
} else {
CUDA_SAFE_CALL(cudaMemcpy(Get(), buf.Get(), GetLinearSize(size),
cudaMemcpyHostToDevice));
if ((size.accumulate(num_dims_) * elm_size_) <=
CUDA_MEMCPY_ASYNC_SIZE) {
CUDA_SAFE_THREAD_SYNC();
}
}
}
void BufferCUDADev::Copyout(void *buf, const IndexArray &offset,
const IndexArray &size) {
Copyout(*pinned_buf_, offset, size);
pinned_buf_->Copyout(buf, size);
}
void BufferCUDADev::Copyout(BufferHost &buf, const IndexArray &offset,
const IndexArray &size) {
Copyin(buf.Get(), offset, size);
}
void BufferCUDADev::Copyout(BufferCUDAHost &buf, const IndexArray &offset,
const IndexArray &size) {
PSAssert(offset == 0);
PSAssert(offset + size <= this->size());
buf.EnsureCapacity(num_dims_, elm_size_, size);
if (strm_) {
CUDA_SAFE_CALL(cudaMemcpyAsync(buf.Get(), Get(), GetLinearSize(size),
cudaMemcpyDeviceToHost, strm_));
CUDA_SAFE_CALL(cudaStreamSynchronize(strm_));
} else {
CUDA_SAFE_CALL(cudaMemcpy(buf.Get(), Get(), GetLinearSize(size),
cudaMemcpyDeviceToHost));
if ((size.accumulate(num_dims_) * elm_size_) <=
CUDA_MEMCPY_ASYNC_SIZE) {
CUDA_SAFE_THREAD_SYNC();
}
}
}
void BufferCUDADev::MPIRecv(int src, MPI_Comm comm, const IndexArray &offset,
const IndexArray &size) {
// First, recv with the host pinned buffer (which also performs
// internal copying between MPI and CUDA buffers.
pinned_buf_->Buffer::MPIRecv(src, comm, size);
// Then use cudaMemcpy to copy into the device memory
Copyin(*pinned_buf_, offset, size);
}
void BufferCUDADev::MPISend(int dst, MPI_Comm comm, const IndexArray &offset,
const IndexArray &size) {
Copyout(*pinned_buf_, offset, size);
pinned_buf_->Buffer::MPISend(dst, comm, size);
}
void *BufferCUDADev::GetChunk(const IndexArray &size) {
void *p = NULL;
if (size.accumulate(num_dims_) >0)
CUDA_SAFE_CALL(cudaMalloc(&p, GetLinearSize(size)));
return p;
}
void BufferCUDADev::DeleteChunk(void *ptr) {
if (ptr) {
CUDA_SAFE_CALL(cudaFree(ptr));
}
}
//
// BufferCUDADev3D
//
BufferCUDADev3D::BufferCUDADev3D(int num_dims, size_t elm_size)
: Buffer(num_dims, elm_size), strm_(0) {
pinned_buf_ = new BufferCUDAHost(num_dims, elm_size);
mapped_buf_ = new BufferCUDAHostMapped(num_dims, elm_size);
deleter_ = BufferCUDADev3D::DeleteChunk;
}
BufferCUDADev3D::~BufferCUDADev3D() {
delete pinned_buf_;
delete mapped_buf_;
}
cudaPitchedPtr BufferCUDADev3D::GetChunk3D(const IndexArray &size) {
// use cudaMalloc3D
cudaPitchedPtr pp;
if (size.accumulate(num_dims_)) {
cudaExtent ext = make_cudaExtent(size[0] * elm_size_,
size[1], size[2]);
CUDA_SAFE_CALL(cudaMalloc3D(&pp, ext));
} else {
pp = make_cudaPitchedPtr(NULL, 0, 0, 0);
}
return pp;
}
void BufferCUDADev3D::DeleteChunk(void *ptr) {
if (ptr) {
CUDA_SAFE_CALL(cudaFree(ptr));
}
}
void BufferCUDADev3D::Allocate(int num_dims, size_t elm_size,
const IndexArray &size) {
Delete();
if (size.accumulate(num_dims)) {
num_dims_ = num_dims;
elm_size_ = elm_size;
pp_ = GetChunk3D(size);
buf_ = pp_.ptr;
LOG_DEBUG() << "Pitch: " << pp_.pitch << "\n";
}
size_ = size;
}
/*void BufferCUDADev3D::Allocate(const IndexArray &size) {
Allocate(num_dims_, elm_size_, size);
}*/
void BufferCUDADev3D::Copyin(const void *buf, const IndexArray &offset,
const IndexArray &size) {
pinned_buf_->Copyin(buf, size);
Copyin(*pinned_buf_, offset, size);
}
void BufferCUDADev3D::Copyin(const BufferHost &buf, const IndexArray &offset,
const IndexArray &size) {
Copyin(buf.Get(), offset, size);
}
void BufferCUDADev3D::Copyin(const BufferCUDAHost &buf,
const IndexArray &offset,
const IndexArray &size) {
EnsureCapacity(offset+size);
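// cudaMemcpy3D parameters: the source is the packed pinned host buffer, the destination is
// the pitched device allocation; extents and positions along x are in bytes, y/z in elements.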
cudaMemcpy3DParms parms = {0};
parms.srcPtr = make_cudaPitchedPtr(
const_cast<void*>(buf.Get()), size[0] * elm_size_,
size[0], size[1]);
parms.dstPtr = pp_;
parms.extent = make_cudaExtent(size[0] * elm_size_, size[1], size[2]);
parms.dstPos = make_cudaPos(offset[0] * elm_size_, offset[1],
offset[2]);
parms.kind = cudaMemcpyHostToDevice;
if (strm_) {
CUDA_SAFE_CALL(cudaMemcpy3DAsync(&parms, strm_));
CUDA_SAFE_CALL(cudaStreamSynchronize(strm_));
} else {
CUDA_SAFE_CALL(cudaMemcpy3D(&parms));
if ((size.accumulate(num_dims_) * elm_size_) <=
CUDA_MEMCPY_ASYNC_SIZE) {
CUDA_SAFE_THREAD_SYNC();
}
}
}
void BufferCUDADev3D::Copyout(void *buf, const IndexArray &offset,
const IndexArray &size) {
Copyout(*pinned_buf_, offset, size);
pinned_buf_->Copyout(buf, size);
}
void BufferCUDADev3D::Copyout(BufferHost &buf, const IndexArray &offset,
const IndexArray &size) {
buf.EnsureCapacity(num_dims_, elm_size_, size);
Copyout(buf.Get(), offset, size);
}
// REMEMBER to call cudaSetDeviceFlags(cudaDeviceMapHost)
template <typename T>
__global__ void BufferCUDADev3D_copyout_kernel(
const T *src, size_t xpos, size_t ypos, size_t zpos,
size_t xextent, size_t yextent, size_t zextent,
size_t xdim, size_t ydim, T *dst) {
T *dst_ptr = dst;
for (size_t k = blockIdx.x; k < zextent; k += gridDim.x) {
//for (size_t k = blockIdx.x; k == blockIdx.x; k += gridDim.x) {
size_t offset_k = xdim * ydim * (k + zpos);
T *dst_ptr_k = dst_ptr + xextent * yextent * k;
for (size_t j = threadIdx.y; j < yextent; j += blockDim.y) {
//for (size_t j = threadIdx.y; j == threadIdx.y; j += blockDim.y) {
size_t offset_j = offset_k + xdim * (j + ypos);
T *dst_ptr_j = dst_ptr_k + xextent * j;
for (size_t i = threadIdx.x; i < xextent; i += blockDim.x) {
//for (size_t i = threadIdx.x; i == threadIdx.x; i += blockDim.x) {
size_t offset_i = offset_j + (i + xpos);
dst_ptr_j[i] = src[offset_i];
}
}
}
}
void BufferCUDADev3D::Copyout_Opt(BufferCUDAHostMapped &buf,
const IndexArray &offset,
const IndexArray &size) {
// TODO: this must be refined.
dim3 bdim;
if (size[0] < 4) {
bdim.x = size[0];
bdim.y = 64;
} else {
bdim.x = 64;
bdim.y = size[1];
}
dim3 gdim(size[2]);
if (elm_size_ == 4) {
BufferCUDADev3D_copyout_kernel<float><<<gdim, bdim, 0, strm_>>>(
(float*)Get(), offset[0], offset[1], offset[2],
size[0], size[1], size[2],
pp_.pitch / elm_size_, pp_.ysize, (float*)buf.GetDevPointer());
} else if (elm_size_ == 8) {
BufferCUDADev3D_copyout_kernel<double><<<gdim, bdim, 0, strm_>>>(
(double*)Get(), offset[0], offset[1], offset[2],
size[0], size[1], size[2],
pp_.pitch / elm_size_, pp_.ysize, (double*)buf.GetDevPointer());
} else {
PSAssert(0);
}
CUDA_SAFE_CALL(cudaStreamSynchronize(strm_));
}
void BufferCUDADev3D::Copyout(BufferCUDAHostMapped &buf,
const IndexArray &offset,
const IndexArray &size) {
buf.EnsureCapacity(num_dims_, elm_size_, size);
PSAssert(offset + size <= this->size());
// TODO: this must be refined.
if (size[0] < 4 || size[1] < 4) {
LOG_VERBOSE() << "Copyout to mapped host memory\n";
Copyout_Opt(buf, offset, size);
return;
}
cudaMemcpy3DParms parms = {0};
parms.srcPtr = pp_;
parms.dstPtr = make_cudaPitchedPtr(
buf.Get(), size[0] * elm_size_, size[0], size[1]);
parms.extent = make_cudaExtent(size[0] * elm_size_, size[1], size[2]);
parms.srcPos = make_cudaPos(offset[0] * elm_size_, offset[1],
offset[2]);
parms.kind = cudaMemcpyDeviceToHost;
if (strm_) {
CUDA_SAFE_CALL(cudaMemcpy3DAsync(&parms, strm_));
CUDA_SAFE_CALL(cudaStreamSynchronize(strm_));
} else {
CUDA_SAFE_CALL(cudaMemcpy3D(&parms));
if ((size.accumulate(num_dims_) * elm_size_) <=
CUDA_MEMCPY_ASYNC_SIZE) {
CUDA_SAFE_THREAD_SYNC();
}
}
}
void BufferCUDADev3D::Copyout(BufferCUDAHost &buf, const IndexArray &offset,
const IndexArray &size) {
buf.EnsureCapacity(num_dims_, elm_size_, size);
PSAssert(offset + size <= this->size());
cudaMemcpy3DParms parms = {0};
parms.srcPtr = pp_;
parms.dstPtr = make_cudaPitchedPtr(
buf.Get(), size[0] * elm_size_, size[0], size[1]);
parms.extent = make_cudaExtent(size[0] * elm_size_, size[1], size[2]);
parms.srcPos = make_cudaPos(offset[0] * elm_size_, offset[1],
offset[2]);
parms.kind = cudaMemcpyDeviceToHost;
if (strm_) {
CUDA_SAFE_CALL(cudaMemcpy3DAsync(&parms, strm_));
CUDA_SAFE_CALL(cudaStreamSynchronize(strm_));
} else {
CUDA_SAFE_CALL(cudaMemcpy3D(&parms));
if ((size.accumulate(num_dims_) * elm_size_) <=
CUDA_MEMCPY_ASYNC_SIZE) {
CUDA_SAFE_THREAD_SYNC();
}
}
}
void BufferCUDADev3D::MPIRecv(int src, MPI_Comm comm,
const IndexArray &offset,
const IndexArray &size) {
// First, recv with the host pinned buffer (which also performs
// internal copying between MPI and CUDA buffers.
pinned_buf_->Buffer::MPIRecv(src, comm, size);
// Then use cudaMemcpy to copy into the device memory
Copyin(*pinned_buf_, offset, size);
}
void BufferCUDADev3D::MPISend(int dst, MPI_Comm comm,
const IndexArray &offset,
const IndexArray &size) {
Copyout(*pinned_buf_, offset, size);
pinned_buf_->Buffer::MPISend(dst, comm, size);
}
} // namespace runtime
} // namespace physis
|
bfa1aa1e502c3f05d7329c1f8847a169fc51c3ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2021 by Contributors
*/
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <GPUTreeShap/gpu_treeshap.h>
#include <memory>
#include "xgboost/data.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
#include "xgboost/host_device_vector.h"
#include "predict_fn.h"
#include "../gbm/gbtree_model.h"
#include "../data/ellpack_page.cuh"
#include "../data/device_adapter.cuh"
#include "../common/common.h"
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
struct TreeView {
RegTree::CategoricalSplitMatrix cats;
common::Span<RegTree::Node const> d_tree;
XGBOOST_DEVICE
TreeView(size_t tree_begin, size_t tree_idx,
common::Span<const RegTree::Node> d_nodes,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories) {
auto begin = d_tree_segments[tree_idx - tree_begin];
auto n_nodes = d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin];
d_tree = d_nodes.subspan(begin, n_nodes);
auto tree_cat_ptrs = d_cat_node_segments.subspan(begin, n_nodes);
auto tree_split_types = d_tree_split_types.subspan(begin, n_nodes);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
cats.split_type = tree_split_types;
cats.categories = tree_categories;
cats.node_ptr = tree_cat_ptrs;
}
__device__ bool HasCategoricalSplit() const {
return !cats.categories.empty();
}
};
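// SparsePageView is a device-side view of one CSR batch. GetElement() looks a
// feature up inside a row: dense rows are indexed directly, otherwise a binary
// search over the row's entries is performed and NaN is returned when the
// feature is missing.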
struct SparsePageView {
common::Span<const Entry> d_data;
common::Span<const bst_row_t> d_row_ptr;
bst_feature_t num_features;
SparsePageView() = default;
XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
common::Span<const bst_row_t> row_ptr,
bst_feature_t num_features)
: d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {}
__device__ float GetElement(size_t ridx, size_t fidx) const {
// Binary search
auto begin_ptr = d_data.begin() + d_row_ptr[ridx];
auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1];
if (end_ptr - begin_ptr == this->NumCols()) {
// Bypass span check for dense data
return d_data.data()[d_row_ptr[ridx] + fidx].fvalue;
}
// Initialize the sentinel one past the end so the first bisection cannot
// spuriously match it.
common::Span<const Entry>::iterator previous_middle = end_ptr;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; }
XGBOOST_DEVICE size_t NumCols() const { return num_features; }
};
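// SparsePageLoader optionally stages one full row per thread into shared
// memory (initialized to NaN) so that repeated feature lookups during tree
// traversal stay cheap; otherwise it falls back to SparsePageView::GetElement.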
struct SparsePageLoader {
bool use_shared;
SparsePageView data;
float* smem;
size_t entry_start;
__device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start, float)
: use_shared(use_shared),
data(data),
entry_start(entry_start) {
extern __shared__ float _smem[];
smem = _smem;
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * data.num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = data.d_row_ptr[global_idx];
bst_uint elem_end = data.d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = data.d_data[elem_idx - entry_start];
smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * data.num_features + fidx];
} else {
return data.GetElement(ridx, fidx);
}
}
};
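// EllpackLoader reads quantized ELLPACK bins and maps them back to feature
// values: a -1 bin is treated as missing, categorical bins return the stored
// cut value directly, and numerical bins return the preceding cut (or the
// feature minimum for the first bin of that feature).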
struct EllpackLoader {
EllpackDeviceAccessor const& matrix;
XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool,
bst_feature_t, bst_row_t, size_t, float)
: matrix{m} {}
__device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
auto gidx = matrix.GetBinIndex(ridx, fidx);
if (gidx == -1) {
return nan("");
}
if (common::IsCat(matrix.feature_types, fidx)) {
return matrix.gidx_fvalue_map[gidx];
}
// The gradient index needs to be shifted by one as min values are not included in the
// cuts.
if (gidx == matrix.feature_segments[fidx]) {
return matrix.min_fvalue[fidx];
}
return matrix.gidx_fvalue_map[gidx - 1];
}
};
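// DeviceAdapterLoader reads rows straight from a device adapter batch (CuPy /
// cuDF). Values matching `missing` are rejected by IsValidFunctor and surface
// as NaN; like SparsePageLoader it can stage one row per thread in shared memory.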
template <typename Batch>
struct DeviceAdapterLoader {
Batch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
data::IsValidFunctor is_valid;
using BatchT = Batch;
XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows,
size_t entry_start, float missing) :
batch{batch},
columns{num_features},
use_shared{use_shared},
is_valid{missing} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
auto beg = global_idx * columns;
auto end = (global_idx + 1) * columns;
for (size_t i = beg; i < end; ++i) {
auto value = batch.GetElement(i).value;
if (is_valid(value)) {
smem[threadIdx.x * num_features + (i - beg)] = value;
}
}
}
}
__syncthreads();
}
XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
auto value = batch.GetElement(ridx * columns + fidx).value;
if (is_valid(value)) {
return value;
} else {
return nan("");
}
}
};
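// GetLeafIndex/GetLeafWeight walk a single tree for one row. The has_missing
// and has_categorical template flags let the compiler drop the corresponding
// branches from GetNextNode when the batch is known to be dense or the tree
// has no categorical splits.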
template <bool has_missing, bool has_categorical, typename Loader>
__device__ bst_node_t GetLeafIndex(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree.d_tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader->GetElement(ridx, n.SplitIndex());
bool is_missing = common::CheckNAN(fvalue);
nidx = GetNextNode<has_missing, has_categorical>(n, nidx, fvalue,
is_missing, tree.cats);
n = tree.d_tree[nidx];
}
return nidx;
}
template <bool has_missing, typename Loader>
__device__ float GetLeafWeight(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = -1;
if (tree.HasCategoricalSplit()) {
nidx = GetLeafIndex<has_missing, true>(ridx, tree, loader);
} else {
nidx = GetLeafIndex<has_missing, false>(ridx, tree, loader);
}
return tree.d_tree[nidx].LeafValue();
}
template <typename Loader, typename Data>
__global__ void
PredictLeafKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories,
size_t tree_begin, size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start, bool use_shared,
float missing) {
bst_row_t ridx = blockDim.x * blockIdx.x + threadIdx.x;
if (ridx >= num_rows) {
return;
}
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
for (size_t tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_node_t leaf = -1;
if (d_tree.HasCategoricalSplit()) {
leaf = GetLeafIndex<true, true>(ridx, d_tree, &loader);
} else {
leaf = GetLeafIndex<true, false>(ridx, d_tree, &loader);
}
d_out_predictions[ridx * (tree_end - tree_begin) + tree_idx] = leaf;
}
}
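// PredictKernel assigns one thread per row. With a single output group the
// leaf values of all trees are accumulated locally and added once; with
// multiple groups each tree's contribution is added to its group's slot at
// row * num_group + group.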
template <typename Loader, typename Data, bool has_missing = true>
__global__ void
PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<int const> d_tree_group,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories, size_t tree_begin,
size_t tree_end, size_t num_features, size_t num_rows,
size_t entry_start, bool use_shared, int num_group, float missing) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
float leaf = GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
sum += leaf;
}
d_out_predictions[global_idx] += sum;
} else {
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
}
}
}
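// DeviceModel flattens a GBTreeModel into contiguous device arrays: all nodes
// and stats laid out back to back with tree_segments giving per-tree offsets,
// plus the categorical-split bitfields and their per-tree / per-node segments.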
class DeviceModel {
public:
// Need to lazily construct the vectors because GPU id is only known at runtime
HostDeviceVector<RTreeNodeStat> stats;
HostDeviceVector<size_t> tree_segments;
HostDeviceVector<RegTree::Node> nodes;
HostDeviceVector<int> tree_group;
HostDeviceVector<FeatureType> split_types;
// Pointer to each tree, segmenting the node array.
HostDeviceVector<uint32_t> categories_tree_segments;
// Pointer to each node, segmenting categories array.
HostDeviceVector<RegTree::Segment> categories_node_segments;
HostDeviceVector<uint32_t> categories;
size_t tree_beg_; // NOLINT
size_t tree_end_; // NOLINT
int num_group;
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
dh::safe_cuda(hipSetDevice(gpu_id));
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
auto& h_tree_segments = tree_segments.HostVector();
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(),
gpu_id));
stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(),
RTreeNodeStat(), gpu_id));
auto d_nodes = nodes.DevicePointer();
auto d_stats = stats.DevicePointer();
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
auto& src_stats = model.trees.at(tree_idx)->GetStats();
dh::safe_cuda(hipMemcpyAsync(
d_nodes + h_tree_segments[tree_idx - tree_begin], src_nodes.data(),
sizeof(RegTree::Node) * src_nodes.size(), hipMemcpyDefault));
dh::safe_cuda(hipMemcpyAsync(
d_stats + h_tree_segments[tree_idx - tree_begin], src_stats.data(),
sizeof(RTreeNodeStat) * src_stats.size(), hipMemcpyDefault));
}
tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id));
auto& h_tree_group = tree_group.HostVector();
std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
// Initialize categorical splits.
split_types.SetDevice(gpu_id);
std::vector<FeatureType>& h_split_types = split_types.HostVector();
h_split_types.resize(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_st = model.trees.at(tree_idx)->GetSplitTypes();
std::copy(src_st.cbegin(), src_st.cend(),
h_split_types.begin() + h_tree_segments[tree_idx - tree_begin]);
}
categories = HostDeviceVector<uint32_t>({}, gpu_id);
categories_tree_segments = HostDeviceVector<uint32_t>(1, 0, gpu_id);
std::vector<uint32_t> &h_categories = categories.HostVector();
std::vector<uint32_t> &h_split_cat_segments = categories_tree_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_cats = model.trees.at(tree_idx)->GetSplitCategories();
size_t orig_size = h_categories.size();
h_categories.resize(orig_size + src_cats.size());
std::copy(src_cats.cbegin(), src_cats.cend(),
h_categories.begin() + orig_size);
h_split_cat_segments.push_back(h_categories.size());
}
categories_node_segments =
HostDeviceVector<RegTree::Segment>(h_tree_segments.back(), {}, gpu_id);
std::vector<RegTree::Segment> &h_categories_node_segments =
categories_node_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const &src_cats_ptr = model.trees.at(tree_idx)->GetSplitCategoriesPtr();
std::copy(src_cats_ptr.cbegin(), src_cats_ptr.cend(),
h_categories_node_segments.begin() +
h_tree_segments[tree_idx - tree_begin]);
}
this->tree_beg_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group = model.learner_model_param->num_output_group;
}
};
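// ShapSplitCondition describes the constraint a root-to-leaf path places on
// one feature for GPUTreeShap: either a [lower, upper) interval for numerical
// splits or a category bitset, plus whether missing values follow the path.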
struct ShapSplitCondition {
ShapSplitCondition() = default;
XGBOOST_DEVICE
ShapSplitCondition(float feature_lower_bound, float feature_upper_bound,
bool is_missing_branch, common::CatBitField cats)
: feature_lower_bound(feature_lower_bound),
feature_upper_bound(feature_upper_bound),
is_missing_branch(is_missing_branch), categories{std::move(cats)} {
assert(feature_lower_bound <= feature_upper_bound);
}
/*! Feature values >= lower and < upper flow down this path. */
float feature_lower_bound;
float feature_upper_bound;
/*! Feature values whose category bit is set flow down this path. */
common::CatBitField categories;
/*! Do missing values flow down this path? */
bool is_missing_branch;
// Does this instance flow down this path?
XGBOOST_DEVICE bool EvaluateSplit(float x) const {
// is nan
if (isnan(x)) {
return is_missing_branch;
}
if (categories.Size() != 0) {
auto cat = static_cast<uint32_t>(x);
return categories.Check(cat);
} else {
return x >= feature_lower_bound && x < feature_upper_bound;
}
}
// The &= operator on the bitfield works per CUDA thread; this helper loops
// over the entire bitfield.
XGBOOST_DEVICE static common::CatBitField Intersect(common::CatBitField l,
common::CatBitField r) {
if (l.Data() == r.Data()) {
return l;
}
if (l.Size() > r.Size()) {
thrust::swap(l, r);
}
for (size_t i = 0; i < r.Bits().size(); ++i) {
l.Bits()[i] &= r.Bits()[i];
}
return l;
}
// Combine two split conditions on the same feature
XGBOOST_DEVICE void Merge(ShapSplitCondition other) {
// Combine duplicate features
if (categories.Size() != 0 || other.categories.Size() != 0) {
categories = Intersect(categories, other.categories);
} else {
feature_lower_bound = max(feature_lower_bound, other.feature_lower_bound);
feature_upper_bound = min(feature_upper_bound, other.feature_upper_bound);
}
is_missing_branch = is_missing_branch && other.is_missing_branch;
}
};
struct PathInfo {
int64_t leaf_position; // -1 not a leaf
size_t length;
size_t tree_idx;
};
// Transform model into path element form for GPUTreeShap
void ExtractPaths(
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>> *paths,
DeviceModel *model, dh::device_vector<uint32_t> *path_categories,
int gpu_id) {
dh::safe_cuda(hipSetDevice(gpu_id));
auto& device_model = *model;
dh::caching_device_vector<PathInfo> info(device_model.nodes.Size());
dh::XGBCachingDeviceAllocator<PathInfo> alloc;
auto d_nodes = device_model.nodes.ConstDeviceSpan();
auto d_tree_segments = device_model.tree_segments.ConstDeviceSpan();
auto nodes_transform = dh::MakeTransformIterator<PathInfo>(
thrust::make_counting_iterator(0ull), [=] __device__(size_t idx) {
auto n = d_nodes[idx];
if (!n.IsLeaf() || n.IsDeleted()) {
return PathInfo{-1, 0, 0};
}
size_t tree_idx =
dh::SegmentId(d_tree_segments.begin(), d_tree_segments.end(), idx);
size_t tree_offset = d_tree_segments[tree_idx];
size_t path_length = 1;
while (!n.IsRoot()) {
n = d_nodes[n.Parent() + tree_offset];
path_length++;
}
return PathInfo{int64_t(idx), path_length, tree_idx};
});
auto end = thrust::copy_if(
thrust::hip::par(alloc), nodes_transform,
nodes_transform + d_nodes.size(), info.begin(),
[=] __device__(const PathInfo& e) { return e.leaf_position != -1; });
info.resize(end - info.begin());
auto length_iterator = dh::MakeTransformIterator<size_t>(
info.begin(),
[=] __device__(const PathInfo& info) { return info.length; });
dh::caching_device_vector<size_t> path_segments(info.size() + 1);
thrust::exclusive_scan(thrust::hip::par(alloc), length_iterator,
length_iterator + info.size() + 1,
path_segments.begin());
paths->resize(path_segments.back());
auto d_paths = dh::ToSpan(*paths);
auto d_info = info.data().get();
auto d_stats = device_model.stats.ConstDeviceSpan();
auto d_tree_group = device_model.tree_group.ConstDeviceSpan();
auto d_path_segments = path_segments.data().get();
auto d_split_types = device_model.split_types.ConstDeviceSpan();
auto d_cat_segments = device_model.categories_tree_segments.ConstDeviceSpan();
auto d_cat_node_segments = device_model.categories_node_segments.ConstDeviceSpan();
size_t max_cat = 0;
if (thrust::any_of(dh::tbegin(d_split_types), dh::tend(d_split_types),
common::IsCatOp{})) {
dh::PinnedMemory pinned;
auto h_max_cat = pinned.GetSpan<RegTree::Segment>(1);
auto max_elem_it = dh::MakeTransformIterator<size_t>(
dh::tbegin(d_cat_node_segments),
[] __device__(RegTree::Segment seg) { return seg.size; });
size_t max_cat_it =
thrust::max_element(thrust::device, max_elem_it,
max_elem_it + d_cat_node_segments.size()) -
max_elem_it;
dh::safe_cuda(hipMemcpy(h_max_cat.data(),
d_cat_node_segments.data() + max_cat_it,
h_max_cat.size_bytes(), hipMemcpyDeviceToHost));
max_cat = h_max_cat[0].size;
CHECK_GE(max_cat, 1);
path_categories->resize(max_cat * paths->size());
}
auto d_model_categories = device_model.categories.DeviceSpan();
common::Span<uint32_t> d_path_categories = dh::ToSpan(*path_categories);
dh::LaunchN(info.size(), [=] __device__(size_t idx) {
auto path_info = d_info[idx];
size_t tree_offset = d_tree_segments[path_info.tree_idx];
TreeView tree{0, path_info.tree_idx, d_nodes,
d_tree_segments, d_split_types, d_cat_segments,
d_cat_node_segments, d_model_categories};
int group = d_tree_group[path_info.tree_idx];
size_t child_idx = path_info.leaf_position;
auto child = d_nodes[child_idx];
float v = child.LeafValue();
const float inf = std::numeric_limits<float>::infinity();
size_t output_position = d_path_segments[idx + 1] - 1;
while (!child.IsRoot()) {
size_t parent_idx = tree_offset + child.Parent();
double child_cover = d_stats[child_idx].sum_hess;
double parent_cover = d_stats[parent_idx].sum_hess;
double zero_fraction = child_cover / parent_cover;
auto parent = tree.d_tree[child.Parent()];
bool is_left_path = (tree_offset + parent.LeftChild()) == child_idx;
bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) ||
(parent.DefaultLeft() && is_left_path);
float lower_bound = -inf;
float upper_bound = inf;
common::CatBitField bits;
if (common::IsCat(tree.cats.split_type, child.Parent())) {
auto path_cats = d_path_categories.subspan(max_cat * output_position, max_cat);
size_t size = tree.cats.node_ptr[child.Parent()].size;
auto node_cats = tree.cats.categories.subspan(tree.cats.node_ptr[child.Parent()].beg, size);
SPAN_CHECK(path_cats.size() >= node_cats.size());
for (size_t i = 0; i < node_cats.size(); ++i) {
path_cats[i] = is_left_path ? ~node_cats[i] : node_cats[i];
}
bits = common::CatBitField{path_cats};
} else {
lower_bound = is_left_path ? -inf : parent.SplitCond();
upper_bound = is_left_path ? parent.SplitCond() : inf;
}
d_paths[output_position--] =
gpu_treeshap::PathElement<ShapSplitCondition>{
idx, parent.SplitIndex(),
group, ShapSplitCondition{lower_bound, upper_bound, is_missing_path, bits},
zero_fraction, v};
child_idx = parent_idx;
child = parent;
}
// Root node has feature -1
d_paths[output_position] = {idx, -1, group, ShapSplitCondition{-inf, inf, false, {}}, 1.0, v};
});
}
namespace {
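// Shared memory is only used when one row of `cols` floats per thread fits in
// the device limit; otherwise 0 is returned and the loaders fall back to
// global memory reads.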
template <size_t kBlockThreads>
size_t SharedMemoryBytes(size_t cols, size_t max_shared_memory_bytes) {
// max_shared_memory_bytes must never be 0 here.
CHECK_GT(max_shared_memory_bytes, 0);
size_t shared_memory_bytes =
static_cast<size_t>(sizeof(float) * cols * kBlockThreads);
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
}
return shared_memory_bytes;
}
} // anonymous namespace
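// GPUPredictor implements xgboost::Predictor entirely on the device. It
// dispatches between SparsePage and EllpackPage batches (plus CuPy/cuDF
// adapters for in-place prediction) and also provides leaf prediction and
// GPUTreeShap-based (interaction) contributions.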
class GPUPredictor : public xgboost::Predictor {
private:
void PredictInternal(const SparsePage& batch,
DeviceModel const& model,
size_t num_features,
HostDeviceVector<bst_float>* predictions,
size_t batch_offset, bool is_dense) const {
batch.offset.SetDevice(generic_param_->gpu_id);
batch.data.SetDevice(generic_param_->gpu_id);
const uint32_t BLOCK_THREADS = 128;
size_t num_rows = batch.Size();
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
auto max_shared_memory_bytes = ConfigureDevice(generic_param_->gpu_id);
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(num_features, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
num_features);
auto const kernel = [&](auto predict_fn) {
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
predict_fn, data, model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(),
model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
num_features, num_rows, entry_start, use_shared, model.num_group,
nan(""));
};
if (is_dense) {
kernel(PredictKernel<SparsePageLoader, SparsePageView, false>);
} else {
kernel(PredictKernel<SparsePageLoader, SparsePageView, true>);
}
}
void PredictInternal(EllpackDeviceAccessor const& batch,
DeviceModel const& model,
HostDeviceVector<bst_float>* out_preds,
size_t batch_offset) const {
const uint32_t BLOCK_THREADS = 256;
size_t num_rows = batch.n_rows;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
DeviceModel d_model;
bool use_shared = false;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch,
model.nodes.ConstDeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
batch.NumFeatures(), num_rows, entry_start, use_shared,
model.num_group, nan(""));
}
void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) const {
if (tree_end - tree_begin == 0) {
return;
}
out_preds->SetDevice(generic_param_->gpu_id);
auto const& info = dmat->Info();
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, generic_param_->gpu_id);
if (dmat->PageExists<SparsePage>()) {
size_t batch_offset = 0;
for (auto &batch : dmat->GetBatches<SparsePage>()) {
this->PredictInternal(batch, d_model, model.learner_model_param->num_feature,
out_preds, batch_offset, dmat->IsDense());
batch_offset += batch.Size() * model.learner_model_param->num_output_group;
}
} else {
size_t batch_offset = 0;
for (auto const& page : dmat->GetBatches<EllpackPage>()) {
dmat->Info().feature_types.SetDevice(generic_param_->gpu_id);
auto feature_types = dmat->Info().feature_types.ConstDeviceSpan();
this->PredictInternal(
page.Impl()->GetDeviceAccessor(generic_param_->gpu_id, feature_types),
d_model,
out_preds,
batch_offset);
batch_offset += page.Impl()->n_rows;
}
}
}
public:
explicit GPUPredictor(GenericParameter const* generic_param) :
Predictor::Predictor{generic_param} {}
~GPUPredictor() override {
if (generic_param_->gpu_id >= 0 && generic_param_->gpu_id < common::AllVisibleGPUs()) {
dh::safe_cuda(hipSetDevice(generic_param_->gpu_id));
}
}
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
const gbm::GBTreeModel& model, uint32_t tree_begin,
uint32_t tree_end = 0) const override {
int device = generic_param_->gpu_id;
CHECK_GE(device, 0) << "Set `gpu_id` to a non-negative value for processing GPU data.";
auto* out_preds = &predts->predictions;
if (tree_end == 0) {
tree_end = model.trees.size();
}
this->DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
template <typename Adapter, typename Loader>
void DispatchedInplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel &model, float missing,
PredictionCacheEntry *out_preds,
uint32_t tree_begin, uint32_t tree_end) const {
uint32_t const output_groups = model.learner_model_param->num_output_group;
auto m = dmlc::get<std::shared_ptr<Adapter>>(x);
CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature)
<< "Number of columns in data must equal to trained model.";
CHECK_EQ(dh::CurrentDevice(), m->DeviceIdx())
<< "XGBoost is running on device: " << this->generic_param_->gpu_id << ", "
<< "but data is on: " << m->DeviceIdx();
if (p_m) {
p_m->Info().num_row_ = m->NumRows();
this->InitOutPredictions(p_m->Info(), &(out_preds->predictions), model);
} else {
MetaInfo info;
info.num_row_ = m->NumRows();
this->InitOutPredictions(info, &(out_preds->predictions), model);
}
out_preds->predictions.SetDevice(m->DeviceIdx());
const uint32_t BLOCK_THREADS = 128;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(m->NumRows(), BLOCK_THREADS));
auto max_shared_memory_bytes = dh::MaxSharedMemory(m->DeviceIdx());
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(m->NumColumns(), max_shared_memory_bytes);
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, m->DeviceIdx());
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<Loader, typename Loader::BatchT>, m->Value(),
d_model.nodes.ConstDeviceSpan(), out_preds->predictions.DeviceSpan(),
d_model.tree_segments.ConstDeviceSpan(), d_model.tree_group.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(), tree_begin, tree_end, m->NumColumns(),
m->NumRows(), entry_start, use_shared, output_groups, missing);
}
bool InplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel &model, float missing,
PredictionCacheEntry *out_preds, uint32_t tree_begin,
unsigned tree_end) const override {
if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) {
this->DispatchedInplacePredict<
data::CupyAdapter, DeviceAdapterLoader<data::CupyAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) {
this->DispatchedInplacePredict<
data::CudfAdapter, DeviceAdapterLoader<data::CudfAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else {
return false;
}
return true;
}
void PredictContribution(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate, int,
unsigned) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(hipSetDevice(generic_param_->gpu_id));
out_contribs->SetDevice(generic_param_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, generic_param_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, generic_param_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShap<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(generic_param_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
dh::LaunchN(
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
phis[(idx + 1) * contributions_columns - 1] +=
margin.empty() ? base_score : margin[idx];
});
}
void PredictInteractionContributions(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(hipSetDevice(generic_param_->gpu_id));
out_contribs->SetDevice(generic_param_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, generic_param_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, generic_param_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShapInteractions<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(generic_param_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
size_t n_features = model.learner_model_param->num_feature;
dh::LaunchN(
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
size_t group = idx % ngroup;
size_t row_idx = idx / ngroup;
phis[gpu_treeshap::IndexPhiInteractions(
row_idx, ngroup, group, n_features, n_features, n_features)] +=
margin.empty() ? base_score : margin[idx];
});
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const override {
size_t n_classes = model.learner_model_param->num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->SetDevice(generic_param_->gpu_id);
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(base_margin.Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.learner_model_param->base_score);
}
}
void PredictInstance(const SparsePage::Inst&,
std::vector<bst_float>*,
const gbm::GBTreeModel&, unsigned) const override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictLeaf(DMatrix *p_fmat, HostDeviceVector<bst_float> *predictions,
const gbm::GBTreeModel &model,
unsigned tree_end) const override {
dh::safe_cuda(hipSetDevice(generic_param_->gpu_id));
auto max_shared_memory_bytes = ConfigureDevice(generic_param_->gpu_id);
const MetaInfo& info = p_fmat->Info();
constexpr uint32_t kBlockThreads = 128;
size_t shared_memory_bytes = SharedMemoryBytes<kBlockThreads>(
info.num_col_, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
bst_feature_t num_features = info.num_col_;
bst_row_t num_rows = info.num_row_;
size_t entry_start = 0;
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
predictions->SetDevice(generic_param_->gpu_id);
predictions->Resize(num_rows * tree_end);
DeviceModel d_model;
d_model.Init(model, 0, tree_end, this->generic_param_->gpu_id);
if (p_fmat->PageExists<SparsePage>()) {
for (auto const& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
bst_row_t batch_offset = 0;
SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<SparsePageLoader, SparsePageView>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
} else {
for (auto const& batch : p_fmat->GetBatches<EllpackPage>()) {
bst_row_t batch_offset = 0;
EllpackDeviceAccessor data{batch.Impl()->GetDeviceAccessor(generic_param_->gpu_id)};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<EllpackLoader, EllpackDeviceAccessor>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
}
}
void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
Predictor::Configure(cfg);
}
private:
/*! \brief Reconfigure the device when GPU is changed. */
static size_t ConfigureDevice(int device) {
if (device >= 0) {
return dh::MaxSharedMemory(device);
}
return 0;
}
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](GenericParameter const* generic_param) {
return new GPUPredictor(generic_param);
});
} // namespace predictor
} // namespace xgboost
| bfa1aa1e502c3f05d7329c1f8847a169fc51c3ed.cu | /*!
* Copyright 2017-2021 by Contributors
*/
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <GPUTreeShap/gpu_treeshap.h>
#include <memory>
#include "xgboost/data.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
#include "xgboost/host_device_vector.h"
#include "predict_fn.h"
#include "../gbm/gbtree_model.h"
#include "../data/ellpack_page.cuh"
#include "../data/device_adapter.cuh"
#include "../common/common.h"
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
struct TreeView {
RegTree::CategoricalSplitMatrix cats;
common::Span<RegTree::Node const> d_tree;
XGBOOST_DEVICE
TreeView(size_t tree_begin, size_t tree_idx,
common::Span<const RegTree::Node> d_nodes,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories) {
auto begin = d_tree_segments[tree_idx - tree_begin];
auto n_nodes = d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin];
d_tree = d_nodes.subspan(begin, n_nodes);
auto tree_cat_ptrs = d_cat_node_segments.subspan(begin, n_nodes);
auto tree_split_types = d_tree_split_types.subspan(begin, n_nodes);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
cats.split_type = tree_split_types;
cats.categories = tree_categories;
cats.node_ptr = tree_cat_ptrs;
}
__device__ bool HasCategoricalSplit() const {
return !cats.categories.empty();
}
};
struct SparsePageView {
common::Span<const Entry> d_data;
common::Span<const bst_row_t> d_row_ptr;
bst_feature_t num_features;
SparsePageView() = default;
XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
common::Span<const bst_row_t> row_ptr,
bst_feature_t num_features)
: d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {}
__device__ float GetElement(size_t ridx, size_t fidx) const {
// Binary search
auto begin_ptr = d_data.begin() + d_row_ptr[ridx];
auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1];
if (end_ptr - begin_ptr == this->NumCols()) {
// Bypass span check for dense data
return d_data.data()[d_row_ptr[ridx] + fidx].fvalue;
}
// Initialize the sentinel one past the end so the first bisection cannot
// spuriously match it.
common::Span<const Entry>::iterator previous_middle = end_ptr;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; }
XGBOOST_DEVICE size_t NumCols() const { return num_features; }
};
struct SparsePageLoader {
bool use_shared;
SparsePageView data;
float* smem;
size_t entry_start;
__device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start, float)
: use_shared(use_shared),
data(data),
entry_start(entry_start) {
extern __shared__ float _smem[];
smem = _smem;
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * data.num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = data.d_row_ptr[global_idx];
bst_uint elem_end = data.d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = data.d_data[elem_idx - entry_start];
smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * data.num_features + fidx];
} else {
return data.GetElement(ridx, fidx);
}
}
};
struct EllpackLoader {
EllpackDeviceAccessor const& matrix;
XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool,
bst_feature_t, bst_row_t, size_t, float)
: matrix{m} {}
__device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
auto gidx = matrix.GetBinIndex(ridx, fidx);
if (gidx == -1) {
return nan("");
}
if (common::IsCat(matrix.feature_types, fidx)) {
return matrix.gidx_fvalue_map[gidx];
}
// The gradient index needs to be shifted by one as min values are not included in the
// cuts.
if (gidx == matrix.feature_segments[fidx]) {
return matrix.min_fvalue[fidx];
}
return matrix.gidx_fvalue_map[gidx - 1];
}
};
template <typename Batch>
struct DeviceAdapterLoader {
Batch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
data::IsValidFunctor is_valid;
using BatchT = Batch;
XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows,
size_t entry_start, float missing) :
batch{batch},
columns{num_features},
use_shared{use_shared},
is_valid{missing} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
auto beg = global_idx * columns;
auto end = (global_idx + 1) * columns;
for (size_t i = beg; i < end; ++i) {
auto value = batch.GetElement(i).value;
if (is_valid(value)) {
smem[threadIdx.x * num_features + (i - beg)] = value;
}
}
}
}
__syncthreads();
}
XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
auto value = batch.GetElement(ridx * columns + fidx).value;
if (is_valid(value)) {
return value;
} else {
return nan("");
}
}
};
template <bool has_missing, bool has_categorical, typename Loader>
__device__ bst_node_t GetLeafIndex(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree.d_tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader->GetElement(ridx, n.SplitIndex());
bool is_missing = common::CheckNAN(fvalue);
nidx = GetNextNode<has_missing, has_categorical>(n, nidx, fvalue,
is_missing, tree.cats);
n = tree.d_tree[nidx];
}
return nidx;
}
template <bool has_missing, typename Loader>
__device__ float GetLeafWeight(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = -1;
if (tree.HasCategoricalSplit()) {
nidx = GetLeafIndex<has_missing, true>(ridx, tree, loader);
} else {
nidx = GetLeafIndex<has_missing, false>(ridx, tree, loader);
}
return tree.d_tree[nidx].LeafValue();
}
template <typename Loader, typename Data>
__global__ void
PredictLeafKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories,
size_t tree_begin, size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start, bool use_shared,
float missing) {
bst_row_t ridx = blockDim.x * blockIdx.x + threadIdx.x;
if (ridx >= num_rows) {
return;
}
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
for (size_t tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_node_t leaf = -1;
if (d_tree.HasCategoricalSplit()) {
leaf = GetLeafIndex<true, true>(ridx, d_tree, &loader);
} else {
leaf = GetLeafIndex<true, false>(ridx, d_tree, &loader);
}
d_out_predictions[ridx * (tree_end - tree_begin) + tree_idx] = leaf;
}
}
template <typename Loader, typename Data, bool has_missing = true>
__global__ void
PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<int const> d_tree_group,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories, size_t tree_begin,
size_t tree_end, size_t num_features, size_t num_rows,
size_t entry_start, bool use_shared, int num_group, float missing) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
float leaf = GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
sum += leaf;
}
d_out_predictions[global_idx] += sum;
} else {
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
}
}
}
class DeviceModel {
public:
// Need to lazily construct the vectors because GPU id is only known at runtime
HostDeviceVector<RTreeNodeStat> stats;
HostDeviceVector<size_t> tree_segments;
HostDeviceVector<RegTree::Node> nodes;
HostDeviceVector<int> tree_group;
HostDeviceVector<FeatureType> split_types;
// Pointer to each tree, segmenting the node array.
HostDeviceVector<uint32_t> categories_tree_segments;
// Pointer to each node, segmenting categories array.
HostDeviceVector<RegTree::Segment> categories_node_segments;
HostDeviceVector<uint32_t> categories;
size_t tree_beg_; // NOLINT
size_t tree_end_; // NOLINT
int num_group;
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
dh::safe_cuda(cudaSetDevice(gpu_id));
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
auto& h_tree_segments = tree_segments.HostVector();
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(),
gpu_id));
stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(),
RTreeNodeStat(), gpu_id));
auto d_nodes = nodes.DevicePointer();
auto d_stats = stats.DevicePointer();
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
auto& src_stats = model.trees.at(tree_idx)->GetStats();
dh::safe_cuda(cudaMemcpyAsync(
d_nodes + h_tree_segments[tree_idx - tree_begin], src_nodes.data(),
sizeof(RegTree::Node) * src_nodes.size(), cudaMemcpyDefault));
dh::safe_cuda(cudaMemcpyAsync(
d_stats + h_tree_segments[tree_idx - tree_begin], src_stats.data(),
sizeof(RTreeNodeStat) * src_stats.size(), cudaMemcpyDefault));
}
tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id));
auto& h_tree_group = tree_group.HostVector();
std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
// Initialize categorical splits.
split_types.SetDevice(gpu_id);
std::vector<FeatureType>& h_split_types = split_types.HostVector();
h_split_types.resize(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_st = model.trees.at(tree_idx)->GetSplitTypes();
std::copy(src_st.cbegin(), src_st.cend(),
h_split_types.begin() + h_tree_segments[tree_idx - tree_begin]);
}
categories = HostDeviceVector<uint32_t>({}, gpu_id);
categories_tree_segments = HostDeviceVector<uint32_t>(1, 0, gpu_id);
std::vector<uint32_t> &h_categories = categories.HostVector();
std::vector<uint32_t> &h_split_cat_segments = categories_tree_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_cats = model.trees.at(tree_idx)->GetSplitCategories();
size_t orig_size = h_categories.size();
h_categories.resize(orig_size + src_cats.size());
std::copy(src_cats.cbegin(), src_cats.cend(),
h_categories.begin() + orig_size);
h_split_cat_segments.push_back(h_categories.size());
}
categories_node_segments =
HostDeviceVector<RegTree::Segment>(h_tree_segments.back(), {}, gpu_id);
std::vector<RegTree::Segment> &h_categories_node_segments =
categories_node_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const &src_cats_ptr = model.trees.at(tree_idx)->GetSplitCategoriesPtr();
std::copy(src_cats_ptr.cbegin(), src_cats_ptr.cend(),
h_categories_node_segments.begin() +
h_tree_segments[tree_idx - tree_begin]);
}
this->tree_beg_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group = model.learner_model_param->num_output_group;
}
};
struct ShapSplitCondition {
ShapSplitCondition() = default;
XGBOOST_DEVICE
ShapSplitCondition(float feature_lower_bound, float feature_upper_bound,
bool is_missing_branch, common::CatBitField cats)
: feature_lower_bound(feature_lower_bound),
feature_upper_bound(feature_upper_bound),
is_missing_branch(is_missing_branch), categories{std::move(cats)} {
assert(feature_lower_bound <= feature_upper_bound);
}
/*! Feature values >= lower and < upper flow down this path. */
float feature_lower_bound;
float feature_upper_bound;
/*! Feature values whose category bit is set flow down this path. */
common::CatBitField categories;
/*! Do missing values flow down this path? */
bool is_missing_branch;
// Does this instance flow down this path?
XGBOOST_DEVICE bool EvaluateSplit(float x) const {
// is nan
if (isnan(x)) {
return is_missing_branch;
}
if (categories.Size() != 0) {
auto cat = static_cast<uint32_t>(x);
return categories.Check(cat);
} else {
return x >= feature_lower_bound && x < feature_upper_bound;
}
}
// The &= operator on the bitfield works per CUDA thread; this helper loops
// over the entire bitfield.
XGBOOST_DEVICE static common::CatBitField Intersect(common::CatBitField l,
common::CatBitField r) {
if (l.Data() == r.Data()) {
return l;
}
if (l.Size() > r.Size()) {
thrust::swap(l, r);
}
for (size_t i = 0; i < r.Bits().size(); ++i) {
l.Bits()[i] &= r.Bits()[i];
}
return l;
}
// Combine two split conditions on the same feature
XGBOOST_DEVICE void Merge(ShapSplitCondition other) {
// Combine duplicate features
if (categories.Size() != 0 || other.categories.Size() != 0) {
categories = Intersect(categories, other.categories);
} else {
feature_lower_bound = max(feature_lower_bound, other.feature_lower_bound);
feature_upper_bound = min(feature_upper_bound, other.feature_upper_bound);
}
is_missing_branch = is_missing_branch && other.is_missing_branch;
}
};
struct PathInfo {
int64_t leaf_position; // -1 not a leaf
size_t length;
size_t tree_idx;
};
// Transform model into path element form for GPUTreeShap
void ExtractPaths(
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>> *paths,
DeviceModel *model, dh::device_vector<uint32_t> *path_categories,
int gpu_id) {
dh::safe_cuda(cudaSetDevice(gpu_id));
auto& device_model = *model;
dh::caching_device_vector<PathInfo> info(device_model.nodes.Size());
dh::XGBCachingDeviceAllocator<PathInfo> alloc;
auto d_nodes = device_model.nodes.ConstDeviceSpan();
auto d_tree_segments = device_model.tree_segments.ConstDeviceSpan();
auto nodes_transform = dh::MakeTransformIterator<PathInfo>(
thrust::make_counting_iterator(0ull), [=] __device__(size_t idx) {
auto n = d_nodes[idx];
if (!n.IsLeaf() || n.IsDeleted()) {
return PathInfo{-1, 0, 0};
}
size_t tree_idx =
dh::SegmentId(d_tree_segments.begin(), d_tree_segments.end(), idx);
size_t tree_offset = d_tree_segments[tree_idx];
size_t path_length = 1;
while (!n.IsRoot()) {
n = d_nodes[n.Parent() + tree_offset];
path_length++;
}
return PathInfo{int64_t(idx), path_length, tree_idx};
});
auto end = thrust::copy_if(
thrust::cuda::par(alloc), nodes_transform,
nodes_transform + d_nodes.size(), info.begin(),
[=] __device__(const PathInfo& e) { return e.leaf_position != -1; });
info.resize(end - info.begin());
auto length_iterator = dh::MakeTransformIterator<size_t>(
info.begin(),
[=] __device__(const PathInfo& info) { return info.length; });
dh::caching_device_vector<size_t> path_segments(info.size() + 1);
thrust::exclusive_scan(thrust::cuda::par(alloc), length_iterator,
length_iterator + info.size() + 1,
path_segments.begin());
paths->resize(path_segments.back());
auto d_paths = dh::ToSpan(*paths);
auto d_info = info.data().get();
auto d_stats = device_model.stats.ConstDeviceSpan();
auto d_tree_group = device_model.tree_group.ConstDeviceSpan();
auto d_path_segments = path_segments.data().get();
auto d_split_types = device_model.split_types.ConstDeviceSpan();
auto d_cat_segments = device_model.categories_tree_segments.ConstDeviceSpan();
auto d_cat_node_segments = device_model.categories_node_segments.ConstDeviceSpan();
size_t max_cat = 0;
if (thrust::any_of(dh::tbegin(d_split_types), dh::tend(d_split_types),
common::IsCatOp{})) {
dh::PinnedMemory pinned;
auto h_max_cat = pinned.GetSpan<RegTree::Segment>(1);
auto max_elem_it = dh::MakeTransformIterator<size_t>(
dh::tbegin(d_cat_node_segments),
[] __device__(RegTree::Segment seg) { return seg.size; });
size_t max_cat_it =
thrust::max_element(thrust::device, max_elem_it,
max_elem_it + d_cat_node_segments.size()) -
max_elem_it;
dh::safe_cuda(cudaMemcpy(h_max_cat.data(),
d_cat_node_segments.data() + max_cat_it,
h_max_cat.size_bytes(), cudaMemcpyDeviceToHost));
max_cat = h_max_cat[0].size;
CHECK_GE(max_cat, 1);
path_categories->resize(max_cat * paths->size());
}
auto d_model_categories = device_model.categories.DeviceSpan();
common::Span<uint32_t> d_path_categories = dh::ToSpan(*path_categories);
dh::LaunchN(info.size(), [=] __device__(size_t idx) {
auto path_info = d_info[idx];
size_t tree_offset = d_tree_segments[path_info.tree_idx];
TreeView tree{0, path_info.tree_idx, d_nodes,
d_tree_segments, d_split_types, d_cat_segments,
d_cat_node_segments, d_model_categories};
int group = d_tree_group[path_info.tree_idx];
size_t child_idx = path_info.leaf_position;
auto child = d_nodes[child_idx];
float v = child.LeafValue();
const float inf = std::numeric_limits<float>::infinity();
size_t output_position = d_path_segments[idx + 1] - 1;
while (!child.IsRoot()) {
size_t parent_idx = tree_offset + child.Parent();
double child_cover = d_stats[child_idx].sum_hess;
double parent_cover = d_stats[parent_idx].sum_hess;
double zero_fraction = child_cover / parent_cover;
auto parent = tree.d_tree[child.Parent()];
bool is_left_path = (tree_offset + parent.LeftChild()) == child_idx;
bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) ||
(parent.DefaultLeft() && is_left_path);
float lower_bound = -inf;
float upper_bound = inf;
common::CatBitField bits;
if (common::IsCat(tree.cats.split_type, child.Parent())) {
auto path_cats = d_path_categories.subspan(max_cat * output_position, max_cat);
size_t size = tree.cats.node_ptr[child.Parent()].size;
auto node_cats = tree.cats.categories.subspan(tree.cats.node_ptr[child.Parent()].beg, size);
SPAN_CHECK(path_cats.size() >= node_cats.size());
for (size_t i = 0; i < node_cats.size(); ++i) {
path_cats[i] = is_left_path ? ~node_cats[i] : node_cats[i];
}
bits = common::CatBitField{path_cats};
} else {
lower_bound = is_left_path ? -inf : parent.SplitCond();
upper_bound = is_left_path ? parent.SplitCond() : inf;
}
d_paths[output_position--] =
gpu_treeshap::PathElement<ShapSplitCondition>{
idx, parent.SplitIndex(),
group, ShapSplitCondition{lower_bound, upper_bound, is_missing_path, bits},
zero_fraction, v};
child_idx = parent_idx;
child = parent;
}
// Root node has feature -1
d_paths[output_position] = {idx, -1, group, ShapSplitCondition{-inf, inf, false, {}}, 1.0, v};
});
}
namespace {
template <size_t kBlockThreads>
size_t SharedMemoryBytes(size_t cols, size_t max_shared_memory_bytes) {
// max_shared_memory_bytes must not be 0 here.
CHECK_GT(max_shared_memory_bytes, 0);
size_t shared_memory_bytes =
static_cast<size_t>(sizeof(float) * cols * kBlockThreads);
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
}
return shared_memory_bytes;
}
} // anonymous namespace
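// Sizing sketch (illustrative numbers, not from the original source): SharedMemoryBytes
// requests one cached row per thread, i.e. kBlockThreads * cols * sizeof(float) bytes.
// With kBlockThreads = 128 and 256 features that is 128 * 256 * 4 B = 128 KiB, which
// exceeds the per-block limit reported by dh::MaxSharedMemory on typical devices, so the
// function returns 0 and the loaders fall back to reading features from global memory.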
class GPUPredictor : public xgboost::Predictor {
private:
void PredictInternal(const SparsePage& batch,
DeviceModel const& model,
size_t num_features,
HostDeviceVector<bst_float>* predictions,
size_t batch_offset, bool is_dense) const {
batch.offset.SetDevice(generic_param_->gpu_id);
batch.data.SetDevice(generic_param_->gpu_id);
const uint32_t BLOCK_THREADS = 128;
size_t num_rows = batch.Size();
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
auto max_shared_memory_bytes = ConfigureDevice(generic_param_->gpu_id);
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(num_features, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
num_features);
auto const kernel = [&](auto predict_fn) {
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
predict_fn, data, model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(),
model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
num_features, num_rows, entry_start, use_shared, model.num_group,
nan(""));
};
if (is_dense) {
kernel(PredictKernel<SparsePageLoader, SparsePageView, false>);
} else {
kernel(PredictKernel<SparsePageLoader, SparsePageView, true>);
}
}
void PredictInternal(EllpackDeviceAccessor const& batch,
DeviceModel const& model,
HostDeviceVector<bst_float>* out_preds,
size_t batch_offset) const {
const uint32_t BLOCK_THREADS = 256;
size_t num_rows = batch.n_rows;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
DeviceModel d_model;
bool use_shared = false;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch,
model.nodes.ConstDeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
batch.NumFeatures(), num_rows, entry_start, use_shared,
model.num_group, nan(""));
}
void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) const {
if (tree_end - tree_begin == 0) {
return;
}
out_preds->SetDevice(generic_param_->gpu_id);
auto const& info = dmat->Info();
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, generic_param_->gpu_id);
if (dmat->PageExists<SparsePage>()) {
size_t batch_offset = 0;
for (auto &batch : dmat->GetBatches<SparsePage>()) {
this->PredictInternal(batch, d_model, model.learner_model_param->num_feature,
out_preds, batch_offset, dmat->IsDense());
batch_offset += batch.Size() * model.learner_model_param->num_output_group;
}
} else {
size_t batch_offset = 0;
for (auto const& page : dmat->GetBatches<EllpackPage>()) {
dmat->Info().feature_types.SetDevice(generic_param_->gpu_id);
auto feature_types = dmat->Info().feature_types.ConstDeviceSpan();
this->PredictInternal(
page.Impl()->GetDeviceAccessor(generic_param_->gpu_id, feature_types),
d_model,
out_preds,
batch_offset);
batch_offset += page.Impl()->n_rows;
}
}
}
public:
explicit GPUPredictor(GenericParameter const* generic_param) :
Predictor::Predictor{generic_param} {}
~GPUPredictor() override {
if (generic_param_->gpu_id >= 0 && generic_param_->gpu_id < common::AllVisibleGPUs()) {
dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id));
}
}
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
const gbm::GBTreeModel& model, uint32_t tree_begin,
uint32_t tree_end = 0) const override {
int device = generic_param_->gpu_id;
CHECK_GE(device, 0) << "Set `gpu_id' to a non-negative value for processing GPU data.";
auto* out_preds = &predts->predictions;
if (tree_end == 0) {
tree_end = model.trees.size();
}
this->DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
template <typename Adapter, typename Loader>
void DispatchedInplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel &model, float missing,
PredictionCacheEntry *out_preds,
uint32_t tree_begin, uint32_t tree_end) const {
uint32_t const output_groups = model.learner_model_param->num_output_group;
auto m = dmlc::get<std::shared_ptr<Adapter>>(x);
CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature)
<< "Number of columns in data must equal the number of features in the trained model.";
CHECK_EQ(dh::CurrentDevice(), m->DeviceIdx())
<< "XGBoost is running on device: " << this->generic_param_->gpu_id << ", "
<< "but data is on: " << m->DeviceIdx();
if (p_m) {
p_m->Info().num_row_ = m->NumRows();
this->InitOutPredictions(p_m->Info(), &(out_preds->predictions), model);
} else {
MetaInfo info;
info.num_row_ = m->NumRows();
this->InitOutPredictions(info, &(out_preds->predictions), model);
}
out_preds->predictions.SetDevice(m->DeviceIdx());
const uint32_t BLOCK_THREADS = 128;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(m->NumRows(), BLOCK_THREADS));
auto max_shared_memory_bytes = dh::MaxSharedMemory(m->DeviceIdx());
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(m->NumColumns(), max_shared_memory_bytes);
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, m->DeviceIdx());
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<Loader, typename Loader::BatchT>, m->Value(),
d_model.nodes.ConstDeviceSpan(), out_preds->predictions.DeviceSpan(),
d_model.tree_segments.ConstDeviceSpan(), d_model.tree_group.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(), tree_begin, tree_end, m->NumColumns(),
m->NumRows(), entry_start, use_shared, output_groups, missing);
}
bool InplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel &model, float missing,
PredictionCacheEntry *out_preds, uint32_t tree_begin,
unsigned tree_end) const override {
if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) {
this->DispatchedInplacePredict<
data::CupyAdapter, DeviceAdapterLoader<data::CupyAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) {
this->DispatchedInplacePredict<
data::CudfAdapter, DeviceAdapterLoader<data::CudfAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else {
return false;
}
return true;
}
void PredictContribution(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate, int,
unsigned) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id));
out_contribs->SetDevice(generic_param_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, generic_param_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, generic_param_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShap<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(generic_param_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
dh::LaunchN(
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
phis[(idx + 1) * contributions_columns - 1] +=
margin.empty() ? base_score : margin[idx];
});
}
void PredictInteractionContributions(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id));
out_contribs->SetDevice(generic_param_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias)^2 times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, generic_param_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, generic_param_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShapInteractions<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
// Add the base margin term to the bias/bias entry of each row's interaction matrix
p_fmat->Info().base_margin_.SetDevice(generic_param_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
size_t n_features = model.learner_model_param->num_feature;
dh::LaunchN(
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
size_t group = idx % ngroup;
size_t row_idx = idx / ngroup;
phis[gpu_treeshap::IndexPhiInteractions(
row_idx, ngroup, group, n_features, n_features, n_features)] +=
margin.empty() ? base_score : margin[idx];
});
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const override {
size_t n_classes = model.learner_model_param->num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->SetDevice(generic_param_->gpu_id);
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(base_margin.Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.learner_model_param->base_score);
}
}
void PredictInstance(const SparsePage::Inst&,
std::vector<bst_float>*,
const gbm::GBTreeModel&, unsigned) const override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictLeaf(DMatrix *p_fmat, HostDeviceVector<bst_float> *predictions,
const gbm::GBTreeModel &model,
unsigned tree_end) const override {
dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id));
auto max_shared_memory_bytes = ConfigureDevice(generic_param_->gpu_id);
const MetaInfo& info = p_fmat->Info();
constexpr uint32_t kBlockThreads = 128;
size_t shared_memory_bytes = SharedMemoryBytes<kBlockThreads>(
info.num_col_, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
bst_feature_t num_features = info.num_col_;
bst_row_t num_rows = info.num_row_;
size_t entry_start = 0;
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
predictions->SetDevice(generic_param_->gpu_id);
predictions->Resize(num_rows * tree_end);
DeviceModel d_model;
d_model.Init(model, 0, tree_end, this->generic_param_->gpu_id);
if (p_fmat->PageExists<SparsePage>()) {
for (auto const& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
bst_row_t batch_offset = 0;
SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<SparsePageLoader, SparsePageView>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
} else {
for (auto const& batch : p_fmat->GetBatches<EllpackPage>()) {
bst_row_t batch_offset = 0;
EllpackDeviceAccessor data{batch.Impl()->GetDeviceAccessor(generic_param_->gpu_id)};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<EllpackLoader, EllpackDeviceAccessor>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
}
}
void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
Predictor::Configure(cfg);
}
private:
/*! \brief Reconfigure the device when GPU is changed. */
static size_t ConfigureDevice(int device) {
if (device >= 0) {
return dh::MaxSharedMemory(device);
}
return 0;
}
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](GenericParameter const* generic_param) {
return new GPUPredictor(generic_param);
});
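// The name "gpu_predictor" registered above is the value of XGBoost's `predictor`
// parameter that selects this implementation at runtime.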
} // namespace predictor
} // namespace xgboost
|
8e63e907520806d7a9fe3d4bc362f84737a63700.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "hoomd/VectorMath.h"
#include "hoomd/Index1D.h"
#include "hoomd/ParticleData.cuh"
// Maintainer: jglaser
/*! \file ForceComposite.cu
\brief Defines GPU kernel code for the composite particle integration on the GPU.
*/
//! Shared memory for body force and torque reduction, required allocation when the kernel is called
extern __shared__ char sum[];
extern __shared__ Scalar sum_virial[];
//! Calculates the body forces and torques by summing the constituent particle forces using a fixed sliding window size
/* Compute the force and torque sum on all bodies in the system from their constituent particles. n_bodies_per_block
bodies are handled within each block of execution on the GPU. The reason for this is to decrease
over-parallelism and use the GPU cores more effectively when bodies are smaller than the block size. Otherwise,
small bodies leave many threads in the block idle with nothing to do.
On start, the properties common to each body are read in, computed, and stored in shared memory for all the threads
working on that body to access. Then, the threads loop over all particles that are part of the body with
a sliding window. Each loop of the window computes the force and torque for block_size/n_bodies_per_block particles
in as many threads in parallel. These quantities are summed over enough windows to cover the whole body.
The block_size/n_bodies_per_block partial sums are stored in shared memory. Then n_bodies_per_block partial
reductions are performed in parallel using all threads to sum the total force and torque on each body. This looks
just like a normal reduction, except that it terminates at a certain level in the tree. To make the math
for the partial reduction work out, block_size must be a power of 2 as must n_bodies_per_block.
Performance testing on GF100 with many different bodies of different sizes ranging from 4-256 particles per body
has found that the optimum block size for most bodies is 64 threads. Performance increases for all body sizes
as n_bodies_per_block is increased, but only up to 8. n_bodies_per_block=16 slows performance significantly.
Based on these performance results, this kernel is hardcoded to handle only 1,2,4,8 n_bodies_per_block
with a power of 2 block size (hardcoded to 64 in the kernel launch).
*/
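// Worked example (illustrative): with a block size of 64 and n_bodies_per_block = 8,
// window_size = 64 / 8 = 8 and thread_mask = window_size - 1 = 0x7. Thread 13 of the
// block then works on body m = 13 / 8 = 1, uses window offset 13 & 0x7 = 5, and so
// accumulates constituent particles 5, 13, 21, ... of that body across the windows.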
__global__ void gpu_rigid_force_sliding_kernel(Scalar4* d_force,
Scalar4* d_torque,
const unsigned int *d_molecule_len,
const unsigned int *d_molecule_list,
Index2D molecule_indexer,
const Scalar4 *d_postype,
const Scalar4* d_orientation,
Index2D body_indexer,
Scalar3* d_body_pos,
Scalar4* d_body_orientation,
const unsigned int *d_body_len,
const unsigned int *d_body,
unsigned int *d_flag,
Scalar4* d_net_force,
Scalar4* d_net_torque,
unsigned int n_mol,
unsigned int N,
unsigned int window_size,
unsigned int thread_mask,
unsigned int n_bodies_per_block,
bool zero_force)
{
// determine which body (0 ... n_bodies_per_block-1) this thread is working on
// assign threads 0, 1, 2, ... to body 0, n, n+1, n+2, ... to body 1, and so on.
unsigned int m = threadIdx.x / (blockDim.x / n_bodies_per_block);
// body_force and body_torque are each shared memory arrays with one element per thread
Scalar4 *body_force = (Scalar4 *)sum;
Scalar3 *body_torque = (Scalar3 *) (body_force + blockDim.x);
// store body type, orientation and the index in molecule list in shared memory. Up to 16 bodies per block can
// be handled.
__shared__ unsigned int body_type[16];
__shared__ Scalar4 body_orientation[16];
__shared__ unsigned int mol_idx[16];
__shared__ unsigned int central_idx[16];
// each thread makes partial sums of force and torque of all the particles that this thread loops over
Scalar4 sum_force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0),Scalar(0.0));
Scalar3 sum_torque = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
// thread_mask is a bitmask that masks out the high bits in threadIdx.x.
// threadIdx.x & thread_mask is an index from 0 to block_size/n_bodies_per_block-1 and determines what offset
// this thread is to use when accessing the particles in the body
if ((threadIdx.x & thread_mask) == 0)
{
// thread 0 for this body reads in the body id and orientation and stores them in shared memory
int group_idx = blockIdx.x*n_bodies_per_block + m;
if (group_idx < n_mol)
{
mol_idx[m] = group_idx;
// first ptl is central ptl
central_idx[m] = d_molecule_list[molecule_indexer(group_idx, 0)];
body_type[m] = __scalar_as_int(d_postype[central_idx[m]].w);
body_orientation[m] = d_orientation[central_idx[m]];
}
else
{
mol_idx[m] = NO_BODY;
}
}
__syncthreads();
if (mol_idx[m] != NO_BODY && central_idx[m] < N)
{
// compute the number of windows that we need to loop over
unsigned int mol_len = d_molecule_len[mol_idx[m]];
unsigned int n_windows = mol_len / window_size + 1;
if (mol_len != d_body_len[body_type[m]] + 1)
{
// incomplete molecule
atomicMax(d_flag, d_body[central_idx[m]] + 1);
}
// slide the window throughout the block
for (unsigned int start = 0; start < n_windows; start++)
{
// determine the index within this body that this thread should handle
unsigned int k = start * window_size + (threadIdx.x & thread_mask);
// if that index falls inside the body, this thread is handling a real constituent particle
if (k < mol_len)
{
// determine the particle idx of the particle
unsigned int pidx = d_molecule_list[molecule_indexer(mol_idx[m],k)];
// if this particle is not the central particle
if (pidx != central_idx[m])
{
// calculate body force and torques
vec3<Scalar> particle_pos(d_body_pos[body_indexer(body_type[m], k-1)]);
Scalar4 fi = d_net_force[pidx];
//will likely need to rotate these components too
vec3<Scalar> ti(d_net_torque[pidx]);
// tally the force in the per thread counter
sum_force.x += fi.x;
sum_force.y += fi.y;
sum_force.z += fi.z;
// sum up energy
sum_force.w += fi.w;
// zero force only if we don't need it later
if (zero_force)
{
// zero net energy on constituent ptls to avoid double counting
// also zero net force for consistency
d_net_force[pidx] = make_scalar4(0.0,0.0,0.0,0.0);
}
vec3<Scalar> ri = rotate(quat<Scalar>(body_orientation[m]), particle_pos);
// torque = r x f
vec3<Scalar> del_torque(cross(ri, vec3<Scalar>(fi)));
// tally the torque in the per thread counter
sum_torque.x += ti.x+del_torque.x;
sum_torque.y += ti.y+del_torque.y;
sum_torque.z += ti.z+del_torque.z;
// zero net torque on constituent particles
d_net_torque[pidx] = make_scalar4(0.0,0.0,0.0,0.0);
}
}
}
}
__syncthreads();
// put the partial sums into shared memory
body_force[threadIdx.x] = sum_force;
body_torque[threadIdx.x] = sum_torque;
__syncthreads();
// perform a set of partial reductions. Each block_size/n_bodies_per_block threads performs a sum reduction
// just within its own group
unsigned int offset = window_size >> 1;
while (offset > 0)
{
if ((threadIdx.x & thread_mask) < offset)
{
body_force[threadIdx.x].x += body_force[threadIdx.x + offset].x;
body_force[threadIdx.x].y += body_force[threadIdx.x + offset].y;
body_force[threadIdx.x].z += body_force[threadIdx.x + offset].z;
body_force[threadIdx.x].w += body_force[threadIdx.x + offset].w;
body_torque[threadIdx.x].x += body_torque[threadIdx.x + offset].x;
body_torque[threadIdx.x].y += body_torque[threadIdx.x + offset].y;
body_torque[threadIdx.x].z += body_torque[threadIdx.x + offset].z;
}
offset >>= 1;
__syncthreads();
}
// thread 0 within this body writes out the total force and torque for the body
if ((threadIdx.x & thread_mask) == 0 && mol_idx[m] != NO_BODY)
{
d_force[central_idx[m]] = body_force[threadIdx.x];
d_torque[central_idx[m]] = make_scalar4(body_torque[threadIdx.x].x, body_torque[threadIdx.x].y, body_torque[threadIdx.x].z, 0.0f);
}
}
__global__ void gpu_rigid_virial_sliding_kernel(Scalar* d_virial,
const unsigned int *d_molecule_len,
const unsigned int *d_molecule_list,
Index2D molecule_indexer,
const Scalar4 *d_postype,
const Scalar4* d_orientation,
Index2D body_indexer,
Scalar3* d_body_pos,
Scalar4* d_body_orientation,
Scalar4* d_net_force,
Scalar* d_net_virial,
unsigned int n_mol,
unsigned int N,
unsigned int net_virial_pitch,
unsigned int virial_pitch,
unsigned int window_size,
unsigned int thread_mask,
unsigned int n_bodies_per_block)
{
// determine which body (0 ... n_bodies_per_block-1) this thread is working on
// assign threads 0, 1, 2, ... to body 0, n, n+1, n+2, ... to body 1, and so on.
unsigned int m = threadIdx.x / (blockDim.x / n_bodies_per_block);
// the six body_virial_* components are shared memory arrays with one element per thread
Scalar *body_virial_xx = sum_virial;
Scalar *body_virial_xy = &sum_virial[1*blockDim.x];
Scalar *body_virial_xz = &sum_virial[2*blockDim.x];
Scalar *body_virial_yy = &sum_virial[3*blockDim.x];
Scalar *body_virial_yz = &sum_virial[4*blockDim.x];
Scalar *body_virial_zz = &sum_virial[5*blockDim.x];
// store body type, orientation and the index in molecule list in shared memory. Up to 16 bodies per block can
// be handled.
__shared__ unsigned int body_type[16];
__shared__ Scalar4 body_orientation[16];
__shared__ unsigned int mol_idx[16];
__shared__ unsigned int central_idx[16];
// each thread makes partial sums of the virial of all the particles that this thread loops over
Scalar sum_virial_xx(0.0);
Scalar sum_virial_xy(0.0);
Scalar sum_virial_xz(0.0);
Scalar sum_virial_yy(0.0);
Scalar sum_virial_yz(0.0);
Scalar sum_virial_zz(0.0);
// thread_mask is a bitmask that masks out the high bits in threadIdx.x.
// threadIdx.x & thread_mask is an index from 0 to block_size/n_bodies_per_block-1 and determines what offset
// this thread is to use when accessing the particles in the body
if ((threadIdx.x & thread_mask) == 0)
{
// thread 0 for this body reads in the body id and orientation and stores them in shared memory
int group_idx = blockIdx.x*n_bodies_per_block + m;
if (group_idx < n_mol)
{
mol_idx[m] = group_idx;
// first ptl is central ptl
central_idx[m] = d_molecule_list[molecule_indexer(group_idx, 0)];
body_type[m] = __scalar_as_int(d_postype[central_idx[m]].w);
body_orientation[m] = d_orientation[central_idx[m]];
}
else
{
mol_idx[m] = NO_BODY;
}
}
__syncthreads();
if (mol_idx[m] != NO_BODY && central_idx[m] < N)
{
// compute the number of windows that we need to loop over
unsigned int mol_len = d_molecule_len[mol_idx[m]];
unsigned int n_windows = mol_len / window_size + 1;
// slide the window throughout the block
for (unsigned int start = 0; start < n_windows; start++)
{
// determine the index within this body that this thread should handle
unsigned int k = start * window_size + (threadIdx.x & thread_mask);
// if that index falls inside the body, this thread is handling a real constituent particle
if (k < mol_len)
{
// determine the particle idx of the particle
unsigned int pidx = d_molecule_list[molecule_indexer(mol_idx[m],k)];
// if this particle is not the central particle
if (pidx != central_idx[m])
{
// calculate body force and torques
vec3<Scalar> particle_pos(d_body_pos[body_indexer(body_type[m], k-1)]);
Scalar4 fi = d_net_force[pidx];
vec3<Scalar> ri = rotate(quat<Scalar>(body_orientation[m]), particle_pos);
// sum up virial
Scalar virialxx = d_net_virial[0*net_virial_pitch+pidx];
Scalar virialxy = d_net_virial[1*net_virial_pitch+pidx];
Scalar virialxz = d_net_virial[2*net_virial_pitch+pidx];
Scalar virialyy = d_net_virial[3*net_virial_pitch+pidx];
Scalar virialyz = d_net_virial[4*net_virial_pitch+pidx];
Scalar virialzz = d_net_virial[5*net_virial_pitch+pidx];
// subtract the intra-body virial part
sum_virial_xx += virialxx - fi.x*ri.x;
sum_virial_xy += virialxy - fi.x*ri.y;
sum_virial_xz += virialxz - fi.x*ri.z;
sum_virial_yy += virialyy - fi.y*ri.y;
sum_virial_yz += virialyz - fi.y*ri.z;
sum_virial_zz += virialzz - fi.z*ri.z;
// zero force and virial on constituent particles
d_net_force[pidx] = make_scalar4(0.0,0.0,0.0,0.0);
d_net_virial[0*net_virial_pitch+pidx] = Scalar(0.0);
d_net_virial[1*net_virial_pitch+pidx] = Scalar(0.0);
d_net_virial[2*net_virial_pitch+pidx] = Scalar(0.0);
d_net_virial[3*net_virial_pitch+pidx] = Scalar(0.0);
d_net_virial[4*net_virial_pitch+pidx] = Scalar(0.0);
d_net_virial[5*net_virial_pitch+pidx] = Scalar(0.0);
}
}
}
}
__syncthreads();
// put the partial sums into shared memory
body_virial_xx[threadIdx.x] = sum_virial_xx;
body_virial_xy[threadIdx.x] = sum_virial_xy;
body_virial_xz[threadIdx.x] = sum_virial_xz;
body_virial_yy[threadIdx.x] = sum_virial_yy;
body_virial_yz[threadIdx.x] = sum_virial_yz;
body_virial_zz[threadIdx.x] = sum_virial_zz;
__syncthreads();
// perform a set of partial reductions. Each block_size/n_bodies_per_block threads performs a sum reduction
// just within its own group
unsigned int offset = window_size >> 1;
while (offset > 0)
{
if ((threadIdx.x & thread_mask) < offset)
{
body_virial_xx[threadIdx.x] += body_virial_xx[threadIdx.x + offset];
body_virial_xy[threadIdx.x] += body_virial_xy[threadIdx.x + offset];
body_virial_xz[threadIdx.x] += body_virial_xz[threadIdx.x + offset];
body_virial_yy[threadIdx.x] += body_virial_yy[threadIdx.x + offset];
body_virial_yz[threadIdx.x] += body_virial_yz[threadIdx.x + offset];
body_virial_zz[threadIdx.x] += body_virial_zz[threadIdx.x + offset];
}
offset >>= 1;
__syncthreads();
}
// thread 0 within this body writes out the total virial for the body
if ((threadIdx.x & thread_mask) == 0 && mol_idx[m] != NO_BODY)
{
d_virial[0*virial_pitch+central_idx[m]] = body_virial_xx[threadIdx.x];
d_virial[1*virial_pitch+central_idx[m]] = body_virial_xy[threadIdx.x];
d_virial[2*virial_pitch+central_idx[m]] = body_virial_xz[threadIdx.x];
d_virial[3*virial_pitch+central_idx[m]] = body_virial_yy[threadIdx.x];
d_virial[4*virial_pitch+central_idx[m]] = body_virial_yz[threadIdx.x];
d_virial[5*virial_pitch+central_idx[m]] = body_virial_zz[threadIdx.x];
}
}
/*!
*/
hipError_t gpu_rigid_force(Scalar4* d_force,
Scalar4* d_torque,
const unsigned int *d_molecule_len,
const unsigned int *d_molecule_list,
Index2D molecule_indexer,
const Scalar4 *d_postype,
const Scalar4* d_orientation,
Index2D body_indexer,
Scalar3* d_body_pos,
Scalar4* d_body_orientation,
const unsigned int *d_body_len,
const unsigned int *d_body,
unsigned int *d_flag,
Scalar4* d_net_force,
Scalar4* d_net_torque,
unsigned int n_mol,
unsigned int N,
unsigned int n_bodies_per_block,
unsigned int block_size,
const hipDeviceProp_t& dev_prop,
bool zero_force)
{
// reset force and torque
hipMemset(d_force, 0, sizeof(Scalar4)*N);
hipMemset(d_torque, 0, sizeof(Scalar4)*N);
dim3 force_grid(n_mol / n_bodies_per_block + 1, 1, 1);
static unsigned int max_block_size = UINT_MAX;
static hipFuncAttributes attr;
if (max_block_size == UINT_MAX)
{
hipFuncGetAttributes(&attr, (const void *) gpu_rigid_force_sliding_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = max_block_size < block_size ? max_block_size : block_size;
// round down to nearest power of two
unsigned int b = 1;
while (b * 2 <= run_block_size) { b *= 2; }
run_block_size = b;
unsigned int window_size = run_block_size / n_bodies_per_block;
unsigned int thread_mask = window_size - 1;
unsigned int shared_bytes = run_block_size * (sizeof(Scalar4) + sizeof(Scalar3));
while (shared_bytes + attr.sharedSizeBytes >= dev_prop.sharedMemPerBlock)
{
// block size is power of two
run_block_size /= 2;
shared_bytes = run_block_size * (sizeof(Scalar4) + sizeof(Scalar3));
window_size = run_block_size / n_bodies_per_block;
thread_mask = window_size - 1;
}
hipLaunchKernelGGL(( gpu_rigid_force_sliding_kernel), dim3(force_grid), dim3(run_block_size), shared_bytes , 0,
d_force,
d_torque,
d_molecule_len,
d_molecule_list,
molecule_indexer,
d_postype,
d_orientation,
body_indexer,
d_body_pos,
d_body_orientation,
d_body_len,
d_body,
d_flag,
d_net_force,
d_net_torque,
n_mol,
N,
window_size,
thread_mask,
n_bodies_per_block,
zero_force);
return hipSuccess;
}
hipError_t gpu_rigid_virial(Scalar* d_virial,
const unsigned int *d_molecule_len,
const unsigned int *d_molecule_list,
Index2D molecule_indexer,
const Scalar4 *d_postype,
const Scalar4* d_orientation,
Index2D body_indexer,
Scalar3* d_body_pos,
Scalar4* d_body_orientation,
Scalar4* d_net_force,
Scalar* d_net_virial,
unsigned int n_mol,
unsigned int N,
unsigned int n_bodies_per_block,
unsigned int net_virial_pitch,
unsigned int virial_pitch,
unsigned int block_size,
const hipDeviceProp_t& dev_prop)
{
// reset the virial
hipMemset(d_virial,0, sizeof(Scalar)*virial_pitch*6);
dim3 force_grid(n_mol / n_bodies_per_block + 1, 1, 1);
static unsigned int max_block_size = UINT_MAX;
static hipFuncAttributes attr;
if (max_block_size == UINT_MAX)
{
hipFuncGetAttributes(&attr, (const void *) gpu_rigid_virial_sliding_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = max_block_size < block_size ? max_block_size : block_size;
// round down to nearest power of two
unsigned int b = 1;
while (b * 2 <= run_block_size) { b *= 2; }
run_block_size = b;
unsigned int window_size = run_block_size / n_bodies_per_block;
unsigned int thread_mask = window_size - 1;
unsigned int shared_bytes = 6 * run_block_size * sizeof(Scalar);
while (shared_bytes + attr.sharedSizeBytes >= dev_prop.sharedMemPerBlock)
{
// block size is power of two
run_block_size /= 2;
shared_bytes = 6 * run_block_size * sizeof(Scalar);
window_size = run_block_size / n_bodies_per_block;
thread_mask = window_size - 1;
}
hipLaunchKernelGGL(( gpu_rigid_virial_sliding_kernel), dim3(force_grid), dim3(run_block_size), shared_bytes , 0,
d_virial,
d_molecule_len,
d_molecule_list,
molecule_indexer,
d_postype,
d_orientation,
body_indexer,
d_body_pos,
d_body_orientation,
d_net_force,
d_net_virial,
n_mol,
N,
net_virial_pitch,
virial_pitch,
window_size,
thread_mask,
n_bodies_per_block);
return hipSuccess;
}
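// Rebuilds each constituent particle from its central particle: the stored body-frame
// offset is rotated by the central particle's orientation, added to the central position,
// wrapped back into the box, and written out along with the updated image. A molecule
// that is incomplete but has a locally owned member reports an error through d_flag.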
__global__ void gpu_update_composite_kernel(unsigned int N,
unsigned int n_ghost,
const unsigned int *d_body,
const unsigned int *d_rtag,
Scalar4 *d_postype,
Scalar4 *d_orientation,
Index2D body_indexer,
const Scalar3 *d_body_pos,
const Scalar4 *d_body_orientation,
const unsigned int *d_body_len,
const unsigned int *d_molecule_order,
const unsigned int *d_molecule_len,
const unsigned int *d_molecule_idx,
int3 *d_image,
const BoxDim box,
unsigned int *d_flag)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N+n_ghost) return;
unsigned int central_tag = d_body[idx];
if (central_tag == NO_BODY) return;
unsigned int central_idx = d_rtag[central_tag];
if (central_idx == NOT_LOCAL && idx >= N) return;
// do not overwrite central ptl
if (idx == central_idx) return;
Scalar4 postype = d_postype[central_idx];
vec3<Scalar> pos(postype);
quat<Scalar> orientation(d_orientation[central_idx]);
unsigned int body_type = __scalar_as_int(postype.w);
unsigned int body_len = d_body_len[body_type];
unsigned int mol_idx = d_molecule_idx[idx];
if (body_len != d_molecule_len[mol_idx]-1)
{
// if a molecule with a local member is incomplete, this is an error
if (idx < N)
{
atomicMax(d_flag, central_tag+1);
}
// otherwise, ignore
return;
}
int3 img = d_image[central_idx];
unsigned int idx_in_body = d_molecule_order[idx] - 1;
vec3<Scalar> local_pos(d_body_pos[body_indexer(body_type, idx_in_body)]);
vec3<Scalar> dr_space = rotate(orientation, local_pos);
vec3<Scalar> updated_pos(pos);
updated_pos += dr_space;
quat<Scalar> local_orientation(d_body_orientation[body_indexer(body_type, idx_in_body)]);
quat<Scalar> updated_orientation = orientation*local_orientation;
int3 imgi = img;
box.wrap(updated_pos, imgi);
unsigned int type = __scalar_as_int(d_postype[idx].w);
d_postype[idx] = make_scalar4(updated_pos.x, updated_pos.y, updated_pos.z, __int_as_scalar(type));
d_image[idx] = imgi;
}
void gpu_update_composite(unsigned int N,
unsigned int n_ghost,
const unsigned int *d_body,
const unsigned int *d_rtag,
Scalar4 *d_postype,
Scalar4 *d_orientation,
Index2D body_indexer,
const Scalar3 *d_body_pos,
const Scalar4 *d_body_orientation,
const unsigned int *d_body_len,
const unsigned int *d_molecule_order,
const unsigned int *d_molecule_len,
const unsigned int *d_molecule_idx,
int3 *d_image,
const BoxDim box,
unsigned int block_size,
unsigned int *d_flag)
{
unsigned int run_block_size = block_size;
static unsigned int max_block_size = UINT_MAX;
static hipFuncAttributes attr;
if (max_block_size == UINT_MAX)
{
hipFuncGetAttributes(&attr, (const void *) gpu_update_composite_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
if (max_block_size <= run_block_size)
{
run_block_size = max_block_size;
}
unsigned int n_blocks = (N+n_ghost)/run_block_size + 1;
hipLaunchKernelGGL(( gpu_update_composite_kernel), dim3(n_blocks),dim3(run_block_size), 0, 0, N,
n_ghost,
d_body,
d_rtag,
d_postype,
d_orientation,
body_indexer,
d_body_pos,
d_body_orientation,
d_body_len,
d_molecule_order,
d_molecule_len,
d_molecule_idx,
d_image,
box,
d_flag);
}
| 8e63e907520806d7a9fe3d4bc362f84737a63700.cu | // Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "hoomd/VectorMath.h"
#include "hoomd/Index1D.h"
#include "hoomd/ParticleData.cuh"
// Maintainer: jglaser
/*! \file ForceComposite.cu
\brief Defines GPU kernel code for the composite particle integration on the GPU.
*/
//! Shared memory for body force and torque reduction, required allocation when the kernel is called
extern __shared__ char sum[];
extern __shared__ Scalar sum_virial[];
//! Calculates the body forces and torques by summing the constituent particle forces using a fixed sliding window size
/* Compute the force and torque sum on all bodies in the system from their constituent particles. n_bodies_per_block
bodies are handled within each block of execution on the GPU. The reason for this is to decrease
over-parallelism and use the GPU cores more effectively when bodies are smaller than the block size. Otherwise,
small bodies leave many threads in the block idle with nothing to do.
On start, the properties common to each body are read in, computed, and stored in shared memory for all the threads
working on that body to access. Then, the threads loop over all particles that are part of the body with
a sliding window. Each loop of the window computes the force and torque for block_size/n_bodies_per_block particles
in as many threads in parallel. These quantities are summed over enough windows to cover the whole body.
The block_size/n_bodies_per_block partial sums are stored in shared memory. Then n_bodies_per_block partial
reductions are performed in parallel using all threads to sum the total force and torque on each body. This looks
just like a normal reduction, except that it terminates at a certain level in the tree. To make the math
for the partial reduction work out, block_size must be a power of 2 as must n_bodies_per_block.
Performance testing on GF100 with many different bodies of different sizes ranging from 4-256 particles per body
has found that the optimum block size for most bodies is 64 threads. Performance increases for all body sizes
as n_bodies_per_block is increased, but only up to 8. n_bodies_per_block=16 slows performance significantly.
Based on these performance results, this kernel is hardcoded to handle only 1,2,4,8 n_bodies_per_block
with a power of 2 block size (hardcoded to 64 in the kernel launch).
*/
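// Reduction sketch (illustrative): for window_size = 8 the partial reduction below starts
// with offset = window_size/2 = 4 and halves it 4 -> 2 -> 1, so after three steps the
// thread at offset 0 of each window holds the full force/torque sum for its body; the
// loop deliberately stops at the window boundary instead of reducing across the block.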
__global__ void gpu_rigid_force_sliding_kernel(Scalar4* d_force,
Scalar4* d_torque,
const unsigned int *d_molecule_len,
const unsigned int *d_molecule_list,
Index2D molecule_indexer,
const Scalar4 *d_postype,
const Scalar4* d_orientation,
Index2D body_indexer,
Scalar3* d_body_pos,
Scalar4* d_body_orientation,
const unsigned int *d_body_len,
const unsigned int *d_body,
unsigned int *d_flag,
Scalar4* d_net_force,
Scalar4* d_net_torque,
unsigned int n_mol,
unsigned int N,
unsigned int window_size,
unsigned int thread_mask,
unsigned int n_bodies_per_block,
bool zero_force)
{
// determine which body (0 ... n_bodies_per_block-1) this thread is working on
// assign threads 0, 1, 2, ... to body 0, n, n+1, n+2, ... to body 1, and so on.
unsigned int m = threadIdx.x / (blockDim.x / n_bodies_per_block);
// body_force and body_torque are each shared memory arrays with one element per thread
Scalar4 *body_force = (Scalar4 *)sum;
Scalar3 *body_torque = (Scalar3 *) (body_force + blockDim.x);
// store body type, orientation and the index in molecule list in shared memory. Up to 16 bodies per block can
// be handled.
__shared__ unsigned int body_type[16];
__shared__ Scalar4 body_orientation[16];
__shared__ unsigned int mol_idx[16];
__shared__ unsigned int central_idx[16];
// each thread makes partial sums of force and torque of all the particles that this thread loops over
Scalar4 sum_force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0),Scalar(0.0));
Scalar3 sum_torque = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
// thread_mask is a bitmask that masks out the high bits in threadIdx.x.
// threadIdx.x & thread_mask is an index from 0 to block_size/n_bodies_per_block-1 and determines what offset
// this thread is to use when accessing the particles in the body
if ((threadIdx.x & thread_mask) == 0)
{
// thread 0 for this body reads in the body id and orientation and stores them in shared memory
int group_idx = blockIdx.x*n_bodies_per_block + m;
if (group_idx < n_mol)
{
mol_idx[m] = group_idx;
// first ptl is central ptl
central_idx[m] = d_molecule_list[molecule_indexer(group_idx, 0)];
body_type[m] = __scalar_as_int(d_postype[central_idx[m]].w);
body_orientation[m] = d_orientation[central_idx[m]];
}
else
{
mol_idx[m] = NO_BODY;
}
}
__syncthreads();
if (mol_idx[m] != NO_BODY && central_idx[m] < N)
{
// compute the number of windows that we need to loop over
unsigned int mol_len = d_molecule_len[mol_idx[m]];
unsigned int n_windows = mol_len / window_size + 1;
if (mol_len != d_body_len[body_type[m]] + 1)
{
// incomplete molecule
atomicMax(d_flag, d_body[central_idx[m]] + 1);
}
// slide the window throughout the block
for (unsigned int start = 0; start < n_windows; start++)
{
// determine the index within this body that this thread should handle
unsigned int k = start * window_size + (threadIdx.x & thread_mask);
// if that index falls inside the body, this thread is handling a real constituent particle
if (k < mol_len)
{
// determine the particle idx of the particle
unsigned int pidx = d_molecule_list[molecule_indexer(mol_idx[m],k)];
// if this particle is not the central particle
if (pidx != central_idx[m])
{
// calculate body force and torques
vec3<Scalar> particle_pos(d_body_pos[body_indexer(body_type[m], k-1)]);
Scalar4 fi = d_net_force[pidx];
//will likely need to rotate these components too
vec3<Scalar> ti(d_net_torque[pidx]);
// tally the force in the per thread counter
sum_force.x += fi.x;
sum_force.y += fi.y;
sum_force.z += fi.z;
// sum up energy
sum_force.w += fi.w;
// zero force only if we don't need it later
if (zero_force)
{
// zero net energy on constituent ptls to avoid double counting
// also zero net force for consistency
d_net_force[pidx] = make_scalar4(0.0,0.0,0.0,0.0);
}
vec3<Scalar> ri = rotate(quat<Scalar>(body_orientation[m]), particle_pos);
// torque = r x f
vec3<Scalar> del_torque(cross(ri, vec3<Scalar>(fi)));
// tally the torque in the per thread counter
sum_torque.x += ti.x+del_torque.x;
sum_torque.y += ti.y+del_torque.y;
sum_torque.z += ti.z+del_torque.z;
// zero net torque on constituent particles
d_net_torque[pidx] = make_scalar4(0.0,0.0,0.0,0.0);
}
}
}
}
__syncthreads();
// put the partial sums into shared memory
body_force[threadIdx.x] = sum_force;
body_torque[threadIdx.x] = sum_torque;
__syncthreads();
// perform a set of partial reductions. Each block_size/n_bodies_per_block threads performs a sum reduction
// just within its own group
unsigned int offset = window_size >> 1;
while (offset > 0)
{
if ((threadIdx.x & thread_mask) < offset)
{
body_force[threadIdx.x].x += body_force[threadIdx.x + offset].x;
body_force[threadIdx.x].y += body_force[threadIdx.x + offset].y;
body_force[threadIdx.x].z += body_force[threadIdx.x + offset].z;
body_force[threadIdx.x].w += body_force[threadIdx.x + offset].w;
body_torque[threadIdx.x].x += body_torque[threadIdx.x + offset].x;
body_torque[threadIdx.x].y += body_torque[threadIdx.x + offset].y;
body_torque[threadIdx.x].z += body_torque[threadIdx.x + offset].z;
}
offset >>= 1;
__syncthreads();
}
// thread 0 within this body writes out the total force and torque for the body
if ((threadIdx.x & thread_mask) == 0 && mol_idx[m] != NO_BODY)
{
d_force[central_idx[m]] = body_force[threadIdx.x];
d_torque[central_idx[m]] = make_scalar4(body_torque[threadIdx.x].x, body_torque[threadIdx.x].y, body_torque[threadIdx.x].z, 0.0f);
}
}
__global__ void gpu_rigid_virial_sliding_kernel(Scalar* d_virial,
const unsigned int *d_molecule_len,
const unsigned int *d_molecule_list,
Index2D molecule_indexer,
const Scalar4 *d_postype,
const Scalar4* d_orientation,
Index2D body_indexer,
Scalar3* d_body_pos,
Scalar4* d_body_orientation,
Scalar4* d_net_force,
Scalar* d_net_virial,
unsigned int n_mol,
unsigned int N,
unsigned int net_virial_pitch,
unsigned int virial_pitch,
unsigned int window_size,
unsigned int thread_mask,
unsigned int n_bodies_per_block)
{
// determine which body (0 ... n_bodies_per_block-1) this thread is working on
// assign threads 0, 1, 2, ... to body 0, n, n+1, n+2, ... to body 1, and so on.
unsigned int m = threadIdx.x / (blockDim.x / n_bodies_per_block);
// the six body_virial_* components are shared memory arrays with one element per thread
Scalar *body_virial_xx = sum_virial;
Scalar *body_virial_xy = &sum_virial[1*blockDim.x];
Scalar *body_virial_xz = &sum_virial[2*blockDim.x];
Scalar *body_virial_yy = &sum_virial[3*blockDim.x];
Scalar *body_virial_yz = &sum_virial[4*blockDim.x];
Scalar *body_virial_zz = &sum_virial[5*blockDim.x];
// store body type, orientation and the index in molecule list in shared memory. Up to 16 bodies per block can
// be handled.
__shared__ unsigned int body_type[16];
__shared__ Scalar4 body_orientation[16];
__shared__ unsigned int mol_idx[16];
__shared__ unsigned int central_idx[16];
// each thread makes partial sums of the virial of all the particles that this thread loops over
Scalar sum_virial_xx(0.0);
Scalar sum_virial_xy(0.0);
Scalar sum_virial_xz(0.0);
Scalar sum_virial_yy(0.0);
Scalar sum_virial_yz(0.0);
Scalar sum_virial_zz(0.0);
// thread_mask is a bitmask that masks out the high bits in threadIdx.x.
// threadIdx.x & thread_mask is an index from 0 to block_size/n_bodies_per_block-1 and determines what offset
// this thread is to use when accessing the particles in the body
if ((threadIdx.x & thread_mask) == 0)
{
// thread 0 for this body reads in the body id and orientation and stores them in shared memory
int group_idx = blockIdx.x*n_bodies_per_block + m;
if (group_idx < n_mol)
{
mol_idx[m] = group_idx;
// first ptl is central ptl
central_idx[m] = d_molecule_list[molecule_indexer(group_idx, 0)];
body_type[m] = __scalar_as_int(d_postype[central_idx[m]].w);
body_orientation[m] = d_orientation[central_idx[m]];
}
else
{
mol_idx[m] = NO_BODY;
}
}
__syncthreads();
if (mol_idx[m] != NO_BODY && central_idx[m] < N)
{
// compute the number of windows that we need to loop over
unsigned int mol_len = d_molecule_len[mol_idx[m]];
unsigned int n_windows = mol_len / window_size + 1;
// slide the window throughout the block
for (unsigned int start = 0; start < n_windows; start++)
{
// determine the index within this body that this thread should handle
unsigned int k = start * window_size + (threadIdx.x & thread_mask);
// if that index falls inside the body, this thread is handling a real constituent particle
if (k < mol_len)
{
// determine the particle idx of the particle
unsigned int pidx = d_molecule_list[molecule_indexer(mol_idx[m],k)];
// if this particle is not the central particle
if (pidx != central_idx[m])
{
// calculate body force and torques
vec3<Scalar> particle_pos(d_body_pos[body_indexer(body_type[m], k-1)]);
Scalar4 fi = d_net_force[pidx];
vec3<Scalar> ri = rotate(quat<Scalar>(body_orientation[m]), particle_pos);
// sum up virial
Scalar virialxx = d_net_virial[0*net_virial_pitch+pidx];
Scalar virialxy = d_net_virial[1*net_virial_pitch+pidx];
Scalar virialxz = d_net_virial[2*net_virial_pitch+pidx];
Scalar virialyy = d_net_virial[3*net_virial_pitch+pidx];
Scalar virialyz = d_net_virial[4*net_virial_pitch+pidx];
Scalar virialzz = d_net_virial[5*net_virial_pitch+pidx];
// subtract the intra-body virial part
sum_virial_xx += virialxx - fi.x*ri.x;
sum_virial_xy += virialxy - fi.x*ri.y;
sum_virial_xz += virialxz - fi.x*ri.z;
sum_virial_yy += virialyy - fi.y*ri.y;
sum_virial_yz += virialyz - fi.y*ri.z;
sum_virial_zz += virialzz - fi.z*ri.z;
// zero force and virial on constituent particles
d_net_force[pidx] = make_scalar4(0.0,0.0,0.0,0.0);
d_net_virial[0*net_virial_pitch+pidx] = Scalar(0.0);
d_net_virial[1*net_virial_pitch+pidx] = Scalar(0.0);
d_net_virial[2*net_virial_pitch+pidx] = Scalar(0.0);
d_net_virial[3*net_virial_pitch+pidx] = Scalar(0.0);
d_net_virial[4*net_virial_pitch+pidx] = Scalar(0.0);
d_net_virial[5*net_virial_pitch+pidx] = Scalar(0.0);
}
}
}
}
__syncthreads();
// put the partial sums into shared memory
body_virial_xx[threadIdx.x] = sum_virial_xx;
body_virial_xy[threadIdx.x] = sum_virial_xy;
body_virial_xz[threadIdx.x] = sum_virial_xz;
body_virial_yy[threadIdx.x] = sum_virial_yy;
body_virial_yz[threadIdx.x] = sum_virial_yz;
body_virial_zz[threadIdx.x] = sum_virial_zz;
__syncthreads();
// perform a set of partial reductions. Each block_size/n_bodies_per_block threads performs a sum reduction
// just within its own group
unsigned int offset = window_size >> 1;
while (offset > 0)
{
if ((threadIdx.x & thread_mask) < offset)
{
body_virial_xx[threadIdx.x] += body_virial_xx[threadIdx.x + offset];
body_virial_xy[threadIdx.x] += body_virial_xy[threadIdx.x + offset];
body_virial_xz[threadIdx.x] += body_virial_xz[threadIdx.x + offset];
body_virial_yy[threadIdx.x] += body_virial_yy[threadIdx.x + offset];
body_virial_yz[threadIdx.x] += body_virial_yz[threadIdx.x + offset];
body_virial_zz[threadIdx.x] += body_virial_zz[threadIdx.x + offset];
}
offset >>= 1;
__syncthreads();
}
// thread 0 within this body writes out the total virial for the body
if ((threadIdx.x & thread_mask) == 0 && mol_idx[m] != NO_BODY)
{
d_virial[0*virial_pitch+central_idx[m]] = body_virial_xx[threadIdx.x];
d_virial[1*virial_pitch+central_idx[m]] = body_virial_xy[threadIdx.x];
d_virial[2*virial_pitch+central_idx[m]] = body_virial_xz[threadIdx.x];
d_virial[3*virial_pitch+central_idx[m]] = body_virial_yy[threadIdx.x];
d_virial[4*virial_pitch+central_idx[m]] = body_virial_yz[threadIdx.x];
d_virial[5*virial_pitch+central_idx[m]] = body_virial_zz[threadIdx.x];
}
}
/*!
*/
cudaError_t gpu_rigid_force(Scalar4* d_force,
Scalar4* d_torque,
const unsigned int *d_molecule_len,
const unsigned int *d_molecule_list,
Index2D molecule_indexer,
const Scalar4 *d_postype,
const Scalar4* d_orientation,
Index2D body_indexer,
Scalar3* d_body_pos,
Scalar4* d_body_orientation,
const unsigned int *d_body_len,
const unsigned int *d_body,
unsigned int *d_flag,
Scalar4* d_net_force,
Scalar4* d_net_torque,
unsigned int n_mol,
unsigned int N,
unsigned int n_bodies_per_block,
unsigned int block_size,
const cudaDeviceProp& dev_prop,
bool zero_force)
{
// reset force and torque
cudaMemset(d_force, 0, sizeof(Scalar4)*N);
cudaMemset(d_torque, 0, sizeof(Scalar4)*N);
dim3 force_grid(n_mol / n_bodies_per_block + 1, 1, 1);
static unsigned int max_block_size = UINT_MAX;
static cudaFuncAttributes attr;
if (max_block_size == UINT_MAX)
{
cudaFuncGetAttributes(&attr, (const void *) gpu_rigid_force_sliding_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = max_block_size < block_size ? max_block_size : block_size;
// round down to nearest power of two
unsigned int b = 1;
while (b * 2 <= run_block_size) { b *= 2; }
run_block_size = b;
unsigned int window_size = run_block_size / n_bodies_per_block;
unsigned int thread_mask = window_size - 1;
unsigned int shared_bytes = run_block_size * (sizeof(Scalar4) + sizeof(Scalar3));
while (shared_bytes + attr.sharedSizeBytes >= dev_prop.sharedMemPerBlock)
{
// block size is power of two
run_block_size /= 2;
shared_bytes = run_block_size * (sizeof(Scalar4) + sizeof(Scalar3));
window_size = run_block_size / n_bodies_per_block;
thread_mask = window_size - 1;
}
gpu_rigid_force_sliding_kernel<<< force_grid, run_block_size, shared_bytes >>>(
d_force,
d_torque,
d_molecule_len,
d_molecule_list,
molecule_indexer,
d_postype,
d_orientation,
body_indexer,
d_body_pos,
d_body_orientation,
d_body_len,
d_body,
d_flag,
d_net_force,
d_net_torque,
n_mol,
N,
window_size,
thread_mask,
n_bodies_per_block,
zero_force);
return cudaSuccess;
}
cudaError_t gpu_rigid_virial(Scalar* d_virial,
const unsigned int *d_molecule_len,
const unsigned int *d_molecule_list,
Index2D molecule_indexer,
const Scalar4 *d_postype,
const Scalar4* d_orientation,
Index2D body_indexer,
Scalar3* d_body_pos,
Scalar4* d_body_orientation,
Scalar4* d_net_force,
Scalar* d_net_virial,
unsigned int n_mol,
unsigned int N,
unsigned int n_bodies_per_block,
unsigned int net_virial_pitch,
unsigned int virial_pitch,
unsigned int block_size,
const cudaDeviceProp& dev_prop)
{
// reset the virial
cudaMemset(d_virial,0, sizeof(Scalar)*virial_pitch*6);
dim3 force_grid(n_mol / n_bodies_per_block + 1, 1, 1);
static unsigned int max_block_size = UINT_MAX;
static cudaFuncAttributes attr;
if (max_block_size == UINT_MAX)
{
cudaFuncGetAttributes(&attr, (const void *) gpu_rigid_virial_sliding_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = max_block_size < block_size ? max_block_size : block_size;
// round down to nearest power of two
unsigned int b = 1;
while (b * 2 <= run_block_size) { b *= 2; }
run_block_size = b;
unsigned int window_size = run_block_size / n_bodies_per_block;
unsigned int thread_mask = window_size - 1;
unsigned int shared_bytes = 6 * run_block_size * sizeof(Scalar);
while (shared_bytes + attr.sharedSizeBytes >= dev_prop.sharedMemPerBlock)
{
// block size is power of two
run_block_size /= 2;
shared_bytes = 6 * run_block_size * sizeof(Scalar);
window_size = run_block_size / n_bodies_per_block;
thread_mask = window_size - 1;
}
gpu_rigid_virial_sliding_kernel<<< force_grid, run_block_size, shared_bytes >>>(
d_virial,
d_molecule_len,
d_molecule_list,
molecule_indexer,
d_postype,
d_orientation,
body_indexer,
d_body_pos,
d_body_orientation,
d_net_force,
d_net_virial,
n_mol,
N,
net_virial_pitch,
virial_pitch,
window_size,
thread_mask,
n_bodies_per_block);
return cudaSuccess;
}
__global__ void gpu_update_composite_kernel(unsigned int N,
unsigned int n_ghost,
const unsigned int *d_body,
const unsigned int *d_rtag,
Scalar4 *d_postype,
Scalar4 *d_orientation,
Index2D body_indexer,
const Scalar3 *d_body_pos,
const Scalar4 *d_body_orientation,
const unsigned int *d_body_len,
const unsigned int *d_molecule_order,
const unsigned int *d_molecule_len,
const unsigned int *d_molecule_idx,
int3 *d_image,
const BoxDim box,
unsigned int *d_flag)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N+n_ghost) return;
unsigned int central_tag = d_body[idx];
if (central_tag == NO_BODY) return;
unsigned int central_idx = d_rtag[central_tag];
if (central_idx == NOT_LOCAL && idx >= N) return;
// do not overwrite central ptl
if (idx == central_idx) return;
Scalar4 postype = d_postype[central_idx];
vec3<Scalar> pos(postype);
quat<Scalar> orientation(d_orientation[central_idx]);
unsigned int body_type = __scalar_as_int(postype.w);
unsigned int body_len = d_body_len[body_type];
unsigned int mol_idx = d_molecule_idx[idx];
if (body_len != d_molecule_len[mol_idx]-1)
{
// if a molecule with a local member is incomplete, this is an error
if (idx < N)
{
atomicMax(d_flag, central_tag+1);
}
// otherwise, ignore
return;
}
int3 img = d_image[central_idx];
unsigned int idx_in_body = d_molecule_order[idx] - 1;
vec3<Scalar> local_pos(d_body_pos[body_indexer(body_type, idx_in_body)]);
vec3<Scalar> dr_space = rotate(orientation, local_pos);
vec3<Scalar> updated_pos(pos);
updated_pos += dr_space;
quat<Scalar> local_orientation(d_body_orientation[body_indexer(body_type, idx_in_body)]);
quat<Scalar> updated_orientation = orientation*local_orientation;
int3 imgi = img;
box.wrap(updated_pos, imgi);
unsigned int type = __scalar_as_int(d_postype[idx].w);
d_postype[idx] = make_scalar4(updated_pos.x, updated_pos.y, updated_pos.z, __int_as_scalar(type));
d_image[idx] = imgi;
}
void gpu_update_composite(unsigned int N,
unsigned int n_ghost,
const unsigned int *d_body,
const unsigned int *d_rtag,
Scalar4 *d_postype,
Scalar4 *d_orientation,
Index2D body_indexer,
const Scalar3 *d_body_pos,
const Scalar4 *d_body_orientation,
const unsigned int *d_body_len,
const unsigned int *d_molecule_order,
const unsigned int *d_molecule_len,
const unsigned int *d_molecule_idx,
int3 *d_image,
const BoxDim box,
unsigned int block_size,
unsigned int *d_flag)
{
unsigned int run_block_size = block_size;
static unsigned int max_block_size = UINT_MAX;
static cudaFuncAttributes attr;
if (max_block_size == UINT_MAX)
{
cudaFuncGetAttributes(&attr, (const void *) gpu_update_composite_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
if (max_block_size <= run_block_size)
{
run_block_size = max_block_size;
}
unsigned int n_blocks = (N+n_ghost)/run_block_size + 1;
gpu_update_composite_kernel<<<n_blocks,run_block_size>>>(N,
n_ghost,
d_body,
d_rtag,
d_postype,
d_orientation,
body_indexer,
d_body_pos,
d_body_orientation,
d_body_len,
d_molecule_order,
d_molecule_len,
d_molecule_idx,
d_image,
box,
d_flag);
}
|
8d5198ec252abbabcd161bff0ef7982dcdd0d746.hip | // !!! This is a file automatically generated by hipify!!!
#include <time.h>
#include <stdio.h>
#include <hip/hip_runtime_api.h>
__device__ int is_a_match(char *attempt) {
char password1[] = "AB1111";
char password2[] = "AB9999";
char password3[] = "AB6666";
char password4[] = "AB1966";
char *j = attempt;
char *e = attempt;
char *n = attempt;
char *i = attempt;
char *pass1 = password1;
char *pass2 = password2;
char *pass3 = password3;
char *pass4 = password4;
while(*j == *pass1) {
if(*j == '\0')
{
printf("Found password: %s\n",password1);
break;
}
j++;
pass1++;
}
while(*e == *pass2) {
if(*e == '\0')
{
printf("Found password: %s\n",password2);
break;
}
e++;
pass2++;
}
while(*n == *pass3) {
if(*n == '\0')
{
printf("Found password: %s\n",password3);
break;
}
n++;
pass3++;
}
while(*i == *pass4) {
if(*i == '\0')
{
printf("Found password: %s\n",password4);
return 1;
}
i++;
pass4++;
}
return 0;
}
__global__ void kernel() {
char a,b,c,d;
char password[7];
password[6] = '\0';
int x = blockIdx.x+65;
int y = threadIdx.x+65;
char firstValue = x;
char secondValue = y;
password[0] = firstValue;
password[1] = secondValue;
for(a='0'; a<='9'; a++){
for(b='0'; b<='9'; b++){
for(c='0';c<='9';c++){
for(d='0';d<='9';d++){
password[2] = a;
password[3] = b;
password[4]= c;
password[5]=d;
if(is_a_match(password)) {
//printf("Success");
}
else {
//printf("tried: %s\n", password);
}
}
}
}
}
}
int time_difference(struct timespec *start,
struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL((kernel), dim3(26), dim3(26), 0, 0, );
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
return 0;
}
| 8d5198ec252abbabcd161bff0ef7982dcdd0d746.cu | #include <time.h>
#include <stdio.h>
#include <cuda_runtime_api.h>
__device__ int is_a_match(char *attempt) {
char password1[] = "AB1111";
char password2[] = "AB9999";
char password3[] = "AB6666";
char password4[] = "AB1966";
char *j = attempt;
char *e = attempt;
char *n = attempt;
char *i = attempt;
char *pass1 = password1;
char *pass2 = password2;
char *pass3 = password3;
char *pass4 = password4;
while(*j == *pass1) {
if(*j == '\0')
{
printf("Found password: %s\n",password1);
break;
}
j++;
pass1++;
}
while(*e == *pass2) {
if(*e == '\0')
{
printf("Found password: %s\n",password2);
break;
}
e++;
pass2++;
}
while(*n == *pass3) {
if(*n == '\0')
{
printf("Found password: %s\n",password3);
break;
}
n++;
pass3++;
}
while(*i == *pass4) {
if(*i == '\0')
{
printf("Found password: %s\n",password4);
return 1;
}
i++;
pass4++;
}
return 0;
}
__global__ void kernel() {
char a,b,c,d;
char password[7];
password[6] = '\0';
int x = blockIdx.x+65;
int y = threadIdx.x+65;
char firstValue = x;
char secondValue = y;
password[0] = firstValue;
password[1] = secondValue;
for(a='0'; a<='9'; a++){
for(b='0'; b<='9'; b++){
for(c='0';c<='9';c++){
for(d='0';d<='9';d++){
password[2] = a;
password[3] = b;
password[4]= c;
password[5]=d;
if(is_a_match(password)) {
//printf("Success");
}
else {
//printf("tried: %s\n", password);
}
}
}
}
}
}
int time_difference(struct timespec *start,
struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
kernel <<<26,26>>>();
cudaThreadSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
return 0;
}
|
d421d7297595b2ce0d18d175f711320088fd7ef6.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------------------------------------------------------------------
/// @author Toby Gilbert
// ----------------------------------------------------------------------------------------------------------------------------------------
#include "Ocean.h"
#include <helper_cuda.h>
#include <hipfft.h>
#include <glm/glm.hpp>
#include <complex>
#include <hiprand/hiprand.h>
#include <helper_functions.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <surface_functions.h>
#include <helper_math.h>
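// Illustrative helper (not part of the original implementation and not called by the
// kernels below): the standard component-wise complex product
// (a+bi)(c+di) = (ac - bd) + (ad + bc)i, kept only as a reference for reading the
// h(k,t) and conjugate terms computed in frequencyDomain().
static __device__ inline float2 complex_mul(float2 a, float2 b){
float2 r;
r.x = a.x*b.x - a.y*b.y; // real part
r.y = a.x*b.y + a.y*b.x; // imaginary part
return r;
}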
/// @brief Given a time you can create a field of frequency amplitudes
/// @param d_h0Pointer An OpenGL buffer which stores a set of amplitudes and phases at time zero
/// @param d_htPointer An OpenGL buffer for outputting the frequency amplitude field
/// @param _time The current simulation time
/// @param _res The simulation resolution
// ----------------------------------------------------------------------------------------------------------------------------------------
__global__ void frequencyDomain(float2* d_h0Pointer, float2* d_htPointer, float _time, int _res){
// A constant for the acceleration due to gravity
const float g = 9.81;
// A 2D vector to represent a position on the grid with constraints -(_res/2) <= k < (_res/2)
float2 k;
k.x = float((threadIdx.x - (_res * floor(double(threadIdx.x / _res)))) - (_res/2));
k.y = float(((blockIdx.x * (blockDim.x/_res)) + ceil(double(threadIdx.x / _res))) - (_res/2));
float kLen = sqrt(double(k.x*k.x + k.y*k.y));
// Calculate the wave frequency
float w = sqrt(double(g * kLen));
// complexExp holds the complex exponential where the x value stores the real part and the y value stores the imaginary part
float2 complexExp;
complexExp.x = sin(w * _time);
complexExp.y = cos(w * _time);
float2 complexExpConjugate;
complexExpConjugate.x = complexExp.x;
complexExpConjugate.y = -complexExp.y;
int blockNum =(( _res * _res )/ blockDim.x) - 1;
float2 h0 = d_h0Pointer[(blockIdx.x * blockDim.x) + threadIdx.x];
float2 h0conjugate = d_h0Pointer[((blockNum - blockIdx.x) * blockDim.x) + ((blockDim.x - 1) - threadIdx.x)];
// Swap the imaginary parts sign
h0conjugate.y = -h0conjugate.y;
// Equation 26 of Tessendorf's paper h(k,t) = h0(k)exp{iw(k)t} + ~h0(-k)exp{-iw(k)t}
float2 h;
h.x = (h0.x * complexExp.x - h0.y * complexExp.y);
h.y = (h0.x * complexExp.x + h0.y * complexExp.y);
float2 hStar;
hStar.x = (h0conjugate.x * complexExpConjugate.x - h0conjugate.y * complexExpConjugate.y) ;
hStar.y = (h0conjugate.x * complexExpConjugate.x - h0conjugate.y * complexExpConjugate.y) ;
// Output h(k,t) term to d_htPointer buffer which represents a set of points in the frequency domain
float2 hTilde;
hTilde.x= h.x + hStar.x;
hTilde.y = h.y + hStar.y;
d_htPointer[(blockIdx.x * blockDim.x) + threadIdx.x].x = hTilde.x;
d_htPointer[(blockIdx.x * blockDim.x) + threadIdx.x].y = hTilde.y;
}
// ----------------------------------------------------------------------------------------------------------------------------------------
/// @brief Once the inverse FFT has been performed, points in the frequency domain are converted to the spatial domain
/// and can be used to update the heights
/// @param d_position An OpenGL buffer for storing the current positions of the vertices in the grid
/// @param d_height An OpenGL buffer which holds the new heights of grid positions
/// @param d_normal An OpenGL buffer which holds the normals
/// @param d_xDisplacement An OpenGL buffer for storing the displacement in the x axis
/// @param _res The resolution of the grid
/// @param _scale Scales the amplitude of the waves
// ----------------------------------------------------------------------------------------------------------------------------------------
__global__ void height(float3* d_position, float2* d_height, float2* d_chopX, float2* d_chopZ, float _choppiness, int _res, float _scale){
// A vertex on the grid
int u = int(threadIdx.x - (_res * floor(double(threadIdx.x / _res))));
int v = int((blockIdx.x * (blockDim.x/(float)_res)) + ceil(double(threadIdx.x / _res)));
// Sign correction - Unsure why this is needed
float sign = 1.0;
if ((u+v) % 2 != 0){
sign = -1.0;
}
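// (Most likely this is the usual (-1)^(u+v) centring factor: k was defined with the zero
// frequency at the centre of the spectrum, so the inverse FFT output has to be modulated
// by (-1)^(u+v), i.e. the spatial-domain equivalent of an fftshift.)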
// Update the heights of the vertices
float prevX = d_position[(blockIdx.x * blockDim.x) + threadIdx.x].x;
float prevZ = d_position[(blockIdx.x * blockDim.x) + threadIdx.x].z;
float xDisp = _choppiness * (d_chopX[(blockIdx.x * blockDim.x) + threadIdx.x].x /_scale) * sign;
float zDisp = _choppiness * (d_chopZ[(blockIdx.x * blockDim.x) + threadIdx.x].x /_scale) * sign;
float height = ((d_height[(blockIdx.x * blockDim.x) + threadIdx.x].x / _scale) * sign ) / 255.0f;
float newX = prevX +xDisp;
float newZ = prevZ + zDisp;
d_position[(blockIdx.x * blockDim.x) + threadIdx.x].x = newX;
d_position[(blockIdx.x * blockDim.x) + threadIdx.x].y =height;
d_position[(blockIdx.x * blockDim.x) + threadIdx.x].z = newZ;
}
__global__ void calculateNormals(float3* d_position, float3* d_normals, int _res){
float3 norm = make_float3(0.0, 0.0, 0.0);
float3 posL, posR, posD, posU;
/// @todo remove branching conditions
if (((blockIdx.x * blockDim.x) + threadIdx.x) >= 1){
posL = (d_position[((blockIdx.x * blockDim.x) + threadIdx.x) - 1]);
}
else{
posL = (d_position[_res]); // A position on a neighbouring tile
}
if (((blockIdx.x * blockDim.x) + threadIdx.x) <=(_res*_res)-2){
posR = (d_position[((blockIdx.x * blockDim.x) + threadIdx.x) + 1]);
}
else{
posR = d_position[_res*_res - _res]; // A position on a neighbouring tile
}
if (((blockIdx.x * blockDim.x) + threadIdx.x) >= _res){
posU = (d_position[((blockIdx.x * blockDim.x) + threadIdx.x) - _res]);
}
else{
posU = d_position[_res*_res-_res + threadIdx.x];
}
if (((blockIdx.x * blockDim.x) + threadIdx.x) <= (_res*_res)-_res-1){
posD = (d_position[((blockIdx.x * blockDim.x) + threadIdx.x) + _res]);
}
else{
posD = d_position[threadIdx.x];
}
float3 leftVec, rightVec, topVec, bottomVec;
float3 centerVec = d_position[((blockIdx.x * blockDim.x) + threadIdx.x)];
leftVec = posL - centerVec;
leftVec.y *= 100.0;
rightVec = posR - centerVec;
rightVec.y *= 100.0;
topVec = posU - centerVec;
topVec.y *= 100.0;
bottomVec = posD - centerVec;
bottomVec.y *= 100.0;
float3 tmpNorm1 = normalize(cross(leftVec, topVec));
float3 tmpNorm2 = normalize(cross(topVec, rightVec));
float3 tmpNorm3 = normalize(cross(rightVec, bottomVec));
float3 tmpNorm4 = normalize(cross(bottomVec, leftVec));
tmpNorm1.y = fabs(tmpNorm1.y);
tmpNorm2.y = fabs(tmpNorm2.y);
tmpNorm3.y = fabs(tmpNorm3.y);
tmpNorm4.y = fabs(tmpNorm4.y);
norm = normalize((tmpNorm1 + tmpNorm2 + tmpNorm3 + tmpNorm4));
// Update the normals buffer
d_normals[(blockIdx.x * blockDim.x) + threadIdx.x] = norm;
}
// ----------------------------------------------------------------------------------------------------------------------------------------
/// @brief Create x and z displacement in the frequency domain
/// @param d_Ht An OpenGL buffer holding the frequency amplitude field h(k,t)
/// @param d_chopX An OpenGL buffer to store the x displacement in the frequency domain
/// @param d_chopZ An OpenGL buffer to store the z displacement in the frequency domain
/// @param _windSpeed The wind vector whose normalised direction scales the displacement
// ----------------------------------------------------------------------------------------------------------------------------------------
__global__ void choppiness(float2* d_Ht, float2* d_chopX, float2* d_chopZ, float2 _windSpeed){
// k - A position on the grid
float2 k;
k.x = _windSpeed.x;
k.y = _windSpeed.y;
float kLen = sqrt(double(k.x*k.x + k.y*k.y));
float Kx = k.x / kLen;
float Kz = k.y / kLen;
if (kLen == 0.0){
Kx = 0.0;
Kz = 0.0;
}
d_chopX[(blockIdx.x * blockDim.x) + threadIdx.x].x = 0.0;
d_chopX[(blockIdx.x * blockDim.x) + threadIdx.x].y = d_Ht[(blockIdx.x * blockDim.x) + threadIdx.x].y * -Kx;
d_chopZ[(blockIdx.x * blockDim.x) + threadIdx.x].x = 0.0;
d_chopZ[(blockIdx.x * blockDim.x) + threadIdx.x].y = d_Ht[(blockIdx.x * blockDim.x) + threadIdx.x].y * -Kz;
}
// ----------------------------------------------------------------------------------------------------------------------------------------
void updateFrequencyDomain(float2 *d_h0, float2 *d_ht, float _time, int _res){
int numBlocks =( _res * _res )/ 1024;
hipLaunchKernelGGL(( frequencyDomain), dim3(numBlocks), dim3(1024), 0, 0, d_h0, d_ht, _time, _res);
}
// ----------------------------------------------------------------------------------------------------------------------------------------
void updateHeight(float3* d_position, float3* d_norms, float2* d_height, float2* d_chopX, float2* d_chopZ, float _choppiness, int _res, float _scale){
int numBlocks =( _res * _res )/ 1024;
hipLaunchKernelGGL(( height), dim3(numBlocks), dim3(1024), 0, 0, d_position, d_height, d_chopX, d_chopZ, _choppiness, _res, _scale);
hipDeviceSynchronize();
hipLaunchKernelGGL(( calculateNormals), dim3(numBlocks), dim3(1024), 0, 0, d_position, d_norms, _res);
}
// ----------------------------------------------------------------------------------------------------------------------------------------
void addChoppiness(float2* d_Heights, float2* d_chopX, float2* d_chopZ, int _res, float2 _windDirection){
int numBlocks =( _res * _res )/ 1024;
hipLaunchKernelGGL(( choppiness), dim3(numBlocks), dim3(1024), 0, 0, d_Heights, d_chopX, d_chopZ, _windDirection);
}
// ----------------------------------------------------------------------------------------------------------------------------------------
| d421d7297595b2ce0d18d175f711320088fd7ef6.cu | // ----------------------------------------------------------------------------------------------------------------------------------------
/// @author Toby Gilbert
// ----------------------------------------------------------------------------------------------------------------------------------------
#include "Ocean.h"
#include <helper_cuda.h>
#include <cufft.h>
#include <glm/glm.hpp>
#include <complex>
#include <curand.h>
#include <helper_functions.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <surface_functions.h>
#include <helper_math.h>
/// @brief Given a time you can create a field of frequency amplitudes
/// @param d_h0Pointer An OpenGL buffer which stores a set of amplitudes and phases at time zero
/// @param d_htPointer An OpenGL buffer for outputting the frequency amplitude field
/// @param _time The current simulation time
/// @param _res The simulation resolution
// ----------------------------------------------------------------------------------------------------------------------------------------
__global__ void frequencyDomain(float2* d_h0Pointer, float2* d_htPointer, float _time, int _res){
// A constant for the acceleration due to gravity
const float g = 9.81;
// A 2D vector to represent a position on the grid with constraints -(_res/2) <= k < (_res/2)
float2 k;
k.x = float((threadIdx.x - (_res * floor(double(threadIdx.x / _res)))) - (_res/2));
k.y = float(((blockIdx.x * (blockDim.x/_res)) + ceil(double(threadIdx.x / _res))) - (_res/2));
float kLen = sqrt(double(k.x*k.x + k.y*k.y));
// Calculate the wave frequency
float w = sqrt(double(g * kLen));
// complexExp holds the complex exponential where the x value stores the real part and the y value stores the imaginary part
float2 complexExp;
complexExp.x = sin(w * _time);
complexExp.y = cos(w * _time);
float2 complexExpConjugate;
complexExpConjugate.x = complexExp.x;
complexExpConjugate.y = -complexExp.y;
int blockNum =(( _res * _res )/ blockDim.x) - 1;
float2 h0 = d_h0Pointer[(blockIdx.x * blockDim.x) + threadIdx.x];
float2 h0conjugate = d_h0Pointer[((blockNum - blockIdx.x) * blockDim.x) + ((blockDim.x - 1) - threadIdx.x)];
// Swap the imaginary parts sign
h0conjugate.y = -h0conjugate.y;
// Equation 26 of Tessendorf's paper h(k,t) = h0(k)exp{iw(k)t} + ~h0(-k)exp{-iw(k)t}
float2 h;
h.x = (h0.x * complexExp.x - h0.y * complexExp.y);
h.y = (h0.x * complexExp.x + h0.y * complexExp.y);
float2 hStar;
hStar.x = (h0conjugate.x * complexExpConjugate.x - h0conjugate.y * complexExpConjugate.y) ;
hStar.y = (h0conjugate.x * complexExpConjugate.x - h0conjugate.y * complexExpConjugate.y) ;
// Output h(k,t) term to d_htPointer buffer which represents a set of points in the frequency domain
float2 hTilde;
hTilde.x= h.x + hStar.x;
hTilde.y = h.y + hStar.y;
d_htPointer[(blockIdx.x * blockDim.x) + threadIdx.x].x = hTilde.x;
d_htPointer[(blockIdx.x * blockDim.x) + threadIdx.x].y = hTilde.y;
}
// ----------------------------------------------------------------------------------------------------------------------------------------
/// @brief Once the inverse FFT has been performed, points in the frequency domain are converted to the spatial domain
/// and can be used to update the heights
/// @param d_position An OpenGL buffer for storing the current positions of the vertices in the grid
/// @param d_height An OpenGL buffer which holds the new heights of grid positions
/// @param d_normal An OpenGL buffer which holds the normals
/// @param d_xDisplacement An OpenGL buffer for storing the displacement in the x axis
/// @param _res The resolution of the grid
/// @param _scale Scales the amplitude of the waves
// ----------------------------------------------------------------------------------------------------------------------------------------
__global__ void height(float3* d_position, float2* d_height, float2* d_chopX, float2* d_chopZ, float _choppiness, int _res, float _scale){
// A vertex on the grid
int u = int(threadIdx.x - (_res * floor(double(threadIdx.x / _res))));
int v = int((blockIdx.x * (blockDim.x/(float)_res)) + ceil(double(threadIdx.x / _res)));
// Sign correction - Unsure why this is needed
float sign = 1.0;
if ((u+v) % 2 != 0){
sign = -1.0;
}
// Update the heights of the vertices
float prevX = d_position[(blockIdx.x * blockDim.x) + threadIdx.x].x;
float prevZ = d_position[(blockIdx.x * blockDim.x) + threadIdx.x].z;
float xDisp = _choppiness * (d_chopX[(blockIdx.x * blockDim.x) + threadIdx.x].x /_scale) * sign;
float zDisp = _choppiness * (d_chopZ[(blockIdx.x * blockDim.x) + threadIdx.x].x /_scale) * sign;
float height = ((d_height[(blockIdx.x * blockDim.x) + threadIdx.x].x / _scale) * sign ) / 255.0f;
float newX = prevX +xDisp;
float newZ = prevZ + zDisp;
d_position[(blockIdx.x * blockDim.x) + threadIdx.x].x = newX;
d_position[(blockIdx.x * blockDim.x) + threadIdx.x].y =height;
d_position[(blockIdx.x * blockDim.x) + threadIdx.x].z = newZ;
}
__global__ void calculateNormals(float3* d_position, float3* d_normals, int _res){
float3 norm = make_float3(0.0, 0.0, 0.0);
float3 posL, posR, posD, posU;
/// @todo remove branching conditions
if (((blockIdx.x * blockDim.x) + threadIdx.x) >= 1){
posL = (d_position[((blockIdx.x * blockDim.x) + threadIdx.x) - 1]);
}
else{
posL = (d_position[_res]); // A position on a neighbouring tile
}
if (((blockIdx.x * blockDim.x) + threadIdx.x) <=(_res*_res)-2){
posR = (d_position[((blockIdx.x * blockDim.x) + threadIdx.x) + 1]);
}
else{
posR = d_position[_res*_res - _res]; // A position on a neighbouring tile
}
if (((blockIdx.x * blockDim.x) + threadIdx.x) >= _res){
posU = (d_position[((blockIdx.x * blockDim.x) + threadIdx.x) - _res]);
}
else{
posU = d_position[_res*_res-_res + threadIdx.x];
}
if (((blockIdx.x * blockDim.x) + threadIdx.x) <= (_res*_res)-_res-1){
posD = (d_position[((blockIdx.x * blockDim.x) + threadIdx.x) + _res]);
}
else{
posD = d_position[threadIdx.x];
}
float3 leftVec, rightVec, topVec, bottomVec;
float3 centerVec = d_position[((blockIdx.x * blockDim.x) + threadIdx.x)];
leftVec = posL - centerVec;
leftVec.y *= 100.0;
rightVec = posR - centerVec;
rightVec.y *= 100.0;
topVec = posU - centerVec;
topVec.y *= 100.0;
bottomVec = posD - centerVec;
bottomVec.y *= 100.0;
float3 tmpNorm1 = normalize(cross(leftVec, topVec));
float3 tmpNorm2 = normalize(cross(topVec, rightVec));
float3 tmpNorm3 = normalize(cross(rightVec, bottomVec));
float3 tmpNorm4 = normalize(cross(bottomVec, leftVec));
tmpNorm1.y = fabs(tmpNorm1.y);
tmpNorm2.y = fabs(tmpNorm2.y);
tmpNorm3.y = fabs(tmpNorm3.y);
tmpNorm4.y = fabs(tmpNorm4.y);
norm = normalize((tmpNorm1 + tmpNorm2 + tmpNorm3 + tmpNorm4));
// Update the normals buffer
d_normals[(blockIdx.x * blockDim.x) + threadIdx.x] = norm;
}
// ----------------------------------------------------------------------------------------------------------------------------------------
/// @brief Create x and z displacement in the frequency domain
/// @param d_Ht An OpenGL buffer holding the frequency amplitude field h(k,t)
/// @param d_chopX An OpenGL buffer to store the x displacement in the frequency domain
/// @param d_chopZ An OpenGL buffer to store the z displacement in the frequency domain
/// @param _windSpeed The wind vector whose normalised direction scales the displacement
// ----------------------------------------------------------------------------------------------------------------------------------------
__global__ void choppiness(float2* d_Ht, float2* d_chopX, float2* d_chopZ, float2 _windSpeed){
// k - A position on the grid
float2 k;
k.x = _windSpeed.x;
k.y = _windSpeed.y;
float kLen = sqrt(double(k.x*k.x + k.y*k.y));
float Kx = k.x / kLen;
float Kz = k.y / kLen;
if (kLen == 0.0){
Kx = 0.0;
Kz = 0.0;
}
d_chopX[(blockIdx.x * blockDim.x) + threadIdx.x].x = 0.0;
d_chopX[(blockIdx.x * blockDim.x) + threadIdx.x].y = d_Ht[(blockIdx.x * blockDim.x) + threadIdx.x].y * -Kx;
d_chopZ[(blockIdx.x * blockDim.x) + threadIdx.x].x = 0.0;
d_chopZ[(blockIdx.x * blockDim.x) + threadIdx.x].y = d_Ht[(blockIdx.x * blockDim.x) + threadIdx.x].y * -Kz;
}
// ----------------------------------------------------------------------------------------------------------------------------------------
void updateFrequencyDomain(float2 *d_h0, float2 *d_ht, float _time, int _res){
int numBlocks =( _res * _res )/ 1024;
frequencyDomain<<<numBlocks, 1024>>>(d_h0, d_ht, _time, _res);
}
// ----------------------------------------------------------------------------------------------------------------------------------------
void updateHeight(float3* d_position, float3* d_norms, float2* d_height, float2* d_chopX, float2* d_chopZ, float _choppiness, int _res, float _scale){
int numBlocks =( _res * _res )/ 1024;
height<<<numBlocks, 1024>>>(d_position, d_height, d_chopX, d_chopZ, _choppiness, _res, _scale);
cudaThreadSynchronize();
calculateNormals<<<numBlocks, 1024>>>(d_position, d_norms, _res);
}
// ----------------------------------------------------------------------------------------------------------------------------------------
void addChoppiness(float2* d_Heights, float2* d_chopX, float2* d_chopZ, int _res, float2 _windDirection){
int numBlocks =( _res * _res )/ 1024;
choppiness<<<numBlocks, 1024>>>(d_Heights, d_chopX, d_chopZ, _windDirection);
}
// ----------------------------------------------------------------------------------------------------------------------------------------
|
581aaca799d36b8827e39086c370be539a7ba072.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************************************************
* Matrix * Matrix Multiplication
*
* With an input of n = 512, the GPU is faster than the CPU
************************************************************************************/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h> // time(), clock(), CLOCKS_PER_SEC
void random_ints(float *a, int n);
void print_matrix(float *matrix, int n);
void matrix_mul_cpu(float *matA, float *matB, float *matC, int n);
void matrix_mul_gpu(float *matA, float *matB, float *matC, int n);
__global__ void matrix_mul_gpu_element(float *gpuA, float *gpuB, float *gpuC, int n);
#define N 1024
int main(int argc, char* argv[]) {
if (argc < 2) {
printf("Please provide the matrix size parameter n.\n");
return 0;
}
srand(time(NULL)); // get your seed!
// Get n, m from command line
int n = atoi(argv[1]);
// Fill matrix with random numbers
float *matA = (float *) malloc(n * n * sizeof(float));
random_ints(matA, n*n);
float *matB = (float *) malloc(n * n * sizeof(float));
random_ints(matB, n*n);
// Store results in matrix C
float *matC = (float *) malloc(n * n * sizeof(float));
// It's CPU time!
// printf("\n### CPU time! ###\n");
// clock_t cputime = clock();
// matrix_mul_cpu(matA, matB, matC, n);
// printf("Matrix C [0]: \t\t%f\n", matC[0]);
// printf("Matrix C [%d]: \t%f\n", n-1, matC[n-1]);
// printf("Time: \t\t\t%f s\n", ((double)clock() - cputime) / CLOCKS_PER_SEC);
//
// printf("\nResetting Matrix C... ");
// random_ints(matC, n*n);
// printf("Done.\n");
// GPU
printf("\n### Now the GPU... ###\n");
clock_t gputime = clock();
matrix_mul_gpu(matA, matB, matC, n);
printf("Matrix C [0]: \t\t%f\n", matC[0]);
printf("Matrix C [%d]: \t%f\n", n-1, matC[n-1]);
printf("Time: \t\t\t%f s\n", ((double)clock() - gputime) / CLOCKS_PER_SEC);
free(matA);
free(matB);
free(matC);
return 0;
}
// GPU Version
void matrix_mul_gpu(float *matA, float *matB, float *matC, int n) {
unsigned int threads = 32;
unsigned int blocks = (n + (threads - 1)) / threads;
dim3 BlocksPerGrid(blocks, blocks);
dim3 ThreadsPerBlock(threads, threads);
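// e.g. for n = 1024: blocks = (1024 + 31) / 32 = 32, so the kernel launch below covers a
// 32x32 grid of 32x32-thread blocks, i.e. one thread per matrix element.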
int size = n * n * sizeof(float);
float *gpuA, *gpuB, *gpuC;
// Allocate and load matrix A and B to the gpu
hipMalloc(&gpuA, size);
hipMemcpy(gpuA, matA, size, hipMemcpyHostToDevice);
hipMalloc(&gpuB, size);
hipMemcpy(gpuB, matB, size, hipMemcpyHostToDevice);
hipMalloc(&gpuC, size);
// Launch the device in one block with n Threads
// matrix_mul_gpu_element<<<dim3(1,1,1), dim3(n,1,1)>>>(gpuA, gpuB, gpuC, n);
hipLaunchKernelGGL(( matrix_mul_gpu_element), dim3(BlocksPerGrid), dim3(ThreadsPerBlock), 0, 0, gpuA, gpuB, gpuC, n);
// Get result from device
hipMemcpy(matC, gpuC, size, hipMemcpyDeviceToHost);
hipFree(gpuA);
hipFree(gpuB);
hipFree(gpuC);
}
__global__ void matrix_mul_gpu_element(float *gpuA, float *gpuB, float *gpuC, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= n || col >= n) return; // the grid is rounded up, so edge threads can fall outside the matrix
float a, b, sum = 0;
for (int k = 0; k < n; ++k) {
a = gpuA[k + row*n];
b = gpuB[col + k*n];
sum += a * b;
}
gpuC[col + row*n] = sum;
}
// CPU Version
void matrix_mul_cpu(float *matA, float *matB, float *matC, int n) {
float a, b, sum;
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
sum = 0;
for (int k = 0; k < n; ++k) {
a = matA[k + i*n];
b = matB[j + k*n];
sum += a * b;
}
matC[j + i*n] = sum;
}
}
}
//////////////////////////////////
/* Functions for initialization */
//////////////////////////////////
// CPU function to generate a vector of random integers
void random_ints(float* a, int n) {
for (int i = 0; i < n; i++)
a[i] = rand() % 10000; // random number between 0 and 9999
}
void print_matrix(float* matrix, int n) {
for (int i = 0; i < n*n; i++) {
if (i % n == 0) printf("\n");
printf("%f ", matrix[i]);
}
}
| 581aaca799d36b8827e39086c370be539a7ba072.cu | /************************************************************************************
* Matrix * Matrix Multiplication
*
* With an input of n = 512, the GPU is faster than the CPU
************************************************************************************/
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h> // time(), clock(), CLOCKS_PER_SEC
void random_ints(float *a, int n);
void print_matrix(float *matrix, int n);
void matrix_mul_cpu(float *matA, float *matB, float *matC, int n);
void matrix_mul_gpu(float *matA, float *matB, float *matC, int n);
__global__ void matrix_mul_gpu_element(float *gpuA, float *gpuB, float *gpuC, int n);
#define N 1024
int main(int argc, char* argv[]) {
if (argc < 2) {
printf("Please provide the matrix size parameter n.\n");
return 0;
}
srand(time(NULL)); // get your seed!
// Get n, m from command line
int n = atoi(argv[1]);
// Fill matrix with random numbers
float *matA = (float *) malloc(n * n * sizeof(float));
random_ints(matA, n*n);
float *matB = (float *) malloc(n * n * sizeof(float));
random_ints(matB, n*n);
// Store results in matrix C
float *matC = (float *) malloc(n * n * sizeof(float));
// It's CPU time!
// printf("\n### CPU time! ###\n");
// clock_t cputime = clock();
// matrix_mul_cpu(matA, matB, matC, n);
// printf("Matrix C [0]: \t\t%f\n", matC[0]);
// printf("Matrix C [%d]: \t%f\n", n-1, matC[n-1]);
// printf("Time: \t\t\t%f s\n", ((double)clock() - cputime) / CLOCKS_PER_SEC);
//
// printf("\nResetting Matrix C... ");
// random_ints(matC, n*n);
// printf("Done.\n");
// GPU
printf("\n### Now the GPU... ###\n");
clock_t gputime = clock();
matrix_mul_gpu(matA, matB, matC, n);
printf("Matrix C [0]: \t\t%f\n", matC[0]);
printf("Matrix C [%d]: \t%f\n", n-1, matC[n-1]);
printf("Time: \t\t\t%f s\n", ((double)clock() - gputime) / CLOCKS_PER_SEC);
free(matA);
free(matB);
free(matC);
return 0;
}
// GPU Version
void matrix_mul_gpu(float *matA, float *matB, float *matC, int n) {
unsigned int threads = 32;
unsigned int blocks = (n + (threads - 1)) / threads;
dim3 BlocksPerGrid(blocks, blocks);
dim3 ThreadsPerBlock(threads, threads);
int size = n * n * sizeof(float);
float *gpuA, *gpuB, *gpuC;
// Allocate and load matrix A and B to the gpu
cudaMalloc(&gpuA, size);
cudaMemcpy(gpuA, matA, size, cudaMemcpyHostToDevice);
cudaMalloc(&gpuB, size);
cudaMemcpy(gpuB, matB, size, cudaMemcpyHostToDevice);
cudaMalloc(&gpuC, size);
// Launch the device in one block with n Threads
// matrix_mul_gpu_element<<<dim3(1,1,1), dim3(n,1,1)>>>(gpuA, gpuB, gpuC, n);
matrix_mul_gpu_element<<<BlocksPerGrid, ThreadsPerBlock>>>(gpuA, gpuB, gpuC, n);
// Get result from device
cudaMemcpy(matC, gpuC, size, cudaMemcpyDeviceToHost);
cudaFree(gpuA);
cudaFree(gpuB);
cudaFree(gpuC);
}
__global__ void matrix_mul_gpu_element(float *gpuA, float *gpuB, float *gpuC, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= n || col >= n) return; // the grid is rounded up, so edge threads can fall outside the matrix
float a, b, sum = 0;
for (int k = 0; k < n; ++k) {
a = gpuA[k + row*n];
b = gpuB[col + k*n];
sum += a * b;
}
gpuC[col + row*n] = sum;
}
// CPU Version
void matrix_mul_cpu(float *matA, float *matB, float *matC, int n) {
float a, b, sum;
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
sum = 0;
for (int k = 0; k < n; ++k) {
a = matA[k + i*n];
b = matB[j + k*n];
sum += a * b;
}
matC[j + i*n] = sum;
}
}
}
//////////////////////////////////
/* Functions for initialization */
//////////////////////////////////
// CPU function to generate a vector of random integers
void random_ints(float* a, int n) {
for (int i = 0; i < n; i++)
a[i] = rand() % 10000; // random number between 0 and 9999
}
void print_matrix(float* matrix, int n) {
for (int i = 0; i < n*n; i++) {
if (i % n == 0) printf("\n");
printf("%f ", matrix[i]);
}
}
|
96469d16a77566a0a37bf18a44793a1ada8503f1.hip | // !!! This is a file automatically generated by hipify!!!
// INCLUDES
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h> /* errno */
#include <string.h> /* strerror */
#include <math.h> // ceil
#include <time.h> // CLOCKS_PER_SEC
// CUDA
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// GIS
#include "/home/giuliano/git/cuda/weatherprog-cudac/includes/gis.h"
/**
* PARS
*/
#define BLOCK_DIM_small 64
#define BLOCK_DIM 256
static const unsigned int threads = 512;
bool print_intermediate_arrays = false;
const char *BASE_PATH = "/home/giuliano/git/cuda/reduction";
/*
* kernel labels
*/
const char *kern_0 = "filter_roi";
const char *kern_1 = "imperviousness_change_histc_sh_4" ;
const char *kern_2 = "imperviousness_change" ;
char buffer[255];
/*
* DEFINE I/O files
*/
// I/
//const char *FIL_ROI = "/home/giuliano/git/cuda/reduction/data/ROI.tif";
//const char *FIL_BIN1 = "/home/giuliano/git/cuda/reduction/data/BIN1.tif";
//const char *FIL_BIN2 = "/home/giuliano/git/cuda/reduction/data/BIN2.tif";
const char *FIL_ROI = "/media/DATI/db-backup/ssgci-data/testing/ssgci_roi.tif";
const char *FIL_BIN1 = "/media/DATI/db-backup/ssgci-data/testing/ssgci_bin.tif";
const char *FIL_BIN2 = "/media/DATI/db-backup/ssgci-data/testing/ssgci_bin2.tif";
// /O
const char *FIL_LTAKE_grid = "/home/giuliano/git/cuda/reduction/data/LTAKE_map.tif";
const char *FIL_LTAKE_count= "/home/giuliano/git/cuda/reduction/data/LTAKE_count.txt";
/* +++++DEFINEs+++++ */
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
/* +++++DEFINEs+++++ */
template <typename T>
void write_mat_T( const T *MAT, unsigned int nr, unsigned int nc, const char *filename )
{
unsigned int rr,cc;
FILE *fid ;
fid = fopen(filename,"w");
if (fid == NULL) { printf("Error opening file %s!\n",filename); exit(1); }
for(rr=0;rr<nr;rr++)
{
for(cc=0;cc<nc;cc++)
{
fprintf(fid, "%8d ",MAT[rr*nc+cc]);
}
fprintf(fid,"\n");
}
fclose(fid);
}
void write_mat_int( const int *MAT, unsigned int nr, unsigned int nc, const char *filename )
{
unsigned int rr,cc;
FILE *fid ;
fid = fopen(filename,"w");
if (fid == NULL) { printf("Error opening file %s!\n",filename); exit(1); }
for(rr=0;rr<nr;rr++)
{
for(cc=0;cc<nc;cc++)
{
fprintf(fid, "%8d ",MAT[rr*nc+cc]);
}
fprintf(fid,"\n");
}
fclose(fid);
}
__global__ void
filter_roi( unsigned char *BIN, const unsigned char *ROI, unsigned int map_len){
unsigned int tid = threadIdx.x;
unsigned int bix = blockIdx.x;
unsigned int bdx = blockDim.x;
unsigned int gdx = gridDim.x;
unsigned int i = bix*bdx + tid;
unsigned int gridSize = bdx*gdx;
while (i < map_len)
{
//BIN[i] *= ROI[i];
BIN[i] = (unsigned char) ((int)BIN[i] * (int)ROI[i]);
i += gridSize;
}
}
__global__ void imperviousness_change(
const unsigned char *dev_BIN1, const unsigned char *dev_BIN2,
unsigned int WIDTH, unsigned int HEIGHT, int *dev_LTAKE_map
)
{
unsigned long int x = threadIdx.x;
unsigned long int bdx = blockDim.x;
unsigned long int bix = blockIdx.x;
unsigned long int tix = bdx*bix + x; // offset
if( tix < WIDTH*HEIGHT ){
dev_LTAKE_map[tix] = (int)((int)dev_BIN2[tix] - (int)dev_BIN1[tix]);
}
}
__global__ void imperviousness_change_double(
const unsigned char *dev_BIN1, const unsigned char *dev_BIN2,
unsigned int WIDTH, unsigned int HEIGHT, double *dev_LTAKE_map
)
{
unsigned long int x = threadIdx.x;
unsigned long int bdx = blockDim.x;
unsigned long int bix = blockIdx.x;
unsigned long int tix = bdx*bix + x; // offset
if( tix < WIDTH*HEIGHT ){
dev_LTAKE_map[tix] = (double) ( (double)dev_BIN2[tix] - (double)dev_BIN1[tix] );
}
}
__global__ void imperviousness_change_char(
const unsigned char *dev_BIN1, const unsigned char *dev_BIN2,
unsigned int WIDTH, unsigned int HEIGHT, char *dev_LTAKE_map
)
{
unsigned long int x = threadIdx.x;
unsigned long int bdx = blockDim.x;
unsigned long int bix = blockIdx.x;
unsigned long int tix = bdx*bix + x; // offset
if( tix < WIDTH*HEIGHT ){
dev_LTAKE_map[tix] = dev_BIN2[tix] - dev_BIN1[tix];
}
}
__global__ void imperviousness_change_large(
const unsigned char *dev_BIN1, const unsigned char *dev_BIN2,
unsigned int WIDTH, unsigned int HEIGHT, int *dev_LTAKE_map,
int mapel_per_thread
)
{
unsigned long int x = threadIdx.x;
unsigned long int bdx = blockDim.x;
unsigned long int bix = blockIdx.x;
//unsigned long int gdx = gridDim.x;
unsigned long int tid = bdx*bix + x; // offset
unsigned long int tix = tid * mapel_per_thread; // offset
//extern __shared__ int sh_diff[];
if( bdx*bix*mapel_per_thread < WIDTH*HEIGHT ){
//sh_diff[tid] = 0; syncthreads();
for(long int ii=0;ii<mapel_per_thread;ii++){
if( tix+ii < WIDTH*HEIGHT ){
//sh_diff[tid] = (int)((int)dev_BIN2[tix+ii] - (int)dev_BIN1[tix+ii]);
dev_LTAKE_map[tix+ii] = (int)((int)dev_BIN2[tix+ii] - (int)dev_BIN1[tix+ii]);
} //__syncthreads();
//dev_LTAKE_map[tix+ii] = sh_diff[tid];
}
}
}
__global__ void imperviousness_change_histc_sh_4(
const unsigned char *dev_BIN1, const unsigned char *dev_BIN2,
unsigned int WIDTH, unsigned int HEIGHT,
int *dev_LTAKE_count, int mapel_per_thread
)
{
/*
INPUTS
dev_BIN1: Imperviousness of OLDER year.
dev_BIN2: Imperviousness of NEWER year.
OUTPUTS
dev_LTAKE_count: 2x2 table with counts about the 4 possible combinations.
dev_LTAKE_map: map storing the difference (dev_BIN2-dev_BIN1).
The following 4 combinations are possible computing the difference:
---------------------------
(N) (BIN2,BIN1) --> (LTAKE)
---------------------------
(1) (0,0) --> +0 ---> nothing changed in rural pixels
(2) (0,1) --> -1 ---> increase of rural pixels
(3) (1,0) --> +1 ---> increase of urban pixels
(4) (1,1) --> -0 ---> nothing changed in urban pixels
---------------------------
where values can be { 0:rural; 1:urban }.
DESCRIPTION
This kernel function counts the number of pixels for each LTAKE type (i.e. {+0,-1,+1,-0}).
It assumes that:
> the number of outcomes LTAKE can assume is equal to FOUR=4, as also stated in kernel name "..._4"
> each thread within a block is in charge of mapel_per_thread pixels in order to allocate
a number of blocks equal to the number of available SMs.
> the number of threads per block is equal to 256(=bdx).
I have to call this kernel using the following resources:
> block: (bdx*mapel_per_thread, 1, 1)
> sh_mem: bdx*4*sizeof(int)
*/
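// Launch sketch (mirroring how main() below actually invokes this kernel, with
// bdx = BLOCK_DIM = 256 threads per block and sh_mem = bdx*4*sizeof(int)):
// hipLaunchKernelGGL(( imperviousness_change_histc_sh_4), dim3(gdx),dim3(bdx),sh_mem, 0,
// dev_BIN1, dev_BIN2, WIDTH, HEIGHT, dev_LTAKE_count, mapel_per_thread );
// where gdx = ceil( map_len / (double)(mapel_per_thread*bdx) ).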
unsigned int x = threadIdx.x;
unsigned int bdx = blockDim.x;
unsigned int bix = blockIdx.x;
unsigned int tid = (bdx*bix + x); // global thread index
unsigned int tix = tid*mapel_per_thread; // offset, considering mapel_per_thread pixels per thread
const int num_bins = 4;
const int nclasses = num_bins/2;
extern __shared__ int sh_sum[];
int loc_sum[num_bins];
unsigned int ii, o;
// if( tix < (WIDTH*HEIGHT) ){ // - WIDTH*HEIGHT%mapel_per_thread+1
// initialise at zero (IMPORTANT!!!):
for(ii=0;ii<num_bins;ii++){
loc_sum[ii] = 0;
sh_sum[x*num_bins+ii] = 0;
}
syncthreads();
// compute difference and store the count in local memory
// (each thread is in charge of mapel_per_thread map elements):
for(ii=0;ii<mapel_per_thread;ii++){
if(tix+ii<WIDTH*HEIGHT) loc_sum[dev_BIN2[tix+ii]*nclasses+dev_BIN1[tix+ii]] += 1;
}
// copy from local to shared memory:
for(ii=0;ii<num_bins;ii++) sh_sum[ii*bdx+x] = loc_sum[ii];
syncthreads();
// reduce the bins one at a time (the commented-out variant below was meant to reduce two bins per iteration to maximise warp utilisation)
for(ii=0;ii<num_bins;ii++){
o = ii*bdx;
if(x<128) sh_sum[x+o] += sh_sum[x+o + 128]; syncthreads();
if(x<64) sh_sum[x+o] += sh_sum[x+o + 64]; syncthreads();
if(x<32) sh_sum[x+o] += sh_sum[x+o + 32]; syncthreads();
if(x<16) sh_sum[x+o] += sh_sum[x+o + 16]; syncthreads();
if(x<8) sh_sum[x+o] += sh_sum[x+o + 8]; syncthreads();
if(x<4) sh_sum[x+o] += sh_sum[x+o + 4]; syncthreads();
if(x<2) sh_sum[x+o] += sh_sum[x+o + 2]; syncthreads();
if(x<1) sh_sum[x+o] += sh_sum[x+o + 1]; syncthreads();
// each bix writes his count value:
if(x==0) atomicAdd( &dev_LTAKE_count[ii], sh_sum[x+o] );
/* if(x>=bdx/2){
o = ii*bdx*2 + bdx/2;
sh_sum[x+o] += sh_sum[x+o + 128]; syncthreads();
if(x<bdx/2+64) sh_sum[x+o] += sh_sum[x+o + 64]; syncthreads();
if(x<bdx/2+32) sh_sum[x+o] += sh_sum[x+o + 32]; syncthreads();
if(x<bdx/2+16) sh_sum[x+o] += sh_sum[x+o + 16]; syncthreads();
if(x<bdx/2+8) sh_sum[x+o] += sh_sum[x+o + 8]; syncthreads();
if(x<bdx/2+4) sh_sum[x+o] += sh_sum[x+o + 4]; syncthreads();
if(x<bdx/2+2) sh_sum[x+o] += sh_sum[x+o + 2]; syncthreads();
if(x<bdx/2+1) sh_sum[x+o] += sh_sum[x+o + 1]; syncthreads();
// each bix writes his count value:
if(x<bdx/2+1) atomicAdd( &dev_LTAKE_count[1+ii*2], sh_sum[x+o] );
}
*/ }
//}
}
int main( int argc, char **argv ){
/*
* NOTES:
*
*/
/*
* ESTABLISH CONTEXT
*/
GDALAllRegister(); // Establish GDAL context.
hipFree(0); // Establish CUDA context.
metadata MDbin,MDroi,MDdouble;
unsigned int map_len;
double *dev_LTAKE_map,*host_LTAKE_map;
//int *dev_LTAKE_map,*host_LTAKE_map;
//char *dev_LTAKE_map,*host_LTAKE_map;
int *dev_LTAKE_count,*host_LTAKE_count;
//double *dev_LTAKE_count,*host_LTAKE_count;
unsigned char *dev_BIN1, *dev_BIN2, *dev_ROI;
clock_t start_t,end_t;
unsigned int elapsed_time = 0;
hipDeviceProp_t devProp;
unsigned int gpuDev=0;
// count the number of kernels that must print their output:
unsigned int count_print = 0;
bool use_large = false;
int mapel_per_thread_2 = 0;
/*
* LOAD METADATA & DATA
*/
MDbin = geotiffinfo( FIL_BIN1, 1 );
MDroi = geotiffinfo( FIL_ROI, 1 );
// set metadata to eventually print arrays after any CUDA kernel:
/* MDint = MDbin;
MDint.pixel_type = GDT_Int32;
*/ MDdouble = MDbin;
MDdouble.pixel_type = GDT_Float64;
// Set size of all arrays which come into play:
map_len = MDbin.width*MDbin.heigth;
size_t sizeChar = map_len*sizeof( unsigned char );
size_t sizeInt = 4*sizeof( int );
size_t sizeDouble = map_len*sizeof( double );
// initialize arrays:
unsigned char *BIN1 = (unsigned char *) CPLMalloc( sizeChar );
unsigned char *BIN2 = (unsigned char *) CPLMalloc( sizeChar );
unsigned char *ROI = (unsigned char *) CPLMalloc( sizeChar );
// load ROI:
printf("Importing...\t%s\n",FIL_ROI);
geotiffread( FIL_ROI, MDroi, &ROI[0] );
// load BIN:
printf("Importing...\t%s\n",FIL_BIN1);
geotiffread( FIL_BIN1, MDbin, &BIN1[0] );
printf("Importing...\t%s\n",FIL_BIN2);
geotiffread( FIL_BIN2, MDbin, &BIN2[0] );
/*
* INITIALIZE CPU & GPU ARRAYS
*/
// initialize grids on CPU MEM:
CUDA_CHECK_RETURN( hipHostMalloc( (void**)&host_LTAKE_map, sizeDouble) );
//CUDA_CHECK_RETURN( hipHostMalloc( (void**)&host_LTAKE_map, sizeInt) );
//CUDA_CHECK_RETURN( hipHostMalloc( (void**)&host_LTAKE_map, sizeChar) );
//CUDA_CHECK_RETURN( hipHostMalloc( (void**)&host_LTAKE_count, 4*sizeof(double)) );
CUDA_CHECK_RETURN( hipHostMalloc( (void**)&host_LTAKE_count, sizeInt) );
// initialize grids on GPU MEM:
CUDA_CHECK_RETURN( hipMalloc( (void **)&dev_BIN1, sizeChar) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&dev_BIN2, sizeChar) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&dev_ROI, sizeChar) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&dev_LTAKE_map, sizeDouble) );
//CUDA_CHECK_RETURN( hipMalloc( (void **)&dev_LTAKE_map, sizeInt) );
//CUDA_CHECK_RETURN( hipMalloc( (void **)&dev_LTAKE_map, sizeChar) );
//CUDA_CHECK_RETURN( hipMalloc( (void **)&dev_LTAKE_count, 4*sizeof(double)) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&dev_LTAKE_count, sizeInt) );
// memset:
CUDA_CHECK_RETURN( hipMemset(dev_LTAKE_count, 0, sizeInt) );
//CUDA_CHECK_RETURN( hipMemset(dev_LTAKE_count, 0, 4*sizeof(double)) );
// H2D:
CUDA_CHECK_RETURN( hipMemcpy(dev_BIN1, BIN1, sizeChar, hipMemcpyHostToDevice) );
CUDA_CHECK_RETURN( hipMemcpy(dev_BIN2, BIN2, sizeChar, hipMemcpyHostToDevice) );
CUDA_CHECK_RETURN( hipMemcpy(dev_ROI, ROI, sizeChar, hipMemcpyHostToDevice) );
/*
* QUERY CURRENT GPU PROPERTIES
*/
CUDA_CHECK_RETURN( hipSetDevice(gpuDev) );
hipGetDeviceProperties(&devProp, gpuDev);
int N_sm = devProp.multiProcessorCount;
int max_threads_per_SM = devProp.maxThreadsPerMultiProcessor;
// int max_shmem_per_block = devProp.sharedMemPerBlock;
/*
* KERNELS GEOMETRY
* NOTE: use ceil() instead of the "%" operator!!!
*/
unsigned int bdx, gdx, num_blocks_per_SM, mapel_per_thread, Nblks_per_grid;
/* if(map_len/BLOCK_DIM < N_sm){
bdx = BLOCK_DIM_small;
}else {
bdx = BLOCK_DIM;
}
*/
bdx = BLOCK_DIM;
num_blocks_per_SM = max_threads_per_SM / bdx;
mapel_per_thread = (unsigned int)ceil( (double)map_len / (double)((bdx*1)*N_sm*num_blocks_per_SM) );
gdx = (unsigned int)ceil( (double)map_len / (double)( mapel_per_thread*(bdx*1) ) );
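// Worked example with hypothetical device figures: for N_sm = 8 and
// max_threads_per_SM = 1536, num_blocks_per_SM = 1536/256 = 6, i.e. 8*6*256 = 12288
// resident threads; a 5000x5000 map then gives mapel_per_thread = ceil(25e6/12288) = 2035
// and gdx = ceil(25e6/(2035*256)) = 48 blocks.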
Nblks_per_grid = N_sm* (max_threads_per_SM /threads);
dim3 block( bdx,1,1 );
dim3 grid ( gdx,1,1 );
dim3 dimBlock( threads, 1, 1 );
dim3 dimGrid( Nblks_per_grid, 1, 1 );
int sh_mem = (bdx*4)*(sizeof(int));
//double sh_mem_double = (bdx*4)*(sizeof(double));
unsigned int gdx_2 = (unsigned int)ceil( (double)map_len / (double)( (bdx*4) ) );
dim3 block_2( bdx*4,1,1 );
dim3 grid_2 ( gdx_2,1,1 );
if (gdx_2>devProp.maxGridSize[0]) {
/* printf( "Error: cannot allocate gridsize=%d (limit is %d)!\n",gdx_2,devProp.maxGridSize[0] );
exit(EXIT_FAILURE);
*/ use_large = true;
mapel_per_thread_2 = 32*4; // warp size times 4
gdx_2 = (unsigned int)ceil( (double)map_len / (double)( (bdx*2*mapel_per_thread_2) ) );
dim3 block_2( bdx*2,1,1 );
dim3 grid_2 ( gdx_2,1,1 );
}
/* KERNELS INVOCATION
*
* ********************************
* -0- filter_roi twice(BIN1,BIN2)
* -1- imperviousness_change_sh_4
* -2- imperviousness_change
* ********************************
*
* Note that imperviousness_change_large does not work!!
*/
printf("\n\n");
// ***-0-***
start_t = clock();
hipLaunchKernelGGL(( filter_roi), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_BIN1,dev_ROI,map_len);
hipLaunchKernelGGL(( filter_roi), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_BIN2,dev_ROI,map_len);
CUDA_CHECK_RETURN( hipDeviceSynchronize() );
end_t = clock();
printf(" -%d- %34s\t%6d [msec]\n",++count_print,kern_0,(int)( (double)(end_t - start_t ) / (double)CLOCKS_PER_SEC * 1000 ));
if (print_intermediate_arrays){
CUDA_CHECK_RETURN( hipMemcpy(host_LTAKE_count,dev_LTAKE_count, (size_t)4*sizeof(int),hipMemcpyDeviceToHost) );
//CUDA_CHECK_RETURN( hipMemcpy(host_LTAKE_count,dev_LTAKE_count, (size_t)4*sizeof(double),hipMemcpyDeviceToHost) );
sprintf(buffer,"%s/data/-%d-%s.txt",BASE_PATH,count_print,kern_0);
write_mat_int( host_LTAKE_count, 4, 1, buffer );
}
elapsed_time += (int)( (double)(end_t - start_t ) / (double)CLOCKS_PER_SEC * 1000 );// elapsed time [ms]:
// ***-1-***
start_t = clock();
hipLaunchKernelGGL(( imperviousness_change_histc_sh_4), dim3(grid),dim3(block),sh_mem, 0, dev_BIN1, dev_BIN2, MDbin.width, MDbin.heigth,
dev_LTAKE_count, mapel_per_thread );
/* imperviousness_change_histc_sh_4_double<<<grid,block,sh_mem_double>>>(
dev_BIN1, dev_BIN2, MDbin.width, MDbin.heigth,
dev_LTAKE_count, mapel_per_thread );
*/ CUDA_CHECK_RETURN( hipDeviceSynchronize() );
end_t = clock();
printf(" -%d- %34s\t%6d [msec]\n",++count_print,kern_1,(int)( (double)(end_t - start_t ) / (double)CLOCKS_PER_SEC * 1000 ));
if (print_intermediate_arrays){
CUDA_CHECK_RETURN( hipMemcpy(host_LTAKE_count,dev_LTAKE_count, (size_t)sizeInt,hipMemcpyDeviceToHost) );
//CUDA_CHECK_RETURN( hipMemcpy(host_LTAKE_count,dev_LTAKE_count, (size_t)4*sizeof(double),hipMemcpyDeviceToHost) );
sprintf(buffer,"%s/data/-%d-%s.txt",BASE_PATH,count_print,kern_1);
write_mat_int( host_LTAKE_count, 4, 1, buffer );
}
elapsed_time += (int)( (double)(end_t - start_t ) / (double)CLOCKS_PER_SEC * 1000 );// elapsed time [ms]:
// ***-2-***
start_t = clock();
if(use_large!=true){
hipLaunchKernelGGL(( imperviousness_change_double), dim3(grid_2),dim3(block_2), 0, 0, dev_BIN1, dev_BIN2, MDbin.width, MDbin.heigth, dev_LTAKE_map );
//imperviousness_change<<<grid_2,block_2>>>( dev_BIN1, dev_BIN2, MDbin.width, MDbin.heigth, dev_LTAKE_map );
//imperviousness_change_char<<<grid_2,block_2>>>( dev_BIN1, dev_BIN2, MDbin.width, MDbin.heigth, dev_LTAKE_map );
}else{
printf("Error: imperviousness_change_large does not work yet!");
exit(EXIT_FAILURE);
/* imperviousness_change_large<<<grid_2,block_2>>>(dev_BIN1, dev_BIN2, MDbin.width, MDbin.heigth, dev_LTAKE_map, mapel_per_thread_2 );
kern_2 = "imperviousness_change_large" ;
*/ }
CUDA_CHECK_RETURN( hipDeviceSynchronize() );
end_t = clock();
printf(" -%d- %34s\t%6d [msec]\n",++count_print,kern_2,(int)( (double)(end_t - start_t ) / (double)CLOCKS_PER_SEC * 1000 ));
if (print_intermediate_arrays){
CUDA_CHECK_RETURN( hipMemcpy(host_LTAKE_map,dev_LTAKE_map, (size_t)sizeDouble,hipMemcpyDeviceToHost) );
//CUDA_CHECK_RETURN( hipMemcpy(host_LTAKE_map,dev_LTAKE_map, (size_t)sizeInt,hipMemcpyDeviceToHost) );
//CUDA_CHECK_RETURN( hipMemcpy(host_LTAKE_map,dev_LTAKE_map, (size_t)sizeChar,hipMemcpyDeviceToHost) );
sprintf(buffer,"%s/data/-%d-%s.tif",BASE_PATH,count_print,kern_2);
geotiffwrite( FIL_BIN1, buffer, MDdouble, host_LTAKE_map );
//geotiffwrite( FIL_BIN1, buffer, MDint, host_LTAKE_map );
//geotiffwrite( FIL_BIN1, buffer, MDbin, host_LTAKE_map );
}
elapsed_time += (int)( (double)(end_t - start_t ) / (double)CLOCKS_PER_SEC * 1000 );// elapsed time [ms]:
printf("________________________________________________________________\n");
printf("%40s\t%6d [msec]\n", "Total time:",elapsed_time );
CUDA_CHECK_RETURN( hipMemcpy(host_LTAKE_map,dev_LTAKE_map, (size_t)sizeDouble,hipMemcpyDeviceToHost) );
//CUDA_CHECK_RETURN( hipMemcpy(host_LTAKE_map,dev_LTAKE_map, (size_t)sizeInt,hipMemcpyDeviceToHost) );
//CUDA_CHECK_RETURN( hipMemcpy(host_LTAKE_map,dev_LTAKE_map, (size_t)sizeChar,hipMemcpyDeviceToHost) );
CUDA_CHECK_RETURN( hipMemcpy(host_LTAKE_count,dev_LTAKE_count, (size_t)sizeInt,hipMemcpyDeviceToHost) );
//CUDA_CHECK_RETURN( hipMemcpy(host_LTAKE_count,dev_LTAKE_count, (size_t)4*sizeof(double),hipMemcpyDeviceToHost) );
// save on HDD
geotiffwrite( FIL_BIN1, FIL_LTAKE_grid, MDdouble, host_LTAKE_map );
//geotiffwrite( FIL_BIN1, FIL_LTAKE_grid, MDint, host_LTAKE_map );
//geotiffwrite( FIL_BIN1, FIL_LTAKE_grid, MDbin, host_LTAKE_map );
write_mat_T( host_LTAKE_count, 4, 1, FIL_LTAKE_count );
// CUDA free:
hipFree( dev_BIN1 );
hipFree( dev_BIN2 );
hipFree( dev_LTAKE_map );
hipFree( dev_LTAKE_count );
hipFree( dev_ROI );
CPLFree( BIN1 ); // allocated with CPLMalloc, so release via the CPL allocator
CPLFree( BIN2 );
hipHostFree( host_LTAKE_map ); // allocated with hipHostMalloc
hipHostFree( host_LTAKE_count );
CPLFree( ROI );
// Destroy context
CUDA_CHECK_RETURN( hipDeviceReset() );
printf("\n\n\nFinished!!\n");
return 0;// elapsed_time
}
| 96469d16a77566a0a37bf18a44793a1ada8503f1.cu | // INCLUDES
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h> /* errno */
#include <string.h> /* strerror */
#include <math.h> // ceil
#include <time.h> // CLOCKS_PER_SEC
// CUDA
#include <cuda.h>
#include <cuda_runtime.h>
// GIS
#include "/home/giuliano/git/cuda/weatherprog-cudac/includes/gis.h"
/**
* PARS
*/
#define BLOCK_DIM_small 64
#define BLOCK_DIM 256
static const unsigned int threads = 512;
bool print_intermediate_arrays = false;
const char *BASE_PATH = "/home/giuliano/git/cuda/reduction";
/*
* kernel labels
*/
const char *kern_0 = "filter_roi";
const char *kern_1 = "imperviousness_change_histc_sh_4" ;
const char *kern_2 = "imperviousness_change" ;
char buffer[255];
/*
* DEFINE I/O files
*/
// I/–
//const char *FIL_ROI = "/home/giuliano/git/cuda/reduction/data/ROI.tif";
//const char *FIL_BIN1 = "/home/giuliano/git/cuda/reduction/data/BIN1.tif";
//const char *FIL_BIN2 = "/home/giuliano/git/cuda/reduction/data/BIN2.tif";
const char *FIL_ROI = "/media/DATI/db-backup/ssgci-data/testing/ssgci_roi.tif";
const char *FIL_BIN1 = "/media/DATI/db-backup/ssgci-data/testing/ssgci_bin.tif";
const char *FIL_BIN2 = "/media/DATI/db-backup/ssgci-data/testing/ssgci_bin2.tif";
// –/O
const char *FIL_LTAKE_grid = "/home/giuliano/git/cuda/reduction/data/LTAKE_map.tif";
const char *FIL_LTAKE_count= "/home/giuliano/git/cuda/reduction/data/LTAKE_count.txt";
/* +++++DEFINEs+++++ */
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
/* +++++DEFINEs+++++ */
template <typename T>
void write_mat_T( const T *MAT, unsigned int nr, unsigned int nc, const char *filename )
{
unsigned int rr,cc;
FILE *fid ;
fid = fopen(filename,"w");
if (fid == NULL) { printf("Error opening file %s!\n",filename); exit(1); }
for(rr=0;rr<nr;rr++)
{
for(cc=0;cc<nc;cc++)
{
fprintf(fid, "%8d ",MAT[rr*nc+cc]);
}
fprintf(fid,"\n");
}
fclose(fid);
}
void write_mat_int( const int *MAT, unsigned int nr, unsigned int nc, const char *filename )
{
unsigned int rr,cc;
FILE *fid ;
fid = fopen(filename,"w");
if (fid == NULL) { printf("Error opening file %s!\n",filename); exit(1); }
for(rr=0;rr<nr;rr++)
{
for(cc=0;cc<nc;cc++)
{
fprintf(fid, "%8d ",MAT[rr*nc+cc]);
}
fprintf(fid,"\n");
}
fclose(fid);
}
__global__ void
filter_roi( unsigned char *BIN, const unsigned char *ROI, unsigned int map_len){
unsigned int tid = threadIdx.x;
unsigned int bix = blockIdx.x;
unsigned int bdx = blockDim.x;
unsigned int gdx = gridDim.x;
unsigned int i = bix*bdx + tid;
unsigned int gridSize = bdx*gdx;
while (i < map_len)
{
//BIN[i] *= ROI[i];
BIN[i] = (unsigned char) ((int)BIN[i] * (int)ROI[i]);
i += gridSize;
}
}
__global__ void imperviousness_change(
const unsigned char *dev_BIN1, const unsigned char *dev_BIN2,
unsigned int WIDTH, unsigned int HEIGHT, int *dev_LTAKE_map
)
{
unsigned long int x = threadIdx.x;
unsigned long int bdx = blockDim.x;
unsigned long int bix = blockIdx.x;
unsigned long int tix = bdx*bix + x; // offset
if( tix < WIDTH*HEIGHT ){
dev_LTAKE_map[tix] = (int)((int)dev_BIN2[tix] - (int)dev_BIN1[tix]);
}
}
__global__ void imperviousness_change_double(
const unsigned char *dev_BIN1, const unsigned char *dev_BIN2,
unsigned int WIDTH, unsigned int HEIGHT, double *dev_LTAKE_map
)
{
unsigned long int x = threadIdx.x;
unsigned long int bdx = blockDim.x;
unsigned long int bix = blockIdx.x;
unsigned long int tix = bdx*bix + x; // offset
if( tix < WIDTH*HEIGHT ){
dev_LTAKE_map[tix] = (double) ( (double)dev_BIN2[tix] - (double)dev_BIN1[tix] );
}
}
__global__ void imperviousness_change_char(
const unsigned char *dev_BIN1, const unsigned char *dev_BIN2,
unsigned int WIDTH, unsigned int HEIGHT, char *dev_LTAKE_map
)
{
unsigned long int x = threadIdx.x;
unsigned long int bdx = blockDim.x;
unsigned long int bix = blockIdx.x;
unsigned long int tix = bdx*bix + x; // offset
if( tix < WIDTH*HEIGHT ){
dev_LTAKE_map[tix] = dev_BIN2[tix] - dev_BIN1[tix];
}
}
__global__ void imperviousness_change_large(
const unsigned char *dev_BIN1, const unsigned char *dev_BIN2,
unsigned int WIDTH, unsigned int HEIGHT, int *dev_LTAKE_map,
int mapel_per_thread
)
{
unsigned long int x = threadIdx.x;
unsigned long int bdx = blockDim.x;
unsigned long int bix = blockIdx.x;
//unsigned long int gdx = gridDim.x;
unsigned long int tid = bdx*bix + x; // offset
unsigned long int tix = tid * mapel_per_thread; // offset
//extern __shared__ int sh_diff[];
if( bdx*bix*mapel_per_thread < WIDTH*HEIGHT ){
//sh_diff[tid] = 0; syncthreads();
for(long int ii=0;ii<mapel_per_thread;ii++){
if( tix+ii < WIDTH*HEIGHT ){
//sh_diff[tid] = (int)((int)dev_BIN2[tix+ii] - (int)dev_BIN1[tix+ii]);
dev_LTAKE_map[tix+ii] = (int)((int)dev_BIN2[tix+ii] - (int)dev_BIN1[tix+ii]);
} //__syncthreads();
//dev_LTAKE_map[tix+ii] = sh_diff[tid];
}
}
}
__global__ void imperviousness_change_histc_sh_4(
const unsigned char *dev_BIN1, const unsigned char *dev_BIN2,
unsigned int WIDTH, unsigned int HEIGHT,
int *dev_LTAKE_count, int mapel_per_thread
)
{
/*
INPUTS
dev_BIN1: Imperviousness of OLDER year.
dev_BIN2: Imperviousness of NEWER year.
OUTPUTS
dev_LTAKE_count: 2x2 table with counts about the 4 possible combinations.
dev_LTAKE_map: map storing the difference (dev_BIN2-dev_BIN1).
The following 4 combinations are possible computing the difference:
---------------------------
(N) (BIN2,BIN1) --> (LTAKE)
---------------------------
(1) (0,0) --> +0 ---> nothing changed in rural pixels
(2) (0,1) --> -1 ---> increase of rural pixels
(3) (1,0) --> +1 ---> increase of urban pixels
(4) (1,1) --> -0 ---> nothing changed in urban pixels
---------------------------
where values can be { 0:rural; 1:urban }.
DESCRIPTION
This kernel function counts the number of pixels for each LTAKE type (i.e. {+0,-1,+1,-0}).
It assumes that:
> the number of outcomes LTAKE can assume is equal to FOUR=4, as also stated in kernel name "..._4"
> each thread within a block is in charge of mapel_per_thread pixels in order to allocate
a number of blocks equal to the number of available SMs.
> the number of threads per block is equal to 256(=bdx).
I have to call this kernel using the following resources:
> block: (bdx*mapel_per_thread, 1, 1)
> sh_mem: bdx*4*sizeof(int)
*/
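	/* Worked example (added for clarity) of the bin indexing used below; the constants
	   follow directly from this kernel (num_bins = 4, nclasses = 2). Each pixel
	   contributes to bin  dev_BIN2[i]*nclasses + dev_BIN1[i]:
	       (BIN2,BIN1) = (0,0) --> bin 0   (rural -> rural, LTAKE = +0)
	       (BIN2,BIN1) = (0,1) --> bin 1   (urban -> rural, LTAKE = -1)
	       (BIN2,BIN1) = (1,0) --> bin 2   (rural -> urban, LTAKE = +1)
	       (BIN2,BIN1) = (1,1) --> bin 3   (urban -> urban, LTAKE = -0)
	   so dev_LTAKE_count[0..3] ends up holding the pixel count of each combination.
	*/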
unsigned int x = threadIdx.x;
unsigned int bdx = blockDim.x;
unsigned int bix = blockIdx.x;
unsigned int tid = (bdx*bix + x); // global thread index
unsigned int tix = tid*mapel_per_thread; // offset, considering mapel_per_thread pixels per thread
const int num_bins = 4;
const int nclasses = num_bins/2;
extern __shared__ int sh_sum[];
int loc_sum[num_bins];
unsigned int ii, o;
// if( tix < (WIDTH*HEIGHT) ){ // - WIDTH*HEIGHT%mapel_per_thread+1
// initialise at zero (IMPORTANT!!!):
for(ii=0;ii<num_bins;ii++){
loc_sum[ii] = 0;
sh_sum[x*num_bins+ii] = 0;
}
	__syncthreads();
// compute difference and store the count in local memory
// (each thread is in charge of mapel_per_thread map elements):
for(ii=0;ii<mapel_per_thread;ii++){
if(tix+ii<WIDTH*HEIGHT) loc_sum[dev_BIN2[tix+ii]*nclasses+dev_BIN1[tix+ii]] += 1;
}
// copy from local to shared memory:
for(ii=0;ii<num_bins;ii++) sh_sum[ii*bdx+x] = loc_sum[ii];
	__syncthreads();
		// reduce the bins one at a time (to maximise warp allocation):
		for(ii=0;ii<num_bins;ii++){
			o = ii*bdx;
			if(x<128) sh_sum[x+o] += sh_sum[x+o + 128];	__syncthreads();
			if(x<64) sh_sum[x+o] += sh_sum[x+o + 64];	__syncthreads();
			if(x<32) sh_sum[x+o] += sh_sum[x+o + 32];	__syncthreads();
			if(x<16) sh_sum[x+o] += sh_sum[x+o + 16];	__syncthreads();
			if(x<8) sh_sum[x+o] += sh_sum[x+o + 8];	__syncthreads();
			if(x<4) sh_sum[x+o] += sh_sum[x+o + 4];	__syncthreads();
			if(x<2) sh_sum[x+o] += sh_sum[x+o + 2];	__syncthreads();
			if(x<1) sh_sum[x+o] += sh_sum[x+o + 1];	__syncthreads();
// each bix writes his count value:
if(x==0) atomicAdd( &dev_LTAKE_count[ii], sh_sum[x+o] );
/* if(x>=bdx/2){
o = ii*bdx*2 + bdx/2;
sh_sum[x+o] += sh_sum[x+o + 128]; syncthreads();
if(x<bdx/2+64) sh_sum[x+o] += sh_sum[x+o + 64]; syncthreads();
if(x<bdx/2+32) sh_sum[x+o] += sh_sum[x+o + 32]; syncthreads();
if(x<bdx/2+16) sh_sum[x+o] += sh_sum[x+o + 16]; syncthreads();
if(x<bdx/2+8) sh_sum[x+o] += sh_sum[x+o + 8]; syncthreads();
if(x<bdx/2+4) sh_sum[x+o] += sh_sum[x+o + 4]; syncthreads();
if(x<bdx/2+2) sh_sum[x+o] += sh_sum[x+o + 2]; syncthreads();
if(x<bdx/2+1) sh_sum[x+o] += sh_sum[x+o + 1]; syncthreads();
// each bix writes his count value:
if(x<bdx/2+1) atomicAdd( &dev_LTAKE_count[1+ii*2], sh_sum[x+o] );
}
*/ }
//}
}
int main( int argc, char **argv ){
/*
* NOTES:
*
*/
/*
* ESTABILISH CONTEXT
*/
GDALAllRegister(); // Establish GDAL context.
cudaFree(0); // Establish CUDA context.
metadata MDbin,MDroi,MDdouble;
unsigned int map_len;
double *dev_LTAKE_map,*host_LTAKE_map;
//int *dev_LTAKE_map,*host_LTAKE_map;
//char *dev_LTAKE_map,*host_LTAKE_map;
int *dev_LTAKE_count,*host_LTAKE_count;
//double *dev_LTAKE_count,*host_LTAKE_count;
unsigned char *dev_BIN1, *dev_BIN2, *dev_ROI;
clock_t start_t,end_t;
unsigned int elapsed_time = 0;
cudaDeviceProp devProp;
unsigned int gpuDev=0;
// count the number of kernels that must print their output:
unsigned int count_print = 0;
bool use_large = false;
int mapel_per_thread_2 = 0;
/*
* LOAD METADATA & DATA
*/
MDbin = geotiffinfo( FIL_BIN1, 1 );
MDroi = geotiffinfo( FIL_ROI, 1 );
// set metadata to eventually print arrays after any CUDA kernel:
/* MDint = MDbin;
MDint.pixel_type = GDT_Int32;
*/ MDdouble = MDbin;
MDdouble.pixel_type = GDT_Float64;
// Set size of all arrays which come into play:
map_len = MDbin.width*MDbin.heigth;
size_t sizeChar = map_len*sizeof( unsigned char );
size_t sizeInt = 4*sizeof( int );
size_t sizeDouble = map_len*sizeof( double );
// initialize arrays:
unsigned char *BIN1 = (unsigned char *) CPLMalloc( sizeChar );
unsigned char *BIN2 = (unsigned char *) CPLMalloc( sizeChar );
unsigned char *ROI = (unsigned char *) CPLMalloc( sizeChar );
// load ROI:
printf("Importing...\t%s\n",FIL_ROI);
geotiffread( FIL_ROI, MDroi, &ROI[0] );
// load BIN:
printf("Importing...\t%s\n",FIL_BIN1);
geotiffread( FIL_BIN1, MDbin, &BIN1[0] );
printf("Importing...\t%s\n",FIL_BIN2);
geotiffread( FIL_BIN2, MDbin, &BIN2[0] );
/*
* INITIALIZE CPU & GPU ARRAYS
*/
// initialize grids on CPU MEM:
CUDA_CHECK_RETURN( cudaMallocHost( (void**)&host_LTAKE_map, sizeDouble) );
//CUDA_CHECK_RETURN( cudaMallocHost( (void**)&host_LTAKE_map, sizeInt) );
//CUDA_CHECK_RETURN( cudaMallocHost( (void**)&host_LTAKE_map, sizeChar) );
//CUDA_CHECK_RETURN( cudaMallocHost( (void**)&host_LTAKE_count, 4*sizeof(double)) );
CUDA_CHECK_RETURN( cudaMallocHost( (void**)&host_LTAKE_count, sizeInt) );
// initialize grids on GPU MEM:
CUDA_CHECK_RETURN( cudaMalloc( (void **)&dev_BIN1, sizeChar) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&dev_BIN2, sizeChar) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&dev_ROI, sizeChar) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&dev_LTAKE_map, sizeDouble) );
//CUDA_CHECK_RETURN( cudaMalloc( (void **)&dev_LTAKE_map, sizeInt) );
//CUDA_CHECK_RETURN( cudaMalloc( (void **)&dev_LTAKE_map, sizeChar) );
//CUDA_CHECK_RETURN( cudaMalloc( (void **)&dev_LTAKE_count, 4*sizeof(double)) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&dev_LTAKE_count, sizeInt) );
// memset:
CUDA_CHECK_RETURN( cudaMemset(dev_LTAKE_count, 0, sizeInt) );
//CUDA_CHECK_RETURN( cudaMemset(dev_LTAKE_count, 0, 4*sizeof(double)) );
// H2D:
CUDA_CHECK_RETURN( cudaMemcpy(dev_BIN1, BIN1, sizeChar, cudaMemcpyHostToDevice) );
CUDA_CHECK_RETURN( cudaMemcpy(dev_BIN2, BIN2, sizeChar, cudaMemcpyHostToDevice) );
CUDA_CHECK_RETURN( cudaMemcpy(dev_ROI, ROI, sizeChar, cudaMemcpyHostToDevice) );
/*
* QUERY CURRENT GPU PROPERTIES
*/
CUDA_CHECK_RETURN( cudaSetDevice(gpuDev) );
cudaGetDeviceProperties(&devProp, gpuDev);
int N_sm = devProp.multiProcessorCount;
int max_threads_per_SM = devProp.maxThreadsPerMultiProcessor;
// int max_shmem_per_block = devProp.sharedMemPerBlock;
/*
* KERNELS GEOMETRY
* NOTE: use ceil() instead of the "%" operator!!!
*/
unsigned int bdx, gdx, num_blocks_per_SM, mapel_per_thread, Nblks_per_grid;
/* if(map_len/BLOCK_DIM < N_sm){
bdx = BLOCK_DIM_small;
}else {
bdx = BLOCK_DIM;
}
*/
bdx = BLOCK_DIM;
num_blocks_per_SM = max_threads_per_SM / bdx;
mapel_per_thread = (unsigned int)ceil( (double)map_len / (double)((bdx*1)*N_sm*num_blocks_per_SM) );
gdx = (unsigned int)ceil( (double)map_len / (double)( mapel_per_thread*(bdx*1) ) );
Nblks_per_grid = N_sm* (max_threads_per_SM /threads);
dim3 block( bdx,1,1 );
dim3 grid ( gdx,1,1 );
dim3 dimBlock( threads, 1, 1 );
dim3 dimGrid( Nblks_per_grid, 1, 1 );
int sh_mem = (bdx*4)*(sizeof(int));
//double sh_mem_double = (bdx*4)*(sizeof(double));
unsigned int gdx_2 = (unsigned int)ceil( (double)map_len / (double)( (bdx*4) ) );
dim3 block_2( bdx*4,1,1 );
dim3 grid_2 ( gdx_2,1,1 );
if (gdx_2>devProp.maxGridSize[0]) {
/* printf( "Error: cannot allocate gridsize=%d (limit is %d)!\n",gdx_2,devProp.maxGridSize[0] );
exit(EXIT_FAILURE);
*/ use_large = true;
mapel_per_thread_2 = 32*4; // warp size times 4
gdx_2 = (unsigned int)ceil( (double)map_len / (double)( (bdx*2*mapel_per_thread_2) ) );
		block_2.x = bdx*2;	// adjust the existing launch geometry in place (avoid shadowing the outer dim3's)
		grid_2.x  = gdx_2;
}
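	/* Illustrative sizing example: BLOCK_DIM and the device limits are defined elsewhere,
	   so the numbers below are assumptions, not values taken from this program.
	   With map_len = 10,000,000, bdx = BLOCK_DIM = 256, N_sm = 8 and
	   max_threads_per_SM = 2048:
	       num_blocks_per_SM = 2048/256 = 8
	       mapel_per_thread  = ceil(10e6 / (256*8*8))  = 611
	       gdx               = ceil(10e6 / (611*256))  = 64  (= 8 blocks on each of the 8 SMs)
	   i.e. every thread of the histogram kernel scans 611 pixels, and the kernel is launched
	   with bdx*4*sizeof(int) = 4 KiB of dynamic shared memory per block.
	*/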
/* KERNELS INVOCATION
*
* ********************************
* -0- filter_roi twice(BIN1,BIN2)
* -1- imperviousness_change_sh_4
* -2- imperviousness_change
* ********************************
*
* Note that imperviousness_change_large does not work!!
*/
printf("\n\n");
// ***-0-***
start_t = clock();
filter_roi<<<dimGrid,dimBlock>>>(dev_BIN1,dev_ROI,map_len);
filter_roi<<<dimGrid,dimBlock>>>(dev_BIN2,dev_ROI,map_len);
CUDA_CHECK_RETURN( cudaDeviceSynchronize() );
end_t = clock();
printf(" -%d- %34s\t%6d [msec]\n",++count_print,kern_0,(int)( (double)(end_t - start_t ) / (double)CLOCKS_PER_SEC * 1000 ));
if (print_intermediate_arrays){
CUDA_CHECK_RETURN( cudaMemcpy(host_LTAKE_count,dev_LTAKE_count, (size_t)4*sizeof(int),cudaMemcpyDeviceToHost) );
//CUDA_CHECK_RETURN( cudaMemcpy(host_LTAKE_count,dev_LTAKE_count, (size_t)4*sizeof(double),cudaMemcpyDeviceToHost) );
sprintf(buffer,"%s/data/-%d-%s.txt",BASE_PATH,count_print,kern_0);
write_mat_int( host_LTAKE_count, 4, 1, buffer );
}
elapsed_time += (int)( (double)(end_t - start_t ) / (double)CLOCKS_PER_SEC * 1000 );// elapsed time [ms]:
// ***-1-***
start_t = clock();
imperviousness_change_histc_sh_4<<<grid,block,sh_mem>>>( dev_BIN1, dev_BIN2, MDbin.width, MDbin.heigth,
dev_LTAKE_count, mapel_per_thread );
/* imperviousness_change_histc_sh_4_double<<<grid,block,sh_mem_double>>>(
dev_BIN1, dev_BIN2, MDbin.width, MDbin.heigth,
dev_LTAKE_count, mapel_per_thread );
*/ CUDA_CHECK_RETURN( cudaDeviceSynchronize() );
end_t = clock();
printf(" -%d- %34s\t%6d [msec]\n",++count_print,kern_1,(int)( (double)(end_t - start_t ) / (double)CLOCKS_PER_SEC * 1000 ));
if (print_intermediate_arrays){
CUDA_CHECK_RETURN( cudaMemcpy(host_LTAKE_count,dev_LTAKE_count, (size_t)sizeInt,cudaMemcpyDeviceToHost) );
//CUDA_CHECK_RETURN( cudaMemcpy(host_LTAKE_count,dev_LTAKE_count, (size_t)4*sizeof(double),cudaMemcpyDeviceToHost) );
sprintf(buffer,"%s/data/-%d-%s.txt",BASE_PATH,count_print,kern_1);
write_mat_int( host_LTAKE_count, 4, 1, buffer );
}
elapsed_time += (int)( (double)(end_t - start_t ) / (double)CLOCKS_PER_SEC * 1000 );// elapsed time [ms]:
// ***-2-***
start_t = clock();
if(use_large!=true){
imperviousness_change_double<<<grid_2,block_2>>>( dev_BIN1, dev_BIN2, MDbin.width, MDbin.heigth, dev_LTAKE_map );
//imperviousness_change<<<grid_2,block_2>>>( dev_BIN1, dev_BIN2, MDbin.width, MDbin.heigth, dev_LTAKE_map );
//imperviousness_change_char<<<grid_2,block_2>>>( dev_BIN1, dev_BIN2, MDbin.width, MDbin.heigth, dev_LTAKE_map );
}else{
printf("Error: imperviousness_change_large does not work yet!");
exit(EXIT_FAILURE);
/* imperviousness_change_large<<<grid_2,block_2>>>(dev_BIN1, dev_BIN2, MDbin.width, MDbin.heigth, dev_LTAKE_map, mapel_per_thread_2 );
kern_2 = "imperviousness_change_large" ;
*/ }
CUDA_CHECK_RETURN( cudaDeviceSynchronize() );
end_t = clock();
printf(" -%d- %34s\t%6d [msec]\n",++count_print,kern_2,(int)( (double)(end_t - start_t ) / (double)CLOCKS_PER_SEC * 1000 ));
if (print_intermediate_arrays){
CUDA_CHECK_RETURN( cudaMemcpy(host_LTAKE_map,dev_LTAKE_map, (size_t)sizeDouble,cudaMemcpyDeviceToHost) );
//CUDA_CHECK_RETURN( cudaMemcpy(host_LTAKE_map,dev_LTAKE_map, (size_t)sizeInt,cudaMemcpyDeviceToHost) );
//CUDA_CHECK_RETURN( cudaMemcpy(host_LTAKE_map,dev_LTAKE_map, (size_t)sizeChar,cudaMemcpyDeviceToHost) );
sprintf(buffer,"%s/data/-%d-%s.tif",BASE_PATH,count_print,kern_2);
geotiffwrite( FIL_BIN1, buffer, MDdouble, host_LTAKE_map );
//geotiffwrite( FIL_BIN1, buffer, MDint, host_LTAKE_map );
//geotiffwrite( FIL_BIN1, buffer, MDbin, host_LTAKE_map );
}
elapsed_time += (int)( (double)(end_t - start_t ) / (double)CLOCKS_PER_SEC * 1000 );// elapsed time [ms]:
printf("________________________________________________________________\n");
printf("%40s\t%6d [msec]\n", "Total time:",elapsed_time );
CUDA_CHECK_RETURN( cudaMemcpy(host_LTAKE_map,dev_LTAKE_map, (size_t)sizeDouble,cudaMemcpyDeviceToHost) );
//CUDA_CHECK_RETURN( cudaMemcpy(host_LTAKE_map,dev_LTAKE_map, (size_t)sizeInt,cudaMemcpyDeviceToHost) );
//CUDA_CHECK_RETURN( cudaMemcpy(host_LTAKE_map,dev_LTAKE_map, (size_t)sizeChar,cudaMemcpyDeviceToHost) );
CUDA_CHECK_RETURN( cudaMemcpy(host_LTAKE_count,dev_LTAKE_count, (size_t)sizeInt,cudaMemcpyDeviceToHost) );
//CUDA_CHECK_RETURN( cudaMemcpy(host_LTAKE_count,dev_LTAKE_count, (size_t)4*sizeof(double),cudaMemcpyDeviceToHost) );
// save on HDD
geotiffwrite( FIL_BIN1, FIL_LTAKE_grid, MDdouble, host_LTAKE_map );
//geotiffwrite( FIL_BIN1, FIL_LTAKE_grid, MDint, host_LTAKE_map );
//geotiffwrite( FIL_BIN1, FIL_LTAKE_grid, MDbin, host_LTAKE_map );
write_mat_T( host_LTAKE_count, 4, 1, FIL_LTAKE_count );
	// CUDA free:
	cudaFree( dev_BIN1 );
	cudaFree( dev_BIN2 );
	cudaFree( dev_LTAKE_map );
	cudaFree( dev_LTAKE_count );
	cudaFree( dev_ROI );
	cudaFreeHost( host_LTAKE_map );
	cudaFreeHost( host_LTAKE_count );
	// host free (buffers allocated with CPLMalloc):
	CPLFree( BIN1 );
	CPLFree( BIN2 );
	CPLFree( ROI );
// Destroy context
CUDA_CHECK_RETURN( cudaDeviceReset() );
printf("\n\n\nFinished!!\n");
return 0;// elapsed_time
}
|
af85b4b63016238e9bdc4c904d6e712e3e68353b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pointwise_hist2.cuh"
#include "split_properties_helpers.cuh"
#include "compute_point_hist2_loop.cuh"
#include "pointwise_hist2_half_byte_template.cuh"
#include <hip/hip_cooperative_groups.h>
#include <library/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <library/cuda/wrappers/arch.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <int BlockSize, bool IsFullPass, int M>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesBImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, float* __restrict__ binSums, int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass);
feature += (blockIdx.x / M) * 32;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 32, 32);
__shared__ float counters[16 *BlockSize];
if (partition->Size)
{
using THist = TPointHistHalfByte<BlockSize>;
#if __CUDA_ARCH__ > 350
const bool use64bitLoad = IsFullPass;
#else
const bool use64bitLoad = false;
#endif
if (use64bitLoad) {
//full pass
#if __CUDA_ARCH__ <= 350
const int OUTER_UNROLL = 1;
#else
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram2 <BlockSize, OUTER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, &counters[0]);
} else {
#if __CUDA_ARCH__ <= 300
const int INNER_UNROLL = 2;
const int OUTER_UNROLL = 1;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 1;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram <BlockSize, OUTER_UNROLL, INNER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, &counters[0]);
}
ui32 w = threadIdx.x & 1;
ui32 fid = (threadIdx.x >> 1);
if (fid < fCount)
{
const int groupId = fid / 4;
uchar fMask = 1 << (3 - (fid & 3));
float sum = 0.f;
            #pragma unroll
for (int i = 0; i < 16; i++) {
if (!(i & fMask)) {
sum += counters[i * 16 + 2 * groupId + w];
}
}
if (abs(sum) > 1e-20f) {
if (M > 1)
{
atomicAdd(binSums + (feature[fid].FirstFoldIndex) * 2 + w, sum);
} else
{
binSums[(feature[fid].FirstFoldIndex) * 2 + w] = sum;
}
}
}
}
}
template <int BlockSize, int BlocksPerFeatureCount>
void RunComputeHist2BinaryKernel(const TCFeature* bFeatures, int bCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, bool fullPass,
int totalFeatureCount,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass)
{
ComputeSplitPropertiesBImpl <BlockSize, true,
BlocksPerFeatureCount > << <numBlocks,BlockSize, 0, stream>>>(
bFeatures, bCount, cindex, target, weight, indices, partition, binSums, totalFeatureCount
);
} else
{
ComputeSplitPropertiesBImpl <BlockSize, false,
BlocksPerFeatureCount > << <numBlocks,BlockSize, 0, stream>>>(
bFeatures, bCount, cindex, target, weight, indices, partition, binSums, totalFeatureCount
);
}
};
void ComputeHist2Binary(const TCFeature* bFeatures, ui32 bCount,
const ui32* cindex,
const float* target, const float* weight,
const ui32* indices, ui32 size,
const TDataPartition* partition,
ui32 partsCount, ui32 foldCount,
bool fullPass,
ui32 totalFeatureCount,
float* binSums,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = (bCount + 31) / 32;
const int histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = histCount;
numBlocks.z = foldCount;
const int blockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (bCount) {
#define COMPUTE(k) \
RunComputeHist2BinaryKernel<blockSize, k>(bFeatures, bCount, cindex, target, weight, indices, \
        partition, binSums, fullPass, totalFeatureCount, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8);
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
}
| af85b4b63016238e9bdc4c904d6e712e3e68353b.cu | #include "pointwise_hist2.cuh"
#include "split_properties_helpers.cuh"
#include "compute_point_hist2_loop.cuh"
#include "pointwise_hist2_half_byte_template.cuh"
#include <cooperative_groups.h>
#include <library/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <library/cuda/wrappers/arch.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <int BlockSize, bool IsFullPass, int M>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesBImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, float* __restrict__ binSums, int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass);
feature += (blockIdx.x / M) * 32;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 32, 32);
__shared__ float counters[16 *BlockSize];
if (partition->Size)
{
using THist = TPointHistHalfByte<BlockSize>;
#if __CUDA_ARCH__ > 350
const bool use64bitLoad = IsFullPass;
#else
const bool use64bitLoad = false;
#endif
if (use64bitLoad) {
//full pass
#if __CUDA_ARCH__ <= 350
const int OUTER_UNROLL = 1;
#else
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram2 <BlockSize, OUTER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, &counters[0]);
} else {
#if __CUDA_ARCH__ <= 300
const int INNER_UNROLL = 2;
const int OUTER_UNROLL = 1;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 1;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram <BlockSize, OUTER_UNROLL, INNER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, &counters[0]);
}
ui32 w = threadIdx.x & 1;
ui32 fid = (threadIdx.x >> 1);
if (fid < fCount)
{
const int groupId = fid / 4;
uchar fMask = 1 << (3 - (fid & 3));
float sum = 0.f;
            #pragma unroll
for (int i = 0; i < 16; i++) {
if (!(i & fMask)) {
sum += counters[i * 16 + 2 * groupId + w];
}
}
if (abs(sum) > 1e-20f) {
if (M > 1)
{
atomicAdd(binSums + (feature[fid].FirstFoldIndex) * 2 + w, sum);
} else
{
binSums[(feature[fid].FirstFoldIndex) * 2 + w] = sum;
}
}
}
}
}
template <int BlockSize, int BlocksPerFeatureCount>
void RunComputeHist2BinaryKernel(const TCFeature* bFeatures, int bCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, bool fullPass,
int totalFeatureCount,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass)
{
ComputeSplitPropertiesBImpl <BlockSize, true,
BlocksPerFeatureCount > << <numBlocks,BlockSize, 0, stream>>>(
bFeatures, bCount, cindex, target, weight, indices, partition, binSums, totalFeatureCount
);
} else
{
ComputeSplitPropertiesBImpl <BlockSize, false,
BlocksPerFeatureCount > << <numBlocks,BlockSize, 0, stream>>>(
bFeatures, bCount, cindex, target, weight, indices, partition, binSums, totalFeatureCount
);
}
};
void ComputeHist2Binary(const TCFeature* bFeatures, ui32 bCount,
const ui32* cindex,
const float* target, const float* weight,
const ui32* indices, ui32 size,
const TDataPartition* partition,
ui32 partsCount, ui32 foldCount,
bool fullPass,
ui32 totalFeatureCount,
float* binSums,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = (bCount + 31) / 32;
const int histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = histCount;
numBlocks.z = foldCount;
const int blockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (bCount) {
#define COMPUTE(k) \
RunComputeHist2BinaryKernel<blockSize, k>(bFeatures, bCount, cindex, target, weight, indices, \
        partition, binSums, fullPass, totalFeatureCount, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8);
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
}
|
7fd78c0bf25c6ca266c470afc1873ab186b61137.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Rodolfo Martinez Guevara
// template provided for cuda quiz 3.
// remember to write your own comments in the code below.
#include <stdio.h>
#define N 9 //size of original matrix
#define K N/3 //size of compressed matrrix
#define ThreadsPerBlock 1 // choose wisely
#define NumBlocks N // choose wisely
__global__ void compress(float *mat, int n, float *comp, int k){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
int index = tidx + tidy * n;
int aux = 0;
int current_col = 0;
int current_row = 0;
if(index < n){
current_col = index/k;
current_row = index%n;
for (int i = 0; i < k; i++){
for (int j = 0; j < k; j++){
aux += mat[i*(current_row*current_col)+j];
}
}
comp[index] = aux;
}
}
void print_mat(float *mat, int n){
for (int i = 0; i < n; i++){
for (int j = 0; j < n; j++){
printf("%.1f\t", mat[i*n+j]);
}
printf("\n");
}
printf("\n");
}
void fill_mat(float *mat, int n){
int c = 0;
for (int i = 0; i < n; i++){
for (int j = 0; j < n; j++){
mat[i*n+j] = c++;
}
}
}
int main(){
float *h_compress, *h_matrix;
float *d_compress, *d_matrix;
h_compress = (float *)malloc(sizeof(float)*K*K);
h_matrix = (float *)malloc(sizeof(float)*N*N);
hipMalloc((void **)&d_matrix, sizeof(float)*N*N);
hipMalloc((void **)&d_compress, sizeof(float)*K*K);
fill_mat(h_matrix, N);
// fill_mat(h_compress, K);
printf("\n input mat \n");
print_mat(h_matrix, N);
hipMemcpy(d_matrix, h_matrix, sizeof(float)*N*N, hipMemcpyHostToDevice);
dim3 dimThreads(ThreadsPerBlock, 1,1);
dim3 dimBlocks(NumBlocks, 1,1);
hipLaunchKernelGGL(( compress), dim3(dimBlocks), dim3(dimThreads), 0, 0, d_matrix, N, d_compress, K);
hipMemcpy(h_compress, d_compress, sizeof(float)*K*K, hipMemcpyDeviceToHost);
printf("\n input compress \n");
print_mat(h_compress, K);
free(h_matrix);
free(h_compress);
hipFree(d_matrix);
hipFree(d_compress);
}
| 7fd78c0bf25c6ca266c470afc1873ab186b61137.cu | //Rodolfo Martinez Guevara
// template provided for cuda quiz 3.
// remember to write your own comments in the code below.
#include <stdio.h>
#define N 9 //size of original matrix
#define K N/3 //size of compressed matrrix
#define ThreadsPerBlock 1 // choose wisely
#define NumBlocks N // choose wisely
__global__ void compress(float *mat, int n, float *comp, int k){
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
int index = tidx + tidy * n;
int aux = 0;
int current_col = 0;
int current_row = 0;
if(index < n){
current_col = index/k;
current_row = index%n;
for (int i = 0; i < k; i++){
for (int j = 0; j < k; j++){
aux += mat[i*(current_row*current_col)+j];
}
}
comp[index] = aux;
}
}
void print_mat(float *mat, int n){
for (int i = 0; i < n; i++){
for (int j = 0; j < n; j++){
printf("%.1f\t", mat[i*n+j]);
}
printf("\n");
}
printf("\n");
}
void fill_mat(float *mat, int n){
int c = 0;
for (int i = 0; i < n; i++){
for (int j = 0; j < n; j++){
mat[i*n+j] = c++;
}
}
}
int main(){
float *h_compress, *h_matrix;
float *d_compress, *d_matrix;
h_compress = (float *)malloc(sizeof(float)*K*K);
h_matrix = (float *)malloc(sizeof(float)*N*N);
cudaMalloc((void **)&d_matrix, sizeof(float)*N*N);
cudaMalloc((void **)&d_compress, sizeof(float)*K*K);
fill_mat(h_matrix, N);
// fill_mat(h_compress, K);
printf("\n input mat \n");
print_mat(h_matrix, N);
cudaMemcpy(d_matrix, h_matrix, sizeof(float)*N*N, cudaMemcpyHostToDevice);
dim3 dimThreads(ThreadsPerBlock, 1,1);
dim3 dimBlocks(NumBlocks, 1,1);
compress<<<dimBlocks, dimThreads>>>(d_matrix, N, d_compress, K);
cudaMemcpy(h_compress, d_compress, sizeof(float)*K*K, cudaMemcpyDeviceToHost);
printf("\n input compress \n");
print_mat(h_compress, K);
free(h_matrix);
free(h_compress);
cudaFree(d_matrix);
cudaFree(d_compress);
}
|
72aa6184629ccbd4165a4523c2c732411216fe31.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernelFormBinStart ( int* devOutputBinStart, unsigned int* devInputBinCirPairBin, unsigned int bcPairLen)
{
__shared__ int cache[257]; //256 bcpair + the last bc pair in the previous block
int bcPairIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (bcPairIdx >= bcPairLen)
{
return;
}
cache[1 + threadIdx.x] = devInputBinCirPairBin[bcPairIdx];
if ( threadIdx.x == 0 )
{
if ( bcPairIdx != 0 )
{
cache[0] = devInputBinCirPairBin[bcPairIdx - 1];
}
else
{
cache[0] = -1;
}
}
__syncthreads();
if (cache[1 + threadIdx.x] != cache[threadIdx.x])
{
//printf("b: %d, s: %d\n", cache[1 + threadIdx.x], bcPairIdx);
devOutputBinStart[cache[1 + threadIdx.x]] = bcPairIdx;
}
} | 72aa6184629ccbd4165a4523c2c732411216fe31.cu | #include "includes.h"
__global__ void kernelFormBinStart ( int* devOutputBinStart, unsigned int* devInputBinCirPairBin, unsigned int bcPairLen)
{
__shared__ int cache[257]; //256 bcpair + the last bc pair in the previous block
int bcPairIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (bcPairIdx >= bcPairLen)
{
return;
}
cache[1 + threadIdx.x] = devInputBinCirPairBin[bcPairIdx];
if ( threadIdx.x == 0 )
{
if ( bcPairIdx != 0 )
{
cache[0] = devInputBinCirPairBin[bcPairIdx - 1];
}
else
{
cache[0] = -1;
}
}
__syncthreads();
if (cache[1 + threadIdx.x] != cache[threadIdx.x])
{
//printf("b: %d, s: %d\n", cache[1 + threadIdx.x], bcPairIdx);
devOutputBinStart[cache[1 + threadIdx.x]] = bcPairIdx;
}
} |
7ab0638bd752910c4ba9a2eb6ef9d6e4b5e1cfb3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
* Refactor `loop` to be a CUDA Kernel. The new kernel should
* only do the work of 1 iteration of the original loop.
*/
__global__ void loop()
{
printf("This is iteration number %d\n", threadIdx.x);
}
int main()
{
/*
* When refactoring `loop` to launch as a kernel, be sure
* to use the execution configuration to control how many
* "iterations" to perform.
*
* For this exercise, only use 1 block of threads.
*/
int N = 10;
hipLaunchKernelGGL(( loop), dim3(1),dim3(N), 0, 0, );
hipDeviceSynchronize();
}
| 7ab0638bd752910c4ba9a2eb6ef9d6e4b5e1cfb3.cu | #include <stdio.h>
/*
* Refactor `loop` to be a CUDA Kernel. The new kernel should
* only do the work of 1 iteration of the original loop.
*/
__global__ void loop()
{
printf("This is iteration number %d\n", threadIdx.x);
}
int main()
{
/*
* When refactoring `loop` to launch as a kernel, be sure
* to use the execution configuration to control how many
* "iterations" to perform.
*
* For this exercise, only use 1 block of threads.
*/
int N = 10;
loop<<<1,N>>>();
cudaDeviceSynchronize();
}
|
fb893774ac6398c23cfe487273a6c9432b160337.hip | // !!! This is a file automatically generated by hipify!!!
#include "funset.hpp"
#include <iostream>
#include <hip/hip_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
#include "common.hpp"
/* __global__: function type qualifier; the function runs on the device; it is called from the
   host (and, for compute capability 3.2 and above, also from the device); the declared function
   must return void; calls to it are asynchronous, i.e. they return before the device has finished
   executing it; every call must specify an execution configuration, i.e. the grid and block
   dimensions used to run the function on the device and the associated stream (inserted with the
   <<< >>> operator);
   "a kernel": a CUDA parallel function running on the GPU is called a kernel, and a kernel must
   be defined with the __global__ qualifier */
__global__ static void long_vector_add(const float *A, const float *B, float *C, int elements_num)
{
	/* gridDim: built-in variable describing the dimensions of the thread grid; it is a constant
	   for all thread blocks and stores the size of each grid dimension, i.e. the number of blocks
	   in each dimension of the grid; a grid is three-dimensional, of type dim3;
	   blockDim: built-in variable giving the dimensions and size of each block; of type dim3,
	   holding the block size in the three dimensions; it is a constant for all blocks and stores
	   the number of threads in each dimension of a block;
	   blockIdx: built-in variable holding the index of the block currently executing the device
	   code, i.e. the position of the current thread's block within the grid; blockIdx.x ranges
	   over [0,gridDim.x-1] and blockIdx.y over [0, gridDim.y-1]; of type uint3, containing the
	   block's index in every dimension of the grid;
	   threadIdx: built-in variable holding the index of the thread currently executing the device
	   code, i.e. the position of the current thread within its block; threadIdx.x is available for
	   1-D blocks, threadIdx.y additionally for 2-D, and threadIdx.z for 3-D; of type uint3,
	   containing the thread's index in every dimension of the block */
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid == 0) {
printf("blockDim.x = %d, gridDim.x = %d\n", blockDim.x, gridDim.x); // support pritnf, but don't support fprintf
}
while (tid < elements_num) {
C[tid] = A[tid] + B[tid];
tid += blockDim.x * gridDim.x;
}
}
int long_vector_add_gpu(const float* A, const float* B, float* C, int elements_num, float* elapsed_time)
{
	/* hipEvent_t: CUDA event type (a struct); an event is used to measure the time the GPU spends
	   on a task; it is essentially a GPU timestamp, and because events are implemented on the GPU
	   they are not suitable for timing mixed code that contains both device and host code */
hipEvent_t start, stop;
	// hipEventCreate: create an event object, asynchronous
hipEventCreate(&start);
hipEventCreate(&stop);
	// hipEventRecord: record an event, asynchronous; start records the start time
hipEventRecord(start, 0);
size_t lengthA{ elements_num * sizeof(float) }, lengthB{ elements_num * sizeof(float) };
size_t lengthC{ elements_num * sizeof(float) };
float *d_A{ nullptr }, *d_B{ nullptr }, *d_C{ nullptr };
	// hipMalloc: allocate memory on the device
hipMalloc(&d_A, lengthA);
hipMalloc(&d_B, lengthB);
hipMalloc(&d_C, lengthC);
	/* hipMemcpy: copies data between host and device; the fourth argument must be one of:
	(1). hipMemcpyHostToHost: copy data from host to host
	(2). hipMemcpyHostToDevice: copy data from host to device
	(3). hipMemcpyDeviceToHost: copy data from device to host
	(4). hipMemcpyDeviceToDevice: copy data from device to device
	(5). hipMemcpyDefault: infer the copy direction from the pointer values; requires
	     unified virtual addressing (CUDA 6.0 and above)
	cudaMemcpy/hipMemcpy is synchronous with respect to the host */
hipMemcpy(d_A, A, lengthA, hipMemcpyHostToDevice);
hipMemcpy(d_B, B, lengthB, hipMemcpyHostToDevice);
	/* <<< >>>: operator introduced by CUDA to specify the thread grid and block dimensions etc.,
	   passing the execution parameters to the CUDA compiler and runtime; it states how many
	   threads the kernel uses and how they are organised; the parameters inside the angle brackets
	   are not passed to the device code, they tell the runtime how to launch it; the arguments for
	   the device code itself are passed in parentheses, like a normal function call; devices of
	   different compute capability impose different limits on the total number of threads and on
	   how they may be organised; enough space must be allocated for the arrays and variables used
	   in the kernel before it is launched, otherwise errors such as out-of-bounds accesses occur
	   on the GPU;
	   with the runtime API the execution configuration follows the kernel name as <<<Dg,Db,Ns,S>>>,
	   where: Dg is a dim3 giving the grid dimensions and sizes, so the grid will contain
	   Dg.x*Dg.y*Dg.z blocks; Db is a dim3 giving the block dimensions and sizes, so each block
	   will contain Db.x*Db.y*Db.z threads; Ns is a size_t giving the amount of shared memory
	   dynamically allocated per block for this call, usable by variables declared as external
	   arrays (extern __shared__); Ns is optional and defaults to 0; S is of type cudaStream_t and
	   sets the stream associated with the kernel; S is optional and defaults to 0. */
long_vector_add << < 512, 512 >> >(d_A, d_B, d_C, elements_num);
	/* hipDeviceSynchronize: kernel launches are asynchronous; to find out whether a kernel failed
	   one usually adds a cudaDeviceSynchronize/hipDeviceSynchronize call; it blocks until all
	   previously requested work has completed, and returns an error if any of that work failed;
	   when a program uses several streams and the streams must communicate at some point, a
	   synchronisation call (cudaDeviceSynchronize) is required at that point
	reference: https://stackoverflow.com/questions/11888772/when-to-call-cudadevicesynchronize */
//hipDeviceSynchronize();
hipMemcpy(C, d_C, lengthA, hipMemcpyDeviceToHost);
	// hipFree: free device memory allocated by cudaMalloc/hipMalloc
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
	// hipEventRecord: record an event, asynchronous; stop records the end time
hipEventRecord(stop, 0);
	// hipEventSynchronize: event synchronisation; wait for an event to complete
hipEventSynchronize(stop);
// cudaEventElapseTime: ,,
hipEventElapsedTime(elapsed_time, start, stop);
	// hipEventDestroy: destroy an event object
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
| fb893774ac6398c23cfe487273a6c9432b160337.cu | #include "funset.hpp"
#include <iostream>
#include <cuda_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
#include "common.hpp"
/* __global__: function type qualifier; the function runs on the device; it is called from the
   host (and, for compute capability 3.2 and above, also from the device); the declared function
   must return void; calls to it are asynchronous, i.e. they return before the device has finished
   executing it; every call must specify an execution configuration, i.e. the grid and block
   dimensions used to run the function on the device and the associated stream (inserted with the
   <<< >>> operator);
   "a kernel": a CUDA parallel function running on the GPU is called a kernel, and a kernel must
   be defined with the __global__ qualifier */
{
	/* gridDim: built-in variable describing the dimensions of the thread grid; it is a constant
	   for all thread blocks and stores the size of each grid dimension, i.e. the number of blocks
	   in each dimension of the grid; a grid is three-dimensional, of type dim3;
	   blockDim: built-in variable giving the dimensions and size of each block; of type dim3,
	   holding the block size in the three dimensions; it is a constant for all blocks and stores
	   the number of threads in each dimension of a block;
	   blockIdx: built-in variable holding the index of the block currently executing the device
	   code, i.e. the position of the current thread's block within the grid; blockIdx.x ranges
	   over [0,gridDim.x-1] and blockIdx.y over [0, gridDim.y-1]; of type uint3, containing the
	   block's index in every dimension of the grid;
	   threadIdx: built-in variable holding the index of the thread currently executing the device
	   code, i.e. the position of the current thread within its block; threadIdx.x is available for
	   1-D blocks, threadIdx.y additionally for 2-D, and threadIdx.z for 3-D; of type uint3,
	   containing the thread's index in every dimension of the block */
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid == 0) {
printf("blockDim.x = %d, gridDim.x = %d\n", blockDim.x, gridDim.x); // support pritnf, but don't support fprintf
}
while (tid < elements_num) {
C[tid] = A[tid] + B[tid];
tid += blockDim.x * gridDim.x;
}
}
int long_vector_add_gpu(const float* A, const float* B, float* C, int elements_num, float* elapsed_time)
{
	/* cudaEvent_t: CUDA event type (a struct); an event is used to measure the time the GPU spends
	   on a task; it is essentially a GPU timestamp, and because events are implemented on the GPU
	   they are not suitable for timing mixed code that contains both device and host code */
cudaEvent_t start, stop;
// cudaEventCreate: 创建一个事件对象,异步启动
cudaEventCreate(&start);
cudaEventCreate(&stop);
// cudaEventRecord: 记录一个事件,异步启动,start记录起始时间
cudaEventRecord(start, 0);
size_t lengthA{ elements_num * sizeof(float) }, lengthB{ elements_num * sizeof(float) };
size_t lengthC{ elements_num * sizeof(float) };
float *d_A{ nullptr }, *d_B{ nullptr }, *d_C{ nullptr };
	// cudaMalloc: allocate memory on the device
cudaMalloc(&d_A, lengthA);
cudaMalloc(&d_B, lengthB);
cudaMalloc(&d_C, lengthC);
	/* cudaMemcpy: copies data between host and device; the fourth argument must be one of:
	(1). cudaMemcpyHostToHost: copy data from host to host
	(2). cudaMemcpyHostToDevice: copy data from host to device
	(3). cudaMemcpyDeviceToHost: copy data from device to host
	(4). cudaMemcpyDeviceToDevice: copy data from device to device
	(5). cudaMemcpyDefault: infer the copy direction from the pointer values; requires
	     unified virtual addressing (CUDA 6.0 and above)
	cudaMemcpy is synchronous with respect to the host */
cudaMemcpy(d_A, A, lengthA, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, lengthB, cudaMemcpyHostToDevice);
	/* <<< >>>: operator introduced by CUDA to specify the thread grid and block dimensions etc.,
	   passing the execution parameters to the CUDA compiler and runtime; it states how many
	   threads the kernel uses and how they are organised; the parameters inside the angle brackets
	   are not passed to the device code, they tell the runtime how to launch it; the arguments for
	   the device code itself are passed in parentheses, like a normal function call; devices of
	   different compute capability impose different limits on the total number of threads and on
	   how they may be organised; enough space must be allocated for the arrays and variables used
	   in the kernel before it is launched, otherwise errors such as out-of-bounds accesses occur
	   on the GPU;
	   with the runtime API the execution configuration follows the kernel name as <<<Dg,Db,Ns,S>>>,
	   where: Dg is a dim3 giving the grid dimensions and sizes, so the grid will contain
	   Dg.x*Dg.y*Dg.z blocks; Db is a dim3 giving the block dimensions and sizes, so each block
	   will contain Db.x*Db.y*Db.z threads; Ns is a size_t giving the amount of shared memory
	   dynamically allocated per block for this call, usable by variables declared as external
	   arrays (extern __shared__); Ns is optional and defaults to 0; S is of type cudaStream_t and
	   sets the stream associated with the kernel; S is optional and defaults to 0. */
long_vector_add << < 512, 512 >> >(d_A, d_B, d_C, elements_num);
	/* cudaDeviceSynchronize: kernel launches are asynchronous; to find out whether a kernel failed
	   one usually adds a cudaDeviceSynchronize call; it blocks until all previously requested work
	   has completed, and returns an error if any of that work failed; when a program uses several
	   streams and the streams must communicate at some point, a synchronisation call
	   (cudaDeviceSynchronize) is required at that point
	reference: https://stackoverflow.com/questions/11888772/when-to-call-cudadevicesynchronize */
//cudaDeviceSynchronize();
cudaMemcpy(C, d_C, lengthA, cudaMemcpyDeviceToHost);
// cudaFree: 释放设备上由cudaMalloc函数分配的内存
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// cudaEventRecord: 记录一个事件,异步启动,stop记录结束时间
cudaEventRecord(stop, 0);
// cudaEventSynchronize: 事件同步,等待一个事件完成,异步启动
cudaEventSynchronize(stop);
// cudaEventElapseTime: 计算两个事件之间经历的时间,单位为毫秒,异步启动
cudaEventElapsedTime(elapsed_time, start, stop);
// cudaEventDestroy: 销毁事件对象,异步启动
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
c2efb3936d7a71ec8fc673b0c176114d9d098fb2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <omp.h>
#include <math.h>
#include "jacobi_gpu.h"
#define u(i,j) U[(i)*N + (j)]
#define u_old(i,j) U_old[(i)*N + (j)]
#define u_old_e(i,j) U_old_e[(i)*N + (j)]
#define f(i,j) F[(i)*N + (j)]
__global__ void jacobi_1(int N, double *U, double *U_old, int *F, double h, double delta_sq) {
int i,j;
// Update U
for (i=1; i<N-1; i++) {
for (j=1; j<N-1; j++) {
u(i,j) = h * (u_old(i-1,j) + u_old(i+1,j) + u_old(i,j-1) + u_old(i,j+1) + delta_sq * (double)f(i,j));
}
}
}
__global__ void jacobi_2(int N, double *U, double *U_old, int *F, double h, double delta_sq) {
int i, j;
j = blockIdx.x * blockDim.x + threadIdx.x + 1;
i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if(i < N-1 && j < N-1){
u(i,j) = h * (u_old(i-1,j) + u_old(i+1,j) + u_old(i,j-1) + u_old(i,j+1) + delta_sq * (double)f(i,j));
}
//Swap Pointers
}
__global__ void jacobi_3_0(int N, double *U, double *U_old, double *U_old_e, int *F, double h, double delta_sq) {
int i,j;
j = blockIdx.x * blockDim.x + threadIdx.x + 1;
i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if(i < (N/2)-1 && j < N-1){
u(i,j) = h * (u_old(i-1,j) + u_old(i+1,j) + u_old(i,j-1) + u_old(i,j+1) + delta_sq * (double)f(i,j));
} else if(i == (N/2) && j < N-1){
u(i-1,j) = h * (u_old(i-2,j) + u_old_e(0,j) + u_old(i-1,j-1) + u_old(i-1,j+1) + delta_sq * (double)f(i-1,j));
}
}
__global__ void jacobi_3_1(int N, double *U, double *U_old, double *U_old_e, int *F, double h, double delta_sq) {
int i,j;
j = blockIdx.x * blockDim.x + threadIdx.x + 1;
i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if(i < (N/2)-1 && j < N-1){
if(i == 1){
u(i-1,j) = h * (u_old_e((N/2)-1,j) + u_old(i,j) + u_old(i-1,j-1) + u_old(i-1,j+1) + delta_sq * (double)f(i-1,j));
}
u(i,j) = h * (u_old(i-1,j) + u_old(i+1,j) + u_old(i,j-1) + u_old(i,j+1) + delta_sq * (double)f(i,j));
}
}
/* int i, j, k = 0;
double h = 1.0 / 4.0;
double delta = 2/((double)N - 1.0), delta_sq = delta * delta;
double *tmp;
double * U_gpu, * U_old_gpu, * F_gpu;
#pragma omp parallel firstprivate(k,U,U_old) private(i,j,tmp) \
shared(N, max_it, F, h, delta, delta_sq)
{
// Initialize U and U_old
#pragma omp for
for (i=0; i<N; i++) {
u_old(i,0) = 20.0;
u_old(0,i) = 20.0;
u_old(i,N-1) = 20.0;
u(i,0) = 20.0;
u(0,i) = 20.0;
u(i,N-1) = 20.0;
}
#pragma omp for
for (i=1; i<N; i++) {
for (j=1; j<N-1; j++) {
u_old(i,j) = 0.0;
}
}
} // end omp parallel
while (k < max_it) {
// Update U
for (i=1; i<N-1; i++) {
for (j=1; j<N-1; j++) {
u(i,j) = h * (u_old(i-1,j) + u_old(i+1,j) + u_old(i,j-1) + u_old(i,j+1) + delta_sq * (double)f(i,j));
}
}
//Swap Pointers
tmp = U;
U = U_old;
U_old = tmp;
k++;
}
} */
| c2efb3936d7a71ec8fc673b0c176114d9d098fb2.cu | #include <stdio.h>
#include <omp.h>
#include <math.h>
#include "jacobi_gpu.h"
#define u(i,j) U[(i)*N + (j)]
#define u_old(i,j) U_old[(i)*N + (j)]
#define u_old_e(i,j) U_old_e[(i)*N + (j)]
#define f(i,j) F[(i)*N + (j)]
__global__ void jacobi_1(int N, double *U, double *U_old, int *F, double h, double delta_sq) {
int i,j;
// Update U
for (i=1; i<N-1; i++) {
for (j=1; j<N-1; j++) {
u(i,j) = h * (u_old(i-1,j) + u_old(i+1,j) + u_old(i,j-1) + u_old(i,j+1) + delta_sq * (double)f(i,j));
}
}
}
__global__ void jacobi_2(int N, double *U, double *U_old, int *F, double h, double delta_sq) {
int i, j;
j = blockIdx.x * blockDim.x + threadIdx.x + 1;
i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if(i < N-1 && j < N-1){
u(i,j) = h * (u_old(i-1,j) + u_old(i+1,j) + u_old(i,j-1) + u_old(i,j+1) + delta_sq * (double)f(i,j));
}
//Swap Pointers
}
__global__ void jacobi_3_0(int N, double *U, double *U_old, double *U_old_e, int *F, double h, double delta_sq) {
int i,j;
j = blockIdx.x * blockDim.x + threadIdx.x + 1;
i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if(i < (N/2)-1 && j < N-1){
u(i,j) = h * (u_old(i-1,j) + u_old(i+1,j) + u_old(i,j-1) + u_old(i,j+1) + delta_sq * (double)f(i,j));
} else if(i == (N/2) && j < N-1){
u(i-1,j) = h * (u_old(i-2,j) + u_old_e(0,j) + u_old(i-1,j-1) + u_old(i-1,j+1) + delta_sq * (double)f(i-1,j));
}
}
__global__ void jacobi_3_1(int N, double *U, double *U_old, double *U_old_e, int *F, double h, double delta_sq) {
int i,j;
j = blockIdx.x * blockDim.x + threadIdx.x + 1;
i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if(i < (N/2)-1 && j < N-1){
if(i == 1){
u(i-1,j) = h * (u_old_e((N/2)-1,j) + u_old(i,j) + u_old(i-1,j-1) + u_old(i-1,j+1) + delta_sq * (double)f(i-1,j));
}
u(i,j) = h * (u_old(i-1,j) + u_old(i+1,j) + u_old(i,j-1) + u_old(i,j+1) + delta_sq * (double)f(i,j));
}
}
/* int i, j, k = 0;
double h = 1.0 / 4.0;
double delta = 2/((double)N - 1.0), delta_sq = delta * delta;
double *tmp;
double * U_gpu, * U_old_gpu, * F_gpu;
#pragma omp parallel firstprivate(k,U,U_old) private(i,j,tmp) \
shared(N, max_it, F, h, delta, delta_sq)
{
// Initialize U and U_old
#pragma omp for
for (i=0; i<N; i++) {
u_old(i,0) = 20.0;
u_old(0,i) = 20.0;
u_old(i,N-1) = 20.0;
u(i,0) = 20.0;
u(0,i) = 20.0;
u(i,N-1) = 20.0;
}
#pragma omp for
for (i=1; i<N; i++) {
for (j=1; j<N-1; j++) {
u_old(i,j) = 0.0;
}
}
} // end omp parallel
while (k < max_it) {
// Update U
for (i=1; i<N-1; i++) {
for (j=1; j<N-1; j++) {
u(i,j) = h * (u_old(i-1,j) + u_old(i+1,j) + u_old(i,j-1) + u_old(i,j+1) + delta_sq * (double)f(i,j));
}
}
//Swap Pointers
tmp = U;
U = U_old;
U_old = tmp;
k++;
}
} */
|
82fb95fc67e5350679af791f52d660b8cc6920af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "sru_kernel.h"
typedef float* __restrict__ cu_float;
__forceinline__ __device__ float sigmoidf(float x)
{
return 1.f / (1.f + expf(-x));
};
__forceinline__ __device__ float reluf(float x)
{
return (x > 0.f) ? x : 0.f;
};
__global__ void sru_fwd_kernel(const cu_float u, const cu_float x, const cu_float bias,
cu_float init, const cu_float mask_h, const int len,
int batch, const int d, const int k,
cu_float h, cu_float c, const int activation_type)
{
assert ((k == 3) || (x == NULL));
int ncols = batch*d;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float bias1 = *(bias + (col%d));
const float bias2 = *(bias + (col%d) + d);
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float cur = *(init + col);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
float *cp = c + col;
float *hp = h + col;
for (int row = 0; row < len; ++row)
{
float g1 = sigmoidf((*(up+1))+bias1);
float g2 = sigmoidf((*(up+2))+bias2);
cur = (cur-(*up))*g1 + (*up);
*cp = cur;
float val = (activation_type == 1) ? tanh(cur) : (
(activation_type == 2) ? reluf(cur) : cur
);
*hp = (val*mask-(*xp))*g2 + (*xp);
up += ncols_u;
xp += ncols_x;
cp += ncols;
hp += ncols;
}
}
__global__ void sru_bwd_kernel(const cu_float u, const cu_float x,
cu_float bias, const cu_float init,
cu_float mask_h, const cu_float c,
cu_float grad_h, const cu_float grad_last,
int len, const int batch, const int d, const int k,
cu_float grad_u, cu_float grad_x, cu_float grad_bias,
cu_float grad_init, int activation_type)
{
assert((k == 3) || (x == NULL));
assert((k == 3) || (grad_x == NULL));
int ncols = batch*d;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float bias1 = *(bias + (col%d));
const float bias2 = *(bias + (col%d) + d);
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float gbias1 = 0;
float gbias2 = 0;
float cur = *(grad_last + col);
const float *up = u + (col*k) + (len-1)*ncols_u;
const float *xp = (k == 3) ? (x + col + (len-1)*ncols) : (up + 3);
const float *cp = c + col + (len-1)*ncols;
const float *ghp = grad_h + col + (len-1)*ncols;
float *gup = grad_u + (col*k) + (len-1)*ncols_u;
float *gxp = (k == 3) ? (grad_x + col + (len-1)*ncols) : (gup + 3);
for (int row = len-1; row >= 0; --row)
{
const float g1 = sigmoidf((*(up+1))+bias1);
const float g2 = sigmoidf((*(up+2))+bias2);
const float c_val = (activation_type == 1) ? tanh(*cp) : (
(activation_type == 2) ? reluf(*cp) : (*cp)
);
const float x_val = *xp;
const float u_val = *up;
const float prev_c_val = (row>0) ? (*(cp-ncols)) : (*(init+col));
const float gh_val = *ghp;
// h = c*g2 + x*(1-g2) = (c-x)*g2 + x
// c = c'*g1 + g0*(1-g1) = (c'-g0)*g1 + g0
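        // Chain-rule sketch for the gradient updates below (derived from the two identities
        // above; "act" denotes the optional tanh/ReLU applied to c, and cur carries the
        // gradient flowing back from the following time step):
        //   dh/dx      = 1 - g2                                    -> *gxp
        //   dh/dg2     = act(c)*mask - x,  dg2/du2 = g2*(1-g2)     -> gg2
        //   dh/dc      = g2*mask*act'(c)   (plus cur)              -> gc
        //   dc/du0     = 1 - g1                                    -> *gup
        //   dc/dg1     = c_prev - u0,      dg1/du1 = g1*(1-g1)     -> gg1
        //   dc/dc_prev = g1                                        -> cur for the next iteration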
// grad wrt x
*gxp = gh_val*(1-g2);
// grad wrt g2, u2 and bias2
float gg2 = gh_val*(c_val*mask-x_val)*(g2*(1-g2));
*(gup+2) = gg2;
gbias2 += gg2;
// grad wrt c
const float tmp = (activation_type == 1) ? (g2*(1-c_val*c_val)) : (
((activation_type == 0) || (c_val > 0)) ? g2 : 0.f
);
const float gc = gh_val*mask*tmp + cur;
// grad wrt u0
*gup = gc*(1-g1);
// grad wrt g1, u1, and bias1
float gg1 = gc*(prev_c_val-u_val)*(g1*(1-g1));
*(gup+1) = gg1;
gbias1 += gg1;
// grad wrt c'
cur = gc*g1;
up -= ncols_u;
xp -= ncols_x;
cp -= ncols;
gup -= ncols_u;
gxp -= ncols_x;
ghp -= ncols;
}
*(grad_bias + col) = gbias1;
*(grad_bias + col + ncols) = gbias2;
*(grad_init +col) = cur;
}
__global__ void sru_bi_fwd_kernel(const cu_float u, const cu_float x,
cu_float bias, const cu_float init,
cu_float mask_h, const int len,
int batch, const int d, const int k,
cu_float h, cu_float c, const int activation_type)
{
assert ((k == 3) || (x == NULL));
assert ((k == 3) || (k == 4));
int ncols = batch*d*2;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float cur = *(init + col);
const int d2 = d*2;
const bool flip = (col%d2) >= d;
const float bias1 = *(bias + (col%d2));
const float bias2 = *(bias + (col%d2) + d2);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
float *cp = c + col;
float *hp = h + col;
if (flip) {
up += (len-1)*ncols_u;
xp += (len-1)*ncols_x;
cp += (len-1)*ncols;
hp += (len-1)*ncols;
}
int ncols_u_ = flip ? -ncols_u : ncols_u;
int ncols_x_ = flip ? -ncols_x : ncols_x;
int ncols_ = flip ? -ncols : ncols;
for (int cnt = 0; cnt < len; ++cnt)
{
float g1 = sigmoidf((*(up+1))+bias1);
float g2 = sigmoidf((*(up+2))+bias2);
cur = (cur-(*up))*g1 + (*up);
*cp = cur;
float val = (activation_type == 1) ? tanh(cur) : (
(activation_type == 2) ? reluf(cur) : cur
);
*hp = (val*mask-(*xp))*g2 + (*xp);
up += ncols_u_;
xp += ncols_x_;
cp += ncols_;
hp += ncols_;
}
}
__global__ void sru_bi_bwd_kernel(const cu_float u, const cu_float x,
cu_float bias, const cu_float init,
cu_float mask_h, const cu_float c,
cu_float grad_h, const cu_float grad_last,
int len, const int batch, const int d,
int k, cu_float grad_u, cu_float grad_x,
cu_float grad_bias, cu_float grad_init, int activation_type)
{
assert((k == 3) || (x == NULL));
assert((k == 3) || (grad_x == NULL));
assert((k == 3) || (k == 4));
int ncols = batch*d*2;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float gbias1 = 0;
float gbias2 = 0;
float cur = *(grad_last + col);
const int d2 = d*2;
const bool flip = ((col%d2) >= d);
const float bias1 = *(bias + (col%d2));
const float bias2 = *(bias + (col%d2) + d2);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
const float *cp = c + col;
const float *ghp = grad_h + col;
float *gup = grad_u + (col*k);
float *gxp = (k == 3) ? (grad_x + col) : (gup + 3);
if (!flip) {
up += (len-1)*ncols_u;
xp += (len-1)*ncols_x;
cp += (len-1)*ncols;
ghp += (len-1)*ncols;
gup += (len-1)*ncols_u;
gxp += (len-1)*ncols_x;
}
int ncols_u_ = flip ? -ncols_u : ncols_u;
int ncols_x_ = flip ? -ncols_x : ncols_x;
int ncols_ = flip ? -ncols : ncols;
for (int cnt = 0; cnt < len; ++cnt)
{
const float g1 = sigmoidf((*(up+1))+bias1);
const float g2 = sigmoidf((*(up+2))+bias2);
const float c_val = (activation_type == 1) ? tanh(*cp) : (
(activation_type == 2) ? reluf(*cp) : (*cp)
);
const float x_val = *xp;
const float u_val = *up;
const float prev_c_val = (cnt<len-1) ? (*(cp-ncols_)) : (*(init+col));
const float gh_val = *ghp;
// h = c*g2 + x*(1-g2) = (c-x)*g2 + x
// c = c'*g1 + g0*(1-g1) = (c'-g0)*g1 + g0
// grad wrt x
*gxp = gh_val*(1-g2);
// grad wrt g2, u2 and bias2
float gg2 = gh_val*(c_val*mask-x_val)*(g2*(1-g2));
*(gup+2) = gg2;
gbias2 += gg2;
// grad wrt c
const float tmp = (activation_type == 1) ? (g2*(1-c_val*c_val)) : (
((activation_type == 0) || (c_val > 0)) ? g2 : 0.f
);
const float gc = gh_val*mask*tmp + cur;
// grad wrt u0
*gup = gc*(1-g1);
// grad wrt g1, u1, and bias1
float gg1 = gc*(prev_c_val-u_val)*(g1*(1-g1));
*(gup+1) = gg1;
gbias1 += gg1;
// grad wrt c'
cur = gc*g1;
up -= ncols_u_;
xp -= ncols_x_;
cp -= ncols_;
gup -= ncols_u_;
gxp -= ncols_x_;
ghp -= ncols_;
}
*(grad_bias + col) = gbias1;
*(grad_bias + col + ncols) = gbias2;
*(grad_init +col) = cur;
}
void sru_fwd_cu(float* u, float* x, float* bias,
float* init, float* mask_h, const int len,
int batch, const int d, const int k,
float* h, float* c, const int activation_type, hipStream_t stream)
{
int n_cols = batch * d;
int threads_per_block = min(512, n_cols);
int blocks_per_grid = (n_cols - 1) / threads_per_block + 1;
hipLaunchKernelGGL(( sru_fwd_kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, stream,
u, x, bias, init, mask_h, len, batch, d, k, h, c, activation_type);
if (hipSuccess != hipGetLastError())
{
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(hipGetLastError()));
exit(-1);
}
}
void sru_bwd_cu(float* u, float* x,
float* bias, float* init,
float* mask_h, float* c,
float* grad_h, float* grad_last,
int len, const int batch, const int d, const int k,
float* grad_u, float* grad_x, float* grad_bias,
float* grad_init, int activation_type, hipStream_t stream)
{
int n_cols = batch * d;
int threads_per_block = min(512, n_cols);
int blocks_per_grid = (n_cols - 1) / threads_per_block + 1;
hipLaunchKernelGGL(( sru_bwd_kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, stream, u, x, bias, init, mask_h, c,
grad_h, grad_last, len, batch, d, k, grad_u, grad_x, grad_bias, grad_init, activation_type);
if (hipSuccess != hipGetLastError())
{
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(hipGetLastError()));
exit(-1);
}
}
void sru_bi_fwd_cu(float* u, float* x,
float* bias, float* init,
float* mask_h, const int len,
int batch, const int d, const int k,
float* h, float* c, const int activation_type, hipStream_t stream)
{
int n_cols = batch * d * 2;
int threads_per_block = min(512, n_cols);
int blocks_per_grid = (n_cols - 1) / threads_per_block + 1;
hipLaunchKernelGGL(( sru_bi_fwd_kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, stream, u, x, bias,
init, mask_h, len, batch, d, k, h, c, activation_type);
if (hipSuccess != hipGetLastError())
{
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(hipGetLastError()));
exit(-1);
}
}
void sru_bi_bwd_cu(float* u, float* x,
float* bias, float* init,
float* mask_h, float* c,
float* grad_h, float* grad_last,
int len, const int batch, const int d,
int k, float* grad_u, float* grad_x,
float* grad_bias, float* grad_init, int activation_type, hipStream_t stream)
{
int n_cols = batch * d * 2;
int threads_per_block = min(512, n_cols);
int blocks_per_grid = (n_cols - 1) / threads_per_block + 1;
hipLaunchKernelGGL(( sru_bi_bwd_kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, stream, u, x, bias, init, mask_h, c,
grad_h, grad_last, len, batch, d, k, grad_u, grad_x, grad_bias, grad_init, activation_type);
if (hipSuccess != hipGetLastError())
{
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(hipGetLastError()));
exit(-1);
}
}
| 82fb95fc67e5350679af791f52d660b8cc6920af.cu | #include <assert.h>
#include <stdio.h>
#include "sru_kernel.h"
typedef float* __restrict__ cu_float;
__forceinline__ __device__ float sigmoidf(float x)
{
return 1.f / (1.f + expf(-x));
};
__forceinline__ __device__ float reluf(float x)
{
return (x > 0.f) ? x : 0.f;
};
__global__ void sru_fwd_kernel(const cu_float u, const cu_float x, const cu_float bias,
cu_float init, const cu_float mask_h, const int len,
int batch, const int d, const int k,
cu_float h, cu_float c, const int activation_type)
{
assert ((k == 3) || (x == NULL));
int ncols = batch*d;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float bias1 = *(bias + (col%d));
const float bias2 = *(bias + (col%d) + d);
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float cur = *(init + col);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
float *cp = c + col;
float *hp = h + col;
for (int row = 0; row < len; ++row)
{
float g1 = sigmoidf((*(up+1))+bias1);
float g2 = sigmoidf((*(up+2))+bias2);
cur = (cur-(*up))*g1 + (*up);
*cp = cur;
float val = (activation_type == 1) ? tanh(cur) : (
(activation_type == 2) ? reluf(cur) : cur
);
*hp = (val*mask-(*xp))*g2 + (*xp);
up += ncols_u;
xp += ncols_x;
cp += ncols;
hp += ncols;
}
}
__global__ void sru_bwd_kernel(const cu_float u, const cu_float x,
cu_float bias, const cu_float init,
cu_float mask_h, const cu_float c,
cu_float grad_h, const cu_float grad_last,
int len, const int batch, const int d, const int k,
cu_float grad_u, cu_float grad_x, cu_float grad_bias,
cu_float grad_init, int activation_type)
{
assert((k == 3) || (x == NULL));
assert((k == 3) || (grad_x == NULL));
int ncols = batch*d;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float bias1 = *(bias + (col%d));
const float bias2 = *(bias + (col%d) + d);
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float gbias1 = 0;
float gbias2 = 0;
float cur = *(grad_last + col);
const float *up = u + (col*k) + (len-1)*ncols_u;
const float *xp = (k == 3) ? (x + col + (len-1)*ncols) : (up + 3);
const float *cp = c + col + (len-1)*ncols;
const float *ghp = grad_h + col + (len-1)*ncols;
float *gup = grad_u + (col*k) + (len-1)*ncols_u;
float *gxp = (k == 3) ? (grad_x + col + (len-1)*ncols) : (gup + 3);
for (int row = len-1; row >= 0; --row)
{
const float g1 = sigmoidf((*(up+1))+bias1);
const float g2 = sigmoidf((*(up+2))+bias2);
const float c_val = (activation_type == 1) ? tanh(*cp) : (
(activation_type == 2) ? reluf(*cp) : (*cp)
);
const float x_val = *xp;
const float u_val = *up;
const float prev_c_val = (row>0) ? (*(cp-ncols)) : (*(init+col));
const float gh_val = *ghp;
// h = c*g2 + x*(1-g2) = (c-x)*g2 + x
// c = c'*g1 + g0*(1-g1) = (c'-g0)*g1 + g0
// grad wrt x
*gxp = gh_val*(1-g2);
// grad wrt g2, u2 and bias2
float gg2 = gh_val*(c_val*mask-x_val)*(g2*(1-g2));
*(gup+2) = gg2;
gbias2 += gg2;
// grad wrt c
const float tmp = (activation_type == 1) ? (g2*(1-c_val*c_val)) : (
((activation_type == 0) || (c_val > 0)) ? g2 : 0.f
);
const float gc = gh_val*mask*tmp + cur;
// grad wrt u0
*gup = gc*(1-g1);
// grad wrt g1, u1, and bias1
float gg1 = gc*(prev_c_val-u_val)*(g1*(1-g1));
*(gup+1) = gg1;
gbias1 += gg1;
// grad wrt c'
cur = gc*g1;
up -= ncols_u;
xp -= ncols_x;
cp -= ncols;
gup -= ncols_u;
gxp -= ncols_x;
ghp -= ncols;
}
*(grad_bias + col) = gbias1;
*(grad_bias + col + ncols) = gbias2;
*(grad_init +col) = cur;
}
__global__ void sru_bi_fwd_kernel(const cu_float u, const cu_float x,
cu_float bias, const cu_float init,
cu_float mask_h, const int len,
int batch, const int d, const int k,
cu_float h, cu_float c, const int activation_type)
{
assert ((k == 3) || (x == NULL));
assert ((k == 3) || (k == 4));
int ncols = batch*d*2;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float cur = *(init + col);
const int d2 = d*2;
const bool flip = (col%d2) >= d;
const float bias1 = *(bias + (col%d2));
const float bias2 = *(bias + (col%d2) + d2);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
float *cp = c + col;
float *hp = h + col;
if (flip) {
up += (len-1)*ncols_u;
xp += (len-1)*ncols_x;
cp += (len-1)*ncols;
hp += (len-1)*ncols;
}
int ncols_u_ = flip ? -ncols_u : ncols_u;
int ncols_x_ = flip ? -ncols_x : ncols_x;
int ncols_ = flip ? -ncols : ncols;
for (int cnt = 0; cnt < len; ++cnt)
{
float g1 = sigmoidf((*(up+1))+bias1);
float g2 = sigmoidf((*(up+2))+bias2);
cur = (cur-(*up))*g1 + (*up);
*cp = cur;
float val = (activation_type == 1) ? tanh(cur) : (
(activation_type == 2) ? reluf(cur) : cur
);
*hp = (val*mask-(*xp))*g2 + (*xp);
up += ncols_u_;
xp += ncols_x_;
cp += ncols_;
hp += ncols_;
}
}
__global__ void sru_bi_bwd_kernel(const cu_float u, const cu_float x,
cu_float bias, const cu_float init,
cu_float mask_h, const cu_float c,
cu_float grad_h, const cu_float grad_last,
int len, const int batch, const int d,
int k, cu_float grad_u, cu_float grad_x,
cu_float grad_bias, cu_float grad_init, int activation_type)
{
assert((k == 3) || (x == NULL));
assert((k == 3) || (grad_x == NULL));
assert((k == 3) || (k == 4));
int ncols = batch*d*2;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float gbias1 = 0;
float gbias2 = 0;
float cur = *(grad_last + col);
const int d2 = d*2;
const bool flip = ((col%d2) >= d);
const float bias1 = *(bias + (col%d2));
const float bias2 = *(bias + (col%d2) + d2);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
const float *cp = c + col;
const float *ghp = grad_h + col;
float *gup = grad_u + (col*k);
float *gxp = (k == 3) ? (grad_x + col) : (gup + 3);
if (!flip) {
up += (len-1)*ncols_u;
xp += (len-1)*ncols_x;
cp += (len-1)*ncols;
ghp += (len-1)*ncols;
gup += (len-1)*ncols_u;
gxp += (len-1)*ncols_x;
}
int ncols_u_ = flip ? -ncols_u : ncols_u;
int ncols_x_ = flip ? -ncols_x : ncols_x;
int ncols_ = flip ? -ncols : ncols;
for (int cnt = 0; cnt < len; ++cnt)
{
const float g1 = sigmoidf((*(up+1))+bias1);
const float g2 = sigmoidf((*(up+2))+bias2);
const float c_val = (activation_type == 1) ? tanh(*cp) : (
(activation_type == 2) ? reluf(*cp) : (*cp)
);
const float x_val = *xp;
const float u_val = *up;
const float prev_c_val = (cnt<len-1) ? (*(cp-ncols_)) : (*(init+col));
const float gh_val = *ghp;
// h = c*g2 + x*(1-g2) = (c-x)*g2 + x
// c = c'*g1 + g0*(1-g1) = (c'-g0)*g1 + g0
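// The assignments below follow from differentiating the two identities above
// (added for clarity; g0 denotes *up, act() the chosen activation):
//   dh/dx  = 1 - g2
//   dh/dg2 = act(c)*mask - x,  and dg2/du2 = g2*(1-g2)   (sigmoid)
//   dh/dc  = g2*mask*act'(c)   (`cur` then adds the gradient arriving through c'
//                               of the step processed just before this one)
//   dc/dg0 = 1 - g1
//   dc/dg1 = c' - g0,          and dg1/du1 = g1*(1-g1)
//   dc/dc' = g1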
// grad wrt x
*gxp = gh_val*(1-g2);
// grad wrt g2, u2 and bias2
float gg2 = gh_val*(c_val*mask-x_val)*(g2*(1-g2));
*(gup+2) = gg2;
gbias2 += gg2;
// grad wrt c
const float tmp = (activation_type == 1) ? (g2*(1-c_val*c_val)) : (
((activation_type == 0) || (c_val > 0)) ? g2 : 0.f
);
const float gc = gh_val*mask*tmp + cur;
// grad wrt u0
*gup = gc*(1-g1);
// grad wrt g1, u1, and bias1
float gg1 = gc*(prev_c_val-u_val)*(g1*(1-g1));
*(gup+1) = gg1;
gbias1 += gg1;
// grad wrt c'
cur = gc*g1;
up -= ncols_u_;
xp -= ncols_x_;
cp -= ncols_;
gup -= ncols_u_;
gxp -= ncols_x_;
ghp -= ncols_;
}
*(grad_bias + col) = gbias1;
*(grad_bias + col + ncols) = gbias2;
*(grad_init +col) = cur;
}
void sru_fwd_cu(float* u, float* x, float* bias,
float* init, float* mask_h, const int len,
int batch, const int d, const int k,
float* h, float* c, const int activation_type, cudaStream_t stream)
{
int n_cols = batch * d;
int threads_per_block = min(512, n_cols);
int blocks_per_grid = (n_cols - 1) / threads_per_block + 1;
sru_fwd_kernel<<<blocks_per_grid, threads_per_block, 0, stream>>>(
u, x, bias, init, mask_h, len, batch, d, k, h, c, activation_type);
cudaError_t err = cudaGetLastError(); // read once: cudaGetLastError() also clears the error
if (cudaSuccess != err)
{
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
void sru_bwd_cu(float* u, float* x,
float* bias, float* init,
float* mask_h, float* c,
float* grad_h, float* grad_last,
int len, const int batch, const int d, const int k,
float* grad_u, float* grad_x, float* grad_bias,
float* grad_init, int activation_type, cudaStream_t stream)
{
int n_cols = batch * d;
int threads_per_block = min(512, n_cols);
int blocks_per_grid = (n_cols - 1) / threads_per_block + 1;
sru_bwd_kernel<<<blocks_per_grid, threads_per_block, 0, stream>>>(u, x, bias, init, mask_h, c,
grad_h, grad_last, len, batch, d, k, grad_u, grad_x, grad_bias, grad_init, activation_type);
cudaError_t err = cudaGetLastError(); // read once: cudaGetLastError() also clears the error
if (cudaSuccess != err)
{
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
void sru_bi_fwd_cu(float* u, float* x,
float* bias, float* init,
float* mask_h, const int len,
int batch, const int d, const int k,
float* h, float* c, const int activation_type, cudaStream_t stream)
{
int n_cols = batch * d * 2;
int threads_per_block = min(512, n_cols);
int blocks_per_grid = (n_cols - 1) / threads_per_block + 1;
sru_bi_fwd_kernel<<<blocks_per_grid, threads_per_block, 0, stream>>>(u, x, bias,
init, mask_h, len, batch, d, k, h, c, activation_type);
cudaError_t err = cudaGetLastError(); // read once: cudaGetLastError() also clears the error
if (cudaSuccess != err)
{
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
void sru_bi_bwd_cu(float* u, float* x,
float* bias, float* init,
float* mask_h, float* c,
float* grad_h, float* grad_last,
int len, const int batch, const int d,
int k, float* grad_u, float* grad_x,
float* grad_bias, float* grad_init, int activation_type, cudaStream_t stream)
{
int n_cols = batch * d * 2;
int threads_per_block = min(512, n_cols);
int blocks_per_grid = (n_cols - 1) / threads_per_block + 1;
sru_bi_bwd_kernel<<<blocks_per_grid, threads_per_block, 0, stream>>>(u, x, bias, init, mask_h, c,
grad_h, grad_last, len, batch, d, k, grad_u, grad_x, grad_bias, grad_init, activation_type);
cudaError_t err = cudaGetLastError(); // read once: cudaGetLastError() also clears the error
if (cudaSuccess != err)
{
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
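// Illustrative only: a minimal host-side sketch of driving the bidirectional
// forward launcher above. All sizes, buffer names and the use of the default
// stream are assumptions made for this sketch; they are not part of the original
// file. With k == 4 the highway input x lives in the 4th slice of u, so x is NULL.
static void sru_bi_fwd_example()
{
    const int len = 32, batch = 8, d = 64, k = 4, activation_type = 1;
    const size_t ncols = (size_t)batch * d * 2;              // both directions
    float *u, *bias, *init, *h, *c;
    cudaMalloc(&u,    (size_t)len * ncols * k * sizeof(float));
    cudaMalloc(&bias, (size_t)4 * d * sizeof(float));        // two gates x two directions
    cudaMalloc(&init, ncols * sizeof(float));
    cudaMalloc(&h,    (size_t)len * ncols * sizeof(float));
    cudaMalloc(&c,    (size_t)len * ncols * sizeof(float));
    // Zero the inputs so the sketch runs deterministically; a real caller fills them.
    cudaMemset(u, 0, (size_t)len * ncols * k * sizeof(float));
    cudaMemset(bias, 0, (size_t)4 * d * sizeof(float));
    cudaMemset(init, 0, ncols * sizeof(float));
    sru_bi_fwd_cu(u, /*x=*/NULL, bias, init, /*mask_h=*/NULL,
                  len, batch, d, k, h, c, activation_type, /*stream=*/0);
    cudaFree(u); cudaFree(bias); cudaFree(init); cudaFree(h); cudaFree(c);
}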
|
8eae302df648c6b8afd74ef48d346a4975244971.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
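// Maps a 2-D texture offset (tx, ty) to one of six concentric layers: x and y are
// |t|/radius rounded to the nearest integer, the diagonal cell (1, 1) is folded
// into layer 2, otherwise the layer is x+y (plus one once the sum reaches 2),
// clamped to layer 5.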
__device__ int compute_layer(float tx, float ty, float radius) {
int x = int(std::abs(tx) / radius + 0.5);
int y = int(std::abs(ty) / radius + 0.5);
if (x == 1 && y == 1)
return 2;
int c = 0;
if (x + y < 2)
c = x + y;
else
c = x + y + 1;
if (c > 5)
c = 5;
return c;
}
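// For each (batch, group), averages the features of its valid neighbors (a tex
// x-coordinate > 1e20 marks an invalid point) into six per-layer slots of `out`,
// then fills every empty layer by linearly interpolating between the nearest
// non-empty layers on either side (or copying the single available one).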
__global__ void five_kernel(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, int num_feat_per_threads,
const float* points, const float* tex, const int* idx, float* out, float radius)
{
int b = blockIdx.y;
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= num_groups)
return;
int feat_start = num_feat_per_threads * blockIdx.z;
int feat_end = feat_start + num_feat_per_threads;
if (feat_end >= num_featdim)
feat_end = num_featdim;
const int* group_array = idx + (b * num_groups + group_idx) * num_neighbors;
const float* tex_array = tex + 2 * num_points * b;
const float* points_array = points + (b * num_points * num_featdim);
float* out_array = out + (b * num_groups + group_idx) * (6 * num_featdim);
int layers_counts[6] = {0, 0, 0, 0, 0, 0};
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
layers_counts[layer] += 1;
}
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
float* out_temp = out_array + layer * num_featdim;
const float* point_temp = points_array + index * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
out_temp[j] += point_temp[j] / layers_counts[layer];
}
}
for (int i = 0; i < 6; ++i) {
if (layers_counts[i] == 0) {
int front = i;
int rear = i;
float weight_front = 0.0f;
float weight_rear = 0.0f;
while (front >= 0 && layers_counts[front] == 0)
front -= 1;
while (rear < 6 && layers_counts[rear] == 0)
rear += 1;
if (front >= 0 && rear < 6) {
weight_rear = (i - front) / (rear - front + 0.0f);
weight_front = 1.0f - weight_rear;
}
else if (front >= 0) {
weight_front = 1.0f;
weight_rear = 0.0f;
rear = 5;
}
else {
weight_front = 0.0f;
weight_rear = 1.0f;
front = 0;
}
float* out_temp = out_array + i * num_featdim;
float* out_front = out_array + front * num_featdim;
float* out_rear = out_array + rear * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
out_temp[j] = out_front[j] * weight_front + out_rear[j] * weight_rear;
}
}
}
}
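// Backward pass of five_kernel: recomputes the per-layer counts and the
// interpolation weights, then routes the incoming gradient (including the share
// forwarded through interpolated empty layers) back to the contributing points
// via atomicAdd.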
__global__ void fivegrad_kernel(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, int num_feat_per_threads,
const float* points, const float* tex, const int* idx, const float* grad_out, float* grad_points, float radius)
{
int b = blockIdx.y;
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= num_groups)
return;
int feat_start = num_feat_per_threads * blockIdx.z;
int feat_end = feat_start + num_feat_per_threads;
if (feat_end >= num_featdim)
feat_end = num_featdim;
const int* group_array = idx + (b * num_groups + group_idx) * num_neighbors;
const float* tex_array = tex + 2 * num_points * b;
float* points_array = grad_points + (b * num_points * num_featdim);
const float* out_array = grad_out + (b * num_groups + group_idx) * (6 * num_featdim);
int layers_counts[6] = {0, 0, 0, 0, 0, 0};
float weights_front[6] = {0, 0, 0, 0, 0, 0};
float weights_rear[6] = {0, 0, 0, 0, 0, 0};
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
layers_counts[layer] += 1;
}
for (int i = 0; i < 6; ++i) {
if (layers_counts[i] == 0) {
int front = i;
int rear = i;
float weight_front = 0.0f;
float weight_rear = 0.0f;
while (front >= 0 && layers_counts[front] == 0)
front -= 1;
while (rear < 6 && layers_counts[rear] == 0)
rear += 1;
if (front >= 0 && rear < 6) {
weight_rear = (i - front) / (rear - front + 0.0f);
weight_front = 1.0f - weight_rear;
}
else if (front >= 0) {
weight_front = 1.0f;
weight_rear = 0.0f;
rear = 5;
}
else {
weight_front = 0.0f;
weight_rear = 1.0f;
front = 0;
}
weights_front[i] = weight_front;
weights_rear[i] = weight_rear;
}
}
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
const float* out_temp = out_array + layer * num_featdim;
float* point_temp = points_array + index * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
float signal = out_temp[j];
int l = layer - 1;
const float* out_temp_step = out_temp - num_featdim;
while (l >= 0 && layers_counts[l] == 0) {
signal += out_temp_step[j] * weights_rear[l];
out_temp_step -= num_featdim;
l -= 1;
}
l = layer + 1;
out_temp_step = out_temp + num_featdim;
while (l < 6 && layers_counts[l] == 0) {
signal += out_temp_step[j] * weights_front[l];
out_temp_step += num_featdim;
l += 1;
}
atomicAdd(&point_temp[j], signal / layers_counts[layer]);
}
}
}
void fivekernel_gpu(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, const float* points, const float* tex, const int* idx, float* out, float radius) {
int num_threads_for_feat = (num_groups + 255) / num_groups;
int num_feat_per_threads = (num_featdim + num_threads_for_feat - 1) / num_threads_for_feat;
hipLaunchKernelGGL(( five_kernel), dim3(dim3((num_groups + 255) / 256, batch_size, num_threads_for_feat)), dim3(dim3(256, 1, 1)), 0, 0, batch_size, num_points, num_featdim, num_neighbors, num_groups, num_feat_per_threads, points, tex, idx, out, radius);
}
void fivekernelgrad_gpu(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, const float* points, const float* tex, const int* idx, const float* grad_out, float* grad_points, float radius) {
int num_threads_for_feat = (num_groups + 255) / num_groups;
int num_feat_per_threads = (num_featdim + num_threads_for_feat - 1) / num_threads_for_feat;
hipLaunchKernelGGL(( fivegrad_kernel), dim3(dim3((num_groups + 255) / 256, batch_size, num_threads_for_feat)), dim3(dim3(256, 1, 1)), 0, 0,
batch_size, num_points, num_featdim, num_neighbors, num_groups, num_feat_per_threads, points, tex, idx, grad_out, grad_points, radius);
} | 8eae302df648c6b8afd74ef48d346a4975244971.cu | #include <iostream>
__device__ int compute_layer(float tx, float ty, float radius) {
int x = int(std::abs(tx) / radius + 0.5);
int y = int(std::abs(ty) / radius + 0.5);
if (x == 1 && y == 1)
return 2;
int c = 0;
if (x + y < 2)
c = x + y;
else
c = x + y + 1;
if (c > 5)
c = 5;
return c;
}
__global__ void five_kernel(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, int num_feat_per_threads,
const float* points, const float* tex, const int* idx, float* out, float radius)
{
int b = blockIdx.y;
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= num_groups)
return;
int feat_start = num_feat_per_threads * blockIdx.z;
int feat_end = feat_start + num_feat_per_threads;
if (feat_end >= num_featdim)
feat_end = num_featdim;
const int* group_array = idx + (b * num_groups + group_idx) * num_neighbors;
const float* tex_array = tex + 2 * num_points * b;
const float* points_array = points + (b * num_points * num_featdim);
float* out_array = out + (b * num_groups + group_idx) * (6 * num_featdim);
int layers_counts[6] = {0, 0, 0, 0, 0, 0};
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
layers_counts[layer] += 1;
}
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
float* out_temp = out_array + layer * num_featdim;
const float* point_temp = points_array + index * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
out_temp[j] += point_temp[j] / layers_counts[layer];
}
}
for (int i = 0; i < 6; ++i) {
if (layers_counts[i] == 0) {
int front = i;
int rear = i;
float weight_front = 0.0f;
float weight_rear = 0.0f;
while (front >= 0 && layers_counts[front] == 0)
front -= 1;
while (rear < 6 && layers_counts[rear] == 0)
rear += 1;
if (front >= 0 && rear < 6) {
weight_rear = (i - front) / (rear - front + 0.0f);
weight_front = 1.0f - weight_rear;
}
else if (front >= 0) {
weight_front = 1.0f;
weight_rear = 0.0f;
rear = 5;
}
else {
weight_front = 0.0f;
weight_rear = 1.0f;
front = 0;
}
float* out_temp = out_array + i * num_featdim;
float* out_front = out_array + front * num_featdim;
float* out_rear = out_array + rear * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
out_temp[j] = out_front[j] * weight_front + out_rear[j] * weight_rear;
}
}
}
}
__global__ void fivegrad_kernel(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, int num_feat_per_threads,
const float* points, const float* tex, const int* idx, const float* grad_out, float* grad_points, float radius)
{
int b = blockIdx.y;
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= num_groups)
return;
int feat_start = num_feat_per_threads * blockIdx.z;
int feat_end = feat_start + num_feat_per_threads;
if (feat_end >= num_featdim)
feat_end = num_featdim;
const int* group_array = idx + (b * num_groups + group_idx) * num_neighbors;
const float* tex_array = tex + 2 * num_points * b;
float* points_array = grad_points + (b * num_points * num_featdim);
const float* out_array = grad_out + (b * num_groups + group_idx) * (6 * num_featdim);
int layers_counts[6] = {0, 0, 0, 0, 0, 0};
float weights_front[6] = {0, 0, 0, 0, 0, 0};
float weights_rear[6] = {0, 0, 0, 0, 0, 0};
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
layers_counts[layer] += 1;
}
for (int i = 0; i < 6; ++i) {
if (layers_counts[i] == 0) {
int front = i;
int rear = i;
float weight_front = 0.0f;
float weight_rear = 0.0f;
while (front >= 0 && layers_counts[front] == 0)
front -= 1;
while (rear < 6 && layers_counts[rear] == 0)
rear += 1;
if (front >= 0 && rear < 6) {
weight_rear = (i - front) / (rear - front + 0.0f);
weight_front = 1.0f - weight_rear;
}
else if (front >= 0) {
weight_front = 1.0f;
weight_rear = 0.0f;
rear = 5;
}
else {
weight_front = 0.0f;
weight_rear = 1.0f;
front = 0;
}
weights_front[i] = weight_front;
weights_rear[i] = weight_rear;
}
}
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
const float* out_temp = out_array + layer * num_featdim;
float* point_temp = points_array + index * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
float signal = out_temp[j];
int l = layer - 1;
const float* out_temp_step = out_temp - num_featdim;
while (l >= 0 && layers_counts[l] == 0) {
signal += out_temp_step[j] * weights_rear[l];
out_temp_step -= num_featdim;
l -= 1;
}
l = layer + 1;
out_temp_step = out_temp + num_featdim;
while (l < 6 && layers_counts[l] == 0) {
signal += out_temp_step[j] * weights_front[l];
out_temp_step += num_featdim;
l += 1;
}
atomicAdd(&point_temp[j], signal / layers_counts[layer]);
}
}
}
void fivekernel_gpu(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, const float* points, const float* tex, const int* idx, float* out, float radius) {
int num_threads_for_feat = (num_groups + 255) / num_groups;
int num_feat_per_threads = (num_featdim + num_threads_for_feat - 1) / num_threads_for_feat;
five_kernel<<<dim3((num_groups + 255) / 256, batch_size, num_threads_for_feat), dim3(256, 1, 1)>>>(batch_size, num_points, num_featdim, num_neighbors, num_groups, num_feat_per_threads, points, tex, idx, out, radius);
}
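// Illustrative only: a rough host-side sketch of the buffer shapes this launcher
// expects, inferred from the indexing in five_kernel; the sizes and names below
// are assumptions made for the sketch, not part of the original file.
//   points : [batch, num_points, num_featdim]    per-point features
//   tex    : [batch, num_points, 2]              2-D offsets (> 1e20 marks invalid)
//   idx    : [batch, num_groups, num_neighbors]  neighbor indices per group
//   out    : [batch, num_groups, 6*num_featdim]  one feature slot per layer
static void fivekernel_example()
{
    const int B = 2, N = 1024, F = 64, K = 16, G = 256;
    const float radius = 0.1f;
    float *points, *tex, *out;
    int *idx;
    cudaMalloc(&points, (size_t)B * N * F * sizeof(float));
    cudaMalloc(&tex,    (size_t)B * N * 2 * sizeof(float));
    cudaMalloc(&idx,    (size_t)B * G * K * sizeof(int));
    cudaMalloc(&out,    (size_t)B * G * 6 * F * sizeof(float));
    cudaMemset(tex, 0, (size_t)B * N * 2 * sizeof(float));     // placeholder coords
    cudaMemset(idx, 0, (size_t)B * G * K * sizeof(int));       // placeholder neighbor ids
    cudaMemset(out, 0, (size_t)B * G * 6 * F * sizeof(float)); // kernel accumulates into out
    fivekernel_gpu(B, N, F, K, G, points, tex, idx, out, radius);
    cudaFree(points); cudaFree(tex); cudaFree(idx); cudaFree(out);
}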
void fivekernelgrad_gpu(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, const float* points, const float* tex, const int* idx, const float* grad_out, float* grad_points, float radius) {
int num_threads_for_feat = (num_groups + 255) / num_groups;
int num_feat_per_threads = (num_featdim + num_threads_for_feat - 1) / num_threads_for_feat;
fivegrad_kernel<<<dim3((num_groups + 255) / 256, batch_size, num_threads_for_feat), dim3(256, 1, 1)>>>(
batch_size, num_points, num_featdim, num_neighbors, num_groups, num_feat_per_threads, points, tex, idx, grad_out, grad_points, radius);
} |
070140c53b19e84d62ded635b150bea92cfbc3e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/NestedTensorImpl.h>
#include <ATen/TensorAccessor.h>
#include <c10/util/Logging.h>
#include <c10/util/bit_cast.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPGraphsUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/NonSymbolicBC.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/MemoryAccess.cuh>
#include <ATen/native/hip/PersistentSoftmax.cuh>
#include <ATen/native/hip/block_reduce.cuh>
#include <c10/util/Optional.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/scalar_tensor.h>
#endif
#include <c10/hip/HIPMathCompat.h>
#include <ATen/native/transformers/attention.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/nested/NestedTensorTransformerFunctions.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/transformers/hip/sdp_utils.h>
#include <ATen/native/transformers/sdp_utils_cpp.h>
#ifdef USE_FLASH_ATTENTION
// FlashAttention Specific Imports
#include <ATen/native/transformers/hip/flash_attn/flash_api.h>
#endif
#ifdef USE_MEM_EFF_ATTENTION
// MemoryEfficient Attention Specific Imports
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_forward.h>
#include <ATen/native/transformers/hip/mem_eff_attention/kernels/cutlassF.h>
#include <ATen/native/transformers/hip/mem_eff_attention/pytorch_utils.h>
#endif
namespace at {
namespace native {
namespace {
static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4;
template <typename scalar_t, typename accscalar_t, bool assume_aligned>
__global__ void transform_bias_rescale_qkv_kernel(
// [B, T, 3 * D]
const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
// warp per DH.
// so launch B * NH * T warps.
auto NH = q_k_v.size(2);
auto T = q_k_v.size(3);
auto DH = q_k_v.size(4);
auto t = blockIdx.x % T;
auto b = blockIdx.x / T;
auto D = NH * DH;
if (assume_aligned) {
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
// Same as above, but we can't vectorize memory access.
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
scalar_t qkv_q = qkv[b][t][d + 0 * D];
scalar_t qkv_k = qkv[b][t][d + 1 * D];
scalar_t qkv_v = qkv[b][t][d + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
template <typename scalar_t, typename accscalar_t, bool assume_aligned = false>
__global__ void transform_bias_rescale_qkv_add_padding_kernel(
// [B, T, 3 * D], but it's a NestedTensor buffer
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
const int* offsets,
const int* input_sizes,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
// warp per DH.
// so launch B * NH * T warps.
const auto NH = q_k_v.size(2);
const auto T = q_k_v.size(3);
const auto DH = q_k_v.size(4);
const auto t = blockIdx.x % T;
const auto b = blockIdx.x / T;
const auto D = NH * DH;
const auto _3D = 3 * D;
const auto offset_for_batch = offsets[b];
const auto input_dim = 1;
const auto* sizes_i = input_sizes + b * input_dim;
if (assume_aligned) {
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
const auto first_item_offset = t * _3D + d;
const auto last_item_offset = first_item_offset + VEC - 1;
const bool first_item_in_bounds = first_item_offset < sizes_i[0];
const bool entire_vec_in_bounds = last_item_offset < sizes_i[0];
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
if (entire_vec_in_bounds) {
const auto offset = offset_for_batch + first_item_offset;
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
} else if (first_item_in_bounds) {
const auto offset = offset_for_batch + first_item_offset;
qkv_q[0] = qkv[offset + 0 * D];
qkv_k[0] = qkv[offset + 1 * D];
qkv_v[0] = qkv[offset + 2 * D];
qkv_q[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[0]) +
static_cast<accscalar_t>(qkv_bias_q[0])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[0]) +
static_cast<accscalar_t>(qkv_bias_k[0])));
qkv_v[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[0]) +
static_cast<accscalar_t>(qkv_bias_v[0])));
#pragma unroll
for (auto ii = 1; ii < VEC; ++ii) {
const auto loop_offset = offset + ii;
if (loop_offset < sizes_i[0]) {
qkv_q[ii] = qkv[loop_offset + 0 * D];
qkv_k[ii] = qkv[loop_offset + 1 * D];
qkv_v[ii] = qkv[loop_offset + 2 * D];
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
} else {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
} else {
#pragma unroll
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
const auto item_offset = t * _3D + d;
const bool in_bounds = item_offset < sizes_i[0];
scalar_t qkv_q, qkv_k, qkv_v;
if (in_bounds) {
const auto qkv_offset = offset_for_batch + item_offset;
qkv_q = qkv[qkv_offset + 0 * D];
qkv_k = qkv[qkv_offset + 1 * D];
qkv_v = qkv[qkv_offset + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
} else {
qkv_q = 0;
qkv_k = 0;
qkv_v = 0;
}
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
Tensor collapse_dims_1_and_2(const Tensor& sizes) {
auto sizes_dim1 = at::native::narrow_symint(sizes, 1, 0, 1);
auto sizes_dim2 = at::native::narrow_symint(sizes, 1, 1, 1);
return (sizes_dim1 * sizes_dim2).contiguous();
}
} // namespace
// compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias
__host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda(
const Tensor& qkv,
const Tensor& qkv_bias,
const int64_t num_head) {
auto B = qkv.is_nested()
? get_nested_tensor_impl(qkv)->get_nested_sizes().size(0)
: qkv.size(0);
// TODO: calculate this without the std::vector -- NestedTensor_to_mask wants
// this too
auto T = qkv.is_nested()
? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0]
: qkv.size(1);
if (qkv.is_nested()) {
// Don't mess with non-nested case for now since it's not set up to fiddle
// with mask size.
// Round T up to next multiple of 8 so as to be able to utilize Tensor
// cores. Otherwise, sometimes with padding, *no* row will have the maximum
// sequence length and so we'll have a non-divisible-by-8 dimension even if
// the model author chose a multiple of 8.
T = T + (8 - (T % 8)) % 8;
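// e.g. T = 13 -> 13 + (8 - 5) % 8 = 16, while T = 16 stays 16 since (8 - 0) % 8 == 0.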
}
auto _3D = qkv_bias.size(0);
auto D = _3D / 3;
TORCH_CHECK(D % num_head == 0);
const auto dim_per_head = D / num_head;
auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options());
#define CALL_KERNEL(assume_aligned) \
hipLaunchKernelGGL(( transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned>) \
, dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(), \
qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \
1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
#define CALL_ADD_PADDING_KERNEL(assume_aligned) \
hipLaunchKernelGGL(( transform_bias_rescale_qkv_add_padding_kernel< \
scalar_t, \
accscalar_t, \
assume_aligned>) \
, dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
nt_qkv_buffer \
.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
offsets_ptr, \
sizes_ptr, \
q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \
1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
qkv.scalar_type(),
"transform_bias_rescale_qkv",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
auto threads = ::max(
std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1);
auto blocks = B * T;
const bool aligned =
((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) &&
((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) %
TRANSFORM_BIAS_RESCALE_VEC) == 0);
if (aligned) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
D % TRANSFORM_BIAS_RESCALE_VEC == 0,
"D = num_heads * dim_per_head, so we should have dim_per_head % "
"TRANSFORM_BIAS_RESCALE_VEC == 0 => "
"D % TRANSFORM_BIAS_RESCALE_VEC == 0");
}
if (qkv.is_nested()) {
auto* nt_qkv = get_nested_tensor_impl(qkv);
const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer();
auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_sizes());
auto offsets =
NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel());
at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel())
.copy_(sizes.reshape({-1}));
auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true);
const auto offsets_ptr = metadata.data_ptr<int>();
const auto sizes_ptr = offsets_ptr + sizes.numel() + 1;
const auto input_dim = sizes.sizes()[1];
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1);
if (aligned &&
((reinterpret_cast<intptr_t>(qkv.data_ptr()) %
TRANSFORM_BIAS_RESCALE_VEC) == 0)) {
CALL_ADD_PADDING_KERNEL(true);
} else {
CALL_ADD_PADDING_KERNEL(false);
}
} else if (aligned) {
CALL_KERNEL(true);
} else {
CALL_KERNEL(false);
}
C10_HIP_KERNEL_LAUNCH_CHECK();
});
#undef CALL_ADD_PADDING_KERNEL
#undef CALL_KERNEL
auto q_k_v_s =
at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0);
return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]);
}
std::tuple<Tensor, Tensor> native_multi_head_attention_cuda(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const int64_t embed_dim,
const int64_t num_head,
const Tensor& qkv_weight,
const Tensor& qkv_bias,
const Tensor& proj_weight,
const Tensor& proj_bias,
const c10::optional<Tensor>& mask,
bool need_weights,
bool average_attn_weights,
const c10::optional<int64_t> mask_type) {
// query shape: [B, T, D]
// qkv_weight shape: [3 * D, D]
TORCH_CHECK(
!mask || !query.is_nested(),
"NestedTensor with mask is not supported yet");
const auto D = embed_dim;
TORCH_CHECK(
query.dim() == 3,
"expected 3-D `query`, got ",
query.dim(),
"-D tensor");
TORCH_CHECK(
query.is_nested() || query.sizes()[2] == embed_dim,
"passed-in embed_dim ",
embed_dim,
" didn't match last dim of query ",
query.sizes()[2]);
TORCH_CHECK(
key.dim() == 3,
"expected 3-D `key`, got ",
key.dim(),
"-D tensor");
TORCH_CHECK(
value.dim() == 3,
"expected 3-D `value`, got ",
value.dim(),
"-D tensor");
TORCH_CHECK(
query.is_nested() || key.is_nested() || value.is_nested() ||
(query.sizes() == key.sizes() && key.sizes() == value.sizes()),
"expected `query`/`key`/`value` shapes to match");
TORCH_CHECK(
qkv_weight.dim() == 2,
"expected 2-D `qkv_weight`, got ",
qkv_weight.dim(),
"-D tensor");
TORCH_CHECK(
D * 3 == qkv_weight.sizes()[0],
"expected `qkv_weight` first dim to be 3x embed_dim");
TORCH_CHECK(
D == qkv_weight.sizes()[1],
"expected `qkv_weight` second dim to be embed_Dim");
TORCH_CHECK(
qkv_bias.dim() == 1,
"expected 1-D `qkv_bias`, got ",
qkv_bias.dim(),
"-D tensor");
TORCH_CHECK(
qkv_bias.sizes()[0] == 3 * D,
"expected `qkv_bias` first dim and first dim of query to be equal");
TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`");
#ifndef NDEBUG
const auto B = query.is_nested()
? get_nested_tensor_impl(query)->get_nested_sizes().size(0)
: query.sizes()[0];
auto T = query.is_nested() ? 0 : query.sizes()[1];
#endif
const auto dim_per_head = D / num_head;
if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 && !need_weights) {
// We have not done linear projection yet but the input for SDP
// Is expected to be 4 dimensional. We "cheaply" create view tensors
// That will then be used for checking hot path conditions with select_sd_backend
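// e.g. query [B, T, D] -> view [B, T, num_head, dim_per_head] -> transpose(1, 2)
// -> [B, num_head, T, dim_per_head]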
auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
sdp::sdp_params kernel_params{q, k, v, mask, 0.0, false};
auto backend = select_sdp_backend(kernel_params);
// strides from packed projection for nested tensors when seq_len is 1 are not
// usable as-is and will trigger a contiguous call in the kernel, so we prevent this
bool no_seq_len_1_nested = query.is_nested() ? check_for_seq_len_1_nested_tensor(kernel_params, false) : true;
// The API for transformer_encoder is a mask of shape (Batch_Size, Seq_len_q)
// For mem-eff attention this will cause the expand call to error
// For now I am going to turn off that path so we don't have to deal with all the
// annoying mask type shape grossness
if (!mask.has_value() && no_seq_len_1_nested &&
(backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention)) {
auto x = at::linear(query, qkv_weight, qkv_bias);
auto chunks = x.chunk(3, -1);
auto x_size_0 = x.size(0);
chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
auto y = at::scaled_dot_product_attention(
chunks[0], chunks[1], chunks[2], mask, 0.0, false, c10::nullopt);
auto past_sdp = y.transpose(1, 2).reshape({x_size_0, -1, embed_dim});
return std::make_tuple(
at::linear(past_sdp, proj_weight, proj_bias), Tensor());
}
// Returned math or error lets not use it
}
// shape: [B, T, 3 x D]
auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight);
if (!qkv.is_nested() && qkv.numel() == 0) {
if (query.is_nested()) {
return std::make_tuple(Tensor(), Tensor());
}
return std::make_tuple(at::empty_like(query), Tensor());
}
#ifndef NDEBUG
if (!query.is_nested() || !qkv.is_nested()) {
if (query.is_nested()) {
T = qkv.size(1);
}
debug_assert_shape(__LINE__, qkv, {B, T, 3 * D});
}
#endif
#ifdef DEBUG_PRINT_EACH_STEP
if (!qkv.is_nested()) {
std::cerr << "qkv: " << qkv << std::endl;
}
#endif
// shape: 3 x [B, num_head, T, dim_per_head]
auto q_k_v = _transform_bias_rescale_qkv(qkv, qkv_bias, num_head);
qkv = Tensor(); // Not used any more, allow free
auto& q = std::get<0>(q_k_v);
const auto& k = std::get<1>(q_k_v);
const auto& v = std::get<2>(q_k_v);
#ifndef NDEBUG
debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head});
debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head});
debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "q: " << q << std::endl;
std::cerr << "k: " << k << std::endl;
std::cerr << "v: " << v << std::endl;
#endif
// shape: [B, num_head, T, T]
auto qkt = bmm_nt(q, k);
// q & k are dead but cannot be freed because they were packed with v
#ifndef NDEBUG
debug_assert_shape(__LINE__, qkt, {B, num_head, T, T});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "qkt: " << qkt << std::endl;
#endif
// shape: [B, num_head, T, T]
// TODO: long-term, have a kernel that works with
// NestedTensor directly if there is no mask passed
qkt = masked_softmax(qkt, mask, query, mask_type);
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "qkt after softmax: " << qkt << std::endl;
#endif
// shape: [B, num_head, T, dim_per_head]
// reuse storage for q; we're done with it
auto attn_ctx = bmm_nn(q, qkt, v);
// qkv is not dead; we just reused storage for q!
if (!need_weights) {
qkt = Tensor();
}
#ifndef NDEBUG
debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "attn_ctx: " << attn_ctx << std::endl;
#endif
// shape: [B, T, D]
// Fuse transform_0213 inside
auto proj = transform0213_gemm_nt_bias(
attn_ctx, proj_weight, proj_bias, query);
#ifndef NDEBUG
debug_assert_shape(__LINE__, proj, {B, T, D});
#endif
if (need_weights && average_attn_weights) {
// weights are not needed for full transformer, so don't worry too
// much about performance -- we implement this just to make use
// cases that don't disable need_weights still get some speedup.
qkt = qkt.sum(1);
qkt /= num_head;
}
return std::make_tuple(std::move(proj), std::move(qkt));
}
std::tuple<Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor, Tensor, Tensor> _scaled_dot_product_flash_attention_cuda(
const Tensor& query,
const Tensor& key,
const Tensor& value,
double dropout_p,
bool is_causal,
bool return_debug_mask,
c10::optional<double> scale) {
// Used for tracking usage statistics
C10_LOG_API_USAGE_ONCE("torch.sdpa.flash_attention");
// Query (Batch x Num_heads x Q_seq_len x Dim_per_head)
// Key (Batch x Num_heads x KV_seq_len x Dim_per_head)
// Value (Batch x Num_heads x KV_seq_len x Dim_per_head)
const int64_t max_seqlen_batch_q = query.size(2);
const int64_t max_seqlen_batch_k = key.size(2);
const int64_t max_seqlen_batch_v = value.size(2);
TORCH_CHECK(
max_seqlen_batch_k == max_seqlen_batch_v,
"Key and Value must have the same sequence length");
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key (Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
auto
[output,
logsumexp,
philox_seed,
philox_offset,
debug_attn_mask] =
at::_flash_attention_forward(
q_t,
k_t,
v_t,
c10::nullopt,
c10::nullopt,
c10::nullopt,
c10::nullopt,
dropout_p,
is_causal,
return_debug_mask,
scale);
// Reshape output to convert nnz to batch_size and seq_len
Tensor attention = output.transpose(1,2);
return std::make_tuple(attention, logsumexp, Tensor(), Tensor(), max_seqlen_batch_q, max_seqlen_batch_k, philox_seed, philox_offset, debug_attn_mask);
}
std::tuple<Tensor, Tensor, Tensor, Tensor> _scaled_dot_product_efficient_attention_cuda(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const c10::optional<at::Tensor>& attn_bias,
bool compute_log_sumexp,
double dropout_p,
bool is_causal,
c10::optional<double> scale) {
// Used for tracking usage statistics
C10_LOG_API_USAGE_ONCE("torch.sdpa.mem_efficient_attention");
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
sdp::CustomMaskType custom_mask_type = is_causal
? sdp::CustomMaskType::CausalFromTopLeft
: sdp::CustomMaskType::NoCustomMask;
auto [attention, log_sumexp, seed, offset] = at::_efficient_attention_forward(
q_t,
k_t,
v_t,
attn_bias,
c10::nullopt,
c10::nullopt,
c10::nullopt,
dropout_p,
static_cast<int64_t>(custom_mask_type),
compute_log_sumexp,
scale);
attention = attention.transpose(1, 2);
return std::make_tuple(std::move(attention), std::move(log_sumexp), std::move(seed), std::move(offset));
}
int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value,
const c10::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, c10::optional<double> scale){
sdp::sdp_params kernel_params{query_, key, value, attn_mask_, dropout_p, is_causal};
auto backend = select_sdp_backend(kernel_params);
if (backend == sdp::SDPBackend::error) {
TORCH_CHECK(
false,
"No viable backend for scaled_dot_product_attention was found. ",
"This is likely due to turning off both the math kernel and the fused kernels.");
}
return static_cast<int64_t>(backend);
}
std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor>
_flash_attention_forward(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const c10::optional<Tensor>& cumulative_sequence_length_q,
const c10::optional<Tensor>& cumulative_sequence_length_k,
c10::optional<int64_t> max_seqlen_batch_q,
c10::optional<int64_t> max_seqlen_batch_k,
double dropout_p,
bool is_causal,
bool return_debug_mask,
c10::optional<double> scale) {
#if defined(USE_FLASH_ATTENTION)
const auto softmax_scale =
sdp::calculate_scale(query, scale).as_float_unchecked();
c10::optional<Tensor> out = c10::nullopt;
// We are going to have two paths:
// 1. The standard MHA path for dense tensors
// 2. The Varseqlen path
TORCH_CHECK(
cumulative_sequence_length_q.has_value() ==
cumulative_sequence_length_k.has_value(),
"cumulative_sequence_length_q and cumulative_sequence_length_k must be both set or both not set");
TORCH_CHECK(
max_seqlen_batch_q.has_value() == max_seqlen_batch_k.has_value(),
"max_seqlen_batch_q and max_seqlen_batch_k must be both set or both not set");
Tensor output, q_padded, k_padded, v_padded, logsumexp, output_shape,
philox_seed, philox_offset, debug_attn_mask;
if (cumulative_sequence_length_q.has_value()) {
TORCH_CHECK(
max_seqlen_batch_q.has_value(),
"max_seqlen_batch_q must be set when cumulative_sequence_length_q is set");
std::tie(
output,
q_padded,
k_padded,
v_padded,
logsumexp,
philox_seed,
philox_offset,
debug_attn_mask) =
pytorch_flash::mha_varlen_fwd(
query,
key,
value,
out,
cumulative_sequence_length_q.value(),
cumulative_sequence_length_k.value(),
max_seqlen_batch_q.value(),
max_seqlen_batch_k.value(),
dropout_p,
softmax_scale,
false /*zero_tensors*/,
is_causal,
return_debug_mask,
c10::nullopt /*gen_*/);
} else {
std::tie(
output,
q_padded,
k_padded,
v_padded,
logsumexp,
philox_seed,
philox_offset,
debug_attn_mask) =
pytorch_flash::mha_fwd(
query,
key,
value,
out,
dropout_p,
softmax_scale,
is_causal,
return_debug_mask, /*return_softmax (this is used for testing)*/
c10::nullopt);
}
debug_attn_mask =
return_debug_mask ? debug_attn_mask : at::empty({0}, query.options());
return std::make_tuple(
output,
logsumexp,
philox_seed,
philox_offset,
debug_attn_mask);
#endif
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
return std::make_tuple(
Tensor(),
Tensor(),
Tensor(),
Tensor(),
Tensor());
}
std::tuple<at::Tensor, at::Tensor, Tensor, Tensor> _efficient_attention_forward(
const at::Tensor& query, // [b, seqlen, num_heads, K]
const at::Tensor& key, // [b, seqlen, num_heads, K]
const at::Tensor& value, // [b, seqlen, num_heads, Kv]
const c10::optional<at::Tensor>& bias, // [b, num_heads, seqlen, seqlen]
// (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the
// position of the first query token for batch $b
const c10::optional<at::Tensor>& seqstart_q,
// (Mode 1MHK only) [b+1]: cu_seqlen_k[b] contains the
// position of the first key token for batch $b
const c10::optional<at::Tensor>& seqstart_k,
// (Mode 1MHK only) Maximum sequence length across batches
const c10::optional<int64_t> max_seqlen_q_,
double dropout_p, // attention matrix dropout probability
int64_t custom_mask_type,
bool compute_logsumexp,
c10::optional<double> scale,
const c10::optional<at::Tensor>& causal_diagonal,
const c10::optional<at::Tensor>& seqlen_k) {
#if defined(USE_MEM_EFF_ATTENTION)
// TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a
// machine that is >= 5.0. In practice, this is not a problem but since
// this would avoid runtime architecture checks, we should look into it
TORCH_CHECK(query.dim() == 4);
TORCH_CHECK(key.dim() == 4);
TORCH_CHECK(value.dim() == 4);
// Batch sizes
TORCH_CHECK(query.size(0) == key.size(0));
TORCH_CHECK(query.size(0) == value.size(0));
// Sequence length
TORCH_CHECK(key.size(1) == value.size(1));
// Num heads
TORCH_CHECK(query.size(2) == key.size(2));
TORCH_CHECK(query.size(2) == value.size(2));
// Embedding per head
TORCH_CHECK(query.size(3) == key.size(3));
// TODO_DRISS we should return max_seqlen_k;
int64_t max_seqlen_q, max_seqlen_k;
TORCH_CHECK(seqstart_q.has_value() == seqstart_k.has_value());
if (seqstart_q.has_value()) {
TORCH_CHECK(seqstart_q->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(seqstart_k->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(seqstart_q->dim() == 1 && seqstart_k->dim() == 1);
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_q));
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_k));
TORCH_CHECK(seqstart_q->size(0) == seqstart_k->size(0));
TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1");
TORCH_CHECK(max_seqlen_q_.has_value());
max_seqlen_q = *max_seqlen_q_;
max_seqlen_k = 0; // Will be set inside the kernel
} else {
max_seqlen_q = query.size(1);
max_seqlen_k = key.size(1);
}
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value);
at::hip::HIPGuardMasqueradingAsCUDA device_guard(query.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
int64_t B = query.size(0);
int64_t M = query.size(1);
int64_t N = key.size(1);
int64_t num_heads = query.size(-2);
int64_t K = query.size(-1);
int64_t Kv = value.size(-1);
at::Tensor res;
at::Tensor logsumexp;
at::Tensor seed_t, offset_t;
const bool use_dropout = std::fpclassify(dropout_p) != FP_ZERO;
// Note [Seed and Offset Device]
// If we are currently in graph capture mode, we need to create the seed and offset tensors on the device.
// This is necessary for CUDA graph-safe random number generation, which requires the seed and offset tensors
// to be single element tensors on device. During graph capture, when the seed and offset tensors are passed
// the pointers act as scratch space for storing the RNG state for the backwards pass.
// When calling backwards, we either construct a PhiloxState with the pointers or the actual values.
// For more information on CUDA graph-safe RNG states, see Note [CUDA Graph-safe RNG states].
at::PhiloxCudaState philox_state;
const bool in_capture_stream =
at::cuda::currentStreamCaptureStatus() != at::cuda::CaptureStatus::None;
auto device = in_capture_stream ? at::kCUDA : at::kCPU;
if (use_dropout) {
auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
c10::nullopt, at::cuda::detail::getDefaultCUDAGenerator());
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
// if using dropout, we produce 1 random number for each element of the
// attention tensor
philox_state = gen->philox_cuda_state(B * num_heads * M * N);
if (in_capture_stream) {
// The seed and offset will be populated by the kernel
seed_t = at::empty({}, at::dtype(at::kLong).device(device));
offset_t = at::empty({}, at::dtype(at::kLong).device(device));
} else {
auto [seed, offset] = at::cuda::philox::unpack(philox_state);
seed_t = at::scalar_tensor(
at::Scalar(static_cast<int64_t>(seed)), at::dtype(at::kLong));
offset_t = at::scalar_tensor(
at::Scalar(static_cast<int64_t>(offset)), at::dtype(at::kLong));
}
} else {
// Not using dropout
seed_t = at::empty({}, at::dtype(at::kLong).device(device));
offset_t = at::empty({}, at::dtype(at::kLong).device(device));
}
hipDeviceProp_t* p = at::cuda::getDeviceProperties(query.device().index());
const int computeCapability = p->major * 10 + p->minor;
bool kernel_launched = false;
const auto maxShmem = p->sharedMemPerBlockOptin;
auto launchKernel = [&](auto _k, auto kernel_fn) {
using Kernel = decltype(_k);
using scalar_t = typename Kernel::scalar_t;
(void)_k;
if (kernel_launched) {
return;
}
// Check if this kernel is compatible
if (!Kernel::kSupportsDropout && use_dropout) {
return;
}
if (!Kernel::kSupportsBias && bias.has_value()) {
return;
}
if (value.size(3) > Kernel::kMaxK || key.size(3) > Kernel::kMaxK) {
return;
}
// Alignment
if ((query.stride(2) % Kernel::kAlignmentQ) ||
(key.stride(2) % Kernel::kAlignmentK) ||
(value.stride(2) % Kernel::kAlignmentV)) {
return;
}
// Uses too much shmem
size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
if (smem_bytes > maxShmem) {
return;
}
kernel_launched = true;
res = at::empty(
{B, M, num_heads, Kv},
query.options().dtype(
CutlassToAtenDtype<typename Kernel::output_t>::atScalarType()));
// NOTE: Should be aligned (by padding) in case M is
// not a good number for loading during backward
constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE;
logsumexp = at::empty(
{seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B,
num_heads,
compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0},
query.options().dtype(at::ScalarType::Float));
typename Kernel::Params p;
p.query_ptr = (scalar_t*)query.data_ptr();
p.key_ptr = (scalar_t*)key.data_ptr();
p.value_ptr = (scalar_t*)value.data_ptr();
p.logsumexp_ptr = compute_logsumexp
? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr()
: nullptr;
at::Tensor output_accum;
if (Kernel::kNeedsOutputAccumulatorBuffer) {
output_accum = at::empty(
{B, M, num_heads, Kv},
query.options().dtype(
CutlassToAtenDtype<
typename Kernel::output_accum_t>::atScalarType()));
p.output_accum_ptr =
(typename Kernel::output_accum_t*)output_accum.data_ptr();
} else {
p.output_accum_ptr = nullptr;
}
p.output_ptr = (typename Kernel::output_t*)res.data_ptr();
if (seqstart_q.has_value()) {
p.seqstart_q_ptr = (int32_t*)seqstart_q->data_ptr();
p.seqstart_k_ptr = (int32_t*)seqstart_k->data_ptr();
}
p.num_heads = num_heads;
p.head_dim = query.size(3);
p.head_dim_value = value.size(3);
p.num_queries = max_seqlen_q;
p.num_keys = max_seqlen_k;
p.num_batches = seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B;
p.custom_mask_type = custom_mask_type;
p.causal_diagonal_ptr = nullptr;
if (causal_diagonal.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(causal_diagonal.value());
TORCH_CHECK(causal_diagonal->scalar_type() == at::ScalarType::Int);
p.causal_diagonal_ptr = (int32_t*)causal_diagonal->data_ptr();
}
p.seqlen_k_ptr = nullptr;
if (seqlen_k.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(seqlen_k.value());
TORCH_CHECK(seqlen_k->scalar_type() == at::ScalarType::Int);
p.seqlen_k_ptr = (int32_t*)seqlen_k->data_ptr();
}
p.scale = sdp::calculate_scale(query, scale).as_float_unchecked();
ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0));
ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0));
ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0));
ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1));
ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1));
ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1));
ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2));
ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2));
ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2));
ASSIGN_CHECK_OVERFLOW(p.o_strideM, res.stride(1));
if (bias.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA((*bias));
TORCH_CHECK(
bias->scalar_type() == CutlassToAtenDtype<scalar_t>::atScalarType(),
"invalid dtype for bias - should match query's dtype");
p.attn_bias_ptr = (scalar_t*)bias->data_ptr();
// assign strides for bias, viewed as
// (batch_sz, n_heads, n_queries, n_keys)
// We make sure to expand prior to calling the kernel
const at::Tensor& bias_4d_view = *bias;
TORCH_CHECK(bias_4d_view.dim()==4);
TORCH_CHECK(bias_4d_view.size(0)==B);
TORCH_CHECK(bias_4d_view.size(1)==num_heads);
TORCH_CHECK(bias_4d_view.size(2)==M);
TORCH_CHECK(bias_4d_view.size(3)==N);
ASSIGN_CHECK_OVERFLOW(p.bias_strideB, bias_4d_view.stride(0));
ASSIGN_CHECK_OVERFLOW(p.bias_strideH, bias_4d_view.stride(1));
ASSIGN_CHECK_OVERFLOW(p.bias_strideM, bias_4d_view.stride(2));
}
p.use_dropout = use_dropout;
if (p.use_dropout) {
p.rng_engine_inputs = philox_state;
p.dropout_prob = dropout_p;
p.seed = seed_t.data_ptr<int64_t>();
p.extragraph_offset = offset_t.data_ptr<int64_t>();
}
if (smem_bytes > 0xc000) {
auto err = hipFuncSetAttribute(
kernel_fn, hipFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
TORCH_CHECK(
err != hipErrorInvalidValue,
"This GPU does not have enough shared-memory (kernel requires ",
smem_bytes / 1024,
" kb)");
AT_CUDA_CHECK(err);
}
Kernel::check_supported(p);
hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, stream, p);
};
// Dispatch to the right kernel
DISPATCH_TYPES(query, ([&]() {
dispatch_cutlassF<scalar_t>(launchKernel, computeCapability);
}));
TORCH_CHECK(kernel_launched, "cutlassF: no kernel found to launch!");
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(
std::move(res),
std::move(logsumexp),
std::move(seed_t),
std::move(offset_t));
#endif
TORCH_CHECK(false, "USE_MEM_EFF_ATTENTION was not enabled for build.")
return std::make_tuple(Tensor{}, Tensor{}, Tensor{}, Tensor{});
}
Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){
TORCH_CHECK(false, "This operator should be overridden in python before use");
return at::Tensor();
}
REGISTER_CUDA_DISPATCH(_fused_sdp_choice_stub, &_fused_sdp_choice_cuda);
#ifdef USE_MEM_EFF_ATTENTION
namespace {
/**
* simple kernel that populates a tensor with rand uniform values.
* currently only used for testing purposes, not much attention
* is paid to performance.
*
* problem is partitioned as follows:
* - (batch, head) is given by block coordinates
* - each thread handles a row for a given (batch, head)
*/
template <typename mask_t>
__global__ void rand_uniform_kernel(
int64_t n_heads,
int64_t n_queries,
int64_t n_keys,
float dropout_prob,
at::PhiloxCudaState rng_engine_inputs,
mask_t* mask_out,
int64_t mask_numel) {
const int64_t batch_id = blockIdx.x;
const int64_t head_id = blockIdx.y;
const int64_t query_idx = threadIdx.x;
const auto seeds = at::cuda::philox::unpack(rng_engine_inputs);
const int dropout_seq_start = batch_id * (n_heads * n_queries * n_keys) +
head_id * (n_queries * n_keys);
const int64_t query_start_idx = query_idx * n_keys;
hiprandStatePhilox4_32_10_t curand_state;
hiprand_init(
std::get<0>(seeds),
0,
std::get<1>(seeds) + dropout_seq_start + query_start_idx,
&curand_state);
for (int key_start_idx = 0; key_start_idx < n_keys; key_start_idx += 4) {
float4 rand_quad = hiprand_uniform4(&curand_state);
#pragma unroll
for (int i = 0; i < 4; ++i) {
const int64_t linear_idx = dropout_seq_start + query_start_idx + key_start_idx + i;
if (linear_idx < mask_numel) {
mask_out[linear_idx] = (&rand_quad.x)[i];
}
}
}
}
} // namespace
#endif
/**
* fill tensor with random uniform values. only used for testing, not much
* attention is paid to performance
*/
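// Illustrative call (not part of the original source): `self` must be a contiguous
// float CUDA tensor shaped [batch, n_heads, n_queries, n_keys], e.g.
//   auto mask = at::empty({2, 8, 64, 64},
//                         at::device(at::kCUDA).dtype(at::kFloat));
//   at::native::_fill_mem_eff_dropout_mask_(mask, /*dropout_p=*/0.1, /*seed=*/42, /*offset=*/0);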
at::Tensor& _fill_mem_eff_dropout_mask_(
Tensor& self,
double dropout_p,
const int64_t seed,
const int64_t offset) {
TORCH_CHECK(self.is_contiguous());
TORCH_CHECK(self.dtype() == at::ScalarType::Float);
const int64_t batch_sz = self.size(0);
const int64_t n_heads = self.size(1);
const int64_t n_queries = self.size(2);
const int64_t n_keys = self.size(3);
#if defined(USE_MEM_EFF_ATTENTION)
at::PhiloxCudaState rng_engine_inputs;
rng_engine_inputs = at::PhiloxCudaState(seed, offset);
at::hip::HIPGuardMasqueradingAsCUDA device_guard(self.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( rand_uniform_kernel<float>), dim3(dim3(batch_sz, n_heads)), dim3(n_queries), 0, stream,
n_heads,
n_queries,
n_keys,
dropout_p,
rng_engine_inputs,
reinterpret_cast<float*>(self.data_ptr()),
self.numel());
return self;
#endif
TORCH_CHECK(false, "USE_MEM_EFF_ATTENTION was not enabled for build.")
return self;
}
} // namespace native
} // namespace at
| 070140c53b19e84d62ded635b150bea92cfbc3e3.cu | #include <type_traits>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/NestedTensorImpl.h>
#include <ATen/TensorAccessor.h>
#include <c10/util/Logging.h>
#include <c10/util/bit_cast.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAGraphsUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/NonSymbolicBC.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/MemoryAccess.cuh>
#include <ATen/native/cuda/PersistentSoftmax.cuh>
#include <ATen/native/cuda/block_reduce.cuh>
#include <c10/util/Optional.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/scalar_tensor.h>
#endif
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/native/transformers/attention.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/nested/NestedTensorTransformerFunctions.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/transformers/cuda/sdp_utils.h>
#include <ATen/native/transformers/sdp_utils_cpp.h>
#ifdef USE_FLASH_ATTENTION
// FlashAttention Specific Imports
#include <ATen/native/transformers/cuda/flash_attn/flash_api.h>
#endif
#ifdef USE_MEM_EFF_ATTENTION
// MemoryEfficient Attention Specific Imports
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_forward.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/kernels/cutlassF.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/pytorch_utils.h>
#endif
namespace at {
namespace native {
namespace {
static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4;
template <typename scalar_t, typename accscalar_t, bool assume_aligned>
__global__ void transform_bias_rescale_qkv_kernel(
// [B, T, 3 * D]
const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
// warp per DH.
// so launch B * NH * T warps.
auto NH = q_k_v.size(2);
auto T = q_k_v.size(3);
auto DH = q_k_v.size(4);
auto t = blockIdx.x % T;
auto b = blockIdx.x / T;
auto D = NH * DH;
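  // One block per (b, t) token; threads cooperatively stride over the
  // D = NH * DH packed features and scatter into the [3, B, NH, T, DH] output.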
if (assume_aligned) {
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
// Same as above, but we can't vectorize memory access.
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
scalar_t qkv_q = qkv[b][t][d + 0 * D];
scalar_t qkv_k = qkv[b][t][d + 1 * D];
scalar_t qkv_v = qkv[b][t][d + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
template <typename scalar_t, typename accscalar_t, bool assume_aligned = false>
__global__ void transform_bias_rescale_qkv_add_padding_kernel(
// [B, T, 3 * D], but it's a NestedTensor buffer
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
const int* offsets,
const int* input_sizes,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
// warp per DH.
// so launch B * NH * T warps.
const auto NH = q_k_v.size(2);
const auto T = q_k_v.size(3);
const auto DH = q_k_v.size(4);
const auto t = blockIdx.x % T;
const auto b = blockIdx.x / T;
const auto D = NH * DH;
const auto _3D = 3 * D;
const auto offset_for_batch = offsets[b];
const auto input_dim = 1;
const auto* sizes_i = input_sizes + b * input_dim;
if (assume_aligned) {
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
const auto first_item_offset = t * _3D + d;
const auto last_item_offset = first_item_offset + VEC - 1;
const bool first_item_in_bounds = first_item_offset < sizes_i[0];
const bool entire_vec_in_bounds = last_item_offset < sizes_i[0];
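      // Three cases below: the whole vector lies inside this sequence
      // (vectorized load), only a prefix does (scalar loads, zero-padded tail),
      // or none of it does (pure zero padding of the output).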
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
if (entire_vec_in_bounds) {
const auto offset = offset_for_batch + first_item_offset;
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
} else if (first_item_in_bounds) {
const auto offset = offset_for_batch + first_item_offset;
qkv_q[0] = qkv[offset + 0 * D];
qkv_k[0] = qkv[offset + 1 * D];
qkv_v[0] = qkv[offset + 2 * D];
qkv_q[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[0]) +
static_cast<accscalar_t>(qkv_bias_q[0])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[0]) +
static_cast<accscalar_t>(qkv_bias_k[0])));
qkv_v[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[0]) +
static_cast<accscalar_t>(qkv_bias_v[0])));
#pragma unroll
for (auto ii = 1; ii < VEC; ++ii) {
const auto loop_offset = offset + ii;
if (loop_offset < sizes_i[0]) {
qkv_q[ii] = qkv[loop_offset + 0 * D];
qkv_k[ii] = qkv[loop_offset + 1 * D];
qkv_v[ii] = qkv[loop_offset + 2 * D];
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
} else {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
} else {
#pragma unroll
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
const auto item_offset = t * _3D + d;
const bool in_bounds = item_offset < sizes_i[0];
scalar_t qkv_q, qkv_k, qkv_v;
if (in_bounds) {
const auto qkv_offset = offset_for_batch + item_offset;
qkv_q = qkv[qkv_offset + 0 * D];
qkv_k = qkv[qkv_offset + 1 * D];
qkv_v = qkv[qkv_offset + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
} else {
qkv_q = 0;
qkv_k = 0;
qkv_v = 0;
}
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
Tensor collapse_dims_1_and_2(const Tensor& sizes) {
auto sizes_dim1 = at::native::narrow_symint(sizes, 1, 0, 1);
auto sizes_dim2 = at::native::narrow_symint(sizes, 1, 1, 1);
return (sizes_dim1 * sizes_dim2).contiguous();
}
} // namespace
// compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias
__host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda(
const Tensor& qkv,
const Tensor& qkv_bias,
const int64_t num_head) {
auto B = qkv.is_nested()
? get_nested_tensor_impl(qkv)->get_nested_sizes().size(0)
: qkv.size(0);
// TODO: calculate this without the std::vector -- NestedTensor_to_mask wants
// this too
auto T = qkv.is_nested()
? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0]
: qkv.size(1);
if (qkv.is_nested()) {
// Don't mess with non-nested case for now since it's not set up to fiddle
// with mask size.
// Round T up to next multiple of 8 so as to be able to utilize Tensor
// cores. Otherwise, sometimes with padding, *no* row will have the maximum
// sequence length and so we'll have a non-divisible-by-8 dimension even if
// the model author chose a multiple of 8.
T = T + (8 - (T % 8)) % 8;
}
auto _3D = qkv_bias.size(0);
auto D = _3D / 3;
TORCH_CHECK(D % num_head == 0);
const auto dim_per_head = D / num_head;
auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options());
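  // Q, K and V are produced into a single packed [3, B, num_head, T,
  // dim_per_head] buffer and split into three views at the end of this function.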
#define CALL_KERNEL(assume_aligned) \
transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned> \
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( \
qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(), \
qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \
1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
#define CALL_ADD_PADDING_KERNEL(assume_aligned) \
transform_bias_rescale_qkv_add_padding_kernel< \
scalar_t, \
accscalar_t, \
assume_aligned> \
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( \
nt_qkv_buffer \
.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
offsets_ptr, \
sizes_ptr, \
q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \
1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
qkv.scalar_type(),
"transform_bias_rescale_qkv",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
auto threads = std::max(
std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1);
auto blocks = B * T;
const bool aligned =
((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) &&
((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) %
TRANSFORM_BIAS_RESCALE_VEC) == 0);
if (aligned) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
D % TRANSFORM_BIAS_RESCALE_VEC == 0,
"D = num_heads * dim_per_head, so we should have dim_per_head % "
"TRANSFORM_BIAS_RESCALE_VEC == 0 => "
"D % TRANSFORM_BIAS_RESCALE_VEC == 0");
}
if (qkv.is_nested()) {
auto* nt_qkv = get_nested_tensor_impl(qkv);
const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer();
auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_sizes());
auto offsets =
NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel());
at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel())
.copy_(sizes.reshape({-1}));
auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true);
const auto offsets_ptr = metadata.data_ptr<int>();
const auto sizes_ptr = offsets_ptr + sizes.numel() + 1;
const auto input_dim = sizes.sizes()[1];
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1);
if (aligned &&
((reinterpret_cast<intptr_t>(qkv.data_ptr()) %
TRANSFORM_BIAS_RESCALE_VEC) == 0)) {
CALL_ADD_PADDING_KERNEL(true);
} else {
CALL_ADD_PADDING_KERNEL(false);
}
} else if (aligned) {
CALL_KERNEL(true);
} else {
CALL_KERNEL(false);
}
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
#undef CALL_ADD_PADDING_KERNEL
#undef CALL_KERNEL
auto q_k_v_s =
at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0);
return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]);
}
std::tuple<Tensor, Tensor> native_multi_head_attention_cuda(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const int64_t embed_dim,
const int64_t num_head,
const Tensor& qkv_weight,
const Tensor& qkv_bias,
const Tensor& proj_weight,
const Tensor& proj_bias,
const c10::optional<Tensor>& mask,
bool need_weights,
bool average_attn_weights,
const c10::optional<int64_t> mask_type) {
// query shape: [B, T, D]
// qkv_weight shape: [3 * D, D]
TORCH_CHECK(
!mask || !query.is_nested(),
"NestedTensor with mask is not supported yet");
const auto D = embed_dim;
TORCH_CHECK(
query.dim() == 3,
"expected 3-D `query`, got ",
query.dim(),
"-D tensor");
TORCH_CHECK(
query.is_nested() || query.sizes()[2] == embed_dim,
"passed-in embed_dim ",
embed_dim,
" didn't match last dim of query ",
query.sizes()[2]);
TORCH_CHECK(
key.dim() == 3,
"expected 3-D `key`, got ",
key.dim(),
"-D tensor");
TORCH_CHECK(
value.dim() == 3,
"expected 3-D `value`, got ",
value.dim(),
"-D tensor");
TORCH_CHECK(
query.is_nested() || key.is_nested() || value.is_nested() ||
(query.sizes() == key.sizes() && key.sizes() == value.sizes()),
"expected `query`/`key`/`value` shapes to match");
TORCH_CHECK(
qkv_weight.dim() == 2,
"expected 2-D `qkv_weight`, got ",
qkv_weight.dim(),
"-D tensor");
TORCH_CHECK(
D * 3 == qkv_weight.sizes()[0],
"expected `qkv_weight` first dim to be 3x embed_dim");
TORCH_CHECK(
D == qkv_weight.sizes()[1],
"expected `qkv_weight` second dim to be embed_Dim");
TORCH_CHECK(
qkv_bias.dim() == 1,
"expected 1-D `qkv_bias`, got ",
qkv_bias.dim(),
"-D tensor");
TORCH_CHECK(
qkv_bias.sizes()[0] == 3 * D,
"expected `qkv_bias` first dim and first dim of query to be equal");
TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`");
#ifndef NDEBUG
const auto B = query.is_nested()
? get_nested_tensor_impl(query)->get_nested_sizes().size(0)
: query.sizes()[0];
auto T = query.is_nested() ? 0 : query.sizes()[1];
#endif
const auto dim_per_head = D / num_head;
if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 && !need_weights) {
    // We have not done the linear projection yet, but the input for SDP
    // is expected to be 4-dimensional. We "cheaply" create view tensors
    // that will then be used for checking hot-path conditions with select_sdp_backend
auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
sdp::sdp_params kernel_params{q, k, v, mask, 0.0, false};
auto backend = select_sdp_backend(kernel_params);
    // strides from the packed projection for nested tensors when seq_len is 1
    // will trigger a contiguous call in the kernel, so we prevent this
bool no_seq_len_1_nested = query.is_nested() ? check_for_seq_len_1_nested_tensor(kernel_params, false) : true;
    // The API for transformer_encoder is a mask of shape (Batch_Size, Seq_len_q).
    // For mem-eff attention this will cause the expand call to error, so for now
    // we turn that path off rather than deal with the awkward mask type/shape
    // handling.
if (!mask.has_value() && no_seq_len_1_nested &&
(backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention)) {
auto x = at::linear(query, qkv_weight, qkv_bias);
auto chunks = x.chunk(3, -1);
auto x_size_0 = x.size(0);
chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
auto y = at::scaled_dot_product_attention(
chunks[0], chunks[1], chunks[2], mask, 0.0, false, c10::nullopt);
auto past_sdp = y.transpose(1, 2).reshape({x_size_0, -1, embed_dim});
return std::make_tuple(
at::linear(past_sdp, proj_weight, proj_bias), Tensor());
}
    // Returned math or error, let's not use it
}
// shape: [B, T, 3 x D]
auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight);
if (!qkv.is_nested() && qkv.numel() == 0) {
if (query.is_nested()) {
return std::make_tuple(Tensor(), Tensor());
}
return std::make_tuple(at::empty_like(query), Tensor());
}
#ifndef NDEBUG
if (!query.is_nested() || !qkv.is_nested()) {
if (query.is_nested()) {
T = qkv.size(1);
}
debug_assert_shape(__LINE__, qkv, {B, T, 3 * D});
}
#endif
#ifdef DEBUG_PRINT_EACH_STEP
if (!qkv.is_nested()) {
std::cerr << "qkv: " << qkv << std::endl;
}
#endif
// shape: 3 x [B, num_head, T, dim_per_head]
auto q_k_v = _transform_bias_rescale_qkv(qkv, qkv_bias, num_head);
qkv = Tensor(); // Not used any more, allow free
auto& q = std::get<0>(q_k_v);
const auto& k = std::get<1>(q_k_v);
const auto& v = std::get<2>(q_k_v);
#ifndef NDEBUG
debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head});
debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head});
debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "q: " << q << std::endl;
std::cerr << "k: " << k << std::endl;
std::cerr << "v: " << v << std::endl;
#endif
// shape: [B, num_head, T, T]
auto qkt = bmm_nt(q, k);
// q & k are dead but cannot be freed because they were packed with v
#ifndef NDEBUG
debug_assert_shape(__LINE__, qkt, {B, num_head, T, T});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "qkt: " << qkt << std::endl;
#endif
// shape: [B, num_head, T, T]
// TODO: long-term, have a kernel that works with
// NestedTensor directly if there is no mask passed
qkt = masked_softmax(qkt, mask, query, mask_type);
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "qkt after softmax: " << qkt << std::endl;
#endif
// shape: [B, num_head, T, dim_per_head]
// reuse storage for q; we're done with it
auto attn_ctx = bmm_nn(q, qkt, v);
// qkv is not dead; we just reused storage for q!
if (!need_weights) {
qkt = Tensor();
}
#ifndef NDEBUG
debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "attn_ctx: " << attn_ctx << std::endl;
#endif
// shape: [B, T, D]
// Fuse transform_0213 inside
auto proj = transform0213_gemm_nt_bias(
attn_ctx, proj_weight, proj_bias, query);
#ifndef NDEBUG
debug_assert_shape(__LINE__, proj, {B, T, D});
#endif
if (need_weights && average_attn_weights) {
    // weights are not needed for the full transformer, so don't worry too
    // much about performance -- we implement this just so that use cases
    // that don't disable need_weights still get some speedup.
qkt = qkt.sum(1);
qkt /= num_head;
}
return std::make_tuple(std::move(proj), std::move(qkt));
}
std::tuple<Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor, Tensor, Tensor> _scaled_dot_product_flash_attention_cuda(
const Tensor& query,
const Tensor& key,
const Tensor& value,
double dropout_p,
bool is_causal,
bool return_debug_mask,
c10::optional<double> scale) {
// Used for tracking usage statistics
C10_LOG_API_USAGE_ONCE("torch.sdpa.flash_attention");
// Query (Batch x Num_heads x Q_seq_len x Dim_per_head)
// Key (Batch x Num_heads x KV_seq_len x Dim_per_head)
// Value (Batch x Num_heads x KV_seq_len x Dim_per_head)
const int64_t max_seqlen_batch_q = query.size(2);
const int64_t max_seqlen_batch_k = key.size(2);
const int64_t max_seqlen_batch_v = value.size(2);
TORCH_CHECK(
max_seqlen_batch_k == max_seqlen_batch_v,
"Key and Value must have the same sequence length");
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key (Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
auto
[output,
logsumexp,
philox_seed,
philox_offset,
debug_attn_mask] =
at::_flash_attention_forward(
q_t,
k_t,
v_t,
c10::nullopt,
c10::nullopt,
c10::nullopt,
c10::nullopt,
dropout_p,
is_causal,
return_debug_mask,
scale);
// Reshape output to convert nnz to batch_size and seq_len
Tensor attention = output.transpose(1,2);
return std::make_tuple(attention, logsumexp, Tensor(), Tensor(), max_seqlen_batch_q, max_seqlen_batch_k, philox_seed, philox_offset, debug_attn_mask);
}
std::tuple<Tensor, Tensor, Tensor, Tensor> _scaled_dot_product_efficient_attention_cuda(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const c10::optional<at::Tensor>& attn_bias,
bool compute_log_sumexp,
double dropout_p,
bool is_causal,
c10::optional<double> scale) {
// Used for tracking usage statistics
C10_LOG_API_USAGE_ONCE("torch.sdpa.mem_efficient_attention");
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
sdp::CustomMaskType custom_mask_type = is_causal
? sdp::CustomMaskType::CausalFromTopLeft
: sdp::CustomMaskType::NoCustomMask;
auto [attention, log_sumexp, seed, offset] = at::_efficient_attention_forward(
q_t,
k_t,
v_t,
attn_bias,
c10::nullopt,
c10::nullopt,
c10::nullopt,
dropout_p,
static_cast<int64_t>(custom_mask_type),
compute_log_sumexp,
scale);
attention = attention.transpose(1, 2);
return std::make_tuple(std::move(attention), std::move(log_sumexp), std::move(seed), std::move(offset));
}
int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value,
const c10::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, c10::optional<double> scale){
sdp::sdp_params kernel_params{query_, key, value, attn_mask_, dropout_p, is_causal};
auto backend = select_sdp_backend(kernel_params);
if (backend == sdp::SDPBackend::error) {
TORCH_CHECK(
false,
"No viable backend for scaled_dot_product_attention was found. ",
"This is likely due to turning off both the math kernel and the fused kernels.");
}
return static_cast<int64_t>(backend);
}
std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor>
_flash_attention_forward(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const c10::optional<Tensor>& cumulative_sequence_length_q,
const c10::optional<Tensor>& cumulative_sequence_length_k,
c10::optional<int64_t> max_seqlen_batch_q,
c10::optional<int64_t> max_seqlen_batch_k,
double dropout_p,
bool is_causal,
bool return_debug_mask,
c10::optional<double> scale) {
#if defined(USE_FLASH_ATTENTION)
const auto softmax_scale =
sdp::calculate_scale(query, scale).as_float_unchecked();
c10::optional<Tensor> out = c10::nullopt;
// We are going to have two paths:
// 1. The standard MHA path for dense tensors
// 2. The Varseqlen path
TORCH_CHECK(
cumulative_sequence_length_q.has_value() ==
cumulative_sequence_length_k.has_value(),
"cumulative_sequence_length_q and cumulative_sequence_length_k must be both set or both not set");
TORCH_CHECK(
max_seqlen_batch_q.has_value() == max_seqlen_batch_k.has_value(),
"max_seqlen_batch_q and max_seqlen_batch_k must be both set or both not set");
Tensor output, q_padded, k_padded, v_padded, logsumexp, output_shape,
philox_seed, philox_offset, debug_attn_mask;
if (cumulative_sequence_length_q.has_value()) {
TORCH_CHECK(
max_seqlen_batch_q.has_value(),
"max_seqlen_batch_q must be set when cumulative_sequence_length_q is set");
std::tie(
output,
q_padded,
k_padded,
v_padded,
logsumexp,
philox_seed,
philox_offset,
debug_attn_mask) =
pytorch_flash::mha_varlen_fwd(
query,
key,
value,
out,
cumulative_sequence_length_q.value(),
cumulative_sequence_length_k.value(),
max_seqlen_batch_q.value(),
max_seqlen_batch_k.value(),
dropout_p,
softmax_scale,
false /*zero_tensors*/,
is_causal,
return_debug_mask,
c10::nullopt /*gen_*/);
} else {
std::tie(
output,
q_padded,
k_padded,
v_padded,
logsumexp,
philox_seed,
philox_offset,
debug_attn_mask) =
pytorch_flash::mha_fwd(
query,
key,
value,
out,
dropout_p,
softmax_scale,
is_causal,
return_debug_mask, /*return_softmax (this is used for testing)*/
c10::nullopt);
}
debug_attn_mask =
return_debug_mask ? debug_attn_mask : at::empty({0}, query.options());
return std::make_tuple(
output,
logsumexp,
philox_seed,
philox_offset,
debug_attn_mask);
#endif
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
return std::make_tuple(
Tensor(),
Tensor(),
Tensor(),
Tensor(),
Tensor());
}
std::tuple<at::Tensor, at::Tensor, Tensor, Tensor> _efficient_attention_forward(
const at::Tensor& query, // [b, seqlen, num_heads, K]
const at::Tensor& key, // [b, seqlen, num_heads, K]
const at::Tensor& value, // [b, seqlen, num_heads, Kv]
const c10::optional<at::Tensor>& bias, // [b, num_heads, seqlen, seqlen]
// (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the
// position of the first query token for batch $b
const c10::optional<at::Tensor>& seqstart_q,
// (Mode 1MHK only) [b+1]: cu_seqlen_k[b] contains the
// position of the first key token for batch $b
const c10::optional<at::Tensor>& seqstart_k,
// (Mode 1MHK only) Maximum sequence length across batches
const c10::optional<int64_t> max_seqlen_q_,
double dropout_p, // attention matrix dropout probability
int64_t custom_mask_type,
bool compute_logsumexp,
c10::optional<double> scale,
const c10::optional<at::Tensor>& causal_diagonal,
const c10::optional<at::Tensor>& seqlen_k) {
#if defined(USE_MEM_EFF_ATTENTION)
// TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a
// machine that is >= 5.0. In practice, this is not a problem but since
// this would avoid runtime architecture checks, we should look into it
TORCH_CHECK(query.dim() == 4);
TORCH_CHECK(key.dim() == 4);
TORCH_CHECK(value.dim() == 4);
// Batch sizes
TORCH_CHECK(query.size(0) == key.size(0));
TORCH_CHECK(query.size(0) == value.size(0));
// Sequence length
TORCH_CHECK(key.size(1) == value.size(1));
// Num heads
TORCH_CHECK(query.size(2) == key.size(2));
TORCH_CHECK(query.size(2) == value.size(2));
// Embedding per head
TORCH_CHECK(query.size(3) == key.size(3));
// TODO_DRISS we should return max_seqlen_k;
int64_t max_seqlen_q, max_seqlen_k;
TORCH_CHECK(seqstart_q.has_value() == seqstart_k.has_value());
if (seqstart_q.has_value()) {
TORCH_CHECK(seqstart_q->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(seqstart_k->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(seqstart_q->dim() == 1 && seqstart_k->dim() == 1);
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_q));
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_k));
TORCH_CHECK(seqstart_q->size(0) == seqstart_k->size(0));
TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1");
TORCH_CHECK(max_seqlen_q_.has_value());
max_seqlen_q = *max_seqlen_q_;
max_seqlen_k = 0; // Will be set inside the kernel
} else {
max_seqlen_q = query.size(1);
max_seqlen_k = key.size(1);
}
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value);
at::cuda::CUDAGuard device_guard(query.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
int64_t B = query.size(0);
int64_t M = query.size(1);
int64_t N = key.size(1);
int64_t num_heads = query.size(-2);
int64_t K = query.size(-1);
int64_t Kv = value.size(-1);
at::Tensor res;
at::Tensor logsumexp;
at::Tensor seed_t, offset_t;
const bool use_dropout = std::fpclassify(dropout_p) != FP_ZERO;
// Note [Seed and Offset Device]
// If we are currently in graph capture mode, we need to create the seed and offset tensors on the device.
// This is necessary for CUDA graph-safe random number generation, which requires the seed and offset tensors
// to be single element tensors on device. During graph capture, when the seed and offset tensors are passed
// the pointers act as scratch space for storing the RNG state for the backwards pass.
// When calling backwards, we either construct a PhiloxState with the pointers or the actual values.
// For more information on CUDA graph-safe RNG states, see Note [CUDA Graph-safe RNG states].
at::PhiloxCudaState philox_state;
const bool in_capture_stream =
at::cuda::currentStreamCaptureStatus() != at::cuda::CaptureStatus::None;
auto device = in_capture_stream ? at::kCUDA : at::kCPU;
if (use_dropout) {
auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
c10::nullopt, at::cuda::detail::getDefaultCUDAGenerator());
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
// if using dropout, we produce 1 random number for each element of the
// attention tensor
philox_state = gen->philox_cuda_state(B * num_heads * M * N);
if (in_capture_stream) {
// The seed and offset will be populated by the kernel
seed_t = at::empty({}, at::dtype(at::kLong).device(device));
offset_t = at::empty({}, at::dtype(at::kLong).device(device));
} else {
auto [seed, offset] = at::cuda::philox::unpack(philox_state);
seed_t = at::scalar_tensor(
at::Scalar(static_cast<int64_t>(seed)), at::dtype(at::kLong));
offset_t = at::scalar_tensor(
at::Scalar(static_cast<int64_t>(offset)), at::dtype(at::kLong));
}
} else {
// Not using dropout
seed_t = at::empty({}, at::dtype(at::kLong).device(device));
offset_t = at::empty({}, at::dtype(at::kLong).device(device));
}
cudaDeviceProp* p = at::cuda::getDeviceProperties(query.device().index());
const int computeCapability = p->major * 10 + p->minor;
bool kernel_launched = false;
const auto maxShmem = p->sharedMemPerBlockOptin;
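  // launchKernel is invoked once per candidate cutlass kernel by
  // dispatch_cutlassF below; the first candidate that passes the
  // dropout/bias/alignment/shared-memory checks is launched and
  // kernel_launched short-circuits the remaining candidates.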
auto launchKernel = [&](auto _k, auto kernel_fn) {
using Kernel = decltype(_k);
using scalar_t = typename Kernel::scalar_t;
(void)_k;
if (kernel_launched) {
return;
}
// Check if this kernel is compatible
if (!Kernel::kSupportsDropout && use_dropout) {
return;
}
if (!Kernel::kSupportsBias && bias.has_value()) {
return;
}
if (value.size(3) > Kernel::kMaxK || key.size(3) > Kernel::kMaxK) {
return;
}
// Alignment
if ((query.stride(2) % Kernel::kAlignmentQ) ||
(key.stride(2) % Kernel::kAlignmentK) ||
(value.stride(2) % Kernel::kAlignmentV)) {
return;
}
// Uses too much shmem
size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
if (smem_bytes > maxShmem) {
return;
}
kernel_launched = true;
res = at::empty(
{B, M, num_heads, Kv},
query.options().dtype(
CutlassToAtenDtype<typename Kernel::output_t>::atScalarType()));
// NOTE: Should be aligned (by padding) in case M is
// not a good number for loading during backward
constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE;
logsumexp = at::empty(
{seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B,
num_heads,
compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0},
query.options().dtype(at::ScalarType::Float));
typename Kernel::Params p;
p.query_ptr = (scalar_t*)query.data_ptr();
p.key_ptr = (scalar_t*)key.data_ptr();
p.value_ptr = (scalar_t*)value.data_ptr();
p.logsumexp_ptr = compute_logsumexp
? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr()
: nullptr;
at::Tensor output_accum;
if (Kernel::kNeedsOutputAccumulatorBuffer) {
output_accum = at::empty(
{B, M, num_heads, Kv},
query.options().dtype(
CutlassToAtenDtype<
typename Kernel::output_accum_t>::atScalarType()));
p.output_accum_ptr =
(typename Kernel::output_accum_t*)output_accum.data_ptr();
} else {
p.output_accum_ptr = nullptr;
}
p.output_ptr = (typename Kernel::output_t*)res.data_ptr();
if (seqstart_q.has_value()) {
p.seqstart_q_ptr = (int32_t*)seqstart_q->data_ptr();
p.seqstart_k_ptr = (int32_t*)seqstart_k->data_ptr();
}
p.num_heads = num_heads;
p.head_dim = query.size(3);
p.head_dim_value = value.size(3);
p.num_queries = max_seqlen_q;
p.num_keys = max_seqlen_k;
p.num_batches = seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B;
p.custom_mask_type = custom_mask_type;
p.causal_diagonal_ptr = nullptr;
if (causal_diagonal.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(causal_diagonal.value());
TORCH_CHECK(causal_diagonal->scalar_type() == at::ScalarType::Int);
p.causal_diagonal_ptr = (int32_t*)causal_diagonal->data_ptr();
}
p.seqlen_k_ptr = nullptr;
if (seqlen_k.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(seqlen_k.value());
TORCH_CHECK(seqlen_k->scalar_type() == at::ScalarType::Int);
p.seqlen_k_ptr = (int32_t*)seqlen_k->data_ptr();
}
p.scale = sdp::calculate_scale(query, scale).as_float_unchecked();
ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0));
ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0));
ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0));
ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1));
ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1));
ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1));
ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2));
ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2));
ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2));
ASSIGN_CHECK_OVERFLOW(p.o_strideM, res.stride(1));
if (bias.has_value()) {
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA((*bias));
TORCH_CHECK(
bias->scalar_type() == CutlassToAtenDtype<scalar_t>::atScalarType(),
"invalid dtype for bias - should match query's dtype");
p.attn_bias_ptr = (scalar_t*)bias->data_ptr();
// assign strides for bias, viewed as
// (batch_sz, n_heads, n_queries, n_keys)
// We make sure to expand prior to calling the kernel
const at::Tensor& bias_4d_view = *bias;
TORCH_CHECK(bias_4d_view.dim()==4);
TORCH_CHECK(bias_4d_view.size(0)==B);
TORCH_CHECK(bias_4d_view.size(1)==num_heads);
TORCH_CHECK(bias_4d_view.size(2)==M);
TORCH_CHECK(bias_4d_view.size(3)==N);
ASSIGN_CHECK_OVERFLOW(p.bias_strideB, bias_4d_view.stride(0));
ASSIGN_CHECK_OVERFLOW(p.bias_strideH, bias_4d_view.stride(1));
ASSIGN_CHECK_OVERFLOW(p.bias_strideM, bias_4d_view.stride(2));
}
p.use_dropout = use_dropout;
if (p.use_dropout) {
p.rng_engine_inputs = philox_state;
p.dropout_prob = dropout_p;
p.seed = seed_t.data_ptr<int64_t>();
p.extragraph_offset = offset_t.data_ptr<int64_t>();
}
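    // 0xc000 bytes == 48 KiB: kernels that need more dynamic shared memory than
    // the default per-block limit must opt in via cudaFuncSetAttribute.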
if (smem_bytes > 0xc000) {
auto err = cudaFuncSetAttribute(
kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
TORCH_CHECK(
err != cudaErrorInvalidValue,
"This GPU does not have enough shared-memory (kernel requires ",
smem_bytes / 1024,
" kb)");
AT_CUDA_CHECK(err);
}
Kernel::check_supported(p);
kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream>>>(p);
};
// Dispatch to the right kernel
DISPATCH_TYPES(query, ([&]() {
dispatch_cutlassF<scalar_t>(launchKernel, computeCapability);
}));
TORCH_CHECK(kernel_launched, "cutlassF: no kernel found to launch!");
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(
std::move(res),
std::move(logsumexp),
std::move(seed_t),
std::move(offset_t));
#endif
TORCH_CHECK(false, "USE_MEM_EFF_ATTENTION was not enabled for build.")
return std::make_tuple(Tensor{}, Tensor{}, Tensor{}, Tensor{});
}
Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){
TORCH_CHECK(false, "This operator should be overridden in python before use");
return at::Tensor();
}
REGISTER_CUDA_DISPATCH(_fused_sdp_choice_stub, &_fused_sdp_choice_cuda);
#ifdef USE_MEM_EFF_ATTENTION
namespace {
/**
* simple kernel that populates a tensor with rand uniform values.
* currently only used for testing purposes, not much attention
* is paid to performance.
*
* problem is partitioned as follows:
* - (batch, head) is given by block coordinates
* - each thread handles a row for a given (batch, head)
*/
template <typename mask_t>
__global__ void rand_uniform_kernel(
int64_t n_heads,
int64_t n_queries,
int64_t n_keys,
float dropout_prob,
at::PhiloxCudaState rng_engine_inputs,
mask_t* mask_out,
int64_t mask_numel) {
const int64_t batch_id = blockIdx.x;
const int64_t head_id = blockIdx.y;
const int64_t query_idx = threadIdx.x;
const auto seeds = at::cuda::philox::unpack(rng_engine_inputs);
const int dropout_seq_start = batch_id * (n_heads * n_queries * n_keys) +
head_id * (n_queries * n_keys);
const int64_t query_start_idx = query_idx * n_keys;
curandStatePhilox4_32_10_t curand_state;
curand_init(
std::get<0>(seeds),
0,
std::get<1>(seeds) + dropout_seq_start + query_start_idx,
&curand_state);
for (int key_start_idx = 0; key_start_idx < n_keys; key_start_idx += 4) {
float4 rand_quad = curand_uniform4(&curand_state);
#pragma unroll
for (int i = 0; i < 4; ++i) {
const int64_t linear_idx = dropout_seq_start + query_start_idx + key_start_idx + i;
if (linear_idx < mask_numel) {
mask_out[linear_idx] = (&rand_quad.x)[i];
}
}
}
}
} // namespace
#endif
/**
* fill tensor with random uniform values. only used for testing, not much
* attention is paid to performance
*/
at::Tensor& _fill_mem_eff_dropout_mask_(
Tensor& self,
double dropout_p,
const int64_t seed,
const int64_t offset) {
TORCH_CHECK(self.is_contiguous());
TORCH_CHECK(self.dtype() == at::ScalarType::Float);
const int64_t batch_sz = self.size(0);
const int64_t n_heads = self.size(1);
const int64_t n_queries = self.size(2);
const int64_t n_keys = self.size(3);
#if defined(USE_MEM_EFF_ATTENTION)
at::PhiloxCudaState rng_engine_inputs;
rng_engine_inputs = at::PhiloxCudaState(seed, offset);
at::cuda::CUDAGuard device_guard(self.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
rand_uniform_kernel<float><<<dim3(batch_sz, n_heads), n_queries, 0, stream>>>(
n_heads,
n_queries,
n_keys,
dropout_p,
rng_engine_inputs,
reinterpret_cast<float*>(self.data_ptr()),
self.numel());
return self;
#endif
TORCH_CHECK(false, "USE_MEM_EFF_ATTENTION was not enabled for build.")
return self;
}
} // namespace native
} // namespace at
|
1abe4adbfd4e615348ae194c5944cb48574f4ce5.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <set>
#include <string>
#include <unordered_map>
#include "open3d/core/CUDAState.cuh"
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/MemoryManager.h"
namespace open3d {
namespace core {
struct Block;
// This is a simplified reimplementation of PyTorch's CUDA memory manager.
// Reference: https://git.io/JUqUA
// We need raw pointers (not smart ptrs) for exact comparison and reference
typedef Block* BlockPtr;
struct Block {
int device_; // gpu id
size_t size_; // block size in bytes
void* ptr_; // memory address
BlockPtr prev_;
BlockPtr next_;
bool in_use_;
Block(int device,
size_t size,
void* ptr = nullptr,
BlockPtr prev = nullptr,
BlockPtr next = nullptr)
: device_(device),
size_(size),
ptr_(ptr),
prev_(prev),
next_(next),
in_use_(false) {}
};
struct BlockComparator {
bool operator()(const BlockPtr& a, const BlockPtr& b) const {
// Not on the same device: treat as smaller, will be filtered in
// lower_bound operation.
if (a->device_ != b->device_) {
return true;
}
if (a->size_ != b->size_) {
return a->size_ < b->size_;
}
return (size_t)a->ptr_ < (size_t)b->ptr_;
}
};
// Singleton cacher.
// To improve performance, the cacher will not release cuda memory when Free is
// called. Instead, it caches the memory in the pools' (tree) nodes and reuses it
// in subsequent Malloc calls via size queries. Each node can be split into smaller
// nodes upon malloc requests, and merged when they are all freed.
// To clear the cache, use cuda::ReleaseCache().
class CUDACacher {
public:
static std::shared_ptr<CUDACacher> GetInstance() {
if (instance_ == nullptr) {
instance_ = std::make_shared<CUDACacher>();
}
return instance_;
}
public:
typedef std::set<BlockPtr, BlockComparator> BlockPool;
inline std::shared_ptr<BlockPool>& get_pool(size_t byte_size) {
// largest "small" allocation is 1 MiB (1024 * 1024)
constexpr size_t kSmallSize = 1048576;
return byte_size <= kSmallSize ? small_block_pool_ : large_block_pool_;
}
inline size_t align_bytes(size_t byte_size, size_t alignment = 8) {
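        // Rounds byte_size up to the next multiple of `alignment`,
        // e.g. align_bytes(10) == 16 and align_bytes(16) == 16.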
return ((byte_size + alignment - 1) / alignment) * alignment;
}
CUDACacher() {
small_block_pool_ = std::make_shared<BlockPool>();
large_block_pool_ = std::make_shared<BlockPool>();
}
~CUDACacher() {
if (!allocated_blocks_.empty()) {
// Should never reach here
utility::LogError("[CUDACacher] Memory leak in destructor.");
}
ReleaseCache();
}
void* Malloc(size_t byte_size, const Device& device) {
auto find_free_block = [&](BlockPtr query_block) -> BlockPtr {
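            // Effectively a best-fit lookup: lower_bound returns the smallest
            // cached block on this device whose size is >= the request
            // (ordering defined by BlockComparator above).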
auto pool = get_pool(query_block->size_);
auto it = pool->lower_bound(query_block);
if (it != pool->end()) {
BlockPtr block = *it;
pool->erase(it);
return block;
}
return nullptr;
};
void* ptr;
size_t alloc_size = align_bytes(byte_size);
Block query_block = Block(device.GetID(), alloc_size);
BlockPtr found_block = find_free_block(&query_block);
if (found_block == nullptr) {
// Allocate a new block and insert it to the allocated pool
OPEN3D_CUDA_CHECK(hipMalloc(&ptr, alloc_size));
BlockPtr new_block = new Block(device.GetID(), alloc_size, ptr);
new_block->in_use_ = true;
allocated_blocks_.insert({ptr, new_block});
} else {
ptr = found_block->ptr_;
size_t remain_size = found_block->size_ - alloc_size;
if (remain_size > 0) {
// Split block
// found_block <-> remain_block <-> found_block->next_
BlockPtr next_block = found_block->next_;
BlockPtr remain_block =
new Block(device.GetID(), remain_size,
static_cast<char*>(ptr) + alloc_size,
found_block, next_block);
found_block->next_ = remain_block;
if (next_block) {
next_block->prev_ = remain_block;
}
// Place the remain block to cache pool
get_pool(remain_size)->emplace(remain_block);
}
found_block->size_ = alloc_size;
found_block->in_use_ = true;
allocated_blocks_.insert({ptr, found_block});
}
return ptr;
}
void Free(void* ptr, const Device& device) {
auto release_block = [&](BlockPtr block) {
auto block_pool = get_pool(block->size_);
auto it = block_pool->find(block);
if (it == block_pool->end()) {
// Should never reach here
utility::LogError(
"[CUDACacher] Linked list node {} not found in pool.",
fmt::ptr(block));
}
block_pool->erase(it);
delete block;
};
auto it = allocated_blocks_.find(ptr);
if (it == allocated_blocks_.end()) {
// Should never reach here
utility::LogError("[CUDACacher] Block should have been recorded.");
} else {
// Release memory and check if merge is required
BlockPtr block = it->second;
allocated_blocks_.erase(it);
// Merge free blocks towards 'next' direction
BlockPtr block_it = block;
while (block_it != nullptr && block_it->next_ != nullptr) {
BlockPtr next_block = block_it->next_;
if (next_block->prev_ != block_it) {
// Should never reach here
utility::LogError(
"[CUDACacher] Linked list nodes mismatch in "
"forward-direction merge.");
}
if (next_block->in_use_) {
break;
}
// Merge
block_it->next_ = next_block->next_;
if (block_it->next_) {
block_it->next_->prev_ = block_it;
}
block_it->size_ += next_block->size_;
release_block(next_block);
block_it = block_it->next_;
}
// Merge free blocks towards 'prev' direction
block_it = block;
while (block_it != nullptr && block_it->prev_ != nullptr) {
BlockPtr prev_block = block_it->prev_;
if (prev_block->next_ != block_it) {
// Should never reach here.
utility::LogError(
"[CUDACacher]: linked list nodes mismatch in "
"backward-direction merge.");
}
if (prev_block->in_use_) {
break;
}
// Merge
block_it->prev_ = prev_block->prev_;
if (block_it->prev_) {
block_it->prev_->next_ = block_it;
}
block_it->size_ += prev_block->size_;
block_it->ptr_ = prev_block->ptr_;
release_block(prev_block);
block_it = block_it->prev_;
}
block->in_use_ = false;
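            // Return the (possibly merged) block to the cache pool instead of
            // freeing it on the device; ReleaseCache() is what actually frees it.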
get_pool(block->size_)->emplace(block);
}
}
void ReleaseCache() {
size_t total_bytes = 0;
// Reference:
// https://stackoverflow.com/questions/2874441/deleting-elements-from-stdset-while-iterating
auto release_pool = [&](std::set<BlockPtr, BlockComparator>& pool) {
auto it = pool.begin();
auto end = pool.end();
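            // Only unsplit blocks (no prev/next neighbours) correspond 1:1 to a
            // device allocation and can be freed here; split blocks with live
            // siblings must stay cached.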
while (it != end) {
BlockPtr block = *it;
if (block->prev_ == nullptr && block->next_ == nullptr) {
OPEN3D_CUDA_CHECK(hipFree(block->ptr_));
total_bytes += block->size_;
delete block;
it = pool.erase(it);
} else {
++it;
}
}
};
release_pool(*small_block_pool_);
release_pool(*large_block_pool_);
utility::LogDebug("[CUDACacher] {} bytes released.", total_bytes);
}
private:
std::unordered_map<void*, BlockPtr> allocated_blocks_;
std::shared_ptr<BlockPool> small_block_pool_;
std::shared_ptr<BlockPool> large_block_pool_;
static std::shared_ptr<CUDACacher> instance_;
};
// Create the instance on initialization to avoid 'cuda error driver shutdown'
std::shared_ptr<CUDACacher> CUDACacher::instance_ = CUDACacher::GetInstance();
CUDACachedMemoryManager::CUDACachedMemoryManager() {}
void* CUDACachedMemoryManager::Malloc(size_t byte_size, const Device& device) {
if (byte_size == 0) return nullptr;
CUDADeviceSwitcher switcher(device);
if (device.GetType() == Device::DeviceType::CUDA) {
std::shared_ptr<CUDACacher> instance = CUDACacher::GetInstance();
return instance->Malloc(byte_size, device);
} else {
utility::LogError(
"[CUDACachedMemoryManager] Malloc: Unimplemented device.");
return nullptr;
}
// Should never reach here
return nullptr;
}
void CUDACachedMemoryManager::Free(void* ptr, const Device& device) {
if (ptr == nullptr) return;
CUDADeviceSwitcher switcher(device);
if (device.GetType() == Device::DeviceType::CUDA) {
if (ptr && IsCUDAPointer(ptr)) {
std::shared_ptr<CUDACacher> instance = CUDACacher::GetInstance();
instance->Free(ptr, device);
} else {
utility::LogError(
"[CUDACachedMemoryManager] Free: Invalid pointer.");
}
} else {
utility::LogError(
"[CUDACachedMemoryManager] Free: Unimplemented device.");
}
}
void CUDACachedMemoryManager::Memcpy(void* dst_ptr,
const Device& dst_device,
const void* src_ptr,
const Device& src_device,
size_t num_bytes) {
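    // Four paths: host->device, device->host, device->device on the same GPU,
    // and cross-GPU (peer-to-peer when enabled, otherwise staged through a
    // host buffer).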
if (dst_device.GetType() == Device::DeviceType::CUDA &&
src_device.GetType() == Device::DeviceType::CPU) {
CUDADeviceSwitcher switcher(dst_device);
if (!IsCUDAPointer(dst_ptr)) {
utility::LogError("dst_ptr is not a CUDA pointer.");
}
OPEN3D_CUDA_CHECK(hipMemcpy(dst_ptr, src_ptr, num_bytes,
hipMemcpyHostToDevice));
} else if (dst_device.GetType() == Device::DeviceType::CPU &&
src_device.GetType() == Device::DeviceType::CUDA) {
CUDADeviceSwitcher switcher(src_device);
if (!IsCUDAPointer(src_ptr)) {
utility::LogError("src_ptr is not a CUDA pointer.");
}
OPEN3D_CUDA_CHECK(hipMemcpy(dst_ptr, src_ptr, num_bytes,
hipMemcpyDeviceToHost));
} else if (dst_device.GetType() == Device::DeviceType::CUDA &&
src_device.GetType() == Device::DeviceType::CUDA) {
CUDADeviceSwitcher switcher(dst_device);
if (!IsCUDAPointer(dst_ptr)) {
utility::LogError("dst_ptr is not a CUDA pointer.");
}
switcher.SwitchTo(src_device);
if (!IsCUDAPointer(src_ptr)) {
utility::LogError("src_ptr is not a CUDA pointer.");
}
if (dst_device == src_device) {
switcher.SwitchTo(src_device);
OPEN3D_CUDA_CHECK(hipMemcpy(dst_ptr, src_ptr, num_bytes,
hipMemcpyDeviceToDevice));
} else if (CUDAState::GetInstance()->IsP2PEnabled(src_device.GetID(),
dst_device.GetID())) {
OPEN3D_CUDA_CHECK(hipMemcpyPeer(dst_ptr, dst_device.GetID(),
src_ptr, src_device.GetID(),
num_bytes));
} else {
void* cpu_buf = MemoryManager::Malloc(num_bytes, Device("CPU:0"));
switcher.SwitchTo(src_device);
OPEN3D_CUDA_CHECK(hipMemcpy(cpu_buf, src_ptr, num_bytes,
hipMemcpyDeviceToHost));
switcher.SwitchTo(dst_device);
OPEN3D_CUDA_CHECK(hipMemcpy(dst_ptr, cpu_buf, num_bytes,
hipMemcpyHostToDevice));
MemoryManager::Free(cpu_buf, Device("CPU:0"));
}
} else {
utility::LogError("Wrong hipMemcpyKind.");
}
}
bool CUDACachedMemoryManager::IsCUDAPointer(const void* ptr) {
hipPointerAttribute_t attributes;
hipPointerGetAttributes(&attributes, ptr);
if (attributes.devicePointer != nullptr) {
return true;
}
return false;
}
void CUDACachedMemoryManager::ReleaseCache() {
std::shared_ptr<CUDACacher> instance = CUDACacher::GetInstance();
instance->ReleaseCache();
}
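// A minimal usage sketch, kept as a comment. Assumptions: this cached manager is
// the one registered for CUDA devices in this build, and ReleaseCache() is
// reachable from user code (e.g. via cuda::ReleaseCache(), per the class comment):
//
//   core::Device gpu("CUDA:0");
//   void* buf = core::MemoryManager::Malloc(1 << 20, gpu);  // served from / added to the cache
//   core::MemoryManager::Free(buf, gpu);                    // block is cached, not freed on the device
//   core::CUDACachedMemoryManager::ReleaseCache();          // actually frees cached, unsplit blocks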
} // namespace core
} // namespace open3d
| 1abe4adbfd4e615348ae194c5944cb48574f4ce5.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <cuda.h>
#include <cuda_runtime.h>
#include <set>
#include <string>
#include <unordered_map>
#include "open3d/core/CUDAState.cuh"
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/MemoryManager.h"
namespace open3d {
namespace core {
struct Block;
// This is a simplified reimplementation of PyTorch's CUDA memory manager.
// Reference: https://git.io/JUqUA
// We need raw pointers (not smart ptrs) for exact comparison and reference
typedef Block* BlockPtr;
struct Block {
int device_; // gpu id
size_t size_; // block size in bytes
void* ptr_; // memory address
BlockPtr prev_;
BlockPtr next_;
bool in_use_;
Block(int device,
size_t size,
void* ptr = nullptr,
BlockPtr prev = nullptr,
BlockPtr next = nullptr)
: device_(device),
size_(size),
ptr_(ptr),
prev_(prev),
next_(next),
in_use_(false) {}
};
struct BlockComparator {
bool operator()(const BlockPtr& a, const BlockPtr& b) const {
// Not on the same device: treat as smaller, will be filtered in
// lower_bound operation.
if (a->device_ != b->device_) {
return true;
}
if (a->size_ != b->size_) {
return a->size_ < b->size_;
}
return (size_t)a->ptr_ < (size_t)b->ptr_;
}
};
// Singleton cacher.
// To improve performance, the cacher will not release cuda memory when Free is
// called. Instead, it caches the memory in the pools' (tree) nodes and reuses it
// in subsequent Malloc calls via size queries. Each node can be split into smaller
// nodes upon malloc requests, and merged when they are all freed.
// To clear the cache, use cuda::ReleaseCache().
class CUDACacher {
public:
static std::shared_ptr<CUDACacher> GetInstance() {
if (instance_ == nullptr) {
instance_ = std::make_shared<CUDACacher>();
}
return instance_;
}
public:
typedef std::set<BlockPtr, BlockComparator> BlockPool;
inline std::shared_ptr<BlockPool>& get_pool(size_t byte_size) {
// largest "small" allocation is 1 MiB (1024 * 1024)
constexpr size_t kSmallSize = 1048576;
return byte_size <= kSmallSize ? small_block_pool_ : large_block_pool_;
}
inline size_t align_bytes(size_t byte_size, size_t alignment = 8) {
return ((byte_size + alignment - 1) / alignment) * alignment;
}
CUDACacher() {
small_block_pool_ = std::make_shared<BlockPool>();
large_block_pool_ = std::make_shared<BlockPool>();
}
~CUDACacher() {
if (!allocated_blocks_.empty()) {
// Should never reach here
utility::LogError("[CUDACacher] Memory leak in destructor.");
}
ReleaseCache();
}
void* Malloc(size_t byte_size, const Device& device) {
auto find_free_block = [&](BlockPtr query_block) -> BlockPtr {
auto pool = get_pool(query_block->size_);
auto it = pool->lower_bound(query_block);
if (it != pool->end()) {
BlockPtr block = *it;
pool->erase(it);
return block;
}
return nullptr;
};
void* ptr;
size_t alloc_size = align_bytes(byte_size);
Block query_block = Block(device.GetID(), alloc_size);
BlockPtr found_block = find_free_block(&query_block);
if (found_block == nullptr) {
// Allocate a new block and insert it to the allocated pool
OPEN3D_CUDA_CHECK(cudaMalloc(&ptr, alloc_size));
BlockPtr new_block = new Block(device.GetID(), alloc_size, ptr);
new_block->in_use_ = true;
allocated_blocks_.insert({ptr, new_block});
} else {
ptr = found_block->ptr_;
size_t remain_size = found_block->size_ - alloc_size;
if (remain_size > 0) {
// Split block
// found_block <-> remain_block <-> found_block->next_
BlockPtr next_block = found_block->next_;
BlockPtr remain_block =
new Block(device.GetID(), remain_size,
static_cast<char*>(ptr) + alloc_size,
found_block, next_block);
found_block->next_ = remain_block;
if (next_block) {
next_block->prev_ = remain_block;
}
// Place the remain block to cache pool
get_pool(remain_size)->emplace(remain_block);
}
found_block->size_ = alloc_size;
found_block->in_use_ = true;
allocated_blocks_.insert({ptr, found_block});
}
return ptr;
}
void Free(void* ptr, const Device& device) {
auto release_block = [&](BlockPtr block) {
auto block_pool = get_pool(block->size_);
auto it = block_pool->find(block);
if (it == block_pool->end()) {
// Should never reach here
utility::LogError(
"[CUDACacher] Linked list node {} not found in pool.",
fmt::ptr(block));
}
block_pool->erase(it);
delete block;
};
auto it = allocated_blocks_.find(ptr);
if (it == allocated_blocks_.end()) {
// Should never reach here
utility::LogError("[CUDACacher] Block should have been recorded.");
} else {
// Release memory and check if merge is required
BlockPtr block = it->second;
allocated_blocks_.erase(it);
// Merge free blocks towards 'next' direction
BlockPtr block_it = block;
while (block_it != nullptr && block_it->next_ != nullptr) {
BlockPtr next_block = block_it->next_;
if (next_block->prev_ != block_it) {
// Should never reach here
utility::LogError(
"[CUDACacher] Linked list nodes mismatch in "
"forward-direction merge.");
}
if (next_block->in_use_) {
break;
}
// Merge
block_it->next_ = next_block->next_;
if (block_it->next_) {
block_it->next_->prev_ = block_it;
}
block_it->size_ += next_block->size_;
release_block(next_block);
block_it = block_it->next_;
}
// Merge free blocks towards 'prev' direction
block_it = block;
while (block_it != nullptr && block_it->prev_ != nullptr) {
BlockPtr prev_block = block_it->prev_;
if (prev_block->next_ != block_it) {
// Should never reach here.
utility::LogError(
"[CUDACacher]: linked list nodes mismatch in "
"backward-direction merge.");
}
if (prev_block->in_use_) {
break;
}
// Merge
block_it->prev_ = prev_block->prev_;
if (block_it->prev_) {
block_it->prev_->next_ = block_it;
}
block_it->size_ += prev_block->size_;
block_it->ptr_ = prev_block->ptr_;
release_block(prev_block);
block_it = block_it->prev_;
}
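// For example (sizes illustrative): freeing the middle one of three adjacent
// 256-byte blocks whose neighbours are already free coalesces them into a
// single 768-byte block before it is placed back into the pool below.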
block->in_use_ = false;
get_pool(block->size_)->emplace(block);
}
}
void ReleaseCache() {
size_t total_bytes = 0;
// Reference:
// https://stackoverflow.com/questions/2874441/deleting-elements-from-stdset-while-iterating
auto release_pool = [&](std::set<BlockPtr, BlockComparator>& pool) {
auto it = pool.begin();
auto end = pool.end();
while (it != end) {
BlockPtr block = *it;
if (block->prev_ == nullptr && block->next_ == nullptr) {
OPEN3D_CUDA_CHECK(cudaFree(block->ptr_));
total_bytes += block->size_;
delete block;
it = pool.erase(it);
} else {
++it;
}
}
};
release_pool(*small_block_pool_);
release_pool(*large_block_pool_);
utility::LogDebug("[CUDACacher] {} bytes released.", total_bytes);
}
private:
std::unordered_map<void*, BlockPtr> allocated_blocks_;
std::shared_ptr<BlockPool> small_block_pool_;
std::shared_ptr<BlockPool> large_block_pool_;
static std::shared_ptr<CUDACacher> instance_;
};
// Create the instance on initialization to avoid 'cuda error driver shutdown'
std::shared_ptr<CUDACacher> CUDACacher::instance_ = CUDACacher::GetInstance();
CUDACachedMemoryManager::CUDACachedMemoryManager() {}
void* CUDACachedMemoryManager::Malloc(size_t byte_size, const Device& device) {
if (byte_size == 0) return nullptr;
CUDADeviceSwitcher switcher(device);
if (device.GetType() == Device::DeviceType::CUDA) {
std::shared_ptr<CUDACacher> instance = CUDACacher::GetInstance();
return instance->Malloc(byte_size, device);
} else {
utility::LogError(
"[CUDACachedMemoryManager] Malloc: Unimplemented device.");
return nullptr;
}
// Should never reach here
return nullptr;
}
void CUDACachedMemoryManager::Free(void* ptr, const Device& device) {
if (ptr == nullptr) return;
CUDADeviceSwitcher switcher(device);
if (device.GetType() == Device::DeviceType::CUDA) {
if (ptr && IsCUDAPointer(ptr)) {
std::shared_ptr<CUDACacher> instance = CUDACacher::GetInstance();
instance->Free(ptr, device);
} else {
utility::LogError(
"[CUDACachedMemoryManager] Free: Invalid pointer.");
}
} else {
utility::LogError(
"[CUDACachedMemoryManager] Free: Unimplemented device.");
}
}
void CUDACachedMemoryManager::Memcpy(void* dst_ptr,
const Device& dst_device,
const void* src_ptr,
const Device& src_device,
size_t num_bytes) {
if (dst_device.GetType() == Device::DeviceType::CUDA &&
src_device.GetType() == Device::DeviceType::CPU) {
CUDADeviceSwitcher switcher(dst_device);
if (!IsCUDAPointer(dst_ptr)) {
utility::LogError("dst_ptr is not a CUDA pointer.");
}
OPEN3D_CUDA_CHECK(cudaMemcpy(dst_ptr, src_ptr, num_bytes,
cudaMemcpyHostToDevice));
} else if (dst_device.GetType() == Device::DeviceType::CPU &&
src_device.GetType() == Device::DeviceType::CUDA) {
CUDADeviceSwitcher switcher(src_device);
if (!IsCUDAPointer(src_ptr)) {
utility::LogError("src_ptr is not a CUDA pointer.");
}
OPEN3D_CUDA_CHECK(cudaMemcpy(dst_ptr, src_ptr, num_bytes,
cudaMemcpyDeviceToHost));
} else if (dst_device.GetType() == Device::DeviceType::CUDA &&
src_device.GetType() == Device::DeviceType::CUDA) {
CUDADeviceSwitcher switcher(dst_device);
if (!IsCUDAPointer(dst_ptr)) {
utility::LogError("dst_ptr is not a CUDA pointer.");
}
switcher.SwitchTo(src_device);
if (!IsCUDAPointer(src_ptr)) {
utility::LogError("src_ptr is not a CUDA pointer.");
}
if (dst_device == src_device) {
switcher.SwitchTo(src_device);
OPEN3D_CUDA_CHECK(cudaMemcpy(dst_ptr, src_ptr, num_bytes,
cudaMemcpyDeviceToDevice));
} else if (CUDAState::GetInstance()->IsP2PEnabled(src_device.GetID(),
dst_device.GetID())) {
OPEN3D_CUDA_CHECK(cudaMemcpyPeer(dst_ptr, dst_device.GetID(),
src_ptr, src_device.GetID(),
num_bytes));
} else {
void* cpu_buf = MemoryManager::Malloc(num_bytes, Device("CPU:0"));
switcher.SwitchTo(src_device);
OPEN3D_CUDA_CHECK(cudaMemcpy(cpu_buf, src_ptr, num_bytes,
cudaMemcpyDeviceToHost));
switcher.SwitchTo(dst_device);
OPEN3D_CUDA_CHECK(cudaMemcpy(dst_ptr, cpu_buf, num_bytes,
cudaMemcpyHostToDevice));
MemoryManager::Free(cpu_buf, Device("CPU:0"));
}
} else {
utility::LogError("Wrong cudaMemcpyKind.");
}
}
bool CUDACachedMemoryManager::IsCUDAPointer(const void* ptr) {
cudaPointerAttributes attributes;
cudaPointerGetAttributes(&attributes, ptr);
if (attributes.devicePointer != nullptr) {
return true;
}
return false;
}
void CUDACachedMemoryManager::ReleaseCache() {
std::shared_ptr<CUDACacher> instance = CUDACacher::GetInstance();
instance->ReleaseCache();
}
} // namespace core
} // namespace open3d
|
a13446c04c445fa2db5e255a12d84cb9fd5189ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// GPU version of Matrix addition
#include <stdio.h>
#include <math.h>
const int N = 1024;
const int blocksize = (N>32)?32:N; //MAX threads per block = 1024 Sqrt(1024)=32
__global__
void add_matrix(float *a, float *b, float *c, int N, int gridsize)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int id = idy*gridsize*blockDim.x + idx;
if(id < N*N){
c[id] = a[id] + b[id];
}
}
int main()
{
float *a = new float[N*N];
float *b = new float[N*N];
float *c = new float[N*N];
float *a_g;
float *b_g;
float *c_g;
float t; //execution time in ms
int gridsize;
gridsize = (int)ceil((double)N/blocksize); // handle the case where N % blocksize != 0
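// With N = 1024 and blocksize = 32 this gives gridsize = 32, i.e. a 32 x 32
// grid of 32 x 32-thread blocks covering the 1024 x 1024 matrix exactly.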
size_t size = N*N*sizeof(float);
hipEvent_t start, end;
/*
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("Name: %s\n",prop.name);
printf("MAX Threads per block: %d\n", prop.maxThreadsPerBlock);
printf("MAX Grid: [%d %d %d]\n", prop.maxGridSize[0],prop.maxGridSize[1],prop.maxGridSize[2]);
printf("MAX shared Mem per block: %lu\n", prop.sharedMemPerBlock);
//more property can be find : https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html#group__CUDART__DEVICE_1g1bf9d625a931d657e08db2b4391170f0
*/
hipEventCreate(&start);
hipEventCreate(&end);
dim3 dimBlock(blocksize, blocksize);
dim3 dimGrid(gridsize, gridsize);
hipMalloc((void**)&a_g, size);
hipMalloc((void**)&b_g, size);
hipMalloc((void**)&c_g, size);
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
a[i+j*N] = 10 + i;
b[i+j*N] = (float)j / N;
}
}
hipMemcpy(a_g, a, size, hipMemcpyHostToDevice);
hipMemcpy(b_g, b, size, hipMemcpyHostToDevice);
hipEventRecord(start, 0); // record just before the kernel so t measures kernel time
hipLaunchKernelGGL(( add_matrix), dim3(dimGrid), dim3(dimBlock), 0, 0, a_g, b_g, c_g, N, gridsize);
hipDeviceSynchronize();
hipEventRecord(end, 0); // record after the kernel has completed
hipEventSynchronize(end);
hipEventElapsedTime(&t, start, end);
hipMemcpy(c, c_g, size, hipMemcpyDeviceToHost);
int i;
FILE *f = fopen("gpu.txt", "wb");
for (i = 0; i < N*N; i++) {
fprintf(f, "%f\n", c[i]);
}
fclose(f);
/*
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
printf("%0.2f ", c[i+j*N]);
}
printf("\n");
}*/
delete[] a;
delete[] b;
delete[] c;
hipFree(a_g);
hipFree(b_g);
hipFree(c_g);
printf("Cost %0.8f miliseconds\n", t);
return EXIT_SUCCESS;
}
| a13446c04c445fa2db5e255a12d84cb9fd5189ff.cu | // GPU version of Matrix addition
#include <stdio.h>
#include <math.h>
const int N = 1024;
const int blocksize = (N>32)?32:N; //MAX threads per block = 1024 Sqrt(1024)=32
__global__
void add_matrix(float *a, float *b, float *c, int N, int gridsize)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int id = idy*gridsize*blockDim.x + idx;
if(id < N*N){
c[id] = a[id] + b[id];
}
}
int main()
{
float *a = new float[N*N];
float *b = new float[N*N];
float *c = new float[N*N];
float *a_g;
float *b_g;
float *c_g;
float t; //execution time in ms
int gridsize;
gridsize = (int)ceil((double)N/blocksize); // handle the case where N % blocksize != 0
size_t size = N*N*sizeof(float);
cudaEvent_t start, end;
/*
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
printf("Name: %s\n",prop.name);
printf("MAX Threads per block: %d\n", prop.maxThreadsPerBlock);
printf("MAX Grid: [%d %d %d]\n", prop.maxGridSize[0],prop.maxGridSize[1],prop.maxGridSize[2]);
printf("MAX shared Mem per block: %lu\n", prop.sharedMemPerBlock);
//more property can be find : https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html#group__CUDART__DEVICE_1g1bf9d625a931d657e08db2b4391170f0
*/
cudaEventCreate(&start);
cudaEventCreate(&end);
dim3 dimBlock(blocksize, blocksize);
dim3 dimGrid(gridsize, gridsize);
cudaMalloc((void**)&a_g, size);
cudaMalloc((void**)&b_g, size);
cudaMalloc((void**)&c_g, size);
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
a[i+j*N] = 10 + i;
b[i+j*N] = (float)j / N;
}
}
cudaMemcpy(a_g, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(b_g, b, size, cudaMemcpyHostToDevice);
cudaEventRecord(start, 0); // record just before the kernel so t measures kernel time
add_matrix<<<dimGrid, dimBlock>>>(a_g, b_g, c_g, N, gridsize);
cudaDeviceSynchronize();
cudaEventRecord(end, 0); // record after the kernel has completed
cudaEventSynchronize(end);
cudaEventElapsedTime(&t, start, end);
cudaMemcpy(c, c_g, size, cudaMemcpyDeviceToHost);
int i;
FILE *f = fopen("gpu.txt", "wb");
for (i = 0; i < N*N; i++) {
fprintf(f, "%f\n", c[i]);
}
fclose(f);
/*
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
printf("%0.2f ", c[i+j*N]);
}
printf("\n");
}*/
delete[] a;
delete[] b;
delete[] c;
cudaFree(a_g);
cudaFree(b_g);
cudaFree(c_g);
printf("Cost %0.8f miliseconds\n", t);
return EXIT_SUCCESS;
}
|
88543d317b9084df71ab30784041d4e33787db81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/NumericUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
namespace at {
namespace cuda {
#define THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM 100
#define THRESH_NUMBER_BINS_FOR_GLOBAL_MEM 1000
#define FOR_KERNEL_LOOP(i, lim) \
for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < lim; \
i += gridDim.x * blockDim.x)
/*
Memory types used for the 3 histogram implementations.
See `CUDA_tensor_histogram` below.
*/
enum class CUDAHistogramMemoryType { SHARED, MULTI_BLOCK, GLOBAL };
namespace {
template<typename input_t, typename IndexType>
__device__ static IndexType getBin(input_t bVal, input_t minvalue, input_t maxvalue, int64_t nbins) {
IndexType bin = (int)((bVal - minvalue) * nbins / (maxvalue - minvalue));
// (only applicable for histc)
// while each bin is inclusive at the lower end and exclusive at the higher, i.e. [start, end)
// the last bin is inclusive at both, i.e. [start, end], in order to include maxvalue if exists
// therefore when bin == nbins, adjust bin to the last bin
if (bin == nbins) bin -= 1;
return bin;
}
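// Worked example (values illustrative): with minvalue = 0, maxvalue = 10 and
// nbins = 5, bVal = 4.2 falls into bin (int)(4.2 * 5 / 10) = 2, while
// bVal = 10 would give bin = 5 and is clamped to the last bin, 4.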
}
/*
Kernel for computing the histogram of the input.
*/
template <
typename output_t,
typename input_t,
typename IndexType,
int ADims,
int PDims,
int BDims,
CUDAHistogramMemoryType MemoryType = CUDAHistogramMemoryType::MULTI_BLOCK,
typename Op>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void kernelHistogram1D(
detail::TensorInfo<output_t, IndexType> a, /* output */
detail::TensorInfo<output_t, IndexType> p, /* partial output */
detail::TensorInfo<input_t, IndexType> b, /* input */
int64_t nbins,
input_t minvalue,
input_t maxvalue,
IndexType totalElements,
Op getOp) {
extern __shared__ unsigned char my_smem[];
output_t* smem = nullptr;
if (MemoryType == CUDAHistogramMemoryType::SHARED) {
////////////////////////// Shared memory //////////////////////////
// atomically add to block specific shared memory
// then atomically add to the global output tensor
smem = reinterpret_cast<output_t*>(my_smem);
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) {
smem[i] = 0;
}
__syncthreads();
FOR_KERNEL_LOOP(linearIndex, totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b);
const auto bVal = b.data[bOffset];
if (bVal >= minvalue && bVal <= maxvalue) {
// Use value at `b` as an offset of `smem`
const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins);
gpuAtomicAddNoReturn(&smem[bin], getOp(linearIndex));
}
}
__syncthreads();
// NOTE: atomically update output bin count.
// Atomic update is important since __syncthreads() only synchronizes threads
// in a given block, not across blocks.
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) {
const IndexType aOffset =
detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a);
gpuAtomicAddNoReturn(&a.data[aOffset], smem[i]);
}
} else if (MemoryType == CUDAHistogramMemoryType::MULTI_BLOCK) {
////////////////////////// Multi Block memory //////////////////////////
// atomically add to block specific global tensor
// then atomically add to the global output tensor
// compute histogram for the block
FOR_KERNEL_LOOP(linearIndex, totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b);
const auto bVal = b.data[bOffset];
if (bVal >= minvalue && bVal <= maxvalue) {
// Use value at `b` as an offset of `p`
const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins);
const IndexType pIdx = p.strides[0] * blockIdx.x + bin;
const IndexType pOffset =
detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p);
gpuAtomicAddNoReturn(&p.data[pOffset], getOp(linearIndex));
}
}
__syncthreads();
// NOTE: atomically update output bin count.
// Atomic update is important since __syncthreads() only synchronizes threads
// in a given block, not across blocks.
const IndexType pIdx = p.strides[0] * blockIdx.x;
const IndexType pOffset =
detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p);
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) {
const IndexType aOffset =
detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a);
gpuAtomicAddNoReturn(&a.data[aOffset], p.data[pOffset + i]);
}
} else {
////////////////////////// Global memory //////////////////////////
// atomically add to the output tensor
// compute histogram for the block
FOR_KERNEL_LOOP(linearIndex, totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b);
const auto bVal = b.data[bOffset];
if (bVal >= minvalue && bVal <= maxvalue) {
// Use value at `b` as an offset of `a`
const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins);
const IndexType aOffset =
detail::IndexToOffset<output_t, IndexType, ADims>::get(bin, a);
gpuAtomicAddNoReturn(&a.data[aOffset], getOp(linearIndex));
}
}
}
}
#define HANDLE_CASE(MEMORY_TYPE, WEIGHTS_OP, SHARED_MEM) \
hipLaunchKernelGGL(( kernelHistogram1D<output_t, input_t, IndexType, 1, 2, -1, MEMORY_TYPE>) \
, dim3(grid), \
block, \
SHARED_MEM, \
getCurrentHIPStreamMasqueradingAsCUDA(), \
aInfo, pInfo, bInfo, nbins, minvalue, maxvalue, totalElements, WEIGHTS_OP); \
C10_HIP_KERNEL_LAUNCH_CHECK();
#define HANDLE_SWITCH_CASE(mType, getOp) \
switch (mType) { \
case CUDAHistogramMemoryType::SHARED: \
HANDLE_CASE(CUDAHistogramMemoryType::SHARED, getOp, sharedMem); \
break; \
case CUDAHistogramMemoryType::MULTI_BLOCK: \
HANDLE_CASE(CUDAHistogramMemoryType::MULTI_BLOCK, getOp, 0); \
break; \
default: \
HANDLE_CASE(CUDAHistogramMemoryType::GLOBAL, getOp, 0); \
}
inline int64_t getFreeGlobalMemory() {
// no need to use `hipSetDevice`
size_t free_mem, total_mem;
hipMemGetInfo(&free_mem, &total_mem);
TORCH_INTERNAL_ASSERT(
hipGetLastError() == hipSuccess,
"CUDA_tensor_histogram failed to get free global memory");
return static_cast<int64_t>(free_mem);
}
/*
Calculate the frequency of the input values.
`a` contains the final output or the histogram.
Input `b` is assumed to be 1-D non-negative int array.
`c` optionally contains the weight vector.
See `help torch.bincount` for details on the math.
3 implementations based on input size and memory usage:
case: #bins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM and enough shared mem
SHARED: Each block atomically adds to its own **shared** hist copy,
then atomically updates the global tensor.
case: #bins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM and enough global mem
MULTI_BLOCK: Each block atomically adds to its own **global** hist
copy, then atomically updates the global tensor.
case: THRESH_NUMBER_BINS_FOR_GLOBAL_MEM <= #bins
GLOBAL: all threads atomically update to a single **global** hist copy.
*/
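// Illustrative sizing (assuming a float output type): nbins = 64 needs
// sharedMem = 64 * 4 + 8 = 264 bytes, comfortably below sharedMemPerBlock, so
// the SHARED path is chosen; nbins = 500 falls back to MULTI_BLOCK when half
// of free global memory suffices; nbins >= 1000 always uses GLOBAL.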
template <typename output_t, typename input_t, bool HasWeights>
bool CUDA_tensor_histogram(
at::Tensor a, /* output */
at::Tensor b, /* input */
at::Tensor c, /* weights(optional) */
int64_t nbins,
input_t minvalue,
input_t maxvalue,
TensorArgType aType = TensorArgType::ReadWrite,
TensorArgType bType = TensorArgType::ReadOnly,
TensorArgType cType = TensorArgType::ReadOnly) {
checkBackend("CUDA_tensor_histogram", {a, b}, Backend::CUDA);
if (HasWeights) {
checkBackend("CUDA_tensor_histogram", {c}, Backend::CUDA);
}
auto totalElements = b.numel();
if (totalElements == 0) {
return false;
}
const dim3 block = getApplyBlock();
dim3 grid;
int64_t curDevice = current_device();
if (curDevice == -1 || !getApplyGrid(totalElements, grid, curDevice)) {
return false;
}
CUDAHistogramMemoryType memType = CUDAHistogramMemoryType::GLOBAL;
auto maxSharedMem = getCurrentDeviceProperties()->sharedMemPerBlock;
auto sharedMem = nbins * sizeof(output_t) + 8; // 8 guard bytes
auto maxGlobalMem = getFreeGlobalMemory();
auto multiBlockMem = nbins * grid.x * sizeof(output_t) + 8; // 8 guard bytes
// determine memory type to use in the kernel
if (nbins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM &&
sharedMem < maxSharedMem) {
memType = CUDAHistogramMemoryType::SHARED;
} else if (
nbins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM &&
multiBlockMem < (maxGlobalMem / 2)) {
// check against half of free mem to be extra safe
// due to cached allocator, we may anyway have slightly more free mem
memType = CUDAHistogramMemoryType::MULTI_BLOCK;
}
// alloc memory for MULTI_BLOCK
using IndexType = int64_t;
auto aInfo = detail::getTensorInfo<output_t, IndexType>(a);
auto bInfo = detail::getTensorInfo<input_t, IndexType>(b);
detail::TensorInfo<output_t, IndexType> pInfo(nullptr, 0, {}, {});
Tensor partial_output;
if (memType == CUDAHistogramMemoryType::MULTI_BLOCK) {
partial_output = native::zeros(
{grid.x, nbins},
optTypeMetaToScalarType(a.options().dtype_opt()),
a.options().layout_opt(),
a.options().device_opt(),
a.options().pinned_memory_opt());
pInfo = detail::getTensorInfo<output_t, IndexType>(partial_output);
}
if (HasWeights) {
auto cInfo = detail::getTensorInfo<output_t, IndexType>(c);
const auto getWeightsOp = [cInfo] __device__(IndexType cIndex) {
const IndexType cOffset =
detail::IndexToOffset<output_t, IndexType, 1>::get(cIndex, cInfo);
return cInfo.data[cOffset];
};
HANDLE_SWITCH_CASE(memType, getWeightsOp)
} else {
static const auto getDummyOp = [] __device__(IndexType) { return 1L; };
HANDLE_SWITCH_CASE(memType, getDummyOp)
}
return true;
}
#undef HANDLE_CASE
#undef HANDLE_SWITCH_CASE
#undef FOR_KERNEL_LOOP
#undef THRESH_NUMBER_BINS_FOR_GLOBAL_MEM
#undef THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM
} // namespace cuda
namespace {
///////////////// bincount /////////////////
template <typename input_t, typename weights_t>
Tensor _bincount_cuda_template(
const Tensor& self,
const Tensor& weights,
int64_t minlength) {
if (minlength < 0) {
AT_ERROR("minlength should be >= 0");
}
if (self.dim() == 1 && self.numel() == 0) {
return native::zeros(
{minlength},
kLong,
c10::nullopt /* layout */,
kCUDA,
c10::nullopt /* pin_memory */);
}
if (self.dim() != 1 ||
(!std::is_same<input_t, uint8_t>::value &&
*self.min().cpu().data_ptr<input_t>() < 0)) {
AT_ERROR("bincount only supports 1-d non-negative integral inputs.");
}
bool has_weights = weights.defined();
if (has_weights && weights.size(0) != self.size(0)) {
AT_ERROR("input and weights should have the same length");
}
const int64_t nbins = ::max(*self.max().cpu().data_ptr<input_t>() + (int64_t)1, minlength);
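// For example (input illustrative): input {1, 1, 3} with minlength = 0 gives
// nbins = 4 and an output histogram of {0, 2, 0, 1}.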
const input_t minvalue = 0;
const input_t maxvalue = nbins;
// alloc output counter on GPU
Tensor output;
if (has_weights) {
output = native::zeros(
{nbins},
optTypeMetaToScalarType(weights.options().dtype_opt()),
weights.options().layout_opt(),
weights.options().device_opt(),
weights.options().pinned_memory_opt());
cuda::CUDA_tensor_histogram<weights_t, input_t, true>(
output, self, weights, nbins, minvalue, maxvalue);
} else {
output = native::zeros(
{nbins},
kLong,
c10::nullopt /* layout */,
DeviceType::CUDA,
c10::nullopt /* pin_memory */);
cuda::CUDA_tensor_histogram<int64_t, input_t, false>(
output, self, weights, nbins, minvalue, maxvalue);
}
return output;
}
///////////////// histc /////////////////
template <typename input_t>
Tensor _histc_cuda_template(
const Tensor& self,
int64_t nbins,
input_t min,
input_t max) {
if (nbins <= 0) {
AT_ERROR("bins must be > 0");
}
Tensor output = native::zeros(
{nbins},
self.scalar_type(),
c10::nullopt /* layout */,
DeviceType::CUDA,
c10::nullopt /* pin_memory */);
input_t minvalue = min;
input_t maxvalue = max;
if (min == max && self.numel() > 0) {
minvalue = *self.min().cpu().data_ptr<input_t>();
maxvalue = *self.max().cpu().data_ptr<input_t>();
}
if (minvalue == maxvalue) {
minvalue = minvalue - 1;
maxvalue = maxvalue + 1;
}
#if !defined(USE_ROCM)
TORCH_CHECK(
!(at::_isinf(minvalue) || at::_isinf(maxvalue) ||
at::_isnan(minvalue) || at::_isnan(maxvalue)),
"range of [",
minvalue,
", ",
maxvalue,
"] is not finite");
#else
TORCH_CHECK(
!(std::isinf(minvalue) || std::isinf(maxvalue) || std::isnan(minvalue) ||
std::isnan(maxvalue)),
"range of [",
minvalue,
", ",
maxvalue,
"] is not finite");
#endif
TORCH_CHECK(minvalue < maxvalue, "max must be larger than min");
cuda::CUDA_tensor_histogram<input_t, input_t, false>(
output, self, Tensor(), nbins, minvalue, maxvalue);
return output;
}
} // namespace
namespace native {
Tensor _bincount_cuda(
const Tensor& self, const c10::optional<Tensor>& weights_opt,
int64_t minlength) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weights_maybe_owned = at::borrow_from_optional_tensor(weights_opt);
const Tensor& weights = *weights_maybe_owned;
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("_bincount_cuda");
return AT_DISPATCH_INTEGRAL_TYPES(self.scalar_type(), "bincount_cuda", [&] {
const auto scalar = weights.scalar_type();
if (scalar == ScalarType::Undefined || scalar == ScalarType::Float)
return _bincount_cuda_template<scalar_t, float>(self, weights, minlength);
return _bincount_cuda_template<scalar_t, double>(
self, weights.to(kDouble), minlength);
});
}
Tensor _histc_cuda(
const Tensor& self,
int64_t nbins,
const Scalar& min,
const Scalar& max) {
if (self.scalar_type() == ScalarType::Half) {
AT_ERROR("HalfTensor is not supported");
}
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("_histc_cuda");
return AT_DISPATCH_ALL_TYPES(self.scalar_type(), "histc", [&] {
return _histc_cuda_template<scalar_t>(self, nbins, min.to<scalar_t>(), max.to<scalar_t>());
});
}
Tensor& _histc_out_cuda(const Tensor& self, int64_t bins, const Scalar& min, const Scalar& max, Tensor& result) {
auto ret = _histc_cuda(self, bins, min, max);
resize_output(result, ret.sizes());
result.copy_(ret);
return result;
}
} // namespace native
} // namespace at
| 88543d317b9084df71ab30784041d4e33787db81.cu | #include <ATen/ATen.h>
#include <ATen/NumericUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
namespace at {
namespace cuda {
#define THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM 100
#define THRESH_NUMBER_BINS_FOR_GLOBAL_MEM 1000
#define FOR_KERNEL_LOOP(i, lim) \
for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < lim; \
i += gridDim.x * blockDim.x)
/*
Memory types used for the 3 histogram implementations.
See `CUDA_tensor_histogram` below.
*/
enum class CUDAHistogramMemoryType { SHARED, MULTI_BLOCK, GLOBAL };
namespace {
template<typename input_t, typename IndexType>
__device__ static IndexType getBin(input_t bVal, input_t minvalue, input_t maxvalue, int64_t nbins) {
IndexType bin = (int)((bVal - minvalue) * nbins / (maxvalue - minvalue));
// (only applicable for histc)
// while each bin is inclusive at the lower end and exclusive at the higher, i.e. [start, end)
// the last bin is inclusive at both, i.e. [start, end], in order to include maxvalue if exists
// therefore when bin == nbins, adjust bin to the last bin
if (bin == nbins) bin -= 1;
return bin;
}
}
/*
Kernel for computing the histogram of the input.
*/
template <
typename output_t,
typename input_t,
typename IndexType,
int ADims,
int PDims,
int BDims,
CUDAHistogramMemoryType MemoryType = CUDAHistogramMemoryType::MULTI_BLOCK,
typename Op>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void kernelHistogram1D(
detail::TensorInfo<output_t, IndexType> a, /* output */
detail::TensorInfo<output_t, IndexType> p, /* partial output */
detail::TensorInfo<input_t, IndexType> b, /* input */
int64_t nbins,
input_t minvalue,
input_t maxvalue,
IndexType totalElements,
Op getOp) {
extern __shared__ unsigned char my_smem[];
output_t* smem = nullptr;
if (MemoryType == CUDAHistogramMemoryType::SHARED) {
////////////////////////// Shared memory //////////////////////////
// atomically add to block specific shared memory
// then atomically add to the global output tensor
smem = reinterpret_cast<output_t*>(my_smem);
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) {
smem[i] = 0;
}
__syncthreads();
FOR_KERNEL_LOOP(linearIndex, totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b);
const auto bVal = b.data[bOffset];
if (bVal >= minvalue && bVal <= maxvalue) {
// Use value at `b` as an offset of `smem`
const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins);
gpuAtomicAddNoReturn(&smem[bin], getOp(linearIndex));
}
}
__syncthreads();
// NOTE: atomically update output bin count.
// Atomic update is important since __syncthreads() only synchronizes threads
// in a given block, not across blocks.
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) {
const IndexType aOffset =
detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a);
gpuAtomicAddNoReturn(&a.data[aOffset], smem[i]);
}
} else if (MemoryType == CUDAHistogramMemoryType::MULTI_BLOCK) {
////////////////////////// Multi Block memory //////////////////////////
// atomically add to block specific global tensor
// then atomically add to the global output tensor
// compute histogram for the block
FOR_KERNEL_LOOP(linearIndex, totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b);
const auto bVal = b.data[bOffset];
if (bVal >= minvalue && bVal <= maxvalue) {
// Use value at `b` as an offset of `p`
const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins);
const IndexType pIdx = p.strides[0] * blockIdx.x + bin;
const IndexType pOffset =
detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p);
gpuAtomicAddNoReturn(&p.data[pOffset], getOp(linearIndex));
}
}
__syncthreads();
// NOTE: atomically update output bin count.
// Atomic update is important since __syncthreads() only synchronizes threads
// in a given block, not across blocks.
const IndexType pIdx = p.strides[0] * blockIdx.x;
const IndexType pOffset =
detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p);
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) {
const IndexType aOffset =
detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a);
gpuAtomicAddNoReturn(&a.data[aOffset], p.data[pOffset + i]);
}
} else {
////////////////////////// Global memory //////////////////////////
// atomically add to the output tensor
// compute histogram for the block
FOR_KERNEL_LOOP(linearIndex, totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b);
const auto bVal = b.data[bOffset];
if (bVal >= minvalue && bVal <= maxvalue) {
// Use value at `b` as an offset of `a`
const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins);
const IndexType aOffset =
detail::IndexToOffset<output_t, IndexType, ADims>::get(bin, a);
gpuAtomicAddNoReturn(&a.data[aOffset], getOp(linearIndex));
}
}
}
}
#define HANDLE_CASE(MEMORY_TYPE, WEIGHTS_OP, SHARED_MEM) \
kernelHistogram1D<output_t, input_t, IndexType, 1, 2, -1, MEMORY_TYPE> \
<<<grid, \
block, \
SHARED_MEM, \
getCurrentCUDAStream()>>>( \
aInfo, pInfo, bInfo, nbins, minvalue, maxvalue, totalElements, WEIGHTS_OP); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
#define HANDLE_SWITCH_CASE(mType, getOp) \
switch (mType) { \
case CUDAHistogramMemoryType::SHARED: \
HANDLE_CASE(CUDAHistogramMemoryType::SHARED, getOp, sharedMem); \
break; \
case CUDAHistogramMemoryType::MULTI_BLOCK: \
HANDLE_CASE(CUDAHistogramMemoryType::MULTI_BLOCK, getOp, 0); \
break; \
default: \
HANDLE_CASE(CUDAHistogramMemoryType::GLOBAL, getOp, 0); \
}
inline int64_t getFreeGlobalMemory() {
// no need to use `cudaSetDevice`
size_t free_mem, total_mem;
cudaMemGetInfo(&free_mem, &total_mem);
TORCH_INTERNAL_ASSERT(
cudaGetLastError() == cudaSuccess,
"CUDA_tensor_histogram failed to get free global memory");
return static_cast<int64_t>(free_mem);
}
/*
Calculate the frequency of the input values.
`a` contains the final output or the histogram.
Input `b` is assumed to be 1-D non-negative int array.
`c` optionally contains the weight vector.
See `help torch.bincount` for details on the math.
3 implementations based on input size and memory usage:
case: #bins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM and enough shared mem
SHARED: Each block atomically adds to its own **shared** hist copy,
then atomically updates the global tensor.
case: #bins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM and enough global mem
MULTI_BLOCK: Each block atomically adds to its own **global** hist
copy, then atomically updates the global tensor.
case: THRESH_NUMBER_BINS_FOR_GLOBAL_MEM <= #bins
GLOBAL: all threads atomically update to a single **global** hist copy.
*/
template <typename output_t, typename input_t, bool HasWeights>
bool CUDA_tensor_histogram(
at::Tensor a, /* output */
at::Tensor b, /* input */
at::Tensor c, /* weights(optional) */
int64_t nbins,
input_t minvalue,
input_t maxvalue,
TensorArgType aType = TensorArgType::ReadWrite,
TensorArgType bType = TensorArgType::ReadOnly,
TensorArgType cType = TensorArgType::ReadOnly) {
checkBackend("CUDA_tensor_histogram", {a, b}, Backend::CUDA);
if (HasWeights) {
checkBackend("CUDA_tensor_histogram", {c}, Backend::CUDA);
}
auto totalElements = b.numel();
if (totalElements == 0) {
return false;
}
const dim3 block = getApplyBlock();
dim3 grid;
int64_t curDevice = current_device();
if (curDevice == -1 || !getApplyGrid(totalElements, grid, curDevice)) {
return false;
}
CUDAHistogramMemoryType memType = CUDAHistogramMemoryType::GLOBAL;
auto maxSharedMem = getCurrentDeviceProperties()->sharedMemPerBlock;
auto sharedMem = nbins * sizeof(output_t) + 8; // 8 guard bytes
auto maxGlobalMem = getFreeGlobalMemory();
auto multiBlockMem = nbins * grid.x * sizeof(output_t) + 8; // 8 guard bytes
// determine memory type to use in the kernel
if (nbins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM &&
sharedMem < maxSharedMem) {
memType = CUDAHistogramMemoryType::SHARED;
} else if (
nbins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM &&
multiBlockMem < (maxGlobalMem / 2)) {
// check against half of free mem to be extra safe
// due to cached allocator, we may anyway have slightly more free mem
memType = CUDAHistogramMemoryType::MULTI_BLOCK;
}
// alloc memory for MULTI_BLOCK
using IndexType = int64_t;
auto aInfo = detail::getTensorInfo<output_t, IndexType>(a);
auto bInfo = detail::getTensorInfo<input_t, IndexType>(b);
detail::TensorInfo<output_t, IndexType> pInfo(nullptr, 0, {}, {});
Tensor partial_output;
if (memType == CUDAHistogramMemoryType::MULTI_BLOCK) {
partial_output = native::zeros(
{grid.x, nbins},
optTypeMetaToScalarType(a.options().dtype_opt()),
a.options().layout_opt(),
a.options().device_opt(),
a.options().pinned_memory_opt());
pInfo = detail::getTensorInfo<output_t, IndexType>(partial_output);
}
if (HasWeights) {
auto cInfo = detail::getTensorInfo<output_t, IndexType>(c);
const auto getWeightsOp = [cInfo] __device__(IndexType cIndex) {
const IndexType cOffset =
detail::IndexToOffset<output_t, IndexType, 1>::get(cIndex, cInfo);
return cInfo.data[cOffset];
};
HANDLE_SWITCH_CASE(memType, getWeightsOp)
} else {
static const auto getDummyOp = [] __device__(IndexType) { return 1L; };
HANDLE_SWITCH_CASE(memType, getDummyOp)
}
return true;
}
#undef HANDLE_CASE
#undef HANDLE_SWITCH_CASE
#undef FOR_KERNEL_LOOP
#undef THRESH_NUMBER_BINS_FOR_GLOBAL_MEM
#undef THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM
} // namespace cuda
namespace {
///////////////// bincount /////////////////
template <typename input_t, typename weights_t>
Tensor _bincount_cuda_template(
const Tensor& self,
const Tensor& weights,
int64_t minlength) {
if (minlength < 0) {
AT_ERROR("minlength should be >= 0");
}
if (self.dim() == 1 && self.numel() == 0) {
return native::zeros(
{minlength},
kLong,
c10::nullopt /* layout */,
kCUDA,
c10::nullopt /* pin_memory */);
}
if (self.dim() != 1 ||
(!std::is_same<input_t, uint8_t>::value &&
*self.min().cpu().data_ptr<input_t>() < 0)) {
AT_ERROR("bincount only supports 1-d non-negative integral inputs.");
}
bool has_weights = weights.defined();
if (has_weights && weights.size(0) != self.size(0)) {
AT_ERROR("input and weights should have the same length");
}
const int64_t nbins = std::max(*self.max().cpu().data_ptr<input_t>() + (int64_t)1, minlength);
const input_t minvalue = 0;
const input_t maxvalue = nbins;
// alloc output counter on GPU
Tensor output;
if (has_weights) {
output = native::zeros(
{nbins},
optTypeMetaToScalarType(weights.options().dtype_opt()),
weights.options().layout_opt(),
weights.options().device_opt(),
weights.options().pinned_memory_opt());
cuda::CUDA_tensor_histogram<weights_t, input_t, true>(
output, self, weights, nbins, minvalue, maxvalue);
} else {
output = native::zeros(
{nbins},
kLong,
c10::nullopt /* layout */,
DeviceType::CUDA,
c10::nullopt /* pin_memory */);
cuda::CUDA_tensor_histogram<int64_t, input_t, false>(
output, self, weights, nbins, minvalue, maxvalue);
}
return output;
}
///////////////// histc /////////////////
template <typename input_t>
Tensor _histc_cuda_template(
const Tensor& self,
int64_t nbins,
input_t min,
input_t max) {
if (nbins <= 0) {
AT_ERROR("bins must be > 0");
}
Tensor output = native::zeros(
{nbins},
self.scalar_type(),
c10::nullopt /* layout */,
DeviceType::CUDA,
c10::nullopt /* pin_memory */);
input_t minvalue = min;
input_t maxvalue = max;
if (min == max && self.numel() > 0) {
minvalue = *self.min().cpu().data_ptr<input_t>();
maxvalue = *self.max().cpu().data_ptr<input_t>();
}
if (minvalue == maxvalue) {
minvalue = minvalue - 1;
maxvalue = maxvalue + 1;
}
#if !defined(USE_ROCM)
TORCH_CHECK(
!(at::_isinf(minvalue) || at::_isinf(maxvalue) ||
at::_isnan(minvalue) || at::_isnan(maxvalue)),
"range of [",
minvalue,
", ",
maxvalue,
"] is not finite");
#else
TORCH_CHECK(
!(std::isinf(minvalue) || std::isinf(maxvalue) || std::isnan(minvalue) ||
std::isnan(maxvalue)),
"range of [",
minvalue,
", ",
maxvalue,
"] is not finite");
#endif
TORCH_CHECK(minvalue < maxvalue, "max must be larger than min");
cuda::CUDA_tensor_histogram<input_t, input_t, false>(
output, self, Tensor(), nbins, minvalue, maxvalue);
return output;
}
} // namespace
namespace native {
Tensor _bincount_cuda(
const Tensor& self, const c10::optional<Tensor>& weights_opt,
int64_t minlength) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weights_maybe_owned = at::borrow_from_optional_tensor(weights_opt);
const Tensor& weights = *weights_maybe_owned;
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("_bincount_cuda");
return AT_DISPATCH_INTEGRAL_TYPES(self.scalar_type(), "bincount_cuda", [&] {
const auto scalar = weights.scalar_type();
if (scalar == ScalarType::Undefined || scalar == ScalarType::Float)
return _bincount_cuda_template<scalar_t, float>(self, weights, minlength);
return _bincount_cuda_template<scalar_t, double>(
self, weights.to(kDouble), minlength);
});
}
Tensor _histc_cuda(
const Tensor& self,
int64_t nbins,
const Scalar& min,
const Scalar& max) {
if (self.scalar_type() == ScalarType::Half) {
AT_ERROR("HalfTensor is not supported");
}
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("_histc_cuda");
return AT_DISPATCH_ALL_TYPES(self.scalar_type(), "histc", [&] {
return _histc_cuda_template<scalar_t>(self, nbins, min.to<scalar_t>(), max.to<scalar_t>());
});
}
Tensor& _histc_out_cuda(const Tensor& self, int64_t bins, const Scalar& min, const Scalar& max, Tensor& result) {
auto ret = _histc_cuda(self, bins, min, max);
resize_output(result, ret.sizes());
result.copy_(ret);
return result;
}
} // namespace native
} // namespace at
|
abe2d0b262bb9a8410b19410f8eb48ff742a354e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***********************************************************/
/** \file
\brief update voxel block (integrate)
\details
\author Yizhong Zhang
\date 12/9/2013
*/
/***********************************************************/
#include "device_utils.h"
#include "voxel_hashing_device.h"
#include "voxel_hashing_internal.h"
#include "voxel_block_hash_table.cuh"
#include "kernel_containers.h"
// ==================================================================
template<int CTA_SIZE_, typename T>
static __device__ __forceinline__ void reduceMin(volatile T* buffer)
{
int tid = dfusion::Block::flattenedThreadId();
T val = buffer[tid];
if (CTA_SIZE_ >= 1024) { if (tid < 512) buffer[tid] = val = min(val, buffer[tid + 512]); __syncthreads(); }
if (CTA_SIZE_ >= 512) { if (tid < 256) buffer[tid] = val = min(val, buffer[tid + 256]); __syncthreads(); }
if (CTA_SIZE_ >= 256) { if (tid < 128) buffer[tid] = val = min(val, buffer[tid + 128]); __syncthreads(); }
if (CTA_SIZE_ >= 128) { if (tid < 64) buffer[tid] = val = min(val, buffer[tid + 64]); __syncthreads(); }
if (tid < 32){
if (CTA_SIZE_ >= 64) { buffer[tid] = val = min(val, buffer[tid + 32]); }
if (CTA_SIZE_ >= 32) { buffer[tid] = val = min(val, buffer[tid + 16]); }
if (CTA_SIZE_ >= 16) { buffer[tid] = val = min(val, buffer[tid + 8]); }
if (CTA_SIZE_ >= 8) { buffer[tid] = val = min(val, buffer[tid + 4]); }
if (CTA_SIZE_ >= 4) { buffer[tid] = val = min(val, buffer[tid + 2]); }
if (CTA_SIZE_ >= 2) { buffer[tid] = val = min(val, buffer[tid + 1]); }
}
}
template<int CTA_SIZE_, typename T>
static __device__ __forceinline__ void reduceMax(volatile T* buffer)
{
int tid = dfusion::Block::flattenedThreadId();
T val = buffer[tid];
if (CTA_SIZE_ >= 1024) { if (tid < 512) buffer[tid] = val = max(val, buffer[tid + 512]); __syncthreads(); }
if (CTA_SIZE_ >= 512) { if (tid < 256) buffer[tid] = val = max(val, buffer[tid + 256]); __syncthreads(); }
if (CTA_SIZE_ >= 256) { if (tid < 128) buffer[tid] = val = max(val, buffer[tid + 128]); __syncthreads(); }
if (CTA_SIZE_ >= 128) { if (tid < 64) buffer[tid] = val = max(val, buffer[tid + 64]); __syncthreads(); }
if (tid < 32){
if (CTA_SIZE_ >= 64) { buffer[tid] = val = max(val, buffer[tid + 32]); }
if (CTA_SIZE_ >= 32) { buffer[tid] = val = max(val, buffer[tid + 16]); }
if (CTA_SIZE_ >= 16) { buffer[tid] = val = max(val, buffer[tid + 8]); }
if (CTA_SIZE_ >= 8) { buffer[tid] = val = max(val, buffer[tid + 4]); }
if (CTA_SIZE_ >= 4) { buffer[tid] = val = max(val, buffer[tid + 2]); }
if (CTA_SIZE_ >= 2) { buffer[tid] = val = max(val, buffer[tid + 1]); }
}
}
// ==================================================================
__global__ void
createScaleDepth (const PtrStepSz<float> depth, PtrStep<float> scaled, const dfusion::Intr intr)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
float Dp = depth.ptr (y)[x];
float xl = (x - intr.cx) / intr.fx;
float yl = (y - intr.cy) / intr.fy;
float lambda = sqrtf (xl * xl + yl * yl + 1);
scaled.ptr (y)[x] = Dp * lambda;
}
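// Note: lambda = sqrt(xl*xl + yl*yl + 1) converts the z-buffer depth Dp into
// the distance along the viewing ray through pixel (x, y); at the principal
// point (xl = yl = 0) the value is unchanged since lambda = 1.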
// ==================================================================
struct VoxelBlockUpdater{
enum{
CTA_SIZE = BLOCK_DIM * BLOCK_DIM * BLOCK_DIM
};
PtrStepSz<float> depth; // the scaled depth
dfusion::Intr intr; // intrinsic parameters of camera
dfusion::Mat33 Rw2c; // world to camera
float3 tw2c;
float block_size; // edge length of the block cube, voxel_size * BLOCK_DIM
float voxel_size; // edge length of the voxel cube
float trunc_dist; // truncation distance
unsigned char max_weight; // max weight
PtrSz<HashEntry> visible_hash_entry; // the hash table, organized in sequence of each entry
mutable PtrSz<VoxelBlock> voxel_block; // the array of voxel block
mutable PtrSz<unsigned char> delete_hash_entry; // label whether this hash entry should be deleted
float abs_tsdf_thre; // if the minimum abs tsdf in this block is bigger than this threshold, delete this entry
__device__ __forceinline__ void operator () () const {
// calculate coordinate of this voxel, one thread each voxel
int voxel_x = threadIdx.x;
int voxel_y = threadIdx.y;
int voxel_z = threadIdx.z;
int voxel_idx = (voxel_z * BLOCK_DIM + voxel_y) * BLOCK_DIM + voxel_x;
// coordinate of the voxel block, one CUDA block each voxel block
int block_x = visible_hash_entry[blockIdx.x].position[0];
int block_y = visible_hash_entry[blockIdx.x].position[1];
int block_z = visible_hash_entry[blockIdx.x].position[2];
int block_idx = visible_hash_entry[blockIdx.x].pointer;
// copy tsdf and weight to shared memory
__shared__ float tsdf[CTA_SIZE];
__shared__ unsigned char weight[CTA_SIZE];
tsdf[voxel_idx] = voxel_block[block_idx].voxel[voxel_idx].sdf;
weight[voxel_idx] = voxel_block[block_idx].voxel[voxel_idx].weight;
__syncthreads ();
// world coordinate
float3 xyz = make_float3(
block_x * block_size + (voxel_x + 0.5f) * voxel_size,
block_y * block_size + (voxel_y + 0.5f) * voxel_size,
block_z * block_size + (voxel_z + 0.5f) * voxel_size );
// transform to camera coordinate
xyz = Rw2c * xyz + tw2c;
// project the point onto screen
float3 uvd = intr.xyz2uvd(xyz);
int2 ukr;
ukr.x = __float2int_rn (uvd.x);
ukr.y = __float2int_rn (uvd.y);
// if this voxel is in the view frustum
if (ukr.x >= 0 && ukr.y >= 0 && ukr.x < depth.cols && ukr.y < depth.rows){
// calculate signed distance function
float depthVal = depth(ukr.y, ukr.x) * 0.001f;
float3 dxyz = intr.uvd2xyz(make_float3(ukr.x, ukr.y, depthVal));
float sdf = xyz.z - dxyz.z;
// if the projected point has a depth value and this voxel can be updated
if (depthVal > 0.001f && sdf >= -trunc_dist) // meters
{
float _tsdf = min (1.0f, sdf / trunc_dist); // range -1 to +1, negative means behind observed depth
float tsdf_prev = tsdf[voxel_idx];
int weight_prev = weight[voxel_idx];
//int Wrk = xyz.z>2.5f ? 1.0f : (3.0f - xyz.z)/0.5f;
int Wrk = (3.5f - xyz.z)/0.5f;
if( Wrk > 0 ){
float tsdf_new = (tsdf_prev * weight_prev + Wrk * _tsdf) / (weight_prev + Wrk);
unsigned char weight_new = min (weight_prev + Wrk, max_weight);
tsdf[voxel_idx] = tsdf_new;
weight[voxel_idx] = weight_new;
}
}
}
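// Illustrative update (numbers hypothetical): a voxel seen at camera depth
// xyz.z = 1.5 gets Wrk = (3.5 - 1.5) / 0.5 = 4, so a previous (tsdf, weight)
// of (0.8, 4) fused with a new sample _tsdf = 0.0 becomes
// ((0.8 * 4 + 4 * 0.0) / (4 + 4), 4 + 4) = (0.4, 8): a weighted running
// average that gives closer observations more influence.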
// write tsdf and weight to voxel block
voxel_block[block_idx].voxel[voxel_idx].sdf = tsdf[voxel_idx];
voxel_block[block_idx].voxel[voxel_idx].weight = weight[voxel_idx];
// calculate max weight and min abs tsdf
tsdf[voxel_idx] = fabsf( tsdf[voxel_idx] );
__syncthreads ();
reduceMin<CTA_SIZE>(tsdf);
reduceMax<CTA_SIZE>(weight);
// in the first thread, check whether this block should be deleted
if( voxel_idx == 0 ){
if( weight[0] == 0 || tsdf[0] > abs_tsdf_thre )
delete_hash_entry[blockIdx.x] = 1;
else
delete_hash_entry[blockIdx.x] = 0;
}
}
};
__global__ void updateVoxelBlockKernel( const VoxelBlockUpdater updater ){
updater();
}
// ==================================================================
struct VoxelBlockDeleter : public VoxelBlockHashTable{
int visible_hash_entry_number;
PtrSz<HashEntry> visible_hash_entry;
PtrSz<unsigned char> delete_hash_entry;
__device__ __forceinline__ void operator() () const{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if( idx >= visible_hash_entry_number )
return;
if( delete_hash_entry[idx] ){
int X = visible_hash_entry[idx].position[0];
int Y = visible_hash_entry[idx].position[1];
int Z = visible_hash_entry[idx].position[2];
DeleteHashEntryStaggered(X, Y, Z);
}
}
};
__global__ void deleteVoxelBlockKernel( const VoxelBlockDeleter deleter ){
deleter();
}
// ==================================================================
void updateVoxelBlock(
const PtrStepSz<float>& depth,
const dfusion::Intr& intr,
const dfusion::Mat33& Rw2c,
const float3& tw2c,
float block_size,
float voxel_size,
float trunc_dist,
DeviceArray<HashEntry>& hash_entry,
int bucket_size,
DeviceArray<unsigned int>& hash_bucket_atomic_lock,
DeviceArray<VoxelBlock>& voxel_block,
DeviceArray<int>& available_voxel_block,
DeviceArray<int>& hash_parameters,
const DeviceArray<HashEntry>& visible_hash_entry,
int visible_hash_entry_number,
DeviceArray<unsigned char> delete_hash_entry,
float abs_tsdf_thre )
{
if (visible_hash_entry_number == 0)
return;
// update each voxel in voxel block
VoxelBlockUpdater updater;
updater.depth = depth;
updater.intr = intr;
updater.Rw2c = Rw2c;
updater.tw2c = tw2c;
updater.block_size = block_size;
updater.voxel_size = voxel_size;
updater.trunc_dist = trunc_dist;
updater.max_weight = 128;
updater.visible_hash_entry = visible_hash_entry;
updater.voxel_block = voxel_block;
updater.delete_hash_entry = delete_hash_entry;
updater.abs_tsdf_thre = abs_tsdf_thre;
dim3 block (BLOCK_DIM, BLOCK_DIM, BLOCK_DIM);
dim3 grid (visible_hash_entry_number);
hipLaunchKernelGGL(( updateVoxelBlockKernel), dim3(grid), dim3(block), 0, 0, updater );
// setup block bucket atomic lock
int threadPerBlock = 256;
int blocksPerGrid = divUp(hash_bucket_atomic_lock.size(), threadPerBlock);
hipLaunchKernelGGL(( initHashBucketAtomicLock), dim3(blocksPerGrid), dim3(threadPerBlock), 0, 0, hash_bucket_atomic_lock);
cudaSafeCall(hipGetLastError(), "updateVoxelBlock::initHashBucketAtomicLock");
// delete hash entries that has been marked to delete
VoxelBlockDeleter deleter;
deleter.hash_table_size = hash_entry.size() / bucket_size;
deleter.bucket_size = bucket_size;
deleter.hash_entry = hash_entry;
deleter.hash_bucket_atomic_lock = hash_bucket_atomic_lock;
deleter.available_voxel_block = available_voxel_block;
deleter.hash_parameters = hash_parameters;
deleter.visible_hash_entry_number = visible_hash_entry_number;
deleter.visible_hash_entry = visible_hash_entry;
deleter.delete_hash_entry = delete_hash_entry;
threadPerBlock = 256;
blocksPerGrid = divUp(visible_hash_entry_number, threadPerBlock);
hipLaunchKernelGGL(( deleteVoxelBlockKernel), dim3(blocksPerGrid), dim3(threadPerBlock), 0, 0, deleter );
cudaSafeCall(hipGetLastError(), "updateVoxelBlock::deleteVoxelBlockKernel");
}
| abe2d0b262bb9a8410b19410f8eb48ff742a354e.cu | /***********************************************************/
/** \file
\brief update voxel block (integrate)
\details
\author Yizhong Zhang
\date 12/9/2013
*/
/***********************************************************/
#include "device_utils.h"
#include "voxel_hashing_device.h"
#include "voxel_hashing_internal.h"
#include "voxel_block_hash_table.cuh"
#include "kernel_containers.h"
// ==================================================================
template<int CTA_SIZE_, typename T>
static __device__ __forceinline__ void reduceMin(volatile T* buffer)
{
int tid = dfusion::Block::flattenedThreadId();
T val = buffer[tid];
if (CTA_SIZE_ >= 1024) { if (tid < 512) buffer[tid] = val = min(val, buffer[tid + 512]); __syncthreads(); }
if (CTA_SIZE_ >= 512) { if (tid < 256) buffer[tid] = val = min(val, buffer[tid + 256]); __syncthreads(); }
if (CTA_SIZE_ >= 256) { if (tid < 128) buffer[tid] = val = min(val, buffer[tid + 128]); __syncthreads(); }
if (CTA_SIZE_ >= 128) { if (tid < 64) buffer[tid] = val = min(val, buffer[tid + 64]); __syncthreads(); }
if (tid < 32){
if (CTA_SIZE_ >= 64) { buffer[tid] = val = min(val, buffer[tid + 32]); }
if (CTA_SIZE_ >= 32) { buffer[tid] = val = min(val, buffer[tid + 16]); }
if (CTA_SIZE_ >= 16) { buffer[tid] = val = min(val, buffer[tid + 8]); }
if (CTA_SIZE_ >= 8) { buffer[tid] = val = min(val, buffer[tid + 4]); }
if (CTA_SIZE_ >= 4) { buffer[tid] = val = min(val, buffer[tid + 2]); }
if (CTA_SIZE_ >= 2) { buffer[tid] = val = min(val, buffer[tid + 1]); }
}
}
template<int CTA_SIZE_, typename T>
static __device__ __forceinline__ void reduceMax(volatile T* buffer)
{
int tid = dfusion::Block::flattenedThreadId();
T val = buffer[tid];
if (CTA_SIZE_ >= 1024) { if (tid < 512) buffer[tid] = val = max(val, buffer[tid + 512]); __syncthreads(); }
if (CTA_SIZE_ >= 512) { if (tid < 256) buffer[tid] = val = max(val, buffer[tid + 256]); __syncthreads(); }
if (CTA_SIZE_ >= 256) { if (tid < 128) buffer[tid] = val = max(val, buffer[tid + 128]); __syncthreads(); }
if (CTA_SIZE_ >= 128) { if (tid < 64) buffer[tid] = val = max(val, buffer[tid + 64]); __syncthreads(); }
if (tid < 32){
if (CTA_SIZE_ >= 64) { buffer[tid] = val = max(val, buffer[tid + 32]); }
if (CTA_SIZE_ >= 32) { buffer[tid] = val = max(val, buffer[tid + 16]); }
if (CTA_SIZE_ >= 16) { buffer[tid] = val = max(val, buffer[tid + 8]); }
if (CTA_SIZE_ >= 8) { buffer[tid] = val = max(val, buffer[tid + 4]); }
if (CTA_SIZE_ >= 4) { buffer[tid] = val = max(val, buffer[tid + 2]); }
if (CTA_SIZE_ >= 2) { buffer[tid] = val = max(val, buffer[tid + 1]); }
}
}
// ==================================================================
__global__ void
createScaleDepth (const PtrStepSz<float> depth, PtrStep<float> scaled, const dfusion::Intr intr)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
float Dp = depth.ptr (y)[x];
float xl = (x - intr.cx) / intr.fx;
float yl = (y - intr.cy) / intr.fy;
float lambda = sqrtf (xl * xl + yl * yl + 1);
scaled.ptr (y)[x] = Dp * lambda;
}
// ==================================================================
struct VoxelBlockUpdater{
enum{
CTA_SIZE = BLOCK_DIM * BLOCK_DIM * BLOCK_DIM
};
PtrStepSz<float> depth; // the scaled depth
dfusion::Intr intr; // intrinsic parameters of camera
dfusion::Mat33 Rw2c; // world to camera
float3 tw2c;
float block_size; // edge length of the block cube, voxel_size * BLOCK_DIM
float voxel_size; // edge length of the voxel cube
float trunc_dist; // truncation distance
unsigned char max_weight; // max weight
PtrSz<HashEntry> visible_hash_entry; // the hash table, organized in sequence of each entry
mutable PtrSz<VoxelBlock> voxel_block; // the array of voxel block
mutable PtrSz<unsigned char> delete_hash_entry; // label whether this hash entry should be deleted
float abs_tsdf_thre; // if the minimum abs tsdf in this block is bigger than this threshold, delete this entry
__device__ __forceinline__ void operator () () const {
// calculate coordinate of this voxel, one thread each voxel
int voxel_x = threadIdx.x;
int voxel_y = threadIdx.y;
int voxel_z = threadIdx.z;
int voxel_idx = (voxel_z * BLOCK_DIM + voxel_y) * BLOCK_DIM + voxel_x;
// coordinate of the voxel block, one CUDA block each voxel block
int block_x = visible_hash_entry[blockIdx.x].position[0];
int block_y = visible_hash_entry[blockIdx.x].position[1];
int block_z = visible_hash_entry[blockIdx.x].position[2];
int block_idx = visible_hash_entry[blockIdx.x].pointer;
// copy tsdf and weight to shared memory
__shared__ float tsdf[CTA_SIZE];
__shared__ unsigned char weight[CTA_SIZE];
tsdf[voxel_idx] = voxel_block[block_idx].voxel[voxel_idx].sdf;
weight[voxel_idx] = voxel_block[block_idx].voxel[voxel_idx].weight;
__syncthreads ();
// world coordinate
float3 xyz = make_float3(
block_x * block_size + (voxel_x + 0.5f) * voxel_size,
block_y * block_size + (voxel_y + 0.5f) * voxel_size,
block_z * block_size + (voxel_z + 0.5f) * voxel_size );
// transform to camera coordinate
xyz = Rw2c * xyz + tw2c;
// project the point onto screen
float3 uvd = intr.xyz2uvd(xyz);
int2 ukr;
ukr.x = __float2int_rn (uvd.x);
ukr.y = __float2int_rn (uvd.y);
// if this voxel is in the view frustum
if (ukr.x >= 0 && ukr.y >= 0 && ukr.x < depth.cols && ukr.y < depth.rows){
// calculate signed distance function
float depthVal = depth(ukr.y, ukr.x) * 0.001f;
float3 dxyz = intr.uvd2xyz(make_float3(ukr.x, ukr.y, depthVal));
float sdf = xyz.z - dxyz.z;
// if the projected point has a depth value and this voxel can be updated
if (depthVal > 0.001f && sdf >= -trunc_dist) // meters
{
float _tsdf = min (1.0f, sdf / trunc_dist); // range -1 to +1, negative means behind observed depth
float tsdf_prev = tsdf[voxel_idx];
int weight_prev = weight[voxel_idx];
//int Wrk = xyz.z>2.5f ? 1.0f : (3.0f - xyz.z)/0.5f;
int Wrk = (3.5f - xyz.z)/0.5f;
if( Wrk > 0 ){
float tsdf_new = (tsdf_prev * weight_prev + Wrk * _tsdf) / (weight_prev + Wrk);
unsigned char weight_new = min (weight_prev + Wrk, max_weight);
tsdf[voxel_idx] = tsdf_new;
weight[voxel_idx] = weight_new;
}
}
}
// write tsdf and weight to voxel block
voxel_block[block_idx].voxel[voxel_idx].sdf = tsdf[voxel_idx];
voxel_block[block_idx].voxel[voxel_idx].weight = weight[voxel_idx];
// calculate max weight and min abs tsdf
tsdf[voxel_idx] = fabsf( tsdf[voxel_idx] );
__syncthreads ();
reduceMin<CTA_SIZE>(tsdf);
reduceMax<CTA_SIZE>(weight);
// in the first thread, check whether this block should be deleted
if( voxel_idx == 0 ){
if( weight[0] == 0 || tsdf[0] > abs_tsdf_thre )
delete_hash_entry[blockIdx.x] = 1;
else
delete_hash_entry[blockIdx.x] = 0;
}
}
};
__global__ void updateVoxelBlockKernel( const VoxelBlockUpdater updater ){
updater();
}
// ==================================================================
struct VoxelBlockDeleter : public VoxelBlockHashTable{
int visible_hash_entry_number;
PtrSz<HashEntry> visible_hash_entry;
PtrSz<unsigned char> delete_hash_entry;
__device__ __forceinline__ void operator() () const{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if( idx >= visible_hash_entry_number )
return;
if( delete_hash_entry[idx] ){
int X = visible_hash_entry[idx].position[0];
int Y = visible_hash_entry[idx].position[1];
int Z = visible_hash_entry[idx].position[2];
DeleteHashEntryStaggered(X, Y, Z);
}
}
};
__global__ void deleteVoxelBlockKernel( const VoxelBlockDeleter deleter ){
deleter();
}
// ==================================================================
void updateVoxelBlock(
const PtrStepSz<float>& depth,
const dfusion::Intr& intr,
const dfusion::Mat33& Rw2c,
const float3& tw2c,
float block_size,
float voxel_size,
float trunc_dist,
DeviceArray<HashEntry>& hash_entry,
int bucket_size,
DeviceArray<unsigned int>& hash_bucket_atomic_lock,
DeviceArray<VoxelBlock>& voxel_block,
DeviceArray<int>& available_voxel_block,
DeviceArray<int>& hash_parameters,
const DeviceArray<HashEntry>& visible_hash_entry,
int visible_hash_entry_number,
DeviceArray<unsigned char> delete_hash_entry,
float abs_tsdf_thre )
{
if (visible_hash_entry_number == 0)
return;
// update each voxel in voxel block
VoxelBlockUpdater updater;
updater.depth = depth;
updater.intr = intr;
updater.Rw2c = Rw2c;
updater.tw2c = tw2c;
updater.block_size = block_size;
updater.voxel_size = voxel_size;
updater.trunc_dist = trunc_dist;
updater.max_weight = 128;
updater.visible_hash_entry = visible_hash_entry;
updater.voxel_block = voxel_block;
updater.delete_hash_entry = delete_hash_entry;
updater.abs_tsdf_thre = abs_tsdf_thre;
dim3 block (BLOCK_DIM, BLOCK_DIM, BLOCK_DIM);
dim3 grid (visible_hash_entry_number);
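	// one CUDA block of BLOCK_DIM^3 threads per visible voxel block,
	// i.e. one thread per voxel of that block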
updateVoxelBlockKernel<<<grid, block>>>( updater );
// setup block bucket atomic lock
int threadPerBlock = 256;
int blocksPerGrid = divUp(hash_bucket_atomic_lock.size(), threadPerBlock);
initHashBucketAtomicLock << <blocksPerGrid, threadPerBlock >> >(hash_bucket_atomic_lock);
cudaSafeCall(cudaGetLastError(), "updateVoxelBlock::initHashBucketAtomicLock");
	// delete hash entries that have been marked for deletion
VoxelBlockDeleter deleter;
deleter.hash_table_size = hash_entry.size() / bucket_size;
deleter.bucket_size = bucket_size;
deleter.hash_entry = hash_entry;
deleter.hash_bucket_atomic_lock = hash_bucket_atomic_lock;
deleter.available_voxel_block = available_voxel_block;
deleter.hash_parameters = hash_parameters;
deleter.visible_hash_entry_number = visible_hash_entry_number;
deleter.visible_hash_entry = visible_hash_entry;
deleter.delete_hash_entry = delete_hash_entry;
threadPerBlock = 256;
blocksPerGrid = divUp(visible_hash_entry_number, threadPerBlock);
deleteVoxelBlockKernel<<<blocksPerGrid, threadPerBlock>>>( deleter );
cudaSafeCall(cudaGetLastError(), "updateVoxelBlock::deleteVoxelBlockKernel");
}
|
b7aa35060550d3c8f63b13942f6386b2a80ca244.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 100000;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
// Copy host vectors to device
hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
    // 32 x 32 if it were square...
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
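    // e.g. n = 100000 with blockSize = 1024 gives gridSize = 98; the last
    // block is only partially used, guarded by the bounds check in vecAdd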
// Execute the kernel
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
// Copy array back to host
hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
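    // (each c[i] is sin(i)*sin(i) + cos(i)*cos(i) = 1, so sum/n tends to 1)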
double sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
// Release device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
} | b7aa35060550d3c8f63b13942f6386b2a80ca244.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 100000;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
// Copy host vectors to device
cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
    // 32 x 32 if it were square...
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
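    // e.g. n = 100000 with blockSize = 1024 gives gridSize = 98; the last
    // block is only partially used, guarded by the bounds check in vecAdd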
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
// Copy array back to host
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
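    // (each c[i] is sin(i)*sin(i) + cos(i)*cos(i) = 1, so sum/n tends to 1)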
double sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
} |
a226c381b555235ee1101bd9209ed686b2c12e83.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <device_launch_parameters.h>
#include <torch/extension.h>
#include "sparse_kernels.h"
#include "sparse_layout.cuh"
#define USE_VERY_OPTIMIZED_KERNEL
/**
* Compute sparse matrix multiplication with SDD mode.
*
 * It multiplies a dense matrix with another dense matrix and creates a new
 * sparse matrix through the corresponding sparse layout.
*
* Blocks : (Sparse Blocks, Total Batches)
* Threads per Block : 256
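 *
 * blockIdx.x selects one 32x32 block of the sparse layout and blockIdx.y the
 * batch index; each of the 256 threads accumulates a 2x2 sub-tile of the
 * output block.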
*/
#ifdef USE_32x8_TILING
template <bool trans_a, bool trans_b>
__global__ void __launch_bounds__(256, 8) sparse_matmul_sdd_32x32x8_kernel(
const float* __restrict__ matrix_a,
const float* __restrict__ matrix_b,
float* __restrict__ matrix_c,
sparse_layout layout, uint num_blocks,
uint size_m, uint size_n, uint size_k
) {
/******** Define shared memory ********/
constexpr int TILE_SIZE = 32 * 8;
constexpr int PADDING = 8;
__shared__ float tile_a[2][(TILE_SIZE + PADDING + 32 - 1) / 32 * 32];
__shared__ float tile_b[2][(TILE_SIZE + PADDING + 32 - 1) / 32 * 32];
/******** Fetch sparse block descriptor ********/
auto block = layout.get(blockIdx.x);
int m = block.row() * 32;
int n = block.col() * 32;
    /******** Define accumulator and warp information ********/
float accum[2][2] = { { 0.0f, 0.0f }, { 0.0f, 0.0f } };
int tid = threadIdx.x;
/******** Prefetch first tiles ********/
int load_a = blockIdx.y * size_m * size_k;
int load_b = blockIdx.y * size_k * size_n;
float buffer_a = matrix_a[
load_a
+ ((trans_a ? 0 : m) + (trans_a ? tid / 32 : tid / 8)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : 0) + (trans_a ? tid % 32 : tid % 8))
];
float buffer_b = matrix_b[
load_b
+ ((trans_b ? n : 0) + (trans_b ? tid / 8 : tid / 32)) * (trans_b ? size_k : size_m)
+ ((trans_b ? 0 : n) + (trans_b ? tid % 8 : tid % 32))
];
/******** Iterate over k-dim ********/
#pragma unroll 1
for (int k = 0; k < size_k; k += 8) {
int page = k / 8 % 2;
int next_k = k + 8;
/******** Commit the prefetched buffers to the shared memory ********/
tile_a[page][(trans_a ? tid % 32 : tid / 8) * 8 + (trans_a ? tid / 32 : tid % 8) + (trans_a ? tid % 32 / 4 : tid / 32)] = buffer_a;
tile_b[page][(trans_b ? tid / 8 : tid % 32) * 8 + (trans_b ? tid % 8 : tid / 32) + (trans_b ? tid / 32 : tid % 32 / 4)] = buffer_b;
__syncthreads();
/******** Prefetch next tiles if available ********/
if (next_k < size_k) {
buffer_a = matrix_a[
load_a
+ ((trans_a ? next_k : m) + (trans_a ? tid / 32 : tid / 8)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : next_k) + (trans_a ? tid % 32 : tid % 8))
];
buffer_b = matrix_b[
load_b
+ ((trans_b ? n : next_k) + (trans_b ? tid / 8 : tid / 32)) * (trans_b ? size_k : size_m)
+ ((trans_b ? next_k : n) + (trans_b ? tid % 8 : tid % 32))
];
}
/******** Accumulate tile matmul by using register file ********/
#pragma unroll
for (int i = 0; i < 8; ++ i) {
float local_a[2], local_b[2];
local_a[0] = tile_a[page][(tid / 16 * 2 + 0) * 8 + i + (tid / 32)];
local_a[1] = tile_a[page][(tid / 16 * 2 + 1) * 8 + i + (tid / 32)];
local_b[0] = tile_b[page][(tid % 16 * 2 + 0) * 8 + i + (tid % 16 / 2)];
local_b[1] = tile_b[page][(tid % 16 * 2 + 1) * 8 + i + (tid % 16 / 2)];
accum[0][0] += local_a[0] * local_b[0];
accum[0][1] += local_a[0] * local_b[1];
accum[1][0] += local_a[1] * local_b[0];
accum[1][1] += local_a[1] * local_b[1];
}
}
/******** Apply accumulation to output matrix ********/
int load_c = (blockIdx.y * num_blocks + block.idx()) * 32 * 32;
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 0)] = accum[0][0];
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 1)] = accum[0][1];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 0)] = accum[1][0];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 1)] = accum[1][1];
}
#endif
#ifdef USE_8x32_TILING
template <bool trans_a, bool trans_b>
__global__ void __launch_bounds__(256, 8) sparse_matmul_sdd_32x32x8_kernel(
const float* __restrict__ matrix_a,
const float* __restrict__ matrix_b,
float* __restrict__ matrix_c,
sparse_layout layout, uint num_blocks,
uint size_m, uint size_n, uint size_k
) {
/******** Define shared memory ********/
constexpr int TILE_SIZE = 8 * 32;
constexpr int PADDING = 8;
__shared__ float tile_a[2][(TILE_SIZE + PADDING + 32 - 1) / 32 * 32];
__shared__ float tile_b[2][(TILE_SIZE + PADDING + 32 - 1) / 32 * 32];
/******** Fetch sparse block descriptor ********/
auto block = layout.get(blockIdx.x);
int m = block.row() * 32;
int n = block.col() * 32;
    /******** Define accumulator and warp information ********/
float accum[2][2] = { { 0.0f, 0.0f }, { 0.0f, 0.0f } };
int tid = threadIdx.x;
/******** Prefetch first tiles ********/
int load_a = blockIdx.y * size_m * size_k;
int load_b = blockIdx.y * size_k * size_n;
float buffer_a = matrix_a[
load_a
+ ((trans_a ? 0 : m) + (trans_a ? tid / 32 : tid / 8 % 4 * 8 + tid / 32)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : 0) + (trans_a ? tid % 32 : tid % 8))
];
float buffer_b = matrix_b[
load_b
+ ((trans_b ? n : 0) + (trans_b ? tid / 8 % 4 * 8 + tid / 32 : tid / 32)) * (trans_b ? size_k : size_m)
+ ((trans_b ? 0 : n) + (trans_b ? tid % 8 : tid % 32))
];
/******** Iterate over k-dim ********/
#pragma unroll 1
for (int k = 0; k < size_k; k += 8) {
int page = k / 8 % 2;
int next_k = k + 8;
/******** Commit the prefetched buffers to the shared memory ********/
tile_a[page][(trans_a ? tid / 32 : tid % 8) * (32 + 1) + (trans_a ? tid % 32 : tid / 8 % 4 * 8 + tid / 32)] = buffer_a;
tile_b[page][(trans_b ? tid % 8 : tid / 32) * (32 + 1) + (trans_b ? tid / 8 % 4 * 8 + tid / 32 : tid % 32)] = buffer_b;
__syncthreads();
/******** Prefetch next tiles if available ********/
if (next_k < size_k) {
buffer_a = matrix_a[
load_a
+ ((trans_a ? next_k : m) + (trans_a ? tid / 32 : tid / 8 % 4 * 8 + tid / 32)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : next_k) + (trans_a ? tid % 32 : tid % 8))
];
buffer_b = matrix_b[
load_b
+ ((trans_b ? n : next_k) + (trans_b ? tid / 8 % 4 * 8 + tid / 32 : tid / 32)) * (trans_b ? size_k : size_m)
+ ((trans_b ? next_k : n) + (trans_b ? tid % 8 : tid % 32))
];
}
/******** Accumulate tile matmul by using register file ********/
#pragma unroll
for (int i = 0; i < 8; ++ i) {
float local_a[2], local_b[2];
local_a[0] = tile_a[page][i * (32 + 1) + (tid / 16 * 2 + 0)];
local_a[1] = tile_a[page][i * (32 + 1) + (tid / 16 * 2 + 1)];
local_b[0] = tile_b[page][i * (32 + 1) + (tid % 16 * 2 + 0)];
local_b[1] = tile_b[page][i * (32 + 1) + (tid % 16 * 2 + 1)];
accum[0][0] += local_a[0] * local_b[0];
accum[0][1] += local_a[0] * local_b[1];
accum[1][0] += local_a[1] * local_b[0];
accum[1][1] += local_a[1] * local_b[1];
}
}
/******** Apply accumulation to output matrix ********/
int load_c = (blockIdx.y * num_blocks + block.idx()) * 32 * 32;
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 0)] = accum[0][0];
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 1)] = accum[0][1];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 0)] = accum[1][0];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 1)] = accum[1][1];
}
#endif
#ifdef USE_32x32_TILING
template <bool trans_a, bool trans_b>
__global__ void __launch_bounds__(256, 3) sparse_matmul_sdd_32x32x8_kernel(
const float* __restrict__ matrix_a,
const float* __restrict__ matrix_b,
float* __restrict__ matrix_c,
sparse_layout layout, uint num_blocks,
uint size_m, uint size_n, uint size_k
//bool trans_a, bool trans_b
) {
/******** Define shared memory ********/
__shared__ float tile_a[2 * 32 * (32 + 1)];
__shared__ float tile_b[2 * 32 * (32 + 1)];
/******** Fetch sparse block descriptor ********/
auto block = layout.get(blockIdx.x);
int m = block.row() * 32;
int n = block.col() * 32;
    /******** Define accumulator and warp information ********/
float accum[2][2] = { { 0.0f, 0.0f }, { 0.0f, 0.0f } };
int tid = threadIdx.x;
/******** Prefetch first tiles ********/
int load_a = blockIdx.y * size_m * size_k;
int load_b = blockIdx.y * size_k * size_n;
float buffer_a[4], buffer_b[4];
#pragma unroll
for (int i = 0; i < 4; ++ i) {
buffer_a[i] = matrix_a[
load_a
+ ((trans_a ? 0 : m) + (tid / 32 + i * 8)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : 0) + (tid % 32))
];
buffer_b[i] = matrix_b[
load_b
+ ((trans_b ? n : 0) + (tid / 32 + i * 8)) * (trans_b ? size_k : size_m)
+ ((trans_b ? 0 : n) + (tid % 32))
];
}
/******** Iterate over k-dim ********/
#pragma unroll 1
for (int k = 0; k < size_k; k += 32) {
int page = k / 32 % 2;
int next_k = k + 32;
/******** Commit the prefetched buffers to the shared memory ********/
#pragma unroll
for (int i = 0; i < 4; ++ i) {
tile_a[page * 32 * (32 + 1) + (trans_a ? tid % 32 : tid / 32 + i * 8) * (32 + 1) + (trans_a ? tid / 32 + i * 8 : tid % 32)] = buffer_a[i];
tile_b[page * 32 * (32 + 1) + (trans_b ? tid / 32 + i * 8 : tid % 32) * (32 + 1) + (trans_b ? tid % 32 : tid / 32 + i * 8)] = buffer_b[i];
}
__syncthreads();
/******** Prefetch next tiles if available ********/
if (next_k < size_k) {
#pragma unroll
for (int i = 0; i < 4; ++ i) {
buffer_a[i] = matrix_a[
load_a
+ ((trans_a ? next_k : m) + (tid / 32 + i * 8)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : next_k) + (tid % 32))
];
buffer_b[i] = matrix_b[
load_b
+ ((trans_b ? n : next_k) + (tid / 32 + i * 8)) * (trans_b ? size_k : size_m)
+ ((trans_b ? next_k : n) + (tid % 32))
];
}
}
/******** Accumulate tile matmul by using register file ********/
#pragma unroll
for (int i = 0; i < 32; ++ i) {
float local_a[2], local_b[2];
local_a[0] = tile_a[page * 32 * (32 + 1) + (tid / 16 * 2 + 0) * (32 + 1) + i];
local_a[1] = tile_a[page * 32 * (32 + 1) + (tid / 16 * 2 + 1) * (32 + 1) + i];
local_b[0] = tile_b[page * 32 * (32 + 1) + (tid % 16 * 2 + 0) * (32 + 1) + i];
local_b[1] = tile_b[page * 32 * (32 + 1) + (tid % 16 * 2 + 1) * (32 + 1) + i];
accum[0][0] += local_a[0] * local_b[0];
accum[0][1] += local_a[0] * local_b[1];
accum[1][0] += local_a[1] * local_b[0];
accum[1][1] += local_a[1] * local_b[1];
}
}
/******** Apply accumulation to output matrix ********/
int load_c = (blockIdx.y * num_blocks + block.idx()) * 32 * 32;
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 0)] = accum[0][0];
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 1)] = accum[0][1];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 0)] = accum[1][0];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 1)] = accum[1][1];
}
#endif
#ifdef USE_32x32_TILING_NO_BUFFERING
template <bool trans_a, bool trans_b>
__global__ void sparse_matmul_sdd_32x32x8_kernel(
const float* __restrict__ matrix_a,
const float* __restrict__ matrix_b,
float* __restrict__ matrix_c,
sparse_layout layout, uint num_blocks,
uint size_m, uint size_n, uint size_k
//bool trans_a, bool trans_b
) {
/******** Define shared memory ********/
__shared__ float tile_a[32 * (32 + 1)];
__shared__ float tile_b[32 * (32 + 1)];
/******** Fetch sparse block descriptor ********/
auto block = layout.get(blockIdx.x);
int m = block.row() * 32;
int n = block.col() * 32;
    /******** Define accumulator and warp information ********/
float accum[2][2] = { { 0.0f, 0.0f }, { 0.0f, 0.0f } };
int tid = threadIdx.x;
/******** Prefetch first tiles ********/
int load_a = blockIdx.y * size_m * size_k;
int load_b = blockIdx.y * size_k * size_n;
float buffer_a[4], buffer_b[4];
#pragma unroll
for (int i = 0; i < 4; ++ i) {
buffer_a[i] = matrix_a[
load_a
+ ((trans_a ? 0 : m) + (tid / 32 + i * 8)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : 0) + (tid % 32))
];
buffer_b[i] = matrix_b[
load_b
+ ((trans_b ? n : 0) + (tid / 32 + i * 8)) * (trans_b ? size_k : size_m)
+ ((trans_b ? 0 : n) + (tid % 32))
];
}
/******** Iterate over k-dim ********/
#pragma unroll 1
for (int k = 0; k < size_k; k += 32) {
int next_k = k + 32;
/******** Commit the prefetched buffers to the shared memory ********/
__syncthreads();
#pragma unroll
for (int i = 0; i < 4; ++ i) {
tile_a[(trans_a ? tid % 32 : tid / 32 + i * 8) * (32 + 1) + (trans_a ? tid / 32 + i * 8 : tid % 32)] = buffer_a[i];
tile_b[(trans_b ? tid / 32 + i * 8 : tid % 32) * (32 + 1) + (trans_b ? tid % 32 : tid / 32 + i * 8)] = buffer_b[i];
}
__syncthreads();
/******** Prefetch next tiles if available ********/
if (next_k < size_k) {
#pragma unroll
for (int i = 0; i < 4; ++ i) {
buffer_a[i] = matrix_a[
load_a
+ ((trans_a ? next_k : m) + (tid / 32 + i * 8)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : next_k) + (tid % 32))
];
buffer_b[i] = matrix_b[
load_b
+ ((trans_b ? n : next_k) + (tid / 32 + i * 8)) * (trans_b ? size_k : size_m)
+ ((trans_b ? next_k : n) + (tid % 32))
];
}
}
/******** Accumulate tile matmul by using register file ********/
#pragma unroll
for (int i = 0; i < 32; ++ i) {
float local_a[2], local_b[2];
local_a[0] = tile_a[(tid / 16 * 2 + 0) * (32 + 1) + i];
local_a[1] = tile_a[(tid / 16 * 2 + 1) * (32 + 1) + i];
local_b[0] = tile_b[(tid % 16 * 2 + 0) * (32 + 1) + i];
local_b[1] = tile_b[(tid % 16 * 2 + 1) * (32 + 1) + i];
accum[0][0] += local_a[0] * local_b[0];
accum[0][1] += local_a[0] * local_b[1];
accum[1][0] += local_a[1] * local_b[0];
accum[1][1] += local_a[1] * local_b[1];
}
}
/******** Apply accumulation to output matrix ********/
int load_c = (blockIdx.y * num_blocks + block.idx()) * 32 * 32;
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 0)] = accum[0][0];
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 1)] = accum[0][1];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 0)] = accum[1][0];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 1)] = accum[1][1];
}
#endif
#ifdef USE_32x32_TILING_FUSED_COPY
template <bool trans_a, bool trans_b>
__global__ void sparse_matmul_sdd_32x32x8_kernel(
const float* __restrict__ matrix_a,
const float* __restrict__ matrix_b,
float* __restrict__ matrix_c,
sparse_layout layout, uint num_blocks,
uint size_m, uint size_n, uint size_k
//bool trans_a, bool trans_b
) {
/******** Define shared memory ********/
__shared__ float tile_a[32 * (32 + 1)];
__shared__ float tile_b[32 * (32 + 1)];
/******** Fetch sparse block descriptor ********/
auto block = layout.get(blockIdx.x);
int m = block.row() * 32;
int n = block.col() * 32;
    /******** Define accumulator and warp information ********/
float accum[2][2] = { { 0.0f, 0.0f }, { 0.0f, 0.0f } };
int tid = threadIdx.x;
int i = tid / 8;
int j = tid % 8 * 4;
/******** Prefetch first tiles ********/
int load_a = blockIdx.y * size_m * size_k;
int load_b = blockIdx.y * size_k * size_n;
float4 buffer_a, buffer_b;
buffer_a = *(float4 *) &matrix_a[
load_a
+ ((trans_a ? 0 : m) + i) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : 0) + j)
];
buffer_b = *(float4 *) &matrix_b[
load_b
+ ((trans_a ? n : 0) + i) * (trans_a ? size_k : size_n)
+ ((trans_a ? 0 : n) + j)
];
/******** Iterate over k-dim ********/
#pragma unroll 1
for (int k = 0; k < size_k; k += 32) {
int next_k = k + 32;
/******** Commit the prefetched buffers to the shared memory ********/
__syncthreads();
tile_a[(trans_a ? j + 0 : i) * (32 + 1) + (trans_a ? i : j + 0)] = buffer_a.x;
tile_a[(trans_a ? j + 1 : i) * (32 + 1) + (trans_a ? i : j + 1)] = buffer_a.y;
tile_a[(trans_a ? j + 2 : i) * (32 + 1) + (trans_a ? i : j + 2)] = buffer_a.z;
tile_a[(trans_a ? j + 3 : i) * (32 + 1) + (trans_a ? i : j + 3)] = buffer_a.w;
tile_b[(trans_a ? i : j + 0) * (32 + 1) + (trans_a ? j + 0 : i)] = buffer_b.x;
tile_b[(trans_a ? i : j + 1) * (32 + 1) + (trans_a ? j + 1 : i)] = buffer_b.y;
tile_b[(trans_a ? i : j + 2) * (32 + 1) + (trans_a ? j + 2 : i)] = buffer_b.z;
tile_b[(trans_a ? i : j + 3) * (32 + 1) + (trans_a ? j + 3 : i)] = buffer_b.w;
__syncthreads();
/******** Prefetch next tiles if available ********/
if (next_k < size_k) {
buffer_a = *(float4 *) &matrix_a[
load_a
+ ((trans_a ? next_k : m) + tid / 8) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : next_k) + tid % 8 * 4)
];
buffer_b = *(float4 *) &matrix_b[
load_b
+ ((trans_a ? n : next_k) + tid / 8) * (trans_a ? size_k : size_n)
+ ((trans_a ? next_k : n) + tid % 8 * 4)
];
}
/******** Accumulate tile matmul by using register file ********/
#pragma unroll
for (int i = 0; i < 32; ++ i) {
float local_a[2], local_b[2];
local_a[0] = tile_a[(tid / 16 * 2 + 0) * (32 + 1) + i];
local_a[1] = tile_a[(tid / 16 * 2 + 1) * (32 + 1) + i];
local_b[0] = tile_b[(tid % 16 * 2 + 0) * (32 + 1) + i];
local_b[1] = tile_b[(tid % 16 * 2 + 1) * (32 + 1) + i];
accum[0][0] += local_a[0] * local_b[0];
accum[0][1] += local_a[0] * local_b[1];
accum[1][0] += local_a[1] * local_b[0];
accum[1][1] += local_a[1] * local_b[1];
}
}
/******** Apply accumulation to output matrix ********/
int load_c = (blockIdx.y * num_blocks + block.idx()) * 32 * 32;
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 0)] = accum[0][0];
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 1)] = accum[0][1];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 0)] = accum[1][0];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 1)] = accum[1][1];
}
#endif
#ifdef USE_VERY_OPTIMIZED_KERNEL
template <bool tr_a, bool tr_b>
__global__ void sparse_matmul_sdd_32x32x32_kernel(
const float* __restrict__ matrix_a,
const float* __restrict__ matrix_b,
float* __restrict__ matrix_c,
sparse_layout layout, int num_blocks,
int size_m, int size_n, int size_k
) {
float accum[2][2] = { 0 };
float4 buffer_a, buffer_b;
__shared__ float shared_a[32 * 33], shared_b[32 * 33];
// Fetch current block and get corresponding row and column positions.
auto block = layout.get(blockIdx.x);
int m = block.row() * 32;
int n = block.col() * 32;
// Get an offset of each matrix and calculate mapping indices.
int stride_a = tr_a ? size_m : size_k;
int stride_b = tr_b ? size_k : size_n;
int offset_a = blockIdx.y * size_m * size_k + (tr_a ? m : m * size_k);
int offset_b = blockIdx.y * size_k * size_n + (tr_b ? n * size_k : n);
int offset_c = (blockIdx.y * num_blocks + block.idx()) * 32 * 32;
int p = threadIdx.x / 8;
int q = threadIdx.x % 8 * 4;
int r = threadIdx.x / 16 * 2;
int s = threadIdx.x % 16 * 2;
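    // (p, q): row and starting column of the float4 this thread loads into a
    //         32x32 shared tile (256 threads x 4 floats = 1024 elements);
    // (r, s): top-left corner of the 2x2 output sub-tile this thread owns.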
// Prefetch first tiles from matrices in global memory.
buffer_a = *(float4 *) &matrix_a[offset_a + p * stride_a + q];
buffer_b = *(float4 *) &matrix_b[offset_b + p * stride_b + q];
#pragma unroll 1
for (int k = 32; k <= size_k; k += 32) {
// Commit the prefetched tiles to the shared memory storage.
__syncthreads();
shared_a[tr_a ? ((q + 0) * 33 + p) : (p * 33 + (q + 0))] = buffer_a.x;
shared_a[tr_a ? ((q + 1) * 33 + p) : (p * 33 + (q + 1))] = buffer_a.y;
shared_a[tr_a ? ((q + 2) * 33 + p) : (p * 33 + (q + 2))] = buffer_a.z;
shared_a[tr_a ? ((q + 3) * 33 + p) : (p * 33 + (q + 3))] = buffer_a.w;
shared_b[tr_b ? (p * 33 + (q + 0)) : ((q + 0) * 33 + p)] = buffer_b.x;
shared_b[tr_b ? (p * 33 + (q + 1)) : ((q + 1) * 33 + p)] = buffer_b.y;
shared_b[tr_b ? (p * 33 + (q + 2)) : ((q + 2) * 33 + p)] = buffer_b.z;
shared_b[tr_b ? (p * 33 + (q + 3)) : ((q + 3) * 33 + p)] = buffer_b.w;
__syncthreads();
// Prefetch next tiles from matrices in global memory.
if (k < size_k) {
buffer_a = *(float4 *) &matrix_a[offset_a + (tr_a ? k * size_m : k)
+ p * stride_a + q];
buffer_b = *(float4 *) &matrix_b[offset_b + (tr_b ? k : k * size_n)
+ p * stride_b + q];
}
// Accumulate the tiled matrix multiplications by loading sliced vectors
// from the shared memory to local register file.
#pragma unroll
for (int i = 0; i < 32; ++ i) {
float reg_a[2], reg_b[2];
reg_a[0] = shared_a[(r + 0) * 33 + i];
reg_a[1] = shared_a[(r + 1) * 33 + i];
reg_b[0] = shared_b[(s + 0) * 33 + i];
reg_b[1] = shared_b[(s + 1) * 33 + i];
accum[0][0] += reg_a[0] * reg_b[0];
accum[0][1] += reg_a[0] * reg_b[1];
accum[1][0] += reg_a[1] * reg_b[0];
accum[1][1] += reg_a[1] * reg_b[1];
}
}
// Write the accumulated results to the output matrix.
matrix_c[offset_c + (r + 0) * 32 + (s + 0)] = accum[0][0];
matrix_c[offset_c + (r + 0) * 32 + (s + 1)] = accum[0][1];
matrix_c[offset_c + (r + 1) * 32 + (s + 0)] = accum[1][0];
matrix_c[offset_c + (r + 1) * 32 + (s + 1)] = accum[1][1];
}
#endif
torch::Tensor sparse_matmul(
torch::Tensor a, torch::Tensor b, const std::string& mode,
const layout_tensors& row_layout, const layout_tensors& col_layout,
bool trans_a, bool trans_b
) {
// Select current sparse layout by the given sparse mode.
auto layout = (mode == "sdd"
|| mode == "dsd" && !trans_a
|| mode == "dds" && trans_b) ? row_layout : col_layout;
uint num_blocks = std::get<0>(layout).size(0) / 2;
uint sparse_width = (std::get<1>(layout).size(0) - 1) * 32;
// Get the dimension sizes from the tensors.
uint size_m = mode.at(1) == 'd' ? a.size(trans_a ? -1 : -2) : sparse_width;
uint size_n = mode.at(2) == 'd' ? b.size(trans_b ? -2 : -1) : sparse_width;
uint size_k = mode.at(2) == 'd' ? b.size(trans_b ? -1 : -2)
: a.size(trans_a ? -2 : -1);
// Construct output tensor shape with preserving multiple batch dimensions.
auto dense = mode.at(1) == 'd' ? a : b;
auto shape = dense.sizes().slice(0, dense.dim() - 2).vec();
if (mode.at(0) == 'd') shape.insert(shape.end(), { size_m, size_n });
else shape.insert(shape.end(), { num_blocks, 32, 32 });
// Merge the batch dimensions to one.
a = a.flatten(0, mode.at(1) == 'd' ? -3 : -4);
b = b.flatten(0, mode.at(2) == 'd' ? -3 : -4);
uint num_batches = a.size(0);
// Create an empty output tensor to store the multiplication result.
torch::Tensor c;
if (mode.at(0) == 'd') c = a.new_empty({ num_batches, size_m, size_n });
else c = a.new_empty({ num_batches, num_blocks, 32, 32 });
// Launch CUDA kernel with corresponding sparse mode and dimension sizes.
dim3 blocks;
if (mode == "sdd") blocks = dim3(num_blocks, num_batches);
else blocks = dim3(num_batches,
(size_m + 32 - 1) / 32, (size_n + 32 - 1) / 32);
auto kernel = mode == "sdd" ? sparse_matmul_sdd_32x32x32_kernel<false, false> :
mode == "dsd" ? sparse_matmul_sdd_32x32x32_kernel<false, false> :
sparse_matmul_sdd_32x32x32_kernel<false, false>;
hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(256), 0, 0, //tile<float, 32, 8>::THREADS>>>(
a.data_ptr<float>(), b.data_ptr<float>(), c.data_ptr<float>(),
layout, num_blocks, size_m, size_n, size_k //,
//trans_a, trans_b
);
// Return the output tensor with multiple batch dimensions.
return c.reshape(shape);
}
| a226c381b555235ee1101bd9209ed686b2c12e83.cu | #include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <device_functions.h>
#include <device_launch_parameters.h>
#include <torch/extension.h>
#include "sparse_kernels.h"
#include "sparse_layout.cuh"
#define USE_VERY_OPTIMIZED_KERNEL
/**
* Compute sparse matrix multiplication with SDD mode.
*
 * It multiplies a dense matrix with another dense matrix and creates a new
 * sparse matrix through the corresponding sparse layout.
*
* Blocks : (Sparse Blocks, Total Batches)
* Threads per Block : 256
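 *
 * blockIdx.x selects one 32x32 block of the sparse layout and blockIdx.y the
 * batch index; each of the 256 threads accumulates a 2x2 sub-tile of the
 * output block.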
*/
#ifdef USE_32x8_TILING
template <bool trans_a, bool trans_b>
__global__ void __launch_bounds__(256, 8) sparse_matmul_sdd_32x32x8_kernel(
const float* __restrict__ matrix_a,
const float* __restrict__ matrix_b,
float* __restrict__ matrix_c,
sparse_layout layout, uint num_blocks,
uint size_m, uint size_n, uint size_k
) {
/******** Define shared memory ********/
constexpr int TILE_SIZE = 32 * 8;
constexpr int PADDING = 8;
__shared__ float tile_a[2][(TILE_SIZE + PADDING + 32 - 1) / 32 * 32];
__shared__ float tile_b[2][(TILE_SIZE + PADDING + 32 - 1) / 32 * 32];
/******** Fetch sparse block descriptor ********/
auto block = layout.get(blockIdx.x);
int m = block.row() * 32;
int n = block.col() * 32;
    /******** Define accumulator and warp information ********/
float accum[2][2] = { { 0.0f, 0.0f }, { 0.0f, 0.0f } };
int tid = threadIdx.x;
/******** Prefetch first tiles ********/
int load_a = blockIdx.y * size_m * size_k;
int load_b = blockIdx.y * size_k * size_n;
float buffer_a = matrix_a[
load_a
+ ((trans_a ? 0 : m) + (trans_a ? tid / 32 : tid / 8)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : 0) + (trans_a ? tid % 32 : tid % 8))
];
float buffer_b = matrix_b[
load_b
+ ((trans_b ? n : 0) + (trans_b ? tid / 8 : tid / 32)) * (trans_b ? size_k : size_m)
+ ((trans_b ? 0 : n) + (trans_b ? tid % 8 : tid % 32))
];
/******** Iterate over k-dim ********/
#pragma unroll 1
for (int k = 0; k < size_k; k += 8) {
int page = k / 8 % 2;
int next_k = k + 8;
/******** Commit the prefetched buffers to the shared memory ********/
tile_a[page][(trans_a ? tid % 32 : tid / 8) * 8 + (trans_a ? tid / 32 : tid % 8) + (trans_a ? tid % 32 / 4 : tid / 32)] = buffer_a;
tile_b[page][(trans_b ? tid / 8 : tid % 32) * 8 + (trans_b ? tid % 8 : tid / 32) + (trans_b ? tid / 32 : tid % 32 / 4)] = buffer_b;
__syncthreads();
/******** Prefetch next tiles if available ********/
if (next_k < size_k) {
buffer_a = matrix_a[
load_a
+ ((trans_a ? next_k : m) + (trans_a ? tid / 32 : tid / 8)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : next_k) + (trans_a ? tid % 32 : tid % 8))
];
buffer_b = matrix_b[
load_b
+ ((trans_b ? n : next_k) + (trans_b ? tid / 8 : tid / 32)) * (trans_b ? size_k : size_m)
+ ((trans_b ? next_k : n) + (trans_b ? tid % 8 : tid % 32))
];
}
/******** Accumulate tile matmul by using register file ********/
#pragma unroll
for (int i = 0; i < 8; ++ i) {
float local_a[2], local_b[2];
local_a[0] = tile_a[page][(tid / 16 * 2 + 0) * 8 + i + (tid / 32)];
local_a[1] = tile_a[page][(tid / 16 * 2 + 1) * 8 + i + (tid / 32)];
local_b[0] = tile_b[page][(tid % 16 * 2 + 0) * 8 + i + (tid % 16 / 2)];
local_b[1] = tile_b[page][(tid % 16 * 2 + 1) * 8 + i + (tid % 16 / 2)];
accum[0][0] += local_a[0] * local_b[0];
accum[0][1] += local_a[0] * local_b[1];
accum[1][0] += local_a[1] * local_b[0];
accum[1][1] += local_a[1] * local_b[1];
}
}
/******** Apply accumulation to output matrix ********/
int load_c = (blockIdx.y * num_blocks + block.idx()) * 32 * 32;
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 0)] = accum[0][0];
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 1)] = accum[0][1];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 0)] = accum[1][0];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 1)] = accum[1][1];
}
#endif
#ifdef USE_8x32_TILING
template <bool trans_a, bool trans_b>
__global__ void __launch_bounds__(256, 8) sparse_matmul_sdd_32x32x8_kernel(
const float* __restrict__ matrix_a,
const float* __restrict__ matrix_b,
float* __restrict__ matrix_c,
sparse_layout layout, uint num_blocks,
uint size_m, uint size_n, uint size_k
) {
/******** Define shared memory ********/
constexpr int TILE_SIZE = 8 * 32;
constexpr int PADDING = 8;
__shared__ float tile_a[2][(TILE_SIZE + PADDING + 32 - 1) / 32 * 32];
__shared__ float tile_b[2][(TILE_SIZE + PADDING + 32 - 1) / 32 * 32];
/******** Fetch sparse block descriptor ********/
auto block = layout.get(blockIdx.x);
int m = block.row() * 32;
int n = block.col() * 32;
    /******** Define accumulator and warp information ********/
float accum[2][2] = { { 0.0f, 0.0f }, { 0.0f, 0.0f } };
int tid = threadIdx.x;
/******** Prefetch first tiles ********/
int load_a = blockIdx.y * size_m * size_k;
int load_b = blockIdx.y * size_k * size_n;
float buffer_a = matrix_a[
load_a
+ ((trans_a ? 0 : m) + (trans_a ? tid / 32 : tid / 8 % 4 * 8 + tid / 32)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : 0) + (trans_a ? tid % 32 : tid % 8))
];
float buffer_b = matrix_b[
load_b
+ ((trans_b ? n : 0) + (trans_b ? tid / 8 % 4 * 8 + tid / 32 : tid / 32)) * (trans_b ? size_k : size_m)
+ ((trans_b ? 0 : n) + (trans_b ? tid % 8 : tid % 32))
];
/******** Iterate over k-dim ********/
#pragma unroll 1
for (int k = 0; k < size_k; k += 8) {
int page = k / 8 % 2;
int next_k = k + 8;
/******** Commit the prefetched buffers to the shared memory ********/
tile_a[page][(trans_a ? tid / 32 : tid % 8) * (32 + 1) + (trans_a ? tid % 32 : tid / 8 % 4 * 8 + tid / 32)] = buffer_a;
tile_b[page][(trans_b ? tid % 8 : tid / 32) * (32 + 1) + (trans_b ? tid / 8 % 4 * 8 + tid / 32 : tid % 32)] = buffer_b;
__syncthreads();
/******** Prefetch next tiles if available ********/
if (next_k < size_k) {
buffer_a = matrix_a[
load_a
+ ((trans_a ? next_k : m) + (trans_a ? tid / 32 : tid / 8 % 4 * 8 + tid / 32)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : next_k) + (trans_a ? tid % 32 : tid % 8))
];
buffer_b = matrix_b[
load_b
+ ((trans_b ? n : next_k) + (trans_b ? tid / 8 % 4 * 8 + tid / 32 : tid / 32)) * (trans_b ? size_k : size_m)
+ ((trans_b ? next_k : n) + (trans_b ? tid % 8 : tid % 32))
];
}
/******** Accumulate tile matmul by using register file ********/
#pragma unroll
for (int i = 0; i < 8; ++ i) {
float local_a[2], local_b[2];
local_a[0] = tile_a[page][i * (32 + 1) + (tid / 16 * 2 + 0)];
local_a[1] = tile_a[page][i * (32 + 1) + (tid / 16 * 2 + 1)];
local_b[0] = tile_b[page][i * (32 + 1) + (tid % 16 * 2 + 0)];
local_b[1] = tile_b[page][i * (32 + 1) + (tid % 16 * 2 + 1)];
accum[0][0] += local_a[0] * local_b[0];
accum[0][1] += local_a[0] * local_b[1];
accum[1][0] += local_a[1] * local_b[0];
accum[1][1] += local_a[1] * local_b[1];
}
}
/******** Apply accumulation to output matrix ********/
int load_c = (blockIdx.y * num_blocks + block.idx()) * 32 * 32;
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 0)] = accum[0][0];
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 1)] = accum[0][1];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 0)] = accum[1][0];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 1)] = accum[1][1];
}
#endif
#ifdef USE_32x32_TILING
template <bool trans_a, bool trans_b>
__global__ void __launch_bounds__(256, 3) sparse_matmul_sdd_32x32x8_kernel(
const float* __restrict__ matrix_a,
const float* __restrict__ matrix_b,
float* __restrict__ matrix_c,
sparse_layout layout, uint num_blocks,
uint size_m, uint size_n, uint size_k
//bool trans_a, bool trans_b
) {
/******** Define shared memory ********/
__shared__ float tile_a[2 * 32 * (32 + 1)];
__shared__ float tile_b[2 * 32 * (32 + 1)];
/******** Fetch sparse block descriptor ********/
auto block = layout.get(blockIdx.x);
int m = block.row() * 32;
int n = block.col() * 32;
    /******** Define accumulator and warp information ********/
float accum[2][2] = { { 0.0f, 0.0f }, { 0.0f, 0.0f } };
int tid = threadIdx.x;
/******** Prefetch first tiles ********/
int load_a = blockIdx.y * size_m * size_k;
int load_b = blockIdx.y * size_k * size_n;
float buffer_a[4], buffer_b[4];
#pragma unroll
for (int i = 0; i < 4; ++ i) {
buffer_a[i] = matrix_a[
load_a
+ ((trans_a ? 0 : m) + (tid / 32 + i * 8)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : 0) + (tid % 32))
];
buffer_b[i] = matrix_b[
load_b
+ ((trans_b ? n : 0) + (tid / 32 + i * 8)) * (trans_b ? size_k : size_m)
+ ((trans_b ? 0 : n) + (tid % 32))
];
}
/******** Iterate over k-dim ********/
#pragma unroll 1
for (int k = 0; k < size_k; k += 32) {
int page = k / 32 % 2;
int next_k = k + 32;
/******** Commit the prefetched buffers to the shared memory ********/
#pragma unroll
for (int i = 0; i < 4; ++ i) {
tile_a[page * 32 * (32 + 1) + (trans_a ? tid % 32 : tid / 32 + i * 8) * (32 + 1) + (trans_a ? tid / 32 + i * 8 : tid % 32)] = buffer_a[i];
tile_b[page * 32 * (32 + 1) + (trans_b ? tid / 32 + i * 8 : tid % 32) * (32 + 1) + (trans_b ? tid % 32 : tid / 32 + i * 8)] = buffer_b[i];
}
__syncthreads();
/******** Prefetch next tiles if available ********/
if (next_k < size_k) {
#pragma unroll
for (int i = 0; i < 4; ++ i) {
buffer_a[i] = matrix_a[
load_a
+ ((trans_a ? next_k : m) + (tid / 32 + i * 8)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : next_k) + (tid % 32))
];
buffer_b[i] = matrix_b[
load_b
+ ((trans_b ? n : next_k) + (tid / 32 + i * 8)) * (trans_b ? size_k : size_m)
+ ((trans_b ? next_k : n) + (tid % 32))
];
}
}
/******** Accumulate tile matmul by using register file ********/
#pragma unroll
for (int i = 0; i < 32; ++ i) {
float local_a[2], local_b[2];
local_a[0] = tile_a[page * 32 * (32 + 1) + (tid / 16 * 2 + 0) * (32 + 1) + i];
local_a[1] = tile_a[page * 32 * (32 + 1) + (tid / 16 * 2 + 1) * (32 + 1) + i];
local_b[0] = tile_b[page * 32 * (32 + 1) + (tid % 16 * 2 + 0) * (32 + 1) + i];
local_b[1] = tile_b[page * 32 * (32 + 1) + (tid % 16 * 2 + 1) * (32 + 1) + i];
accum[0][0] += local_a[0] * local_b[0];
accum[0][1] += local_a[0] * local_b[1];
accum[1][0] += local_a[1] * local_b[0];
accum[1][1] += local_a[1] * local_b[1];
}
}
/******** Apply accumulation to output matrix ********/
int load_c = (blockIdx.y * num_blocks + block.idx()) * 32 * 32;
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 0)] = accum[0][0];
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 1)] = accum[0][1];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 0)] = accum[1][0];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 1)] = accum[1][1];
}
#endif
#ifdef USE_32x32_TILING_NO_BUFFERING
template <bool trans_a, bool trans_b>
__global__ void sparse_matmul_sdd_32x32x8_kernel(
const float* __restrict__ matrix_a,
const float* __restrict__ matrix_b,
float* __restrict__ matrix_c,
sparse_layout layout, uint num_blocks,
uint size_m, uint size_n, uint size_k
//bool trans_a, bool trans_b
) {
/******** Define shared memory ********/
__shared__ float tile_a[32 * (32 + 1)];
__shared__ float tile_b[32 * (32 + 1)];
/******** Fetch sparse block descriptor ********/
auto block = layout.get(blockIdx.x);
int m = block.row() * 32;
int n = block.col() * 32;
    /******** Define accumulator and warp information ********/
float accum[2][2] = { { 0.0f, 0.0f }, { 0.0f, 0.0f } };
int tid = threadIdx.x;
/******** Prefetch first tiles ********/
int load_a = blockIdx.y * size_m * size_k;
int load_b = blockIdx.y * size_k * size_n;
float buffer_a[4], buffer_b[4];
#pragma unroll
for (int i = 0; i < 4; ++ i) {
buffer_a[i] = matrix_a[
load_a
+ ((trans_a ? 0 : m) + (tid / 32 + i * 8)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : 0) + (tid % 32))
];
buffer_b[i] = matrix_b[
load_b
+ ((trans_b ? n : 0) + (tid / 32 + i * 8)) * (trans_b ? size_k : size_m)
+ ((trans_b ? 0 : n) + (tid % 32))
];
}
/******** Iterate over k-dim ********/
#pragma unroll 1
for (int k = 0; k < size_k; k += 32) {
int next_k = k + 32;
/******** Commit the prefetched buffers to the shared memory ********/
__syncthreads();
#pragma unroll
for (int i = 0; i < 4; ++ i) {
tile_a[(trans_a ? tid % 32 : tid / 32 + i * 8) * (32 + 1) + (trans_a ? tid / 32 + i * 8 : tid % 32)] = buffer_a[i];
tile_b[(trans_b ? tid / 32 + i * 8 : tid % 32) * (32 + 1) + (trans_b ? tid % 32 : tid / 32 + i * 8)] = buffer_b[i];
}
__syncthreads();
/******** Prefetch next tiles if available ********/
if (next_k < size_k) {
#pragma unroll
for (int i = 0; i < 4; ++ i) {
buffer_a[i] = matrix_a[
load_a
+ ((trans_a ? next_k : m) + (tid / 32 + i * 8)) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : next_k) + (tid % 32))
];
buffer_b[i] = matrix_b[
load_b
+ ((trans_b ? n : next_k) + (tid / 32 + i * 8)) * (trans_b ? size_k : size_m)
+ ((trans_b ? next_k : n) + (tid % 32))
];
}
}
/******** Accumulate tile matmul by using register file ********/
#pragma unroll
for (int i = 0; i < 32; ++ i) {
float local_a[2], local_b[2];
local_a[0] = tile_a[(tid / 16 * 2 + 0) * (32 + 1) + i];
local_a[1] = tile_a[(tid / 16 * 2 + 1) * (32 + 1) + i];
local_b[0] = tile_b[(tid % 16 * 2 + 0) * (32 + 1) + i];
local_b[1] = tile_b[(tid % 16 * 2 + 1) * (32 + 1) + i];
accum[0][0] += local_a[0] * local_b[0];
accum[0][1] += local_a[0] * local_b[1];
accum[1][0] += local_a[1] * local_b[0];
accum[1][1] += local_a[1] * local_b[1];
}
}
/******** Apply accumulation to output matrix ********/
int load_c = (blockIdx.y * num_blocks + block.idx()) * 32 * 32;
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 0)] = accum[0][0];
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 1)] = accum[0][1];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 0)] = accum[1][0];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 1)] = accum[1][1];
}
#endif
#ifdef USE_32x32_TILING_FUSED_COPY
template <bool trans_a, bool trans_b>
__global__ void sparse_matmul_sdd_32x32x8_kernel(
const float* __restrict__ matrix_a,
const float* __restrict__ matrix_b,
float* __restrict__ matrix_c,
sparse_layout layout, uint num_blocks,
uint size_m, uint size_n, uint size_k
//bool trans_a, bool trans_b
) {
/******** Define shared memory ********/
__shared__ float tile_a[32 * (32 + 1)];
__shared__ float tile_b[32 * (32 + 1)];
/******** Fetch sparse block descriptor ********/
auto block = layout.get(blockIdx.x);
int m = block.row() * 32;
int n = block.col() * 32;
    /******** Define accumulator and warp information ********/
float accum[2][2] = { { 0.0f, 0.0f }, { 0.0f, 0.0f } };
int tid = threadIdx.x;
int i = tid / 8;
int j = tid % 8 * 4;
/******** Prefetch first tiles ********/
int load_a = blockIdx.y * size_m * size_k;
int load_b = blockIdx.y * size_k * size_n;
float4 buffer_a, buffer_b;
buffer_a = *(float4 *) &matrix_a[
load_a
+ ((trans_a ? 0 : m) + i) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : 0) + j)
];
buffer_b = *(float4 *) &matrix_b[
load_b
+ ((trans_a ? n : 0) + i) * (trans_a ? size_k : size_n)
+ ((trans_a ? 0 : n) + j)
];
/******** Iterate over k-dim ********/
#pragma unroll 1
for (int k = 0; k < size_k; k += 32) {
int next_k = k + 32;
/******** Commit the prefetched buffers to the shared memory ********/
__syncthreads();
tile_a[(trans_a ? j + 0 : i) * (32 + 1) + (trans_a ? i : j + 0)] = buffer_a.x;
tile_a[(trans_a ? j + 1 : i) * (32 + 1) + (trans_a ? i : j + 1)] = buffer_a.y;
tile_a[(trans_a ? j + 2 : i) * (32 + 1) + (trans_a ? i : j + 2)] = buffer_a.z;
tile_a[(trans_a ? j + 3 : i) * (32 + 1) + (trans_a ? i : j + 3)] = buffer_a.w;
tile_b[(trans_a ? i : j + 0) * (32 + 1) + (trans_a ? j + 0 : i)] = buffer_b.x;
tile_b[(trans_a ? i : j + 1) * (32 + 1) + (trans_a ? j + 1 : i)] = buffer_b.y;
tile_b[(trans_a ? i : j + 2) * (32 + 1) + (trans_a ? j + 2 : i)] = buffer_b.z;
tile_b[(trans_a ? i : j + 3) * (32 + 1) + (trans_a ? j + 3 : i)] = buffer_b.w;
__syncthreads();
/******** Prefetch next tiles if available ********/
if (next_k < size_k) {
buffer_a = *(float4 *) &matrix_a[
load_a
+ ((trans_a ? next_k : m) + tid / 8) * (trans_a ? size_m : size_k)
+ ((trans_a ? m : next_k) + tid % 8 * 4)
];
buffer_b = *(float4 *) &matrix_b[
load_b
+ ((trans_a ? n : next_k) + tid / 8) * (trans_a ? size_k : size_n)
+ ((trans_a ? next_k : n) + tid % 8 * 4)
];
}
/******** Accumulate tile matmul by using register file ********/
#pragma unroll
for (int i = 0; i < 32; ++ i) {
float local_a[2], local_b[2];
local_a[0] = tile_a[(tid / 16 * 2 + 0) * (32 + 1) + i];
local_a[1] = tile_a[(tid / 16 * 2 + 1) * (32 + 1) + i];
local_b[0] = tile_b[(tid % 16 * 2 + 0) * (32 + 1) + i];
local_b[1] = tile_b[(tid % 16 * 2 + 1) * (32 + 1) + i];
accum[0][0] += local_a[0] * local_b[0];
accum[0][1] += local_a[0] * local_b[1];
accum[1][0] += local_a[1] * local_b[0];
accum[1][1] += local_a[1] * local_b[1];
}
}
/******** Apply accumulation to output matrix ********/
int load_c = (blockIdx.y * num_blocks + block.idx()) * 32 * 32;
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 0)] = accum[0][0];
matrix_c[load_c + (tid / 16 * 2 + 0) * 32 + (tid % 16 * 2 + 1)] = accum[0][1];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 0)] = accum[1][0];
matrix_c[load_c + (tid / 16 * 2 + 1) * 32 + (tid % 16 * 2 + 1)] = accum[1][1];
}
#endif
#ifdef USE_VERY_OPTIMIZED_KERNEL
template <bool tr_a, bool tr_b>
__global__ void sparse_matmul_sdd_32x32x32_kernel(
const float* __restrict__ matrix_a,
const float* __restrict__ matrix_b,
float* __restrict__ matrix_c,
sparse_layout layout, int num_blocks,
int size_m, int size_n, int size_k
) {
float accum[2][2] = { 0 };
float4 buffer_a, buffer_b;
__shared__ float shared_a[32 * 33], shared_b[32 * 33];
// Fetch current block and get corresponding row and column positions.
auto block = layout.get(blockIdx.x);
int m = block.row() * 32;
int n = block.col() * 32;
// Get an offset of each matrix and calculate mapping indices.
int stride_a = tr_a ? size_m : size_k;
int stride_b = tr_b ? size_k : size_n;
int offset_a = blockIdx.y * size_m * size_k + (tr_a ? m : m * size_k);
int offset_b = blockIdx.y * size_k * size_n + (tr_b ? n * size_k : n);
int offset_c = (blockIdx.y * num_blocks + block.idx()) * 32 * 32;
int p = threadIdx.x / 8;
int q = threadIdx.x % 8 * 4;
int r = threadIdx.x / 16 * 2;
int s = threadIdx.x % 16 * 2;
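    // (p, q): row and starting column of the float4 this thread loads into a
    //         32x32 shared tile (256 threads x 4 floats = 1024 elements);
    // (r, s): top-left corner of the 2x2 output sub-tile this thread owns.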
// Prefetch first tiles from matrices in global memory.
buffer_a = *(float4 *) &matrix_a[offset_a + p * stride_a + q];
buffer_b = *(float4 *) &matrix_b[offset_b + p * stride_b + q];
#pragma unroll 1
for (int k = 32; k <= size_k; k += 32) {
// Commit the prefetched tiles to the shared memory storage.
__syncthreads();
shared_a[tr_a ? ((q + 0) * 33 + p) : (p * 33 + (q + 0))] = buffer_a.x;
shared_a[tr_a ? ((q + 1) * 33 + p) : (p * 33 + (q + 1))] = buffer_a.y;
shared_a[tr_a ? ((q + 2) * 33 + p) : (p * 33 + (q + 2))] = buffer_a.z;
shared_a[tr_a ? ((q + 3) * 33 + p) : (p * 33 + (q + 3))] = buffer_a.w;
shared_b[tr_b ? (p * 33 + (q + 0)) : ((q + 0) * 33 + p)] = buffer_b.x;
shared_b[tr_b ? (p * 33 + (q + 1)) : ((q + 1) * 33 + p)] = buffer_b.y;
shared_b[tr_b ? (p * 33 + (q + 2)) : ((q + 2) * 33 + p)] = buffer_b.z;
shared_b[tr_b ? (p * 33 + (q + 3)) : ((q + 3) * 33 + p)] = buffer_b.w;
__syncthreads();
// Prefetch next tiles from matrices in global memory.
if (k < size_k) {
buffer_a = *(float4 *) &matrix_a[offset_a + (tr_a ? k * size_m : k)
+ p * stride_a + q];
buffer_b = *(float4 *) &matrix_b[offset_b + (tr_b ? k : k * size_n)
+ p * stride_b + q];
}
// Accumulate the tiled matrix multiplications by loading sliced vectors
// from the shared memory to local register file.
#pragma unroll
for (int i = 0; i < 32; ++ i) {
float reg_a[2], reg_b[2];
reg_a[0] = shared_a[(r + 0) * 33 + i];
reg_a[1] = shared_a[(r + 1) * 33 + i];
reg_b[0] = shared_b[(s + 0) * 33 + i];
reg_b[1] = shared_b[(s + 1) * 33 + i];
accum[0][0] += reg_a[0] * reg_b[0];
accum[0][1] += reg_a[0] * reg_b[1];
accum[1][0] += reg_a[1] * reg_b[0];
accum[1][1] += reg_a[1] * reg_b[1];
}
}
// Write the accumulated results to the output matrix.
matrix_c[offset_c + (r + 0) * 32 + (s + 0)] = accum[0][0];
matrix_c[offset_c + (r + 0) * 32 + (s + 1)] = accum[0][1];
matrix_c[offset_c + (r + 1) * 32 + (s + 0)] = accum[1][0];
matrix_c[offset_c + (r + 1) * 32 + (s + 1)] = accum[1][1];
}
#endif
torch::Tensor sparse_matmul(
torch::Tensor a, torch::Tensor b, const std::string& mode,
const layout_tensors& row_layout, const layout_tensors& col_layout,
bool trans_a, bool trans_b
) {
// Select current sparse layout by the given sparse mode.
auto layout = (mode == "sdd"
|| mode == "dsd" && !trans_a
|| mode == "dds" && trans_b) ? row_layout : col_layout;
uint num_blocks = std::get<0>(layout).size(0) / 2;
uint sparse_width = (std::get<1>(layout).size(0) - 1) * 32;
// Get the dimension sizes from the tensors.
uint size_m = mode.at(1) == 'd' ? a.size(trans_a ? -1 : -2) : sparse_width;
uint size_n = mode.at(2) == 'd' ? b.size(trans_b ? -2 : -1) : sparse_width;
uint size_k = mode.at(2) == 'd' ? b.size(trans_b ? -1 : -2)
: a.size(trans_a ? -2 : -1);
// Construct output tensor shape with preserving multiple batch dimensions.
auto dense = mode.at(1) == 'd' ? a : b;
auto shape = dense.sizes().slice(0, dense.dim() - 2).vec();
if (mode.at(0) == 'd') shape.insert(shape.end(), { size_m, size_n });
else shape.insert(shape.end(), { num_blocks, 32, 32 });
// Merge the batch dimensions to one.
a = a.flatten(0, mode.at(1) == 'd' ? -3 : -4);
b = b.flatten(0, mode.at(2) == 'd' ? -3 : -4);
uint num_batches = a.size(0);
// Create an empty output tensor to store the multiplication result.
torch::Tensor c;
if (mode.at(0) == 'd') c = a.new_empty({ num_batches, size_m, size_n });
else c = a.new_empty({ num_batches, num_blocks, 32, 32 });
// Launch CUDA kernel with corresponding sparse mode and dimension sizes.
dim3 blocks;
if (mode == "sdd") blocks = dim3(num_blocks, num_batches);
else blocks = dim3(num_batches,
(size_m + 32 - 1) / 32, (size_n + 32 - 1) / 32);
auto kernel = mode == "sdd" ? sparse_matmul_sdd_32x32x32_kernel<false, false> :
mode == "dsd" ? sparse_matmul_sdd_32x32x32_kernel<false, false> :
sparse_matmul_sdd_32x32x32_kernel<false, false>;
kernel<<<blocks, 256>>>( //tile<float, 32, 8>::THREADS>>>(
a.data_ptr<float>(), b.data_ptr<float>(), c.data_ptr<float>(),
layout, num_blocks, size_m, size_n, size_k //,
//trans_a, trans_b
);
// Return the output tensor with multiple batch dimensions.
return c.reshape(shape);
}
|
f2482b7ed8d3e213601444183cf33af356604dd2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void NN_DownSampling( float *target, const float *source, const int wt, const int ht, const int ws, const int hs )
{
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = y*wt+x;
const int curs = (y*2)*ws+x*2;
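	// nearest-neighbour 2x downsampling: target pixel (x, y) copies source
	// pixel (2*x, 2*y); the *3 strides below address interleaved RGB channels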
if(y < ht and x < wt) {
target[curt*3+0] = source[curs*3+0];
target[curt*3+1] = source[curs*3+1];
target[curt*3+2] = source[curs*3+2];
}
} | f2482b7ed8d3e213601444183cf33af356604dd2.cu | #include "includes.h"
__global__ void NN_DownSampling( float *target, const float *source, const int wt, const int ht, const int ws, const int hs )
{
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = y*wt+x;
const int curs = (y*2)*ws+x*2;
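	// nearest-neighbour 2x downsampling: target pixel (x, y) copies source
	// pixel (2*x, 2*y); the *3 strides below address interleaved RGB channels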
if(y < ht and x < wt) {
target[curt*3+0] = source[curs*3+0];
target[curt*3+1] = source[curs*3+1];
target[curt*3+2] = source[curs*3+2];
}
} |
f71fc1810de1893570b00df69396d98c52c106cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#define _USE_MATH_DEFINES
#include <math.h>
#include <stdio.h>
#include <memory.h>
#include <time.h>
#include "lodepng.h"
// kernel dimensions x and y
const int ker_x_dim = 5;
const int ker_y_dim = 5;
const double sigma = 3.0;
float h_kernel[(ker_x_dim * 2)*(ker_y_dim * 2)];
__constant__ float d_kernel[(ker_x_dim * 2)*(ker_y_dim * 2)];
// Function to generate gaussian kernel values and store in h_kernel array
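// Each weight is exp(-(i*i + j*j) / (2*sigma*sigma)) / (2*pi*sigma*sigma), i.e. a
// 2D Gaussian centred on the kernel midpoint; the weights are not renormalised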
void getGaussianKernel()
{
double temp = 0.0;
int r_i, r_j = 0;
	// fill i, j over [-ker_x_dim, ker_x_dim) so the indices match the read
	// loops in runFilter and stay inside the (2*ker_x_dim)*(2*ker_y_dim) array
	for (int i = -ker_x_dim; i < ker_x_dim; i++) {
		r_i = i + ker_x_dim;
		for (int j = -ker_y_dim; j < ker_y_dim; j++) {
r_j = j + ker_y_dim;
temp = exp(-((i*i) + (j*j)) / (2 * (sigma*sigma)));
h_kernel[r_i*(ker_y_dim*2)+r_j] = temp / (2*M_PI*sigma*sigma);
}
}
printf("Kernel generated successfully\n");
}
__host__ __device__ int get1dIndex(int width, int height, int x, int y)
{
if (x < 0) x = 0;
if (x >= width) x = width - 1;
if (y < 0) y = 0;
if (y >= height) y = height - 1;
return y*width + x;
}
__global__ void runFilter(float* input, float* output, int width, int height) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float new_val = 0.0f;
int r_i, r_j = 0;
	// check that the thread falls within the image dimensions
if (col < width && row < height) {
// run through the kernel matrix
for (int i = -ker_x_dim; i < ker_x_dim; i++) {
// get real kernel index
r_i = i + ker_x_dim;
for (int j = -ker_y_dim; j < ker_y_dim; j++) {
r_j = j + ker_y_dim;
// get index image index
int idx = get1dIndex(width, height, col + i, row + j);
// work out new value by multiplying kernel value by pixel value
new_val += d_kernel[r_i*(ker_y_dim * 2) + r_j] * input[idx];
}
}
// set new values to output array
output[get1dIndex(width, height, col, row)] = new_val;
}
}
void convolveImage(float* input, float* output, int width, int height)
{
float* d_input;
float* d_output;
	// allocate memory on the device for the input and output pixel arrays
hipMalloc(&d_input, width*height * sizeof(float));
hipMalloc(&d_output, width*height * sizeof(float));
// copy the values of the arrays stored on the host to the arrays stored on the device
hipMemcpyToSymbol(d_kernel, h_kernel, sizeof(h_kernel));
hipMemcpy(d_input, input, width*height * sizeof(float), hipMemcpyHostToDevice);
	// declare block and grid dimensions
dim3 blockDim(32, 32, 1);
dim3 gridDim(width / (blockDim.x) + 1, height / (blockDim.y) + 1);
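	// one thread per pixel; the +1 rounds the grid up so border pixels are
	// covered, and out-of-range threads exit via the bounds check in runFilter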
printf("Image height: %d, width: %d\n", height, width);
// run the CUDA kernel
runFilter << <gridDim, blockDim >> >(d_input, d_output, width, height);
hipError_t cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
// if there's an error, display it
printf("Error: %s\n", hipGetErrorString(cudaStatus));
}
hipDeviceSynchronize();
// copy the output from the device to the host, ready for png output
hipMemcpy(output, d_output, width*height * sizeof(float), hipMemcpyDeviceToHost);
printf("%f", output[0]);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
// if there's an error, display it
printf("Error running kernel: %s\n", hipGetErrorString(cudaStatus));
}
}
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char** argv)
{
// declare image paths
const char* image_path = argv[1];
const char* output_path = "output.png";
std::vector<unsigned char> img_vect;
unsigned int width, height;
struct timespec start, finish;
long long int time_elapsed;
getGaussianKernel();
unsigned error = lodepng::decode(img_vect, width, height, image_path);
if (error) {
printf("decoder error: %d, %s", error, lodepng_error_text(error));
}
int image_size = width*height;
float* temp;
float* input;
float* output;
// allocate memory on the host for the image data
hipHostMalloc(&temp, (image_size*3) * sizeof(float));
hipHostMalloc(&input, (image_size) * sizeof(float));
hipHostMalloc(&output, (image_size) * sizeof(float));
int count = 0;
	// getting rid of the alpha channel as it is not needed
for (int i = 0; i < img_vect.size(); ++i) {
if ((i + 1) % 4 != 0) {
temp[count] = img_vect.at(i);
count++;
}
}
// generate grayscale by getting the mean of the RGB values and storing in one pixel value
for (int i = 0; i < image_size; i++) {
input[i] = (
temp[i * 3 + 0] +
temp[i * 3 + 1] +
temp[i * 3 + 2])/3;
}
//Start Timer
clock_gettime(CLOCK_MONOTONIC, &start);
// run the image convolution
convolveImage(input, output, width, height);
// end timer
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("\nTime elapsed was %lldns or %0.9lfs\n", time_elapsed,(time_elapsed/1.0e9));
// image vector for lodepng output
std::vector<unsigned char> out_image;
for (int i = 0; i < image_size; i++) {
out_image.push_back(output[i]);
out_image.push_back(output[i]);
out_image.push_back(output[i]);
out_image.push_back(255);
}
// output image vector using lodepng
error = lodepng::encode(output_path, out_image, width, height);
if (error) {
//if there's an error, display it
printf("lodepng error: %s\n", lodepng_error_text(error));
} else {
printf("output image generated: %s\n", output_path);
}
}
| f71fc1810de1893570b00df69396d98c52c106cb.cu | #include "cuda_runtime.h"
#include <cuda.h>
#include "device_launch_parameters.h"
#define _USE_MATH_DEFINES
#include <math.h>
#include <stdio.h>
#include <memory.h>
#include <time.h>
#include "lodepng.h"
// kernel dimensions x and y
const int ker_x_dim = 5;
const int ker_y_dim = 5;
const double sigma = 3.0;
float h_kernel[(ker_x_dim * 2)*(ker_y_dim * 2)];
__constant__ float d_kernel[(ker_x_dim * 2)*(ker_y_dim * 2)];
// Function to generate gaussian kernel values and store in h_kernel array
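// Each weight is exp(-(i*i + j*j) / (2*sigma*sigma)) / (2*pi*sigma*sigma), i.e. a
// 2D Gaussian centred on the kernel midpoint; the weights are not renormalised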
void getGaussianKernel()
{
double temp = 0.0;
int r_i, r_j = 0;
	// fill i, j over [-ker_x_dim, ker_x_dim) so the indices match the read
	// loops in runFilter and stay inside the (2*ker_x_dim)*(2*ker_y_dim) array
	for (int i = -ker_x_dim; i < ker_x_dim; i++) {
		r_i = i + ker_x_dim;
		for (int j = -ker_y_dim; j < ker_y_dim; j++) {
r_j = j + ker_y_dim;
temp = exp(-((i*i) + (j*j)) / (2 * (sigma*sigma)));
h_kernel[r_i*(ker_y_dim*2)+r_j] = temp / (2*M_PI*sigma*sigma);
}
}
printf("Kernel generated successfully\n");
}
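// get1dIndex clamps (x, y) to the image bounds (clamp-to-edge behaviour at the borders) before
// flattening to a row-major offset, so the convolution never reads outside the image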
__host__ __device__ int get1dIndex(int width, int height, int x, int y)
{
if (x < 0) x = 0;
if (x >= width) x = width - 1;
if (y < 0) y = 0;
if (y >= height) y = height - 1;
return y*width + x;
}
__global__ void runFilter(float* input, float* output, int width, int height) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float new_val = 0.0f;
int r_i, r_j = 0;
// check that the thread falls within the image dimensions
if (col < width && row < height) {
// run through the kernel matrix
for (int i = -ker_x_dim; i < ker_x_dim; i++) {
// get real kernel index
r_i = i + ker_x_dim;
for (int j = -ker_y_dim; j < ker_y_dim; j++) {
r_j = j + ker_y_dim;
// get the image index
int idx = get1dIndex(width, height, col + i, row + j);
// work out new value by multiplying kernel value by pixel value
new_val += d_kernel[r_i*(ker_y_dim * 2) + r_j] * input[idx];
}
}
// set new values to output array
output[get1dIndex(width, height, col, row)] = new_val;
}
}
void convolveImage(float* input, float* output, int width, int height)
{
float* d_input;
float* d_output;
// allocate memory on the device for the input and output pixel arrays
cudaMalloc(&d_input, width*height * sizeof(float));
cudaMalloc(&d_output, width*height * sizeof(float));
// copy the values of the arrays stored on the host to the arrays stored on the device
cudaMemcpyToSymbol(d_kernel, h_kernel, sizeof(h_kernel));
cudaMemcpy(d_input, input, width*height * sizeof(float), cudaMemcpyHostToDevice);
// declare block and grid dimensions
dim3 blockDim(32, 32, 1);
dim3 gridDim(width / (blockDim.x) + 1, height / (blockDim.y) + 1);
printf("Image height: %d, width: %d\n", height, width);
// run the CUDA kernel
runFilter << <gridDim, blockDim >> >(d_input, d_output, width, height);
cudaError_t cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
// if there's an error, display it
printf("Error: %s\n", cudaGetErrorString(cudaStatus));
}
cudaDeviceSynchronize();
// copy the output from the device to the host, ready for png output
cudaMemcpy(output, d_output, width*height * sizeof(float), cudaMemcpyDeviceToHost);
printf("%f", output[0]);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
// if there's an error, display it
printf("Error running kernel: %s\n", cudaGetErrorString(cudaStatus));
}
}
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char** argv)
{
// declare image paths
const char* image_path = argv[1];
const char* output_path = "output.png";
std::vector<unsigned char> img_vect;
unsigned int width, height;
struct timespec start, finish;
long long int time_elapsed;
getGaussianKernel();
unsigned error = lodepng::decode(img_vect, width, height, image_path);
if (error) {
printf("decoder error: %d, %s", error, lodepng_error_text(error));
}
int image_size = width*height;
float* temp;
float* input;
float* output;
// allocate memory on the host for the image data
cudaMallocHost(&temp, (image_size*3) * sizeof(float));
cudaMallocHost(&input, (image_size) * sizeof(float));
cudaMallocHost(&output, (image_size) * sizeof(float));
int count = 0;
// getting rid of the alpha channel as it is not needed
for (int i = 0; i < img_vect.size(); ++i) {
if ((i + 1) % 4 != 0) {
temp[count] = img_vect.at(i);
count++;
}
}
// generate grayscale by getting the mean of the RGB values and storing in one pixel value
for (int i = 0; i < image_size; i++) {
input[i] = (
temp[i * 3 + 0] +
temp[i * 3 + 1] +
temp[i * 3 + 2])/3;
}
//Start Timer
clock_gettime(CLOCK_MONOTONIC, &start);
// run the image convolution
convolveImage(input, output, width, height);
// end timer
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("\nTime elapsed was %lldns or %0.9lfs\n", time_elapsed,(time_elapsed/1.0e9));
// image vector for lodepng output
std::vector<unsigned char> out_image;
for (int i = 0; i < image_size; i++) {
out_image.push_back(output[i]);
out_image.push_back(output[i]);
out_image.push_back(output[i]);
out_image.push_back(255);
}
// output image vector using lodepng
error = lodepng::encode(output_path, out_image, width, height);
if (error) {
//if there's an error, display it
printf("lodepng error: %s\n", lodepng_error_text(error));
} else {
printf("output image generated: %s\n", output_path);
}
}
|
a52b4664dc4759bb9fbf850d4f9fb14ab718fb46.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
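// Note: __requires(...) reads as a GPUVerify-style precondition annotation: the analysis may
// assume sz == blockDim.x when checking this kernel.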
__global__ void foo(float *A, int sz) {
__requires(sz == blockDim.x);
for(int i = 0; i < 100; i++) {
A[sz*i + threadIdx.x] *= 2.0f;
}
} | a52b4664dc4759bb9fbf850d4f9fb14ab718fb46.cu | #include <cuda.h>
__global__ void foo(float *A, int sz) {
__requires(sz == blockDim.x);
for(int i = 0; i < 100; i++) {
A[sz*i + threadIdx.x] *= 2.0f;
}
} |
3e2bd8be3546031a062c347bcb5dfd21123d7e34.hip | // !!! This is a file automatically generated by hipify!!!
// User: [email protected]
// ExecutionRequest[P:'extinguishing.cu',P:1,T:1,args:'',q:'cudalb']
// May 16 2019 19:51:26
#include "cputils.h" // Added by tablon
/*
* Simplified simulation of fire extinguishing
*
* Computacion Paralela, Grado en Informatica (Universidad de Valladolid)
* 2018/2019
*
* v1.4
*
* (c) 2019 Arturo Gonzalez Escribano
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include "cputils.h"
#include <hip/hip_runtime.h>
#define RADIUS_TYPE_1 3
#define RADIUS_TYPE_2_3 9
#define THRESHOLD 0.1f
/* Structure to store data of an extinguishing team */
typedef struct {
int x,y;
int type;
int target;
} Team;
/* Structure to store data of a fire focal point */
typedef struct {
int x,y;
int start;
int heat;
int active; // States: 0 Not yet activated; 1 Active; 2 Deactivated by a team
} FocalPoint;
/* Macro function to simplify accessing with two coordinates to a flattened array */
#define accessMat( arr, exp1, exp2 ) arr[ (exp1) * columns + (exp2) ]
/*
* Function: Print usage line in stderr
*/
void show_usage( char *program_name ) {
fprintf(stderr,"Usage: %s <config_file> | <command_line_args>\n", program_name );
fprintf(stderr,"\t<config_file> ::= -f <file_name>\n");
fprintf(stderr,"\t<command_line_args> ::= <rows> <columns> <maxIter> <numTeams> [ <teamX> <teamY> <teamType> ... ] <numFocalPoints> [ <focalX> <focalY> <focalStart> <focalTemperature> ... ]\n");
fprintf(stderr,"\n");
}
#ifdef DEBUG
/*
* Function: Print the current state of the simulation
*/
void print_status( int iteration, int rows, int columns, float *surface, int num_teams, Team *teams, int num_focal, FocalPoint *focal, float global_residual ) {
/*
* You don't need to optimize this function, it is only for pretty printing and debugging purposes.
* It is not compiled in the production versions of the program.
* Thus, it is never used when measuring times in the leaderboard
*/
int i,j;
printf("Iteration: %d\n", iteration );
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
for( i=0; i<rows; i++ ) {
printf("|");
for( j=0; j<columns; j++ ) {
char symbol;
if ( accessMat( surface, i, j ) >= 1000 ) symbol = '*';
else if ( accessMat( surface, i, j ) >= 100 ) symbol = '0' + (int)(accessMat( surface, i, j )/100);
else if ( accessMat( surface, i, j ) >= 50 ) symbol = '+';
else if ( accessMat( surface, i, j ) >= 25 ) symbol = '.';
else symbol = '0';
int t;
int flag_team = 0;
for( t=0; t<num_teams; t++ )
if ( teams[t].x == i && teams[t].y == j ) { flag_team = 1; break; }
if ( flag_team ) printf("[%c]", symbol );
else {
int f;
int flag_focal = 0;
for( f=0; f<num_focal; f++ )
if ( focal[f].x == i && focal[f].y == j && focal[f].active == 1 ) { flag_focal = 1; break; }
if ( flag_focal ) printf("(%c)", symbol );
else printf(" %c ", symbol );
}
}
printf("|\n");
}
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
printf("Global residual: %f\n\n", global_residual);
}
#endif
/////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////KERNELS////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////
__global__ void gpuFunc_inicializar(float *S, float *SC, int rows, int columns)
{
int IDX_Thread = threadIdx.x;
int IDX_block = blockIdx.x;
int threads_per_block_x = blockDim.x;
int gid = IDX_Thread + (IDX_block * threads_per_block_x);
int i,j;
i = gid/columns;
j = gid%columns;
if(gid < (rows * columns)){
accessMat( S, i, j ) = 0.0;
accessMat( SC, i, j ) = 0.0;
}
}
__global__ void gpuFunc_actualizarCalor(float *S, float *SC, int rows, int columns)
{
int IDX_Thread = threadIdx.x;
int IDX_block = blockIdx.x;
int threads_per_block_x = blockDim.x;
int gid = IDX_Thread + (IDX_block * threads_per_block_x);
int i,j;
i = gid/columns;
j = gid%columns;
if(gid < (rows * columns)){
if(i != 0 && j != 0 && i < rows-1 && j < columns-1){
accessMat( S, i, j ) = (
accessMat( SC, i-1, j ) +
accessMat( SC, i+1, j ) +
accessMat( SC, i, j-1 ) +
accessMat( SC, i, j+1 ) ) / 4;
}
}
}
__global__ void gpuFunc_actualizarFocos(float *S, FocalPoint *focal, int num_focal, int columns)
{
int IDX_Thread = threadIdx.x;
int IDX_block = blockIdx.x;
int threads_per_block_x = blockDim.x;
int gid = IDX_Thread + (IDX_block * threads_per_block_x);
if(gid < num_focal){
if ( focal[gid].active == 1 ){
int x = focal[gid].x;
int y = focal[gid].y;
accessMat( S, x, y ) = focal[gid].heat;
}
}
}
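// Note: this kernel does not compute the true maximum residual; it only flags non-convergence
// by writing THRESHOLD into *globalResidual whenever some cell changed by >= THRESHOLD, which
// is all the host-side "global_residual < THRESHOLD" stability test needs.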
__global__ void gpuFunc_globalResidual(float *cSurface, float *cSurfaceCopy, int rows, int columns, float *globalResidual)
{
int IDX_Thread = threadIdx.x;
int IDX_block = blockIdx.x;
int threads_per_block_x = blockDim.x;
int gid = IDX_Thread + (IDX_block * threads_per_block_x);
int i,j;
i = gid/columns;
j = gid%columns;
if(gid < rows*columns){
if(fabs(accessMat(cSurfaceCopy,i,j) - accessMat(cSurface,i,j ) ) >= THRESHOLD){
*globalResidual =THRESHOLD;
}
}
}
__global__ void gpuFunc_movimiento(int num_teams, int num_focal, FocalPoint *focal, Team *teams)
{
int IDX_Thread = threadIdx.x;
int IDX_block = blockIdx.x;
int threads_per_block_x = blockDim.x;
int gid = IDX_Thread + (IDX_block * threads_per_block_x);
if(gid < num_teams){
int j,t;
t=gid;
/* 4.3.1. Choose nearest focal point */
float distance = FLT_MAX;
int target = -1;
for( j=0; j<num_focal; j++ ) {
if ( focal[j].active != 1 ) continue; // Skip non-active focal points
int local_distance = (focal[j].x - teams[t].x)*(focal[j].x - teams[t].x) + (focal[j].y - teams[t].y)*(focal[j].y - teams[t].y);
if ( local_distance < distance ) {
distance = local_distance;
target = j;
}
}
/* 4.3.2. Annotate target for the next stage */
teams[t].target = target;
/* 4.3.3. No active focal point to choose, no movement */
if ( target != -1 ){
/* 4.3.4. Move in the focal point direction */
if ( teams[t].type == 1 ) {
// Type 1: Can move in diagonal
if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
}
else if ( teams[t].type == 2 ) {
// Type 2: First in horizontal direction, then in vertical direction
if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
else if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
}
else {
// Type 3: First in vertical direction, then in horizontal direction
if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
else if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////
/*
* MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,t;
// Simulation data
int rows, columns, max_iter;
float *surface, *surfaceCopy;
int num_teams, num_focal;
Team *teams;
FocalPoint *focal;
/* 1. Read simulation arguments */
/* 1.1. Check minimum number of arguments */
if (argc<2) {
fprintf(stderr,"-- Error in arguments: No arguments\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
int read_from_file = ! strcmp( argv[1], "-f" );
/* 1.2. Read configuration from file */
if ( read_from_file ) {
/* 1.2.1. Open file */
if (argc<3) {
fprintf(stderr,"-- Error in arguments: file-name argument missing\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
FILE *args = cp_abrir_fichero( argv[2] );
if ( args == NULL ) {
fprintf(stderr,"-- Error in file: not found: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
/* 1.2.2. Read surface and maximum number of iterations */
int ok;
ok = fscanf(args, "%d %d %d", &rows, &columns, &max_iter);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading rows, columns, max_iter from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
if ( surface == NULL || surfaceCopy == NULL ) {
fprintf(stderr,"-- Error allocating: surface structures\n");
exit( EXIT_FAILURE );
}
/* 1.2.3. Teams information */
ok = fscanf(args, "%d", &num_teams );
if ( ok != 1 ) {
fprintf(stderr,"-- Error file, reading num_teams from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
ok = fscanf(args, "%d %d %d", &teams[i].x, &teams[i].y, &teams[i].type);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading team %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
}
/* 1.2.4. Focal points information */
ok = fscanf(args, "%d", &num_focal );
if ( ok != 1 ) {
fprintf(stderr,"-- Error in file: reading num_focal from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
ok = fscanf(args, "%d %d %d %d", &focal[i].x, &focal[i].y, &focal[i].start, &focal[i].heat);
if ( ok != 4 ) {
fprintf(stderr,"-- Error in file: reading focal point %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
focal[i].active = 0;
}
}
/* 1.3. Read configuration from arguments */
else {
/* 1.3.1. Check minimum number of arguments */
if (argc<6) {
fprintf(stderr, "-- Error in arguments: not enough arguments when reading configuration from the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
/* 1.3.2. Surface and maximum number of iterations */
rows = atoi( argv[1] );
columns = atoi( argv[2] );
max_iter = atoi( argv[3] );
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
/* 1.3.3. Teams information */
num_teams = atoi( argv[4] );
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
if ( argc < num_teams*3 + 5 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
teams[i].x = atoi( argv[5+i*3] );
teams[i].y = atoi( argv[6+i*3] );
teams[i].type = atoi( argv[7+i*3] );
}
/* 1.3.4. Focal points information */
int focal_args = 5 + i*3;
if ( argc < focal_args+1 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for the number of focal points\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
num_focal = atoi( argv[focal_args] );
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
if ( argc < focal_args + 1 + num_focal*4 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
focal[i].x = atoi( argv[focal_args+i*4+1] );
focal[i].y = atoi( argv[focal_args+i*4+2] );
focal[i].start = atoi( argv[focal_args+i*4+3] );
focal[i].heat = atoi( argv[focal_args+i*4+4] );
focal[i].active = 0;
}
/* 1.3.5. Sanity check: No extra arguments at the end of line */
if ( argc > focal_args+i*4+1 ) {
fprintf(stderr,"-- Error in arguments: extra arguments at the end of the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
}
#ifdef DEBUG
/* 1.4. Print arguments */
printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter);
printf("Arguments, Teams: %d, Focal points: %d\n", num_teams, num_focal );
for( i=0; i<num_teams; i++ ) {
printf("\tTeam %d, position (%d,%d), type: %d\n", i, teams[i].x, teams[i].y, teams[i].type );
}
for( i=0; i<num_focal; i++ ) {
printf("\tFocal_point %d, position (%d,%d), start time: %d, temperature: %d\n", i,
focal[i].x,
focal[i].y,
focal[i].start,
focal[i].heat );
}
#endif // DEBUG
/* 2. Select GPU and start global timer */
hipSetDevice(0);
hipDeviceSynchronize();
double ttotal = cp_Wtime();
/*
*
* START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT
*
*/
int ITEMS = rows * columns;
float *cSurface, *cSurfaceCopy;
FocalPoint *cFocal;
Team *cTeams;
float *cGlobalResidual;
hipMalloc( (void**) &cSurface, sizeof(float) * (int) ITEMS);
hipMalloc( (void**) &cSurfaceCopy, sizeof(float) * (int) ITEMS);
hipMalloc( (void**) &cFocal, sizeof(FocalPoint) * (int) num_focal);
hipMalloc( (void**) &cTeams, sizeof(Team) * (int) num_teams);
hipMalloc( (void**) &cGlobalResidual, sizeof(float));
int bloqShape = 256;
int gridShape = ITEMS/bloqShape;
if(ITEMS%bloqShape){
gridShape = gridShape + 1;
}
/* 3. Initialize surface */
hipLaunchKernelGGL(( gpuFunc_inicializar), dim3(gridShape),dim3(bloqShape), 0, 0, cSurface,cSurfaceCopy,rows,columns);
/* 4. Simulation */
int iter;
int flag_stability = 0;
int first_activation = 0;
for( iter=0; iter<max_iter && ! flag_stability; iter++ ) {
/* 4.1. Activate focal points */
int num_deactivated = 0;
for( i=0; i<num_focal; i++ ) {
if ( focal[i].start == iter ) {
focal[i].active = 1;
if ( ! first_activation ) first_activation = 1;
}
// Count focal points already deactivated by a team
if ( focal[i].active == 2 ) num_deactivated++;
}
hipMemcpy(cFocal,focal,sizeof(FocalPoint) * num_focal,hipMemcpyHostToDevice);
if ( ! first_activation )continue;
/* 4.2. Propagate heat (10 steps per each team movement) */
float global_residual = 0.0f;
int step;
for( step=0; step<10; step++ ) {
if(step%2){
/* 4.2.1. Update heat on active focal points */
hipLaunchKernelGGL(( gpuFunc_actualizarFocos), dim3(gridShape),dim3(bloqShape), 0, 0, cSurfaceCopy,cFocal,num_focal,columns);
/* 4.2.3. Update surface values (skip borders) */
hipLaunchKernelGGL(( gpuFunc_actualizarCalor), dim3(gridShape),dim3(bloqShape), 0, 0, cSurface,cSurfaceCopy,rows,columns);
}else{
/* 4.2.1. Update heat on active focal points */
hipLaunchKernelGGL(( gpuFunc_actualizarFocos), dim3(gridShape),dim3(bloqShape), 0, 0, cSurface,cFocal,num_focal,columns);
/* 4.2.3. Update surface values (skip borders) */
hipLaunchKernelGGL(( gpuFunc_actualizarCalor), dim3(gridShape),dim3(bloqShape), 0, 0, cSurfaceCopy,cSurface,rows,columns);
/* 4.2.4. Compute the maximum residual difference (absolute value) */
if(step==0){
if( num_deactivated == num_focal){
hipMemcpy(cGlobalResidual,&(global_residual),sizeof(float),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( gpuFunc_globalResidual), dim3(gridShape),dim3(bloqShape), 0, 0, cSurface,cSurfaceCopy,rows,columns,cGlobalResidual);
hipMemcpy(&global_residual,cGlobalResidual,sizeof(float),hipMemcpyDeviceToHost);
}
}
}
}
hipMemcpy(surface, cSurface, sizeof(float)* ITEMS, hipMemcpyDeviceToHost);
hipMemcpy(focal,cFocal,sizeof(FocalPoint) * num_focal,hipMemcpyDeviceToHost);
/* If the global residual is lower than THRESHOLD, we have reached enough stability, stop simulation at the end of this iteration */
if( num_deactivated == num_focal && global_residual < THRESHOLD ) flag_stability = 1;
if ( num_deactivated != num_focal ){
/* 4.3. Move teams */
hipMemcpy(cFocal,focal, sizeof(FocalPoint)* num_focal,hipMemcpyHostToDevice);
hipMemcpy(cTeams,teams, sizeof(Team)* num_teams,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( gpuFunc_movimiento), dim3(gridShape),dim3(bloqShape), 0, 0, num_teams, num_focal, cFocal, cTeams);
hipMemcpy(focal,cFocal, sizeof(FocalPoint)*num_focal,hipMemcpyDeviceToHost);
hipMemcpy(teams,cTeams, sizeof(Team)*num_teams,hipMemcpyDeviceToHost);
}
/* 4.4. Team actions */
for( t=0; t<num_teams; t++ ) {
/* 4.4.1. Deactivate the target focal point when it is reached */
int target = teams[t].target;
if ( target != -1 && focal[target].x == teams[t].x && focal[target].y == teams[t].y
&& focal[target].active == 1 )
focal[target].active = 2;
/* 4.4.2. Reduce heat in a circle around the team */
int radius;
// Influence area of fixed radius depending on type
if ( teams[t].type == 1 ) radius = RADIUS_TYPE_1;
else radius = RADIUS_TYPE_2_3;
for( i=teams[t].x-radius; i<=teams[t].x+radius; i++ ) {
for( j=teams[t].y-radius; j<=teams[t].y+radius; j++ ) {
if ( i<1 || i>=rows-1 || j<1 || j>=columns-1 ) continue; // Out of the heated surface
if ( (teams[t].x - i)*(teams[t].x - i) + (teams[t].y - j)*(teams[t].y - j) <= radius * radius ) {
accessMat( surface, i, j ) = accessMat( surface, i, j ) * ( 1 - 0.25 ); // Team efficiency factor
}
}
}
}
hipMemcpy(cSurface, surface, sizeof(float)* ITEMS, hipMemcpyHostToDevice);
hipMemcpy(cFocal, focal, sizeof(FocalPoint)* num_focal, hipMemcpyHostToDevice);
#ifdef DEBUG
/* 4.5. DEBUG: Print the current state of the simulation at the end of each iteration */
print_status( iter, rows, columns, surface, num_teams, teams, num_focal, focal, global_residual );
#endif // DEBUG
}
hipFree(cSurface);
hipFree(cSurfaceCopy);
hipFree(cFocal);
hipFree(cTeams);
hipDeviceReset();
/*
*
* STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT
*
*/
/* 5. Stop global time */
hipDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 6. Output for leaderboard */
printf("\n");
/* 6.1. Total computation time */
printf("Time: %lf\n", ttotal );
/* 6.2. Results: Number of iterations, position of teams, residual heat on the focal points */
printf("Result: %d", iter);
/*
for (i=0; i<num_teams; i++)
printf(" %d %d", teams[i].x, teams[i].y );
*/
for (i=0; i<num_focal; i++)
printf(" %.6f", accessMat( surface, focal[i].x, focal[i].y ) );
printf("\n");
/* 7. Free resources */
free( teams );
free( focal );
free( surface );
free( surfaceCopy );
/* 8. End */
return 0;
}
| 3e2bd8be3546031a062c347bcb5dfd21123d7e34.cu | // User: [email protected]
// ExecutionRequest[P:'extinguishing.cu',P:1,T:1,args:'',q:'cudalb']
// May 16 2019 19:51:26
#include "cputils.h" // Added by tablon
/*
* Simplified simulation of fire extinguishing
*
* Computacion Paralela, Grado en Informatica (Universidad de Valladolid)
* 2018/2019
*
* v1.4
*
* (c) 2019 Arturo Gonzalez Escribano
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include "cputils.h"
#include <cuda.h>
#define RADIUS_TYPE_1 3
#define RADIUS_TYPE_2_3 9
#define THRESHOLD 0.1f
/* Structure to store data of an extinguishing team */
typedef struct {
int x,y;
int type;
int target;
} Team;
/* Structure to store data of a fire focal point */
typedef struct {
int x,y;
int start;
int heat;
int active; // States: 0 Not yet activated; 1 Active; 2 Deactivated by a team
} FocalPoint;
/* Macro function to simplify accessing with two coordinates to a flattened array */
#define accessMat( arr, exp1, exp2 ) arr[ (exp1) * columns + (exp2) ]
/*
* Function: Print usage line in stderr
*/
void show_usage( char *program_name ) {
fprintf(stderr,"Usage: %s <config_file> | <command_line_args>\n", program_name );
fprintf(stderr,"\t<config_file> ::= -f <file_name>\n");
fprintf(stderr,"\t<command_line_args> ::= <rows> <columns> <maxIter> <numTeams> [ <teamX> <teamY> <teamType> ... ] <numFocalPoints> [ <focalX> <focalY> <focalStart> <focalTemperature> ... ]\n");
fprintf(stderr,"\n");
}
#ifdef DEBUG
/*
* Function: Print the current state of the simulation
*/
void print_status( int iteration, int rows, int columns, float *surface, int num_teams, Team *teams, int num_focal, FocalPoint *focal, float global_residual ) {
/*
* You don't need to optimize this function, it is only for pretty printing and debugging purposes.
* It is not compiled in the production versions of the program.
* Thus, it is never used when measuring times in the leaderboard
*/
int i,j;
printf("Iteration: %d\n", iteration );
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
for( i=0; i<rows; i++ ) {
printf("|");
for( j=0; j<columns; j++ ) {
char symbol;
if ( accessMat( surface, i, j ) >= 1000 ) symbol = '*';
else if ( accessMat( surface, i, j ) >= 100 ) symbol = '0' + (int)(accessMat( surface, i, j )/100);
else if ( accessMat( surface, i, j ) >= 50 ) symbol = '+';
else if ( accessMat( surface, i, j ) >= 25 ) symbol = '.';
else symbol = '0';
int t;
int flag_team = 0;
for( t=0; t<num_teams; t++ )
if ( teams[t].x == i && teams[t].y == j ) { flag_team = 1; break; }
if ( flag_team ) printf("[%c]", symbol );
else {
int f;
int flag_focal = 0;
for( f=0; f<num_focal; f++ )
if ( focal[f].x == i && focal[f].y == j && focal[f].active == 1 ) { flag_focal = 1; break; }
if ( flag_focal ) printf("(%c)", symbol );
else printf(" %c ", symbol );
}
}
printf("|\n");
}
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
printf("Global residual: %f\n\n", global_residual);
}
#endif
/////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////KERNELS////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////
__global__ void gpuFunc_inicializar(float *S, float *SC, int rows, int columns)
{
int IDX_Thread = threadIdx.x;
int IDX_block = blockIdx.x;
int threads_per_block_x = blockDim.x;
int gid = IDX_Thread + (IDX_block * threads_per_block_x);
int i,j;
i = gid/columns;
j = gid%columns;
if(gid < (rows * columns)){
accessMat( S, i, j ) = 0.0;
accessMat( SC, i, j ) = 0.0;
}
}
__global__ void gpuFunc_actualizarCalor(float *S, float *SC, int rows, int columns)
{
int IDX_Thread = threadIdx.x;
int IDX_block = blockIdx.x;
int threads_per_block_x = blockDim.x;
int gid = IDX_Thread + (IDX_block * threads_per_block_x);
int i,j;
i = gid/columns;
j = gid%columns;
if(gid < (rows * columns)){
if(i != 0 && j != 0 && i < rows-1 && j < columns-1){
accessMat( S, i, j ) = (
accessMat( SC, i-1, j ) +
accessMat( SC, i+1, j ) +
accessMat( SC, i, j-1 ) +
accessMat( SC, i, j+1 ) ) / 4;
}
}
}
__global__ void gpuFunc_actualizarFocos(float *S, FocalPoint *focal, int num_focal, int columns)
{
int IDX_Thread = threadIdx.x;
int IDX_block = blockIdx.x;
int threads_per_block_x = blockDim.x;
int gid = IDX_Thread + (IDX_block * threads_per_block_x);
if(gid < num_focal){
if ( focal[gid].active == 1 ){
int x = focal[gid].x;
int y = focal[gid].y;
accessMat( S, x, y ) = focal[gid].heat;
}
}
}
__global__ void gpuFunc_globalResidual(float *cSurface, float *cSurfaceCopy, int rows, int columns, float *globalResidual)
{
int IDX_Thread = threadIdx.x;
int IDX_block = blockIdx.x;
int threads_per_block_x = blockDim.x;
int gid = IDX_Thread + (IDX_block * threads_per_block_x);
int i,j;
i = gid/columns;
j = gid%columns;
if(gid < rows*columns){
if(fabs(accessMat(cSurfaceCopy,i,j) - accessMat(cSurface,i,j ) ) >= THRESHOLD){
*globalResidual =THRESHOLD;
}
}
}
__global__ void gpuFunc_movimiento(int num_teams, int num_focal, FocalPoint *focal, Team *teams)
{
int IDX_Thread = threadIdx.x;
int IDX_block = blockIdx.x;
int threads_per_block_x = blockDim.x;
int gid = IDX_Thread + (IDX_block * threads_per_block_x);
if(gid < num_teams){
int j,t;
t=gid;
/* 4.3.1. Choose nearest focal point */
float distance = FLT_MAX;
int target = -1;
for( j=0; j<num_focal; j++ ) {
if ( focal[j].active != 1 ) continue; // Skip non-active focal points
int local_distance = (focal[j].x - teams[t].x)*(focal[j].x - teams[t].x) + (focal[j].y - teams[t].y)*(focal[j].y - teams[t].y);
if ( local_distance < distance ) {
distance = local_distance;
target = j;
}
}
/* 4.3.2. Annotate target for the next stage */
teams[t].target = target;
/* 4.3.3. No active focal point to choose, no movement */
if ( target != -1 ){
/* 4.3.4. Move in the focal point direction */
if ( teams[t].type == 1 ) {
// Type 1: Can move in diagonal
if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
}
else if ( teams[t].type == 2 ) {
// Type 2: First in horizontal direction, then in vertical direction
if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
else if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
}
else {
// Type 3: First in vertical direction, then in horizontal direction
if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
else if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////
/*
* MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,t;
// Simulation data
int rows, columns, max_iter;
float *surface, *surfaceCopy;
int num_teams, num_focal;
Team *teams;
FocalPoint *focal;
/* 1. Read simulation arguments */
/* 1.1. Check minimum number of arguments */
if (argc<2) {
fprintf(stderr,"-- Error in arguments: No arguments\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
int read_from_file = ! strcmp( argv[1], "-f" );
/* 1.2. Read configuration from file */
if ( read_from_file ) {
/* 1.2.1. Open file */
if (argc<3) {
fprintf(stderr,"-- Error in arguments: file-name argument missing\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
FILE *args = cp_abrir_fichero( argv[2] );
if ( args == NULL ) {
fprintf(stderr,"-- Error in file: not found: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
/* 1.2.2. Read surface and maximum number of iterations */
int ok;
ok = fscanf(args, "%d %d %d", &rows, &columns, &max_iter);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading rows, columns, max_iter from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
if ( surface == NULL || surfaceCopy == NULL ) {
fprintf(stderr,"-- Error allocating: surface structures\n");
exit( EXIT_FAILURE );
}
/* 1.2.3. Teams information */
ok = fscanf(args, "%d", &num_teams );
if ( ok != 1 ) {
fprintf(stderr,"-- Error file, reading num_teams from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
ok = fscanf(args, "%d %d %d", &teams[i].x, &teams[i].y, &teams[i].type);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading team %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
}
/* 1.2.4. Focal points information */
ok = fscanf(args, "%d", &num_focal );
if ( ok != 1 ) {
fprintf(stderr,"-- Error in file: reading num_focal from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
ok = fscanf(args, "%d %d %d %d", &focal[i].x, &focal[i].y, &focal[i].start, &focal[i].heat);
if ( ok != 4 ) {
fprintf(stderr,"-- Error in file: reading focal point %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
focal[i].active = 0;
}
}
/* 1.3. Read configuration from arguments */
else {
/* 1.3.1. Check minimum number of arguments */
if (argc<6) {
fprintf(stderr, "-- Error in arguments: not enough arguments when reading configuration from the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
/* 1.3.2. Surface and maximum number of iterations */
rows = atoi( argv[1] );
columns = atoi( argv[2] );
max_iter = atoi( argv[3] );
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
/* 1.3.3. Teams information */
num_teams = atoi( argv[4] );
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
if ( argc < num_teams*3 + 5 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
teams[i].x = atoi( argv[5+i*3] );
teams[i].y = atoi( argv[6+i*3] );
teams[i].type = atoi( argv[7+i*3] );
}
/* 1.3.4. Focal points information */
int focal_args = 5 + i*3;
if ( argc < focal_args+1 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for the number of focal points\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
num_focal = atoi( argv[focal_args] );
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
if ( argc < focal_args + 1 + num_focal*4 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
focal[i].x = atoi( argv[focal_args+i*4+1] );
focal[i].y = atoi( argv[focal_args+i*4+2] );
focal[i].start = atoi( argv[focal_args+i*4+3] );
focal[i].heat = atoi( argv[focal_args+i*4+4] );
focal[i].active = 0;
}
/* 1.3.5. Sanity check: No extra arguments at the end of line */
if ( argc > focal_args+i*4+1 ) {
fprintf(stderr,"-- Error in arguments: extra arguments at the end of the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
}
#ifdef DEBUG
/* 1.4. Print arguments */
printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter);
printf("Arguments, Teams: %d, Focal points: %d\n", num_teams, num_focal );
for( i=0; i<num_teams; i++ ) {
printf("\tTeam %d, position (%d,%d), type: %d\n", i, teams[i].x, teams[i].y, teams[i].type );
}
for( i=0; i<num_focal; i++ ) {
printf("\tFocal_point %d, position (%d,%d), start time: %d, temperature: %d\n", i,
focal[i].x,
focal[i].y,
focal[i].start,
focal[i].heat );
}
#endif // DEBUG
/* 2. Select GPU and start global timer */
cudaSetDevice(0);
cudaDeviceSynchronize();
double ttotal = cp_Wtime();
/*
*
* START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT
*
*/
int ITEMS = rows * columns;
float *cSurface, *cSurfaceCopy;
FocalPoint *cFocal;
Team *cTeams;
float *cGlobalResidual;
cudaMalloc( (void**) &cSurface, sizeof(float) * (int) ITEMS);
cudaMalloc( (void**) &cSurfaceCopy, sizeof(float) * (int) ITEMS);
cudaMalloc( (void**) &cFocal, sizeof(FocalPoint) * (int) num_focal);
cudaMalloc( (void**) &cTeams, sizeof(Team) * (int) num_teams);
cudaMalloc( (void**) &cGlobalResidual, sizeof(float));
int bloqShape = 256;
int gridShape = ITEMS/bloqShape;
if(ITEMS%bloqShape){
gridShape = gridShape + 1;
}
/* 3. Initialize surface */
gpuFunc_inicializar<<<gridShape,bloqShape>>>(cSurface,cSurfaceCopy,rows,columns);
/* 4. Simulation */
int iter;
int flag_stability = 0;
int first_activation = 0;
for( iter=0; iter<max_iter && ! flag_stability; iter++ ) {
/* 4.1. Activate focal points */
int num_deactivated = 0;
for( i=0; i<num_focal; i++ ) {
if ( focal[i].start == iter ) {
focal[i].active = 1;
if ( ! first_activation ) first_activation = 1;
}
// Count focal points already deactivated by a team
if ( focal[i].active == 2 ) num_deactivated++;
}
cudaMemcpy(cFocal,focal,sizeof(FocalPoint) * num_focal,cudaMemcpyHostToDevice);
if ( ! first_activation )continue;
/* 4.2. Propagate heat (10 steps per each team movement) */
float global_residual = 0.0f;
int step;
for( step=0; step<10; step++ ) {
if(step%2){
/* 4.2.1. Update heat on active focal points */
gpuFunc_actualizarFocos<<<gridShape,bloqShape>>>(cSurfaceCopy,cFocal,num_focal,columns);
/* 4.2.3. Update surface values (skip borders) */
gpuFunc_actualizarCalor<<<gridShape,bloqShape>>>(cSurface,cSurfaceCopy,rows,columns);
}else{
/* 4.2.1. Update heat on active focal points */
gpuFunc_actualizarFocos<<<gridShape,bloqShape>>>(cSurface,cFocal,num_focal,columns);
/* 4.2.3. Update surface values (skip borders) */
gpuFunc_actualizarCalor<<<gridShape,bloqShape>>>(cSurfaceCopy,cSurface,rows,columns);
/* 4.2.4. Compute the maximum residual difference (absolute value) */
if(step==0){
if( num_deactivated == num_focal){
cudaMemcpy(cGlobalResidual,&(global_residual),sizeof(float),cudaMemcpyHostToDevice);
gpuFunc_globalResidual<<<gridShape,bloqShape>>>(cSurface,cSurfaceCopy,rows,columns,cGlobalResidual);
cudaMemcpy(&global_residual,cGlobalResidual,sizeof(float),cudaMemcpyDeviceToHost);
}
}
}
}
cudaMemcpy(surface, cSurface, sizeof(float)* ITEMS, cudaMemcpyDeviceToHost);
cudaMemcpy(focal,cFocal,sizeof(FocalPoint) * num_focal,cudaMemcpyDeviceToHost);
/* If the global residual is lower than THRESHOLD, we have reached enough stability, stop simulation at the end of this iteration */
if( num_deactivated == num_focal && global_residual < THRESHOLD ) flag_stability = 1;
if ( num_deactivated != num_focal ){
/* 4.3. Move teams */
cudaMemcpy(cFocal,focal, sizeof(FocalPoint)* num_focal,cudaMemcpyHostToDevice);
cudaMemcpy(cTeams,teams, sizeof(Team)* num_teams,cudaMemcpyHostToDevice);
gpuFunc_movimiento<<<gridShape,bloqShape>>>(num_teams, num_focal, cFocal, cTeams);
cudaMemcpy(focal,cFocal, sizeof(FocalPoint)*num_focal,cudaMemcpyDeviceToHost);
cudaMemcpy(teams,cTeams, sizeof(Team)*num_teams,cudaMemcpyDeviceToHost);
}
/* 4.4. Team actions */
for( t=0; t<num_teams; t++ ) {
/* 4.4.1. Deactivate the target focal point when it is reached */
int target = teams[t].target;
if ( target != -1 && focal[target].x == teams[t].x && focal[target].y == teams[t].y
&& focal[target].active == 1 )
focal[target].active = 2;
/* 4.4.2. Reduce heat in a circle around the team */
int radius;
// Influence area of fixed radius depending on type
if ( teams[t].type == 1 ) radius = RADIUS_TYPE_1;
else radius = RADIUS_TYPE_2_3;
for( i=teams[t].x-radius; i<=teams[t].x+radius; i++ ) {
for( j=teams[t].y-radius; j<=teams[t].y+radius; j++ ) {
if ( i<1 || i>=rows-1 || j<1 || j>=columns-1 ) continue; // Out of the heated surface
if ( (teams[t].x - i)*(teams[t].x - i) + (teams[t].y - j)*(teams[t].y - j) <= radius * radius ) {
accessMat( surface, i, j ) = accessMat( surface, i, j ) * ( 1 - 0.25 ); // Team efficiency factor
}
}
}
}
cudaMemcpy(cSurface, surface, sizeof(float)* ITEMS, cudaMemcpyHostToDevice);
cudaMemcpy(cFocal, focal, sizeof(FocalPoint)* num_focal, cudaMemcpyHostToDevice);
#ifdef DEBUG
/* 4.5. DEBUG: Print the current state of the simulation at the end of each iteration */
print_status( iter, rows, columns, surface, num_teams, teams, num_focal, focal, global_residual );
#endif // DEBUG
}
cudaFree(cSurface);
cudaFree(cSurfaceCopy);
cudaFree(cFocal);
cudaFree(cTeams);
cudaDeviceReset();
/*
*
* STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT
*
*/
/* 5. Stop global time */
cudaDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 6. Output for leaderboard */
printf("\n");
/* 6.1. Total computation time */
printf("Time: %lf\n", ttotal );
/* 6.2. Results: Number of iterations, position of teams, residual heat on the focal points */
printf("Result: %d", iter);
/*
for (i=0; i<num_teams; i++)
printf(" %d %d", teams[i].x, teams[i].y );
*/
for (i=0; i<num_focal; i++)
printf(" %.6f", accessMat( surface, focal[i].x, focal[i].y ) );
printf("\n");
/* 7. Free resources */
free( teams );
free( focal );
free( surface );
free( surfaceCopy );
/* 8. End */
return 0;
}
|
a6ec82dd3142dc0e04096913f16eb2c7cdeeb6ed.hip | // !!! This is a file automatically generated by hipify!!!
#include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<stdio.h>
#include<stdlib.h>
#include "hip/hip_texture_types.h"
#include<math.h>
#include "hip/hip_runtime.h"
//#include "cpu_anim.h" //texture
#define size 256
texture<float, hipTextureType2D, hipReadModeElementType> texRef;
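// transformKernel rotates the image by theta about its centre: coordinates are normalised to
// [0,1], shifted so the centre is the origin, rotated, shifted back, and sampled through texRef
// (normalized coordinates with hardware bilinear filtering, as configured in main).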
__global__ void transformKernel(float* input, float* output, int width, int height, float theta)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = x / (float)width;
float v = y / (float)height;
// coordinate transform
u -= 0.5f;
v -= 0.5f;
float tu = u * cosf(theta) - v * sinf(theta) + 0.5f;
float tv = v * cosf(theta) + u * sinf(theta) + 0.5f;
int col = tu*width;
int row = tv*height;
//output[y*width + x] = input[0];
output[y*width + x] = tex2D(texRef, tu, tv);
}
int main()
{
int width = 3840, height = 1920;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipArray*cuArray;
hipMallocArray(&cuArray, &channelDesc, width, height);
float*h_data = (float*)malloc(width*height*sizeof(float));
for (int i = 0; i<height; ++i)
{
for (int j = 0; j<width; ++j)
{
h_data[i*width + j] = i*width + j;
}
}
hipMemcpyToArray(cuArray, 0, 0, h_data, width*height*sizeof(float), hipMemcpyHostToDevice);
texRef.addressMode[0] = hipAddressModeWrap;
texRef.addressMode[1] = hipAddressModeWrap;
texRef.filterMode = hipFilterModeLinear;
texRef.normalized = true;
hipBindTextureToArray(texRef, cuArray, channelDesc);
float*output;
hipMalloc(&output, width*height*sizeof(float));
dim3 dimBlock(16, 16);
dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y);
float angle = 30;
float *input = NULL;
hipMalloc(&input, width*height*sizeof(float));
hipMemcpy(input, h_data, width*height*sizeof(float), hipMemcpyHostToDevice);
transformKernel << <dimGrid, dimBlock >> >(input, output, width, height, angle);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, NULL);
for (int i = 0; i < 1000; i++)
{
transformKernel << <dimGrid, dimBlock >> >(input, output, width, height, angle);
hipGetLastError();
}
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
float costtime;
hipEventElapsedTime(&costtime, start, stop);
printf("kernel run time: %f ms\n", costtime);
float*hostPtr = (float*)malloc(sizeof(float)*width*height);
hipMemcpy(hostPtr, output, sizeof(float)*width*height, hipMemcpyDeviceToHost);
/*for (int i = 0; i<height; ++i)
{
for (int j = 0; j<width; ++j)
{
printf("%f\n", hostPtr[i*width + j]);
}
printf("\n");
}*/
free(hostPtr);
hipFreeArray(cuArray);
hipFree(output);
system("pause");
} | a6ec82dd3142dc0e04096913f16eb2c7cdeeb6ed.cu | #include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<stdio.h>
#include<stdlib.h>
#include "cuda_texture_types.h"
#include<math.h>
#include "cuda.h"
//#include "cpu_anim.h" //调用texture的时候必须加上这个头文件
#define size 256
texture<float, cudaTextureType2D, cudaReadModeElementType> texRef;
__global__ void transformKernel(float* input, float* output, int width, int height, float theta)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = x / (float)width;
float v = y / (float)height;
// coordinate transform
u -= 0.5f;
v -= 0.5f;
float tu = u * cosf(theta) - v * sinf(theta) + 0.5f;
float tv = v * cosf(theta) + u * sinf(theta) + 0.5f;
int col = tu*width;
int row = tv*height;
//output[y*width + x] = input[0];
output[y*width + x] = tex2D(texRef, tu, tv);
}
int main()
{
int width = 3840, height = 1920;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray*cuArray;
cudaMallocArray(&cuArray, &channelDesc, width, height);
float*h_data = (float*)malloc(width*height*sizeof(float));
for (int i = 0; i<height; ++i)
{
for (int j = 0; j<width; ++j)
{
h_data[i*width + j] = i*width + j;
}
}
cudaMemcpyToArray(cuArray, 0, 0, h_data, width*height*sizeof(float), cudaMemcpyHostToDevice);
texRef.addressMode[0] = cudaAddressModeWrap;
texRef.addressMode[1] = cudaAddressModeWrap;
texRef.filterMode = cudaFilterModeLinear;
texRef.normalized = true;
cudaBindTextureToArray(texRef, cuArray, channelDesc);
float*output;
cudaMalloc(&output, width*height*sizeof(float));
dim3 dimBlock(16, 16);
dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y);
float angle = 30;
float *input = NULL;
cudaMalloc(&input, width*height*sizeof(float));
cudaMemcpy(input, h_data, width*height*sizeof(float), cudaMemcpyHostToDevice);
transformKernel << <dimGrid, dimBlock >> >(input, output, width, height, angle);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, NULL);
for (int i = 0; i < 1000; i++)
{
transformKernel << <dimGrid, dimBlock >> >(input, output, width, height, angle);
cudaGetLastError();
}
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
float costtime;
cudaEventElapsedTime(&costtime, start, stop);
printf("kernel run time: %f ms\n", costtime);
float*hostPtr = (float*)malloc(sizeof(float)*width*height);
cudaMemcpy(hostPtr, output, sizeof(float)*width*height, cudaMemcpyDeviceToHost);
/*for (int i = 0; i<height; ++i)
{
for (int j = 0; j<width; ++j)
{
printf("%f\n", hostPtr[i*width + j]);
}
printf("\n");
}*/
free(hostPtr);
cudaFreeArray(cuArray);
cudaFree(output);
system("pause");
} |
6599a9e97594f1836f7ce779ac8d3d220dfd713d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from zidr into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_zidr_smoothing_1_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex *drs,
magmaDoubleComplex *dr,
magmaDoubleComplex *dt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dt[ i+j*num_rows ] = drs[ i+j*num_rows ] - dr[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dt = drs - dr
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
drs magmaDoubleComplex_ptr
vector
@param[in]
dr magmaDoubleComplex_ptr
vector
@param[in,out]
dt magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zidr_smoothing_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex_ptr drs,
magmaDoubleComplex_ptr dr,
magmaDoubleComplex_ptr dt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zidr_smoothing_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, drs, dr, dt );
return MAGMA_SUCCESS;
}
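/* Illustrative call (variable names below are placeholders, not taken from this file):
   an IDR(s) smoothing step with m x n device arrays drs, dr, dt and a magma_queue_t queue would do
       magma_zidr_smoothing_1( m, n, drs, dr, dt, queue ); // dt = drs - dr
*/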
__global__ void
magma_zidr_smoothing_2_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex omega,
magmaDoubleComplex *dx,
magmaDoubleComplex *dxs )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ]
- omega * dx[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dxs = dxs - gamma*(dxs-dx)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
dx magmaDoubleComplex_ptr
vector
@param[in,out]
dxs magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zidr_smoothing_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr dxs,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zidr_smoothing_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, omega, dx, dxs);
return MAGMA_SUCCESS;
}
| 6599a9e97594f1836f7ce779ac8d3d220dfd713d.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from zidr into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_zidr_smoothing_1_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex *drs,
magmaDoubleComplex *dr,
magmaDoubleComplex *dt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dt[ i+j*num_rows ] = drs[ i+j*num_rows ] - dr[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dt = drs - dr
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
drs magmaDoubleComplex_ptr
vector
@param[in]
dr magmaDoubleComplex_ptr
vector
@param[in,out]
dt magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zidr_smoothing_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex_ptr drs,
magmaDoubleComplex_ptr dr,
magmaDoubleComplex_ptr dt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zidr_smoothing_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, drs, dr, dt );
return MAGMA_SUCCESS;
}
__global__ void
magma_zidr_smoothing_2_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex omega,
magmaDoubleComplex *dx,
magmaDoubleComplex *dxs )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ]
- omega * dx[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dxs = dxs - gamma*(dxs-dx)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
dx magmaDoubleComplex_ptr
vector
@param[in,out]
dxs magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zidr_smoothing_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr dxs,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zidr_smoothing_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, omega, dx, dxs);
return MAGMA_SUCCESS;
}
|
3bc57b4ef3249f96e55f37ba213231b48a938c35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
__global__ void mykernel(){
printf("Hello world from device!\n");
} /* end kernel */
int main(void)
{
hipLaunchKernelGGL(( mykernel), dim3(1),dim3(1), 0, 0, );
checkKERNEL()
printf("Hello World from Host\n");
return 0;
} /* end main */
| 3bc57b4ef3249f96e55f37ba213231b48a938c35.cu | /*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
__global__ void mykernel(){
printf("Hello world from device!\n");
} /* end kernel */
int main(void)
{
mykernel<<<1,1>>>();
checkKERNEL()
printf("Hello World from Host\n");
return 0;
} /* end main */
|
54cb4ab9ef36feaff3a830b57930941ee969ea16.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include"cuda_helper.cuh"
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/generate.h>
#include<thrust/sort.h>
#include<thrust/copy.h>
#include<thrust/sequence.h>
#define NUM_ELEM (1024*1024)
#define NUM_ELEM_START (1024*1024)
#define NUM_ELEM_END (1024*1024*10)
void _sortThrustTest(){
thrust::host_vector<u32> host_arr(NUM_ELEM);
thrust::generate(host_arr.begin(),host_arr.end(),rand);
thrust::device_vector<u32> device_arr = host_arr;
thrust::sort(device_arr.begin(),device_arr.end());
thrust::sort(host_arr.begin(),host_arr.end());
thrust::host_vector<u32> host_sorted_arr = device_arr;
bool flag = false;
for (u32 i = 0; i < NUM_ELEM; i++){
if (host_sorted_arr[i] != host_arr[i])
flag = true;
}
if (flag == false)
printf("> Test passed.\n");
else
printf("> Test failed.\n");
}
long int reduce_serial(const int *__restrict__ const host_raw_ptr,const int num_elements){
long int sum = 0;
for (int i = 0; i < num_elements; i++)
sum += host_raw_ptr[i];
return sum;
}
long int reduce_openmp(const int *__restrict__ const host_raw_ptr, const int num_elements){
long int sum = 0;
#pragma omp parallel for reduction(+:sum) num_threads(4)
for (int i = 0; i < num_elements; i++)
sum += host_raw_ptr[i];
return sum;
}
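// _reduceThrustTest benchmarks the same reduction four ways for growing input sizes:
// thrust::reduce on the device (plus the host-to-device copy time), thrust::reduce on the host,
// an OpenMP parallel-for reduction, and a plain serial loop, checking that all sums agree.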
void _reduceThrustTest(){
int num_devices;
checkCudaErrors(hipGetDeviceCount(&num_devices));
printf("\n> GPU num: %d.", num_devices);
int cur_device = 0;
checkCudaErrors(hipSetDevice(cur_device));
struct hipDeviceProp_t device_prop;
checkCudaErrors(hipGetDeviceProperties(&device_prop, cur_device));
printf("\n> Using CUDA Device %u. Device ID: %s on PCI-E %d",cur_device,device_prop.name,device_prop.pciBusID);
for (unsigned long num_elem = NUM_ELEM_START; num_elem < NUM_ELEM_END; num_elem*=2){
const size_t nbytes= sizeof(int)*num_elem;
printf("\n> Reducing %lu data items (%lu MB)", num_elem, (nbytes / 1024 / 1024));
float c2d_t, reduce_d_t, reduce_h_t, reduce_h_mp_t, reduce_h_serial_t;
hipEvent_t c2d_start, c2d_stop;
hipEvent_t sort_d_start, sort_d_stop;
checkCudaErrors(hipEventCreate(&c2d_start));
checkCudaErrors(hipEventCreate(&c2d_stop));
checkCudaErrors(hipEventCreate(&sort_d_start));
checkCudaErrors(hipEventCreate(&sort_d_stop));
thrust::host_vector<int> host_arr(num_elem);
thrust::sequence(host_arr.begin(),host_arr.end());
//copy to device
checkCudaErrors(hipEventRecord(c2d_start,0));
thrust::device_vector<int> device_arr = host_arr;
checkCudaErrors(hipEventRecord(c2d_stop, 0));
checkCudaErrors(hipEventSynchronize(c2d_stop));
// sort on device
checkCudaErrors(hipEventRecord(sort_d_start, 0));
const long int sum_device = thrust::reduce(device_arr.begin(),device_arr.end());
checkCudaErrors(hipEventRecord(sort_d_stop, 0));
checkCudaErrors(hipEventSynchronize(sort_d_stop));
//sort on host
clock_t start, stop;
start = clock();
const long int sum_host = thrust::reduce(host_arr.begin(),host_arr.end());
stop = clock();
reduce_h_t = 1000.0f * (stop - start) / CLOCKS_PER_SEC; // convert clock() ticks to milliseconds
// allocate host memory
int * const host_raw_ptr_2 = (int*)malloc(nbytes);
int * p2 = host_raw_ptr_2;
for (int i = 0; i < num_elem; i++)
*p2++ = host_arr[i];
// host_openmp
start = clock();
const long int sum_host_openmp = reduce_openmp(host_raw_ptr_2,num_elem);
stop = clock();
reduce_h_mp_t = 1000.0f * (stop - start) / CLOCKS_PER_SEC; // ticks -> ms
// host_serial
start = clock();
const long int sum_host_serial = reduce_serial(host_raw_ptr_2, num_elem);
stop = clock();
reduce_h_serial_t = 1000.0f * (stop - start) / CLOCKS_PER_SEC; // ticks -> ms
free(host_raw_ptr_2);
if ((sum_device==sum_host)&& (sum_host_serial == sum_host_openmp))
printf("\n> reduction matched");
else
printf("\n> reduction failed");
checkCudaErrors(hipEventElapsedTime(&c2d_t, c2d_start, c2d_stop));
checkCudaErrors(hipEventElapsedTime(&reduce_d_t, sort_d_start, sort_d_stop));
printf("\n> copy to device : %0.2fms", c2d_t);
printf("\n> reduce on device: %0.2fms", reduce_d_t);
printf("\n> total on device : %0.2fms", reduce_d_t + c2d_t);
printf("\n> Thrust reduce on host: %0.2fms", reduce_h_t);
printf("\n> serial reduce on host: %0.2fms", reduce_h_serial_t);
printf("\n> openMP reduce on host: %0.2fms", reduce_h_mp_t);
checkCudaErrors(hipEventDestroy(c2d_start));
checkCudaErrors(hipEventDestroy(c2d_stop));
checkCudaErrors(hipEventDestroy(sort_d_start));
checkCudaErrors(hipEventDestroy(sort_d_stop));
}
checkCudaErrors(hipDeviceReset());
}
| 54cb4ab9ef36feaff3a830b57930941ee969ea16.cu | #pragma once
#include"cuda_helper.cuh"
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/generate.h>
#include<thrust/sort.h>
#include<thrust/copy.h>
#include<thrust/sequence.h>
#define NUM_ELEM (1024*1024)
#define NUM_ELEM_START (1024*1024)
#define NUM_ELEM_END (1024*1024*10)
void _sortThrustTest(){
thrust::host_vector<u32> host_arr(NUM_ELEM);
thrust::generate(host_arr.begin(),host_arr.end(),rand);
thrust::device_vector<u32> device_arr = host_arr;
thrust::sort(device_arr.begin(),device_arr.end());
thrust::sort(host_arr.begin(),host_arr.end());
thrust::host_vector<u32> host_sorted_arr = device_arr;
bool flag = false;
for (u32 i = 0; i < NUM_ELEM; i++){
if (host_sorted_arr[i] != host_arr[i])
flag = true;
}
if (flag == false)
printf("> Test passed.\n");
else
printf("> Test failed.\n");
}
long int reduce_serial(const int *__restrict__ const host_raw_ptr,const int num_elements){
long int sum = 0;
for (int i = 0; i < num_elements; i++)
sum += host_raw_ptr[i];
return sum;
}
long int reduce_openmp(const int *__restrict__ const host_raw_ptr, const int num_elements){
long int sum = 0;
#pragma omp parallel for reduction(+:sum) num_threads(4)
for (int i = 0; i < num_elements; i++)
sum += host_raw_ptr[i];
return sum;
}
void _reduceThrustTest(){
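// Compares thrust::reduce on the device against thrust::reduce on the host, an OpenMP reduction and a plain serial loop, timing each variant.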
int num_devices;
checkCudaErrors(cudaGetDeviceCount(&num_devices));
printf("\n> GPU num: %d.", num_devices);
int cur_device = 0;
checkCudaErrors(cudaSetDevice(cur_device));
struct cudaDeviceProp device_prop;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, cur_device));
printf("\n> Using CUDA Device %u. Device ID: %s on PCI-E %d",cur_device,device_prop.name,device_prop.pciBusID);
for (unsigned long num_elem = NUM_ELEM_START; num_elem < NUM_ELEM_END; num_elem*=2){
const size_t nbytes= sizeof(int)*num_elem;
printf("\n> Reducing %lu data items (%lu MB)", num_elem, (nbytes / 1024 / 1024));
float c2d_t, reduce_d_t, reduce_h_t, reduce_h_mp_t, reduce_h_serial_t;
cudaEvent_t c2d_start, c2d_stop;
cudaEvent_t sort_d_start, sort_d_stop;
checkCudaErrors(cudaEventCreate(&c2d_start));
checkCudaErrors(cudaEventCreate(&c2d_stop));
checkCudaErrors(cudaEventCreate(&sort_d_start));
checkCudaErrors(cudaEventCreate(&sort_d_stop));
thrust::host_vector<int> host_arr(num_elem);
thrust::sequence(host_arr.begin(),host_arr.end());
//copy to device
checkCudaErrors(cudaEventRecord(c2d_start,0));
thrust::device_vector<int> device_arr = host_arr;
checkCudaErrors(cudaEventRecord(c2d_stop, 0));
checkCudaErrors(cudaEventSynchronize(c2d_stop));
// reduce on device
checkCudaErrors(cudaEventRecord(sort_d_start, 0));
const long int sum_device = thrust::reduce(device_arr.begin(),device_arr.end());
checkCudaErrors(cudaEventRecord(sort_d_stop, 0));
checkCudaErrors(cudaEventSynchronize(sort_d_stop));
//reduce on host
clock_t start, stop;
start = clock();
const long int sum_host = thrust::reduce(host_arr.begin(),host_arr.end());
stop = clock();
reduce_h_t = 1000.0f * (stop - start) / CLOCKS_PER_SEC; // convert clock() ticks to milliseconds
// allocate host memory
int * const host_raw_ptr_2 = (int*)malloc(nbytes);
int * p2 = host_raw_ptr_2;
for (int i = 0; i < num_elem; i++)
*p2++ = host_arr[i];
// host_openmp
start = clock();
const long int sum_host_openmp = reduce_openmp(host_raw_ptr_2,num_elem);
stop = clock();
reduce_h_mp_t = 1000.0f * (stop - start) / CLOCKS_PER_SEC; // ticks -> ms
// host_serial
start = clock();
const long int sum_host_serial = reduce_serial(host_raw_ptr_2, num_elem);
stop = clock();
reduce_h_serial_t = 1000.0f * (stop - start) / CLOCKS_PER_SEC; // ticks -> ms
free(host_raw_ptr_2);
if ((sum_device==sum_host)&& (sum_host_serial == sum_host_openmp))
printf("\n> reduction matched");
else
printf("\n> reduction failed");
checkCudaErrors(cudaEventElapsedTime(&c2d_t, c2d_start, c2d_stop));
checkCudaErrors(cudaEventElapsedTime(&reduce_d_t, sort_d_start, sort_d_stop));
printf("\n> copy to device : %0.2fms", c2d_t);
printf("\n> reduce on device: %0.2fms", reduce_d_t);
printf("\n> total on device : %0.2fms", reduce_d_t + c2d_t);
printf("\n> Thrust reduce on host: %0.2fms", reduce_h_t);
printf("\n> serial reduce on host: %0.2fms", reduce_h_serial_t);
printf("\n> openMP reduce on host: %0.2fms", reduce_h_mp_t);
checkCudaErrors(cudaEventDestroy(c2d_start));
checkCudaErrors(cudaEventDestroy(c2d_stop));
checkCudaErrors(cudaEventDestroy(sort_d_start));
checkCudaErrors(cudaEventDestroy(sort_d_stop));
}
checkCudaErrors(cudaDeviceReset());
}
|
76193c9c2d58b2f6953a282ca6c41410cd10b185.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "relax.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *U = NULL;
hipMalloc(&U, XSIZE*YSIZE);
int *F = NULL;
hipMalloc(&F, XSIZE*YSIZE);
int *d = NULL;
hipMalloc(&d, XSIZE*YSIZE);
size_t gSize = XSIZE*YSIZE;
int *adjMat = NULL;
hipMalloc(&adjMat, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
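// Round the launch extents up to the next multiple of the block dimensions so the grid covers the whole matrix.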
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
relax), dim3(gridBlock),dim3(threadBlock), 0, 0, U,F,d,gSize,adjMat);
hipDeviceSynchronize();
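// Warm-up launches: run the kernel a few times before timing so one-time setup cost is excluded from the measurement.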
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
relax), dim3(gridBlock),dim3(threadBlock), 0, 0, U,F,d,gSize,adjMat);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
relax), dim3(gridBlock),dim3(threadBlock), 0, 0, U,F,d,gSize,adjMat);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 76193c9c2d58b2f6953a282ca6c41410cd10b185.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "relax.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *U = NULL;
cudaMalloc(&U, XSIZE*YSIZE);
int *F = NULL;
cudaMalloc(&F, XSIZE*YSIZE);
int *d = NULL;
cudaMalloc(&d, XSIZE*YSIZE);
size_t gSize = XSIZE*YSIZE;
int *adjMat = NULL;
cudaMalloc(&adjMat, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
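// Round the launch extents up to the next multiple of the block dimensions so the grid covers the whole matrix.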
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
relax<<<gridBlock,threadBlock>>>(U,F,d,gSize,adjMat);
cudaDeviceSynchronize();
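// Warm-up launches: run the kernel a few times before timing so one-time setup cost is excluded from the measurement.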
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
relax<<<gridBlock,threadBlock>>>(U,F,d,gSize,adjMat);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
relax<<<gridBlock,threadBlock>>>(U,F,d,gSize,adjMat);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
cc0b286de2b072e208628de7fe742a6ba8a6d595.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Python.h>
#include <iostream>
#include "theano_mod_helper.h"
#include "cuda_ndarray.cuh"
//////////////////////
//// Support Code
//////////////////////
#define INTDIV_POW2(a, b) (a >> b)
#define INTMOD_POW2(a, b) (a & ((1<<b)-1))
// GpuElemwise{Composite{tanh((i0 + i1))}}[(0, 1)]
// node.op.destroy_map={0: [1]}
// Input 0 CudaNdarrayType(float32, row)
// Input 1 CudaNdarrayType(float32, matrix)
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_1(unsigned int numEls
, const int dim0
, const float * i0_data, int i0_str_0
, const float * i1_data, int i1_str_0
, float * o0_data, int o0_str_0
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
const float * ii_i1_data = i1_data;
float * ii_o0_data = o0_data;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_i1_data += pos0 * i1_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i0_data[0] + ii_i1_data[0];
o0_i = tanh(V_DUMMY_ID__tmp1);
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{tanh((i0 + i1))}}[(0, 1)]
// node.op.destroy_map={0: [1]}
// Input 0 CudaNdarrayType(float32, row)
// Input 1 CudaNdarrayType(float32, matrix)
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_2(unsigned int numEls
, const int dim0, const int dim1
, const float * i0_data, int i0_str_0, int i0_str_1
, const float * i1_data, int i1_str_0, int i1_str_1
, float * o0_data, int o0_str_0, int o0_str_1
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
const float * ii_i1_data = i1_data;
float * ii_o0_data = o0_data;
int pos1 = ii % dim1;
ii = ii / dim1;
ii_i0_data += pos1 * i0_str_1;
ii_i1_data += pos1 * i1_str_1;
ii_o0_data += pos1 * o0_str_1;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_i1_data += pos0 * i1_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i0_data[0] + ii_i1_data[0];
o0_i = tanh(V_DUMMY_ID__tmp1);
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{tanh((i0 + i1))}}[(0, 1)]
// node.op.destroy_map={0: [1]}
// Input 0 CudaNdarrayType(float32, row)
// Input 1 CudaNdarrayType(float32, matrix)
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_Ccontiguous (unsigned int numEls
, const float * i0_data
, const float * i1_data
, float * o0_data
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int i = idx; i < numEls; i += numThreads) {
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = i0_data[i] + i1_data[i];
o0_i = tanh(V_DUMMY_ID__tmp1);
}
o0_data[i] = o0_i;
}
}
static void can_collapse_node_64c2a42272088a5a25934dfdfab727da_0(int nd, const int * dims, const int * strides, int collapse[])
{
//can we collapse dims[i] and dims[i-1]
for(int i=nd-1;i>0;i--){
if(strides[i]*dims[i]==strides[i-1]){//the dims nd-1 are not strided again dimension nd
collapse[i]=1;
}else collapse[i]=0;
}
}
static int callkernel_node_64c2a42272088a5a25934dfdfab727da_0(unsigned int numEls, const int d,
const int * dims,
const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str,
float * o0_data, const int * o0_str)
{
numEls = dims[0]*dims[1]*1;
int local_dims[2];
int local_str[2][2];
int local_ostr[1][2];
int nd_collapse = 2;
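// Collapse adjacent dimensions that are broadcast or stored contiguously, so the element-wise kernel is launched over as few strided dimensions as possible (nd_collapse drops to 0 for fully contiguous data).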
for(int i=0;i<2;i++){//init new dim
local_dims[i]=dims[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[0][i]=i0_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[1][i]=i1_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_ostr[0][i]=o0_str[i];
}
for(int id=0;id<nd_collapse;id++){
bool all_broadcast=true;
for(int input_id=0;input_id<2;input_id++){
if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
for(int input_id=0;input_id<1;input_id++){
if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
if(all_broadcast){
for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
for(int input_id=0;input_id<2;input_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_str[input_id][j-1]=local_str[input_id][j];
}
}
for(int output_id=0;output_id<1;output_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_ostr[output_id][j-1]=local_ostr[output_id][j];
}
}
nd_collapse--; id--;
}
}
int nd_collapse_[2] = {1,1};
int nd_collapse_0[2] = {1,1};
can_collapse_node_64c2a42272088a5a25934dfdfab727da_0(nd_collapse, local_dims, local_str[0], nd_collapse_0);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_0[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_1[2] = {1,1};
can_collapse_node_64c2a42272088a5a25934dfdfab727da_0(nd_collapse, local_dims, local_str[1], nd_collapse_1);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_1[i]==0)
nd_collapse_[i]=0;
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[0][i-1]=local_str[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[0][j-1]=local_str[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[1][i-1]=local_str[1][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[1][j-1]=local_str[1][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_ostr[0][i-1]=local_ostr[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_ostr[0][j-1]=local_ostr[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_dims[i-1]*=local_dims[i];//set new dims
for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
}
}
for(int i=1, end=nd_collapse;i<end;i++){
if(nd_collapse_[i]==1)nd_collapse--;
}
if(nd_collapse == 1
&& local_str[0][nd_collapse-1]==1 && local_str[1][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1
){nd_collapse=0;}
if(numEls==0) return 0;
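// Select the kernel specialization: case 0 = fully contiguous data, case 1 = one strided dimension, case 2 = two strided dimensions.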
switch (nd_collapse==0?0:min(2,nd_collapse)) {
case 0: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_Ccontiguous), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, i0_data, i1_data, o0_data);
//std::cerr << "calling callkernel returned\n";
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_64c2a42272088a5a25934dfdfab727da_0 Composite", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, o0_data)");
return -1;
}
return 0;
} break;
case 1: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_1), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], o0_data, local_ostr[0][0]);
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_64c2a42272088a5a25934dfdfab727da_0 Composite", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], o0_data, local_ostr[0][0])");
return -1;
}
return 0;
} break;
case 2: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_2), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], o0_data, local_ostr[0][0], local_ostr[0][1]);
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_64c2a42272088a5a25934dfdfab727da_0 Composite", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], o0_data, local_ostr[0][0], local_ostr[0][1])");
return -1;
}
return 0;
} break;
}
return -2;
}
namespace {
struct __struct_compiled_op_64c2a42272088a5a25934dfdfab727da {
PyObject* __ERROR;
PyObject* storage_V3;
PyObject* storage_V5;
PyObject* storage_V1;
__struct_compiled_op_64c2a42272088a5a25934dfdfab727da() {
// This is only somewhat safe because we:
// 1) Are not a virtual class
// 2) Do not use any virtual classes in the members
// 3) Deal with mostly POD and pointers
// If this changes, we would have to revise this, but for
// now I am tired of chasing segfaults because
// initialization code had an error and some pointer has
// a junk value.
memset(this, 0, sizeof(*this));
}
~__struct_compiled_op_64c2a42272088a5a25934dfdfab727da(void) {
cleanup();
}
int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V1) {
Py_XINCREF(storage_V3);
Py_XINCREF(storage_V5);
Py_XINCREF(storage_V1);
this->storage_V3 = storage_V3;
this->storage_V5 = storage_V5;
this->storage_V1 = storage_V1;
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
__label_1:
double __DUMMY_1;
__label_3:
double __DUMMY_3;
__label_5:
double __DUMMY_5;
__label_8:
double __DUMMY_8;
Py_XDECREF(this->storage_V3);
Py_XDECREF(this->storage_V5);
Py_XDECREF(this->storage_V1);
}
int run(void) {
int __failure = 0;
PyObject* py_V1;
CudaNdarray * V1;
PyObject* py_V3;
CudaNdarray * V3;
PyObject* py_V5;
CudaNdarray * V5;
{
py_V1 = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
if (py_V1 == Py_None)
{
V1 = NULL;
}
else
{
assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V1))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
V1 = (CudaNdarray*)py_V1;
//std::cerr << "c_extract " << V1 << '\n';
if (V1->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V1->nd);
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract " << V1 << " nd check passed\n";
assert(V1);
Py_INCREF(py_V1);
}
else if (py_V1 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract done " << V1 << '\n';
}
{
py_V3 = PyList_GET_ITEM(storage_V3, 0);
{Py_XINCREF(py_V3);}
assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V3))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
V3 = (CudaNdarray*)py_V3;
//std::cerr << "c_extract " << V3 << '\n';
if (V3->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V3->nd);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V3)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V3)[0], 0);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V3 << "checking bcast 0 <" << V3->str<< ">\n";
//std::cerr << "c_extract " << V3->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V3)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V3)[0], 0);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "bcast check 0 passed\n";
assert(V3);
Py_INCREF(py_V3);
}
else if (py_V3 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract done " << V3 << '\n';
{
py_V5 = PyList_GET_ITEM(storage_V5, 0);
{Py_XINCREF(py_V5);}
assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V5))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
V5 = (CudaNdarray*)py_V5;
//std::cerr << "c_extract " << V5 << '\n';
if (V5->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V5->nd);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << " nd check passed\n";
assert(V5);
Py_INCREF(py_V5);
}
else if (py_V5 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract done " << V5 << '\n';
{
// Op class GpuElemwise
//std::cerr << "C_CODE Composite{tanh((i0 + i1))} START\n";
//standard elemwise size checks
int dims[2] = {1,1};
int broadcasts_V3[2] = {1, 0};
int broadcasts_V5[2] = {0, 0};
//std::cerr << "C_CODE Composite{tanh((i0 + i1))} checking input V3\n";
if (2 != V3->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V3->nd);
{
__failure = 7;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_7;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i];
if ((!(broadcasts_V3[i] &&
CudaNdarray_HOST_DIMS(V3)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V3)[i]))
{
//std::cerr << "C_CODE Composite{tanh((i0 + i1))} checking input V3 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 0 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V3)[i],
dims[i]
);
{
__failure = 7;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_7;};
}
}
//std::cerr << "C_CODE Composite{tanh((i0 + i1))} checking input V5\n";
if (2 != V5->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V5->nd);
{
__failure = 7;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_7;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i];
if ((!(broadcasts_V5[i] &&
CudaNdarray_HOST_DIMS(V5)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V5)[i]))
{
//std::cerr << "C_CODE Composite{tanh((i0 + i1))} checking input V5 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 1 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V5)[i],
dims[i]
);
{
__failure = 7;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_7;};
}
}
Py_XDECREF(V1);
V1 = V5;
Py_INCREF(V1);
for (int i = 0; (i< 2) && (V1); ++i) {
if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i])
{
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Output dimension mis-match. Output"
" 0 (indices start at 0), working inplace"
" on input 1, has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V1)[i],
dims[i]
);
Py_DECREF(V1);
V1 = NULL;
{
__failure = 7;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_7;};
}
}
//std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n";
//std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n";
{
//new block so that failure gotos don't skip over variable initialization
//std::cerr << "calling callkernel\n";
if (callkernel_node_64c2a42272088a5a25934dfdfab727da_0(1, 0, dims
, CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3)
, CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5)
, CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1)
))
{
// error
Py_DECREF(V1);
V1 = NULL;
{
__failure = 7;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_7;};
}
else // no error
{
}
}
//std::cerr << "C_CODE Composite{tanh((i0 + i1))} END\n";
__label_7:
double __DUMMY_7;
}
__label_6:
//std::cerr << "cleanup " << py_V5 << " " << V5 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
if (V5)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt));
Py_XDECREF(V5);
}
//std::cerr << "cleanup done" << py_V5 << "\n";
{Py_XDECREF(py_V5);}
double __DUMMY_6;
}
__label_4:
//std::cerr << "cleanup " << py_V3 << " " << V3 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
if (V3)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt));
Py_XDECREF(V3);
}
//std::cerr << "cleanup done" << py_V3 << "\n";
{Py_XDECREF(py_V3);}
double __DUMMY_4;
}
__label_2:
if (!__failure) {
//std::cerr << "sync\n";
if (NULL == V1) {
// failure: sync None to storage
Py_XDECREF(py_V1);
py_V1 = Py_None;
Py_INCREF(py_V1);
}
else
{
if (py_V1 != (PyObject*)V1)
{
Py_XDECREF(py_V1);
py_V1 = (PyObject*)V1;
Py_INCREF(py_V1);
}
assert(py_V1->ob_refcnt);
}
PyObject* old = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
PyList_SET_ITEM(storage_V1, 0, py_V1);
{Py_XDECREF(old);}
}
//std::cerr << "cleanup " << py_V1 << " " << V1 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
if (V1)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt));
Py_XDECREF(V1);
}
//std::cerr << "cleanup done" << py_V1 << "\n";
{Py_XDECREF(py_V1);}
double __DUMMY_2;
}
if (__failure) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return __failure;
}
};
}
static int __struct_compiled_op_64c2a42272088a5a25934dfdfab727da_executor(__struct_compiled_op_64c2a42272088a5a25934dfdfab727da* self) {
return self->run();
}
static void __struct_compiled_op_64c2a42272088a5a25934dfdfab727da_destructor(void* executor, void* self) {
delete ((__struct_compiled_op_64c2a42272088a5a25934dfdfab727da*)self);
}
//////////////////////
//// Functions
//////////////////////
static PyObject * instantiate(PyObject * self, PyObject *argtuple) {
assert(PyTuple_Check(argtuple));
if (4 != PyTuple_Size(argtuple)){
PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 4, got %i", (int)PyTuple_Size(argtuple));
return NULL;
}
__struct_compiled_op_64c2a42272088a5a25934dfdfab727da* struct_ptr = new __struct_compiled_op_64c2a42272088a5a25934dfdfab727da();
if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3) ) != 0) {
delete struct_ptr;
return NULL;
}
PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_64c2a42272088a5a25934dfdfab727da_executor), struct_ptr, __struct_compiled_op_64c2a42272088a5a25934dfdfab727da_destructor);
return thunk; }
//////////////////////
//// Module init
//////////////////////
static PyMethodDef MyMethods[] = {
{"instantiate", instantiate, METH_VARARGS, "undocumented"} ,
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC init64c2a42272088a5a25934dfdfab727da(void){
(void) Py_InitModule("64c2a42272088a5a25934dfdfab727da", MyMethods);
}
| cc0b286de2b072e208628de7fe742a6ba8a6d595.cu | #include <Python.h>
#include <iostream>
#include "theano_mod_helper.h"
#include "cuda_ndarray.cuh"
//////////////////////
//// Support Code
//////////////////////
#define INTDIV_POW2(a, b) (a >> b)
#define INTMOD_POW2(a, b) (a & ((1<<b)-1))
// GpuElemwise{Composite{tanh((i0 + i1))}}[(0, 1)]
// node.op.destroy_map={0: [1]}
// Input 0 CudaNdarrayType(float32, row)
// Input 1 CudaNdarrayType(float32, matrix)
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_1(unsigned int numEls
, const int dim0
, const float * i0_data, int i0_str_0
, const float * i1_data, int i1_str_0
, float * o0_data, int o0_str_0
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
const float * ii_i1_data = i1_data;
float * ii_o0_data = o0_data;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_i1_data += pos0 * i1_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i0_data[0] + ii_i1_data[0];
o0_i = tanh(V_DUMMY_ID__tmp1);
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{tanh((i0 + i1))}}[(0, 1)]
// node.op.destroy_map={0: [1]}
// Input 0 CudaNdarrayType(float32, row)
// Input 1 CudaNdarrayType(float32, matrix)
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_2(unsigned int numEls
, const int dim0, const int dim1
, const float * i0_data, int i0_str_0, int i0_str_1
, const float * i1_data, int i1_str_0, int i1_str_1
, float * o0_data, int o0_str_0, int o0_str_1
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
const float * ii_i1_data = i1_data;
float * ii_o0_data = o0_data;
int pos1 = ii % dim1;
ii = ii / dim1;
ii_i0_data += pos1 * i0_str_1;
ii_i1_data += pos1 * i1_str_1;
ii_o0_data += pos1 * o0_str_1;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_i1_data += pos0 * i1_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i0_data[0] + ii_i1_data[0];
o0_i = tanh(V_DUMMY_ID__tmp1);
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{tanh((i0 + i1))}}[(0, 1)]
// node.op.destroy_map={0: [1]}
// Input 0 CudaNdarrayType(float32, row)
// Input 1 CudaNdarrayType(float32, matrix)
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_Ccontiguous (unsigned int numEls
, const float * i0_data
, const float * i1_data
, float * o0_data
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int i = idx; i < numEls; i += numThreads) {
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = i0_data[i] + i1_data[i];
o0_i = tanh(V_DUMMY_ID__tmp1);
}
o0_data[i] = o0_i;
}
}
static void can_collapse_node_64c2a42272088a5a25934dfdfab727da_0(int nd, const int * dims, const int * strides, int collapse[])
{
//can we collapse dims[i] and dims[i-1]
for(int i=nd-1;i>0;i--){
if(strides[i]*dims[i]==strides[i-1]){//the dims nd-1 are not strided again dimension nd
collapse[i]=1;
}else collapse[i]=0;
}
}
static int callkernel_node_64c2a42272088a5a25934dfdfab727da_0(unsigned int numEls, const int d,
const int * dims,
const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str,
float * o0_data, const int * o0_str)
{
numEls = dims[0]*dims[1]*1;
int local_dims[2];
int local_str[2][2];
int local_ostr[1][2];
int nd_collapse = 2;
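// Collapse adjacent dimensions that are broadcast or stored contiguously, so the element-wise kernel is launched over as few strided dimensions as possible (nd_collapse drops to 0 for fully contiguous data).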
for(int i=0;i<2;i++){//init new dim
local_dims[i]=dims[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[0][i]=i0_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[1][i]=i1_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_ostr[0][i]=o0_str[i];
}
for(int id=0;id<nd_collapse;id++){
bool all_broadcast=true;
for(int input_id=0;input_id<2;input_id++){
if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
for(int input_id=0;input_id<1;input_id++){
if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
if(all_broadcast){
for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
for(int input_id=0;input_id<2;input_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_str[input_id][j-1]=local_str[input_id][j];
}
}
for(int output_id=0;output_id<1;output_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_ostr[output_id][j-1]=local_ostr[output_id][j];
}
}
nd_collapse--; id--;
}
}
int nd_collapse_[2] = {1,1};
int nd_collapse_0[2] = {1,1};
can_collapse_node_64c2a42272088a5a25934dfdfab727da_0(nd_collapse, local_dims, local_str[0], nd_collapse_0);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_0[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_1[2] = {1,1};
can_collapse_node_64c2a42272088a5a25934dfdfab727da_0(nd_collapse, local_dims, local_str[1], nd_collapse_1);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_1[i]==0)
nd_collapse_[i]=0;
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[0][i-1]=local_str[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[0][j-1]=local_str[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[1][i-1]=local_str[1][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[1][j-1]=local_str[1][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_ostr[0][i-1]=local_ostr[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_ostr[0][j-1]=local_ostr[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_dims[i-1]*=local_dims[i];//set new dims
for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
}
}
for(int i=1, end=nd_collapse;i<end;i++){
if(nd_collapse_[i]==1)nd_collapse--;
}
if(nd_collapse == 1
&& local_str[0][nd_collapse-1]==1 && local_str[1][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1
){nd_collapse=0;}
if(numEls==0) return 0;
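// Select the kernel specialization: case 0 = fully contiguous data, case 1 = one strided dimension, case 2 = two strided dimensions.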
switch (nd_collapse==0?0:min(2,nd_collapse)) {
case 0: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, o0_data);
//std::cerr << "calling callkernel returned\n";
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_64c2a42272088a5a25934dfdfab727da_0 Composite", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, o0_data)");
return -1;
}
return 0;
} break;
case 1: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_1<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], o0_data, local_ostr[0][0]);
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_64c2a42272088a5a25934dfdfab727da_0 Composite", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], o0_data, local_ostr[0][0])");
return -1;
}
return 0;
} break;
case 2: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_2<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], o0_data, local_ostr[0][0], local_ostr[0][1]);
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_64c2a42272088a5a25934dfdfab727da_0 Composite", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_64c2a42272088a5a25934dfdfab727da_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], o0_data, local_ostr[0][0], local_ostr[0][1])");
return -1;
}
return 0;
} break;
}
return -2;
}
namespace {
struct __struct_compiled_op_64c2a42272088a5a25934dfdfab727da {
PyObject* __ERROR;
PyObject* storage_V3;
PyObject* storage_V5;
PyObject* storage_V1;
__struct_compiled_op_64c2a42272088a5a25934dfdfab727da() {
// This is only somewhat safe because we:
// 1) Are not a virtual class
// 2) Do not use any virtual classes in the members
// 3) Deal with mostly POD and pointers
// If this changes, we would have to revise this, but for
// now I am tired of chasing segfaults because
// initialization code had an error and some pointer has
// a junk value.
memset(this, 0, sizeof(*this));
}
~__struct_compiled_op_64c2a42272088a5a25934dfdfab727da(void) {
cleanup();
}
int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V1) {
Py_XINCREF(storage_V3);
Py_XINCREF(storage_V5);
Py_XINCREF(storage_V1);
this->storage_V3 = storage_V3;
this->storage_V5 = storage_V5;
this->storage_V1 = storage_V1;
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
__label_1:
double __DUMMY_1;
__label_3:
double __DUMMY_3;
__label_5:
double __DUMMY_5;
__label_8:
double __DUMMY_8;
Py_XDECREF(this->storage_V3);
Py_XDECREF(this->storage_V5);
Py_XDECREF(this->storage_V1);
}
int run(void) {
int __failure = 0;
PyObject* py_V1;
CudaNdarray * V1;
PyObject* py_V3;
CudaNdarray * V3;
PyObject* py_V5;
CudaNdarray * V5;
{
py_V1 = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
if (py_V1 == Py_None)
{
V1 = NULL;
}
else
{
assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V1))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
V1 = (CudaNdarray*)py_V1;
//std::cerr << "c_extract " << V1 << '\n';
if (V1->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V1->nd);
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract " << V1 << " nd check passed\n";
assert(V1);
Py_INCREF(py_V1);
}
else if (py_V1 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract done " << V1 << '\n';
}
{
py_V3 = PyList_GET_ITEM(storage_V3, 0);
{Py_XINCREF(py_V3);}
assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V3))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
V3 = (CudaNdarray*)py_V3;
//std::cerr << "c_extract " << V3 << '\n';
if (V3->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V3->nd);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V3)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V3)[0], 0);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V3 << "checking bcast 0 <" << V3->str<< ">\n";
//std::cerr << "c_extract " << V3->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V3)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V3)[0], 0);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "bcast check 0 passed\n";
assert(V3);
Py_INCREF(py_V3);
}
else if (py_V3 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract done " << V3 << '\n';
{
py_V5 = PyList_GET_ITEM(storage_V5, 0);
{Py_XINCREF(py_V5);}
assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V5))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
V5 = (CudaNdarray*)py_V5;
//std::cerr << "c_extract " << V5 << '\n';
if (V5->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V5->nd);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << " nd check passed\n";
assert(V5);
Py_INCREF(py_V5);
}
else if (py_V5 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract done " << V5 << '\n';
{
// Op class GpuElemwise
//std::cerr << "C_CODE Composite{tanh((i0 + i1))} START\n";
//standard elemwise size checks
int dims[2] = {1,1};
int broadcasts_V3[2] = {1, 0};
int broadcasts_V5[2] = {0, 0};
//std::cerr << "C_CODE Composite{tanh((i0 + i1))} checking input V3\n";
if (2 != V3->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V3->nd);
{
__failure = 7;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_7;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i];
if ((!(broadcasts_V3[i] &&
CudaNdarray_HOST_DIMS(V3)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V3)[i]))
{
//std::cerr << "C_CODE Composite{tanh((i0 + i1))} checking input V3 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 0 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V3)[i],
dims[i]
);
{
__failure = 7;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_7;};
}
}
//std::cerr << "C_CODE Composite{tanh((i0 + i1))} checking input V5\n";
if (2 != V5->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V5->nd);
{
__failure = 7;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_7;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i];
if ((!(broadcasts_V5[i] &&
CudaNdarray_HOST_DIMS(V5)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V5)[i]))
{
//std::cerr << "C_CODE Composite{tanh((i0 + i1))} checking input V5 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 1 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V5)[i],
dims[i]
);
{
__failure = 7;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_7;};
}
}
Py_XDECREF(V1);
V1 = V5;
Py_INCREF(V1);
for (int i = 0; (i< 2) && (V1); ++i) {
if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i])
{
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Output dimension mis-match. Output"
" 0 (indices start at 0), working inplace"
" on input 1, has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V1)[i],
dims[i]
);
Py_DECREF(V1);
V1 = NULL;
{
__failure = 7;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_7;};
}
}
//std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n";
//std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n";
{
//new block so that failure gotos don't skip over variable initialization
//std::cerr << "calling callkernel\n";
if (callkernel_node_64c2a42272088a5a25934dfdfab727da_0(1, 0, dims
, CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3)
, CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5)
, CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1)
))
{
// error
Py_DECREF(V1);
V1 = NULL;
{
__failure = 7;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_7;};
}
else // no error
{
}
}
//std::cerr << "C_CODE Composite{tanh((i0 + i1))} END\n";
__label_7:
double __DUMMY_7;
}
__label_6:
//std::cerr << "cleanup " << py_V5 << " " << V5 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
if (V5)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt));
Py_XDECREF(V5);
}
//std::cerr << "cleanup done" << py_V5 << "\n";
{Py_XDECREF(py_V5);}
double __DUMMY_6;
}
__label_4:
//std::cerr << "cleanup " << py_V3 << " " << V3 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
if (V3)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt));
Py_XDECREF(V3);
}
//std::cerr << "cleanup done" << py_V3 << "\n";
{Py_XDECREF(py_V3);}
double __DUMMY_4;
}
__label_2:
if (!__failure) {
//std::cerr << "sync\n";
if (NULL == V1) {
// failure: sync None to storage
Py_XDECREF(py_V1);
py_V1 = Py_None;
Py_INCREF(py_V1);
}
else
{
if (py_V1 != (PyObject*)V1)
{
Py_XDECREF(py_V1);
py_V1 = (PyObject*)V1;
Py_INCREF(py_V1);
}
assert(py_V1->ob_refcnt);
}
PyObject* old = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
PyList_SET_ITEM(storage_V1, 0, py_V1);
{Py_XDECREF(old);}
}
//std::cerr << "cleanup " << py_V1 << " " << V1 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
if (V1)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt));
Py_XDECREF(V1);
}
//std::cerr << "cleanup done" << py_V1 << "\n";
{Py_XDECREF(py_V1);}
double __DUMMY_2;
}
if (__failure) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return __failure;
}
};
}
static int __struct_compiled_op_64c2a42272088a5a25934dfdfab727da_executor(__struct_compiled_op_64c2a42272088a5a25934dfdfab727da* self) {
return self->run();
}
static void __struct_compiled_op_64c2a42272088a5a25934dfdfab727da_destructor(void* executor, void* self) {
delete ((__struct_compiled_op_64c2a42272088a5a25934dfdfab727da*)self);
}
//////////////////////
//// Functions
//////////////////////
static PyObject * instantiate(PyObject * self, PyObject *argtuple) {
assert(PyTuple_Check(argtuple));
if (4 != PyTuple_Size(argtuple)){
PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 4, got %i", (int)PyTuple_Size(argtuple));
return NULL;
}
__struct_compiled_op_64c2a42272088a5a25934dfdfab727da* struct_ptr = new __struct_compiled_op_64c2a42272088a5a25934dfdfab727da();
if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3) ) != 0) {
delete struct_ptr;
return NULL;
}
PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_64c2a42272088a5a25934dfdfab727da_executor), struct_ptr, __struct_compiled_op_64c2a42272088a5a25934dfdfab727da_destructor);
return thunk; }
//////////////////////
//// Module init
//////////////////////
static PyMethodDef MyMethods[] = {
{"instantiate", instantiate, METH_VARARGS, "undocumented"} ,
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC init64c2a42272088a5a25934dfdfab727da(void){
(void) Py_InitModule("64c2a42272088a5a25934dfdfab727da", MyMethods);
}
|
fb0e0409e64399f616e5e6b4681162287cdc3cef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
hipCtx_t hContext = 0;
#define CUDA_CHECK( fn ) do { \
hipError_t status = (fn); \
if ( hipSuccess != status ) { \
		const char* errstr = hipGetErrorString(status); \
printf("CUDA Driver Failure (line %d of file %s):\n\t%s returned 0x%x (%s)\n", __LINE__, __FILE__, #fn, status, errstr); \
exit(EXIT_FAILURE); \
} \
} while (0)
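// Throughput model used below: a full convolution of an N-sample input with a
// 16-tap filter yields N+16-1 outputs, 16 multiply-adds each (one flop per MAC).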
void gflops(const char* ident, int N, float ms, int repeat)
{
float msecPerMatrixMul = ms / repeat;
double flopsPerMatrixMul = (N+16-1) * 16.0 ;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf("ms = %f \n", msecPerMatrixMul);
printf("%s GFLOPS: %.2f (size: %d, iterations: %d)\n", ident, gigaFlops, N, repeat);
}
int main()
{
//-----------------sample_data_config---------------------
int N = 1024032;//1024032;//1023985;
int M = 16;//16;
int P = 1024000;
size_t sizeSampleFloat = N * 4;
size_t sizeFilterFloat = M * 4;//16 * 4;
size_t sizeResultFloat = P * 4;
dim3 threads(32, 1, 1);
dim3 grid(2000, 1, 1);
hipError_t error;
char deviceName[32];
int count, ordinal, major, minor;
hipDevice_t hDevice;
hipEvent_t hStart, hStop;
hipDeviceptr_t devH, devX, devY;
// ------Initialize the Driver API and find a device-----
CUDA_CHECK(hipInit(0));
CUDA_CHECK(hipGetDeviceCount(&count));
for (ordinal = 0; ordinal < count; ordinal++)
{
CUDA_CHECK(hipDeviceGet(&hDevice, ordinal));
CUDA_CHECK(hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, hDevice));
CUDA_CHECK(hipDeviceGetAttribute(&minor, hipDeviceAttributeComputeCapabilityMinor, hDevice));
CUDA_CHECK(hipDeviceGetName(deviceName, sizeof(deviceName), hDevice));
if (major >= 5 && minor >= 2)
{
//printf("Using: Id:%d %s (%d.%d)\n\n", ordinal, deviceName, major, minor);
break;
}
}
if (ordinal == count)
{
printf("No compute 5.0 device found, exiting.\n");
exit(EXIT_FAILURE);
}
//-----------------device_test------------------------
int device = 0;
error = hipSetDevice(0);
if (error != hipSuccess)
{
printf("device error");
exit(EXIT_FAILURE);
}
else printf("device: %d \n", device);
hipDeviceProp_t deviceProp;
error = hipGetDeviceProperties(&deviceProp, 0);
if (error != hipSuccess)
{
printf("DeviceProperties error");
exit(EXIT_FAILURE);
}
//-----------------------host----------------------------
float* H = (float*)malloc(sizeFilterFloat);
float* X = (float*)malloc(sizeSampleFloat);
float* Y = (float*)malloc(sizeResultFloat);
float* T = (float*)malloc(sizeResultFloat);
for (int i = 0; i < N ; i++)
{
X[i] = (float)rand()/1000;
}
for (int i = 0; i < M; i++)
{
H[i] = (float)rand()/1000;
}
for (int i = 0; i < P; i++) //
{
Y[i] = (float)0.0;
T[i] = (float)0.0;
}
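 // CPU reference: 16-tap FIR filter, T[i] = sum over m in [0,15] of H[15-m] * X[i+m]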
for (int i = 0; i < P; i++)
{
int k = i;
for (int j = 16; j > 0; j--)
{
T[i] += H[j - 1] * X[k];
k++;
}
}
//-----------------------Dev----------------------------
CUDA_CHECK(hipCtxCreate(&hContext, 0, hDevice));
CUDA_CHECK(hipEventCreate(&hStart, hipEventBlockingSync)); // hipEventDefault
CUDA_CHECK(hipEventCreate(&hStop, hipEventBlockingSync));
CUDA_CHECK(cuMemAlloc(&devH, sizeFilterFloat));
CUDA_CHECK(cuMemAlloc(&devX, sizeSampleFloat));
CUDA_CHECK(cuMemAlloc(&devY, sizeResultFloat));
CUDA_CHECK(cuMemcpyHtoD(devH, H, sizeFilterFloat));
CUDA_CHECK(cuMemcpyHtoD(devX, X, sizeSampleFloat));
//---------------------Kernel----------------------------
printf("Computing result using CUDA Kernel...\n");
// Load the cubin
hipModule_t hModule;
CUDA_CHECK(hipModuleLoad(&hModule, "conv.cubin"));
// Load the kernel function
hipFunction_t hKernel;
CUDA_CHECK(hipModuleGetFunction(&hKernel, hModule, "conv_kernel_32"));
void * params[] = {&devH, &devX, &devY};
int repeat = 20;
float totalTime = 0;
 // Launch the kernel repeat times, but break it up into pieces so as not to lock things up.
CUDA_CHECK(hipEventCreate(&hStart, hipEventBlockingSync)); // hipEventDefault
CUDA_CHECK(hipEventCreate(&hStop, hipEventBlockingSync));
while (repeat > 0)
{
float ms;
int r = repeat;
CUDA_CHECK(hipEventRecord(hStart, NULL));
for (int i = 0; i < repeat; i++)
CUDA_CHECK(hipModuleLaunchKernel(hKernel, grid.x, 1, 1, threads.x, 1, 1, 0, 0, params, 0));
CUDA_CHECK(hipEventRecord(hStop, NULL));
CUDA_CHECK(hipEventSynchronize(hStop));
CUDA_CHECK(hipEventElapsedTime(&ms, hStart, hStop));
totalTime += ms;
//gflops("conv_kernel_32", N, ms, repeat);
repeat -= r;
}
//CUDA_CHECK(hipModuleLaunchKernel(hKernel, grid.x, grid.y, 1, threads.x, 1, 1, 0, 0, params, 0));
//CUDA_CHECK(hipModuleLaunchKernel(hKernel, grid.x, grid.y, 1, threads.x, 1, 1, 0, 0, params, 0));
CUDA_CHECK(hipModuleUnload(hModule));
printf("first time done\n");
// Copy result from device to host
 CUDA_CHECK(hipMemcpyDtoH(Y, devY, sizeResultFloat));
 CUDA_CHECK(hipMemcpyDtoH(H, devH, sizeFilterFloat));
 CUDA_CHECK(hipMemcpyDtoH(X, devX, sizeSampleFloat));
for (int i = 1024*0; i<1024*1; i++)
printf("Y[%d] = %f --- and --- T[%d] = %f delta = %f\n", i, Y[i], i, T[i], T[i] - Y[i]);
for (int i = 1024*0; i<P; i++)
{
if (Y[i] - T[i] > 1e-2)
printf("Y[%d] = %f --- but --- T[%d] = %f delta = %f\n", i, Y[i], i, T[i], T[i] - Y[i]);
}
//-----------------------free----------------------------
// Cleanup and shutdown of cuda
CUDA_CHECK(hipFree(devH));
CUDA_CHECK(hipFree(devX));
CUDA_CHECK(hipFree(devY));
free(H);
free(X);
free(Y);
CUDA_CHECK(hipEventDestroy(hStart));
CUDA_CHECK(hipEventDestroy(hStop));
//CUBLAS_CHECK( hipblasDestroy(hCublas) );
//hCublas = 0;
CUDA_CHECK(hipCtxDestroy(hContext));
hContext = 0;
hipDeviceReset();
printf("done\n");
return EXIT_SUCCESS;
}
| fb0e0409e64399f616e5e6b4681162287cdc3cef.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
CUcontext hContext = 0;
#define CUDA_CHECK( fn ) do { \
CUresult status = (fn); \
if ( CUDA_SUCCESS != status ) { \
const char* errstr; \
cuGetErrorString(status, &errstr); \
printf("CUDA Driver Failure (line %d of file %s):\n\t%s returned 0x%x (%s)\n", __LINE__, __FILE__, #fn, status, errstr); \
exit(EXIT_FAILURE); \
} \
} while (0)
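// Throughput model used below: a full convolution of an N-sample input with a
// 16-tap filter yields N+16-1 outputs, 16 multiply-adds each (one flop per MAC).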
void gflops(const char* ident, int N, float ms, int repeat)
{
float msecPerMatrixMul = ms / repeat;
double flopsPerMatrixMul = (N+16-1) * 16.0 ;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf("ms = %f \n", msecPerMatrixMul);
printf("%s GFLOPS: %.2f (size: %d, iterations: %d)\n", ident, gigaFlops, N, repeat);
}
int main()
{
//-----------------sample_data_config---------------------
int N = 1024032;//1024032;//1023985;
int M = 16;//16;
int P = 1024000;
size_t sizeSampleFloat = N * 4;
size_t sizeFilterFloat = M * 4;//16 * 4;
size_t sizeResultFloat = P * 4;
dim3 threads(32, 1, 1);
dim3 grid(2000, 1, 1);
cudaError_t error;
char deviceName[32];
int count, ordinal, major, minor;
CUdevice hDevice;
CUevent hStart, hStop;
CUdeviceptr devH, devX, devY;
// ------Initialize the Driver API and find a device-----
CUDA_CHECK(cuInit(0));
CUDA_CHECK(cuDeviceGetCount(&count));
for (ordinal = 0; ordinal < count; ordinal++)
{
CUDA_CHECK(cuDeviceGet(&hDevice, ordinal));
CUDA_CHECK(cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, hDevice));
CUDA_CHECK(cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, hDevice));
CUDA_CHECK(cuDeviceGetName(deviceName, sizeof(deviceName), hDevice));
if (major >= 5 && minor >= 2)
{
//printf("Using: Id:%d %s (%d.%d)\n\n", ordinal, deviceName, major, minor);
break;
}
}
if (ordinal == count)
{
printf("No compute 5.0 device found, exiting.\n");
exit(EXIT_FAILURE);
}
//-----------------device_test------------------------
int device = 0;
error = cudaSetDevice(0);
if (error != cudaSuccess)
{
printf("device error");
exit(EXIT_FAILURE);
}
else printf("device: %d \n", device);
cudaDeviceProp deviceProp;
error = cudaGetDeviceProperties(&deviceProp, 0);
if (error != cudaSuccess)
{
printf("DeviceProperties error");
exit(EXIT_FAILURE);
}
//-----------------------host----------------------------
float* H = (float*)malloc(sizeFilterFloat);
float* X = (float*)malloc(sizeSampleFloat);
float* Y = (float*)malloc(sizeResultFloat);
float* T = (float*)malloc(sizeResultFloat);
for (int i = 0; i < N ; i++)
{
X[i] = (float)rand()/1000;
}
for (int i = 0; i < M; i++)
{
H[i] = (float)rand()/1000;
}
for (int i = 0; i < P; i++) //
{
Y[i] = (float)0.0;
T[i] = (float)0.0;
}
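 // CPU reference: 16-tap FIR filter, T[i] = sum over m in [0,15] of H[15-m] * X[i+m]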
for (int i = 0; i < P; i++)
{
int k = i;
for (int j = 16; j > 0; j--)
{
T[i] += H[j - 1] * X[k];
k++;
}
}
//-----------------------Dev----------------------------
CUDA_CHECK(cuCtxCreate(&hContext, 0, hDevice));
CUDA_CHECK(cuEventCreate(&hStart, CU_EVENT_BLOCKING_SYNC)); // CU_EVENT_DEFAULT
CUDA_CHECK(cuEventCreate(&hStop, CU_EVENT_BLOCKING_SYNC));
CUDA_CHECK(cuMemAlloc(&devH, sizeFilterFloat));
CUDA_CHECK(cuMemAlloc(&devX, sizeSampleFloat));
CUDA_CHECK(cuMemAlloc(&devY, sizeResultFloat));
CUDA_CHECK(cuMemcpyHtoD(devH, H, sizeFilterFloat));
CUDA_CHECK(cuMemcpyHtoD(devX, X, sizeSampleFloat));
//---------------------Kernel----------------------------
printf("Computing result using CUDA Kernel...\n");
// Load the cubin
CUmodule hModule;
CUDA_CHECK(cuModuleLoad(&hModule, "conv.cubin"));
// Load the kernel function
CUfunction hKernel;
CUDA_CHECK(cuModuleGetFunction(&hKernel, hModule, "conv_kernel_32"));
void * params[] = {&devH, &devX, &devY};
int repeat = 20;
float totalTime = 0;
 // Launch the kernel repeat times, but break it up into pieces so as not to lock things up.
CUDA_CHECK(cuEventCreate(&hStart, CU_EVENT_BLOCKING_SYNC)); // CU_EVENT_DEFAULT
CUDA_CHECK(cuEventCreate(&hStop, CU_EVENT_BLOCKING_SYNC));
while (repeat > 0)
{
float ms;
int r = repeat;
CUDA_CHECK(cuEventRecord(hStart, NULL));
for (int i = 0; i < repeat; i++)
CUDA_CHECK(cuLaunchKernel(hKernel, grid.x, 1, 1, threads.x, 1, 1, 0, 0, params, 0));
CUDA_CHECK(cuEventRecord(hStop, NULL));
CUDA_CHECK(cuEventSynchronize(hStop));
CUDA_CHECK(cuEventElapsedTime(&ms, hStart, hStop));
totalTime += ms;
//gflops("conv_kernel_32", N, ms, repeat);
repeat -= r;
}
//CUDA_CHECK(cuLaunchKernel(hKernel, grid.x, grid.y, 1, threads.x, 1, 1, 0, 0, params, 0));
//CUDA_CHECK(cuLaunchKernel(hKernel, grid.x, grid.y, 1, threads.x, 1, 1, 0, 0, params, 0));
CUDA_CHECK(cuModuleUnload(hModule));
printf("first time done\n");
// Copy result from device to host
CUDA_CHECK(cuMemcpyDtoH(Y, devY, sizeResultFloat));
CUDA_CHECK(cuMemcpyDtoH(H, devH, sizeFilterFloat));
CUDA_CHECK(cuMemcpyDtoH(X, devX, sizeSampleFloat));
for (int i = 1024*0; i<1024*1; i++)
printf("Y[%d] = %f --- and --- T[%d] = %f delta = %f\n", i, Y[i], i, T[i], T[i] - Y[i]);
for (int i = 1024*0; i<P; i++)
{
if (Y[i] - T[i] > 1e-2)
printf("Y[%d] = %f --- but --- T[%d] = %f delta = %f\n", i, Y[i], i, T[i], T[i] - Y[i]);
}
//-----------------------free----------------------------
// Cleanup and shutdown of cuda
CUDA_CHECK(cuMemFree(devH));
CUDA_CHECK(cuMemFree(devX));
CUDA_CHECK(cuMemFree(devY));
free(H);
free(X);
free(Y);
CUDA_CHECK(cuEventDestroy(hStart));
CUDA_CHECK(cuEventDestroy(hStop));
//CUBLAS_CHECK( cublasDestroy(hCublas) );
//hCublas = 0;
CUDA_CHECK(cuCtxDestroy(hContext));
hContext = 0;
cudaDeviceReset();
printf("done\n");
return EXIT_SUCCESS;
}
|
62130391e6580ed9fee977d20db3cbb6012e3395.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- High-Performance Graph Primitives on GPU
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file test_vis.cuh
*
* @brief Simple test driver program for Vertex-Induced Subgraph
*/
#include <stdio.h>
#include <string>
#include <iostream>
// utilities for correctness checking
#include <gunrock/util/test_utils.cuh>
// graph construction utilities
#include <gunrock/graphio/market.cuh>
// primitive-specific headers include
#include <gunrock/app/vis/vis_enactor.cuh>
#include <gunrock/app/vis/vis_problem.cuh>
#include <gunrock/app/vis/vis_functor.cuh>
// gunrock abstraction graph operators
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::vis;
// ----------------------------------------------------------------------------
// Housekeeping Routines
// ----------------------------------------------------------------------------
void Usage() {
printf(
" test_vis <graph type> <graph type args> [--undirected] [--quick]\n"
" [--device=<device_index>] [--instrumented] [--v]\n"
"Graph types and arguments:\n"
" market <file>\n"
" Reads a Matrix-Market coordinate-formatted graph,\n"
" edges from STDIN (or from the optionally-specified file)\n"
" --device=<device_index> Set GPU device to run. [Default: 0]\n"
" --undirected Convert the graph to undirected\n"
" --instrumented Keep kernels statics [Default: Disable]\n"
" total_queued, search_depth and avg_duty\n"
" (a relative indicator of load imbalance)\n"
" --quick Skip the CPU validation [Default: false]\n"
" --queue-sizing=<factor> Allocates a frontier queue sized at: \n"
" (graph-edges * <factor>) [Default: 1.0]\n"
" --v Print verbose per iteration debug info\n");
}
/**
* @brief Displays primitive results.
*
* @tparam VertexId
* @tparam SizeT
* @tparam Value
*
* @param[in] graph Reference to the CSR graph.
*/
template<typename VertexId, typename SizeT, typename Value>
void DisplaySolution(const Csr<VertexId, Value, SizeT> &graph) {
printf("==> display solution: (currently missing)\n");
// TODO(developer): code to print out results
}
/**
* @brief Performance / Evaluation statistics.
*/
struct Stats {
const char *name;
Statistic num_iterations;
Stats() : name(NULL), num_iterations() {}
explicit Stats(const char *name) : name(name), num_iterations() {}
};
/**
* @brief Test_Parameter structure.
*/
struct Test_Parameter : gunrock::app::TestParameter_Base {
public:
Test_Parameter() {}
~Test_Parameter() {}
void Init(CommandLineArgs &args) {
TestParameter_Base::Init(args);
}
};
/**
* @brief Displays timing and correctness statistics.
*
* @tparam VertexId
* @tparam SizeT
* @tparam Value
*
* @param[in] stats Reference to the Stats object.
* @param[in] graph Reference to the CSR graph.
* @param[in] elapsed Device elapsed running time.
* @param[in] iterations Number of iterations of the algorithm.
*/
template<typename VertexId, typename SizeT, typename Value>
void DisplayStats(
const Stats& stats,
const Csr<VertexId, Value, SizeT>& graph,
const float elapsed,
const long long iterations) {
printf("[%s] finished.\n", stats.name);
printf("elapsed: %.4f ms\n", elapsed);
printf("num_iterations: %lld\n", iterations);
// TODO(developer): code to print statistics
}
// ----------------------------------------------------------------------------
// Testing Routines
// ----------------------------------------------------------------------------
/**
* @brief A simple CPU-based reference implementation.
*
* @tparam VertexId
* @tparam SizeT
* @tparam Value
*
* @param[in] graph Reference to the CSR graph we process on.
*/
template<typename VertexId, typename SizeT, typename Value>
void SimpleReference(const Csr<VertexId, Value, SizeT> &graph) {
// initialization
// perform calculation
CpuTimer cpu_timer;
cpu_timer.Start();
// TODO(developer): CPU validation code here
cpu_timer.Stop();
float cpu_elapsed = cpu_timer.ElapsedMillis();
printf("CPU reference finished in %lf ms.\n\n", cpu_elapsed);
}
/**
* @brief Sample test entry
*
* @tparam VertexId
* @tparam SizeT
* @tparam Value
*
* @param[in] parameter Test parameter settings.
*/
template<typename VertexId, typename SizeT, typename Value>
void RunTest(Test_Parameter *parameter) {
typedef VISProblem < VertexId, SizeT, Value,
true, // MARK_PREDECESSORS
false, // ENABLE_IDEMPOTENCE
false > Problem;
Csr<VertexId, Value, SizeT>* graph =
(Csr<VertexId, Value, SizeT>*)parameter->graph;
ContextPtr* context = (ContextPtr*)parameter -> context;
std::string partition_method = parameter -> partition_method;
int max_grid_size = parameter -> max_grid_size;
int num_gpus = parameter -> num_gpus;
int* gpu_idx = parameter -> gpu_idx;
int iterations = parameter -> iterations;
bool g_quick = parameter -> g_quick;
bool g_stream_from_host = parameter -> g_stream_from_host;
double max_queue_sizing = parameter -> max_queue_sizing;
// allocate host-side array (for both reference and GPU-computed results)
VertexId *r_labels = (VertexId*)malloc(sizeof(VertexId) * graph->nodes);
VertexId *h_labels = (VertexId*)malloc(sizeof(VertexId) * graph->nodes);
VISEnactor <
Problem,
false, // INSTRUMENT
false, // DEBUG
true > // SIZE_CHECK
enactor(gpu_idx); // allocate primitive enactor map
Problem *problem = new Problem; // allocate primitive problem on GPU
util::GRError(
problem->Init(g_stream_from_host, *graph, num_gpus),
"Problem Initialization Failed", __FILE__, __LINE__);
Stats *stats = new Stats("GPU Primitive");
//
// perform calculation
//
GpuTimer gpu_timer;
float elapsed = 0.0f;
for (int iter = 0; iter < iterations; ++iter) {
util::GRError(
problem->Reset(enactor.GetFrontierType(),
max_queue_sizing),
"Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
util::GRError(
enactor.template Enact<Problem>(*context, problem, max_grid_size),
"Problem Enact Failed", __FILE__, __LINE__);
gpu_timer.Stop();
elapsed += gpu_timer.ElapsedMillis();
}
elapsed /= iterations;
// extract results
util::GRError(
problem->Extract(h_labels),
"Problem Data Extraction Failed", __FILE__, __LINE__);
// compute reference CPU validation solution
if (!g_quick) {
printf("==> computing reference value ... (currently missing)\n");
 SimpleReference<VertexId, SizeT, Value>(*graph);
printf("==> validation: (currently missing)\n");
}
 DisplaySolution<VertexId, SizeT, Value>(*graph); // display solution
// display statistics
 SizeT num_iterations = 0;
 enactor.GetStatistics(num_iterations);
 DisplayStats<VertexId, SizeT, Value>(*stats, *graph, elapsed, num_iterations);
// clean up
if (stats) { delete stats; }
if (problem) { delete problem; }
if (r_labels) { free(r_labels); }
if (h_labels) { free(h_labels); }
}
/**
* @brief Sample test entry
*
* @tparam VertexId
* @tparam SizeT
* @tparam Value
*
* @param[in] graph Pointer to the CSR graph we process on.
* @param[in] args Reference to the command line arguments.
* @param[in] num_gpus Number of GPUs.
* @param[in] context CudaContext pointer for moderngpu APIs.
 * @param[in] gpu_idx GPU index to run algorithm.
* @param[in] streams CUDA streams.
*/
template <
typename VertexId,
typename Value,
typename SizeT >
void RunTest(
Csr<VertexId, Value, SizeT>* graph,
CommandLineArgs& args,
int num_gpus,
ContextPtr* context,
int* gpu_idx,
hipStream_t* streams = NULL) {
Test_Parameter *parameter = new Test_Parameter;
parameter -> Init(args);
parameter -> graph = graph;
parameter -> num_gpus = num_gpus;
parameter -> context = context;
parameter -> gpu_idx = gpu_idx;
parameter -> streams = streams;
RunTest<VertexId, Value, SizeT>(parameter);
}
// ----------------------------------------------------------------------------
// Main
// ----------------------------------------------------------------------------
int main(int argc, char** argv) {
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help"))) {
Usage();
return 1;
}
int device = 0;
args.GetCmdLineArgument("device", device);
ContextPtr context = mgpu::CreateCudaDevice(device);
// parse graph-construction parameters
bool g_undirected = args.CheckCmdLineFlag("undirected");
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1) {
Usage();
return 1;
}
typedef int VertexId; // Use as the vertex identifier
typedef int SizeT; // Use as the graph size type
typedef int Value; // Use as the value type
if (graph_type == "market") {
// matrix-market coordinate-formatted graph
Csr<VertexId, Value, SizeT> csr(false);
char *name = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
name, csr, g_undirected, false) != 0) {
return 1;
}
csr.DisplayGraph(); // display graph adjacent list
csr.PrintHistogram(); // display graph histogram
RunTest<VertexId, Value, SizeT>(&csr, args, 1, &context, &device);
} else {
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
return 0;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| 62130391e6580ed9fee977d20db3cbb6012e3395.cu | // ----------------------------------------------------------------------------
// Gunrock -- High-Performance Graph Primitives on GPU
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file test_vis.cuh
*
* @brief Simple test driver program for Vertex-Induced Subgraph
*/
#include <stdio.h>
#include <string>
#include <iostream>
// utilities for correctness checking
#include <gunrock/util/test_utils.cuh>
// graph construction utilities
#include <gunrock/graphio/market.cuh>
// primitive-specific headers include
#include <gunrock/app/vis/vis_enactor.cuh>
#include <gunrock/app/vis/vis_problem.cuh>
#include <gunrock/app/vis/vis_functor.cuh>
// gunrock abstraction graph operators
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::vis;
// ----------------------------------------------------------------------------
// Housekeeping Routines
// ----------------------------------------------------------------------------
void Usage() {
printf(
" test_vis <graph type> <graph type args> [--undirected] [--quick]\n"
" [--device=<device_index>] [--instrumented] [--v]\n"
"Graph types and arguments:\n"
" market <file>\n"
" Reads a Matrix-Market coordinate-formatted graph,\n"
" edges from STDIN (or from the optionally-specified file)\n"
" --device=<device_index> Set GPU device to run. [Default: 0]\n"
" --undirected Convert the graph to undirected\n"
" --instrumented Keep kernels statics [Default: Disable]\n"
" total_queued, search_depth and avg_duty\n"
" (a relative indicator of load imbalance)\n"
" --quick Skip the CPU validation [Default: false]\n"
" --queue-sizing=<factor> Allocates a frontier queue sized at: \n"
" (graph-edges * <factor>) [Default: 1.0]\n"
" --v Print verbose per iteration debug info\n");
}
/**
* @brief Displays primitive results.
*
* @tparam VertexId
* @tparam SizeT
* @tparam Value
*
* @param[in] graph Reference to the CSR graph.
*/
template<typename VertexId, typename SizeT, typename Value>
void DisplaySolution(const Csr<VertexId, Value, SizeT> &graph) {
printf("==> display solution: (currently missing)\n");
// TODO(developer): code to print out results
}
/**
* @brief Performance / Evaluation statistics.
*/
struct Stats {
const char *name;
Statistic num_iterations;
Stats() : name(NULL), num_iterations() {}
explicit Stats(const char *name) : name(name), num_iterations() {}
};
/**
* @brief Test_Parameter structure.
*/
struct Test_Parameter : gunrock::app::TestParameter_Base {
public:
Test_Parameter() {}
~Test_Parameter() {}
void Init(CommandLineArgs &args) {
TestParameter_Base::Init(args);
}
};
/**
* @brief Displays timing and correctness statistics.
*
* @tparam VertexId
* @tparam SizeT
* @tparam Value
*
* @param[in] stats Reference to the Stats object.
* @param[in] graph Reference to the CSR graph.
* @param[in] elapsed Device elapsed running time.
* @param[in] iterations Number of iterations of the algorithm.
*/
template<typename VertexId, typename SizeT, typename Value>
void DisplayStats(
const Stats& stats,
const Csr<VertexId, Value, SizeT>& graph,
const float elapsed,
const long long iterations) {
printf("[%s] finished.\n", stats.name);
printf("elapsed: %.4f ms\n", elapsed);
printf("num_iterations: %lld\n", iterations);
// TODO(developer): code to print statistics
}
// ----------------------------------------------------------------------------
// Testing Routines
// ----------------------------------------------------------------------------
/**
* @brief A simple CPU-based reference implementation.
*
* @tparam VertexId
* @tparam SizeT
* @tparam Value
*
* @param[in] graph Reference to the CSR graph we process on.
*/
template<typename VertexId, typename SizeT, typename Value>
void SimpleReference(const Csr<VertexId, Value, SizeT> &graph) {
// initialization
// perform calculation
CpuTimer cpu_timer;
cpu_timer.Start();
// TODO(developer): CPU validation code here
cpu_timer.Stop();
float cpu_elapsed = cpu_timer.ElapsedMillis();
printf("CPU reference finished in %lf ms.\n\n", cpu_elapsed);
}
/**
* @brief Sample test entry
*
* @tparam VertexId
* @tparam SizeT
* @tparam Value
*
* @param[in] parameter Test parameter settings.
*/
template<typename VertexId, typename SizeT, typename Value>
void RunTest(Test_Parameter *parameter) {
typedef VISProblem < VertexId, SizeT, Value,
true, // MARK_PREDECESSORS
false, // ENABLE_IDEMPOTENCE
false > Problem;
Csr<VertexId, Value, SizeT>* graph =
(Csr<VertexId, Value, SizeT>*)parameter->graph;
ContextPtr* context = (ContextPtr*)parameter -> context;
std::string partition_method = parameter -> partition_method;
int max_grid_size = parameter -> max_grid_size;
int num_gpus = parameter -> num_gpus;
int* gpu_idx = parameter -> gpu_idx;
int iterations = parameter -> iterations;
bool g_quick = parameter -> g_quick;
bool g_stream_from_host = parameter -> g_stream_from_host;
double max_queue_sizing = parameter -> max_queue_sizing;
// allocate host-side array (for both reference and GPU-computed results)
VertexId *r_labels = (VertexId*)malloc(sizeof(VertexId) * graph->nodes);
VertexId *h_labels = (VertexId*)malloc(sizeof(VertexId) * graph->nodes);
VISEnactor <
Problem,
false, // INSTRUMENT
false, // DEBUG
true > // SIZE_CHECK
enactor(gpu_idx); // allocate primitive enactor map
Problem *problem = new Problem; // allocate primitive problem on GPU
util::GRError(
problem->Init(g_stream_from_host, *graph, num_gpus),
"Problem Initialization Failed", __FILE__, __LINE__);
Stats *stats = new Stats("GPU Primitive");
//
// perform calculation
//
GpuTimer gpu_timer;
float elapsed = 0.0f;
for (int iter = 0; iter < iterations; ++iter) {
util::GRError(
problem->Reset(enactor.GetFrontierType(),
max_queue_sizing),
"Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
util::GRError(
enactor.template Enact<Problem>(*context, problem, max_grid_size),
"Problem Enact Failed", __FILE__, __LINE__);
gpu_timer.Stop();
elapsed += gpu_timer.ElapsedMillis();
}
elapsed /= iterations;
// extract results
util::GRError(
problem->Extract(h_labels),
"Problem Data Extraction Failed", __FILE__, __LINE__);
// compute reference CPU validation solution
if (!g_quick) {
printf("==> computing reference value ... (currently missing)\n");
 SimpleReference<VertexId, SizeT, Value>(*graph);
printf("==> validation: (currently missing)\n");
}
 DisplaySolution<VertexId, SizeT, Value>(*graph); // display solution
// display statistics
 SizeT num_iterations = 0;
 enactor.GetStatistics(num_iterations);
 DisplayStats<VertexId, SizeT, Value>(*stats, *graph, elapsed, num_iterations);
// clean up
if (stats) { delete stats; }
if (problem) { delete problem; }
if (r_labels) { free(r_labels); }
if (h_labels) { free(h_labels); }
}
/**
* @brief Sample test entry
*
* @tparam VertexId
* @tparam SizeT
* @tparam Value
*
* @param[in] graph Pointer to the CSR graph we process on.
* @param[in] args Reference to the command line arguments.
* @param[in] num_gpus Number of GPUs.
* @param[in] context CudaContext pointer for moderngpu APIs.
 * @param[in] gpu_idx GPU index to run algorithm.
* @param[in] streams CUDA streams.
*/
template <
typename VertexId,
typename Value,
typename SizeT >
void RunTest(
Csr<VertexId, Value, SizeT>* graph,
CommandLineArgs& args,
int num_gpus,
ContextPtr* context,
int* gpu_idx,
cudaStream_t* streams = NULL) {
Test_Parameter *parameter = new Test_Parameter;
parameter -> Init(args);
parameter -> graph = graph;
parameter -> num_gpus = num_gpus;
parameter -> context = context;
parameter -> gpu_idx = gpu_idx;
parameter -> streams = streams;
RunTest<VertexId, Value, SizeT>(parameter);
}
// ----------------------------------------------------------------------------
// Main
// ----------------------------------------------------------------------------
int main(int argc, char** argv) {
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help"))) {
Usage();
return 1;
}
int device = 0;
args.GetCmdLineArgument("device", device);
ContextPtr context = mgpu::CreateCudaDevice(device);
// parse graph-construction parameters
bool g_undirected = args.CheckCmdLineFlag("undirected");
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1) {
Usage();
return 1;
}
typedef int VertexId; // Use as the vertex identifier
typedef int SizeT; // Use as the graph size type
typedef int Value; // Use as the value type
if (graph_type == "market") {
// matrix-market coordinate-formatted graph
Csr<VertexId, Value, SizeT> csr(false);
char *name = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
name, csr, g_undirected, false) != 0) {
return 1;
}
csr.DisplayGraph(); // display graph adjacent list
csr.PrintHistogram(); // display graph histogram
RunTest<VertexId, Value, SizeT>(&csr, args, 1, &context, &device);
} else {
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
return 0;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
e7298c47dc42fab6870e0d5985623b0f482fc974.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********HEADERS**********/
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <string>
#include <limits>
#include <stdlib.h>
#include <fstream>
#include <math.h>
#include <time.h>
#include "cuda_ptr.cuh"
#include "mimo-io.cuh"
using namespace std;
/**********DEFINING CONSTANTS***********/
#define NX 192 //was 201
#define NY 192 //was 201
#define NT 401
#define NS 640 //number of sensors
#define BLOCK_X 16
#define BLOCK_Y 16
#define HX 0.001f
#define HY 0.001f
#define H 0.001f
/* __constant__ float T = 1.3333e-04f; // 0.2f / 1500.f; */
#define DT 3.3333e-07f
/* __constant__ float fre = 125000.f; */
#define OMEGAC 7.8540e+05f
#define TAO 4.0000e-06f
#define TT 8.1573e-06f
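// Source pulse parameters used in propagation(): OMEGAC is the angular centre
// frequency (about 2*pi*125 kHz), TAO the Gaussian envelope width and TT the
// time shift; the excitation injected at the active sensor positions during the
// first 24 time steps is v*v*DT*DT * cos(OMEGAC*t) * exp(-t*t/(2*TAO*TAO)), t = k*DT - TT.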
/**********FUNCTION DECLARATION**********/
//Host Functions
void Ultrasonic_Tomography(const string&, int, int);
void Position_Transducers(host_ptr<int>, host_ptr<int>, int);
//In-Line Functions
inline int grid_size(int, int);
template <typename T> __host__ __device__ void minmax(T &a, T &b);
//Device Functions
__global__ void propagation(kernel_ptr<int> const, kernel_ptr<int> const, kernel_ptr<float> const, kernel_ptr<float>, int, int, int);
__global__ void propagation_at_corners(kernel_ptr<float>, int);
__global__ void initial_signal(kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>,kernel_ptr<float>,int);
/***************MAIN PROGRAM***************/
int main(int argc, char **argv)
{
//Command Line Argument Processing
if (argc != 3) {
cerr << "Usage: " << argv[0] << " <fo filename> <sensor group size>\n\n";
exit(1);
}
string fo_filename = argv[1];
int group_size = stoi(argv[2]);
if (count(fo_filename.begin(), fo_filename.end(), '.') != 1) {
cerr << "Error: '" << fo_filename << "' should have only one period.\n"
<< " It should be in the current directory "
<< "and have only one filetype extension.\n\n";
exit(1);
}
// Time Measuring Variables
int ti = 0, tf = 0;
// set floating-point precision on stdout and stderr
cout << fixed << setprecision(10);
cerr << fixed << setprecision(10);
cerr << "Ultrasonic Tomography Running:\n\n";
//Initial time
ti = clock();
cerr << "ti = " << ti << "\n";
Ultrasonic_Tomography(fo_filename, group_size, ti);
hipDeviceReset();
//Calculate total time
tf = clock();
cerr << "tf = " << tf << "\n"
<< "tt = " << tf - ti << "\n"
<< "Total Seconds = " << (float)(tf - ti) / CLOCKS_PER_SEC << "\n";
}
/**********HOST FUNCTION DEFINITIONS**********/
void Ultrasonic_Tomography(const string &fo_filename, int group_size, int ti)
{
// environment initialization
// fo(i, j) =
// ground truth value at pos (i, j) of field
host_ptr<float> fo(NX, NY);
device_ptr<float> dev_fo(NX, NY);
{
ifstream fo_in(fo_filename);
if (!fo_in) {
cerr << "Error: '" + fo_filename + "' file not found in current directory.\n\n";
return;
}
read(fo_in, fo);
copy(dev_fo, fo);
}
// Position of the transducers
host_ptr<int> ii(NS);
host_ptr<int> jj(NS);
device_ptr<int> dev_ii(NS);
device_ptr<int> dev_jj(NS);
Position_Transducers(ii, jj, NS);
// copy from host to device
copy(dev_ii, ii);
copy(dev_jj, jj);
// Ng = number of sensor groups that will be launched in parallel
int Ng = NS / group_size;
// u(i, j, k, g) =
// wave propagation at pos (i, j) of field, at time k, from sensor group g
device_ptr<float> dev_u(NX, NY, NT, Ng);
dev_u.set(0.f);
// kernel launch parameters for propagation
dim3 threads_propagation(NX, 1, 1);
dim3 grid_propagation(
grid_size(NX, threads_propagation.x),
grid_size(NY, threads_propagation.y),
grid_size(Ng, threads_propagation.z));
// kernel launch parameters for propagation_at_corners
dim3 threads_prop_corners(NT, 1);
dim3 grid_prop_corners(
grid_size(NT, threads_prop_corners.x),
grid_size(Ng, threads_prop_corners.y));
// initial wave propagation over fo
for (int k = 1; k < NT - 1; ++k)
hipLaunchKernelGGL(( propagation), dim3(grid_propagation), dim3(threads_propagation), 0, 0, dev_ii, dev_jj, dev_fo, dev_u, k, group_size, Ng);
hipLaunchKernelGGL(( propagation_at_corners), dim3(grid_prop_corners), dim3(threads_prop_corners), 0, 0, dev_u, Ng);
 // g_xxx(i, k, g) =
 // initial signal at pos i in row/column xxx
 // at time k, from sensor group g
 // e.g. g_bottom stores the bottom row,
// g_right stores the right column
device_ptr<float> dev_g_bottom(NX, NT, Ng);
device_ptr<float> dev_g_right(NY, NT, Ng);
device_ptr<float> dev_g_top(NX, NT, Ng);
device_ptr<float> dev_g_left(NY, NT, Ng);
dev_g_bottom.set(0.f);
dev_g_right.set(0.f);
dev_g_top.set(0.f);
dev_g_left.set(0.f);
// kernel launch parameters for initial_signal
dim3 threads_signal(NX, 1, 1);
dim3 grid_signal(
grid_size(NX, threads_signal.x),
grid_size(NT, threads_signal.y),
grid_size(Ng, threads_signal.z));
// store initial signal of wave at sensor positions of u in g
hipLaunchKernelGGL(( initial_signal), dim3(grid_signal), dim3(threads_signal), 0, 0, dev_u, dev_g_bottom, dev_g_right, dev_g_top, dev_g_left, Ng);
host_ptr<float> g_bottom(NX, NT, Ng);
host_ptr<float> g_right(NY, NT, Ng);
host_ptr<float> g_top(NX, NT, Ng);
host_ptr<float> g_left(NY, NT, Ng);
copy(g_bottom, dev_g_bottom);
copy(g_right, dev_g_right);
copy(g_top, dev_g_top);
copy(g_left, dev_g_left);
{
auto idx = fo_filename.find_first_of('.');
string prefix = fo_filename.substr(0, idx) + "-data-";
string suffix = "-" + to_string(group_size) + ".txt";
string gb_name = prefix + "bottom" + suffix;
string gr_name = prefix + "right" + suffix;
string gt_name = prefix + "top" + suffix;
string gl_name = prefix + "left" + suffix;
ofstream gb_out(gb_name);
ofstream gr_out(gr_name);
ofstream gt_out(gt_name);
ofstream gl_out(gl_name);
cerr << "writing to '" << gb_name << "'...\n\n";
write(gb_out, g_bottom);
cerr << "writing to '" << gr_name << "'...\n\n";
write(gr_out, g_right);
cerr << "writing to '" << gt_name << "'...\n\n";
write(gt_out, g_top);
cerr << "writing to '" << gl_name << "'...\n\n";
write(gl_out, g_left);
}
}
void Position_Transducers(host_ptr<int> ii, host_ptr<int> jj, int num)
{
 // returns the (ii, jj) grid coordinates of the num transducers, placed around a square ring (grid indices 21..181)
int p = 0;
for(p = 0; p < 160; p++)
{
ii(p) = 21 + (p + 1);
jj(p) = 181;
}
for(p = 160; p < 320; p++)
{
ii(p) = 181;
jj(p) = 181 - ((p + 1) - 160);
}
for(p = 320; p < 480; p++)
{
ii(p) = 181 - ((p + 1) - 320);
jj(p) = 21;
}
for(p = 480; p < num; p++)
{
ii(p) = 21;
jj(p) = 21 + ((p + 1) - 480);
}
}
/**********DEVICE FUNCTION DEFINITIONS***********/
__global__ void propagation(
kernel_ptr<int> const ii,
kernel_ptr<int> const jj,
kernel_ptr<float> const f,
kernel_ptr<float> u,
int k, int group_size, int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if(i < NX && j < NY && g < Ng) {
float v = 1500.f * sqrtf(1.f + f(i, j));
float r = v * DT / HX;
float s = 2.f - 4.f * r * r;
float val; // will hold new u at (i, j, k + 1)
// not at boundary
if (i != 0 && i != NX - 1 && j != 0 && j != NY - 1) {
val =
r * r *
(u(i+1, j, k, g) +
u(i-1, j, k, g) +
u(i, j-1, k, g) +
u(i, j+1, k, g)) +
s * u(i, j, k, g) -
u(i, j, k-1, g);
int p = g * group_size;
int jp1 = jj(p);
int jp2 = jj(p + group_size - 1);
int ip1 = ii(p);
int ip2 = ii(p + group_size - 1);
minmax(jp1, jp2);
minmax(ip1, ip2);
// at sensor, k <= 24
if (j + 1 >= jp1 && j + 1 <= jp2 && i + 1 >= ip1 && i + 1 <= ip2 && k + 1 <= 24) {
float t = k * DT - TT;
// add wave value
val +=
v * v * DT * DT *
cosf(OMEGAC * t) *
expf(-(t * t) / (2.f * TAO * TAO));
}
}
// at boundary
else {
// boundary booleans
bool top = (j == 0);
bool bottom = (j == NY - 1);
bool left = (i == 0);
bool right = (i == NX - 1);
// index variables for different boundary cases
int ja = top ? (j + 1) : bottom ? (j - 1) : j;
int jb = top ? (j + 2) : bottom ? (j - 2) : j;
int ia = left ? (i + 1) : right ? (i - 1) : i;
int ib = left ? (i + 2) : right ? (i - 2) : i;
val =
(2.f - 2.f * r - r * r) * u(i, j, k, g) +
2.f * r * (1.f + r) * u(ia, ja, k, g) -
r * r * u(ib, jb, k, g) +
(2.f * r - 1.f) * u(i, j, k-1, g) -
2.f * r * u(ia, ja, k-1, g);
}
u(i, j, k+1, g) = val;
}
}
__global__ void propagation_at_corners(
kernel_ptr<float> u,
int Ng)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
int g = threadIdx.y + blockIdx.y * blockDim.y;
if (k < NT && g < Ng) {
u(0, 0, k, g) =
1.f / 2.f * (u(0, 1, k, g) + u(1, 0, k, g));
u(NX-1, 0, k, g) =
1.f / 2.f * (u(NX-2, 0, k, g) + u(NX-1, 1, k, g));
u(0, NY-1, k, g) =
1.f / 2.f * (u(0, NY-2, k, g) + u(1, NY-1, k, g));
u(NX-1, NY-1, k, g) =
1.f / 2.f * (u(NX-2, NY-1, k, g) + u(NX-1, NY-2, k, g));
}
}
__global__ void initial_signal(
kernel_ptr<float> const u,
kernel_ptr<float> g_bottom,
kernel_ptr<float> g_right,
kernel_ptr<float> g_top,
kernel_ptr<float> g_left,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (i > 20 && i < 180 && k > 1 && k < NT && g < Ng) {
// store values at bottom sensor row of u
g_bottom(i, k, g) =
u(i, 180, k, g);
// store values at top sensor row of u
g_top(i, k, g) =
u(i, 20, k, g);
// store values at right sensor column of u
g_right(i, k, g) =
u(180, i, k, g);
// store values at left sensor column of u
g_left(i, k, g) =
u(20, i, k, g);
}
}
/**********INLINE FUNCTION DEFINITIONS**********/
inline int grid_size(int n, int threads)
{
return ceil(float(n) / threads);
}
// POST-CONDITION: a <= b
template <typename T>
__host__ __device__
void minmax(T &a, T &b)
{
if (a > b) {
 T t = a;
a = b;
b = t;
}
}
| e7298c47dc42fab6870e0d5985623b0f482fc974.cu | /**********HEADERS**********/
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <string>
#include <limits>
#include <stdlib.h>
#include <fstream>
#include <math.h>
#include <time.h>
#include "cuda_ptr.cuh"
#include "mimo-io.cuh"
using namespace std;
/**********DEFINING CONSTANTS***********/
#define NX 192 //was 201
#define NY 192 //was 201
#define NT 401
#define NS 640 //number of sensors
#define BLOCK_X 16
#define BLOCK_Y 16
#define HX 0.001f
#define HY 0.001f
#define H 0.001f
/* __constant__ float T = 1.3333e-04f; // 0.2f / 1500.f; */
#define DT 3.3333e-07f
/* __constant__ float fre = 125000.f; */
#define OMEGAC 7.8540e+05f
#define TAO 4.0000e-06f
#define TT 8.1573e-06f
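// Source pulse parameters used in propagation(): OMEGAC is the angular centre
// frequency (about 2*pi*125 kHz), TAO the Gaussian envelope width and TT the
// time shift; the excitation injected at the active sensor positions during the
// first 24 time steps is v*v*DT*DT * cos(OMEGAC*t) * exp(-t*t/(2*TAO*TAO)), t = k*DT - TT.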
/**********FUNCTION DECLARATION**********/
//Host Functions
void Ultrasonic_Tomography(const string&, int, int);
void Position_Transducers(host_ptr<int>, host_ptr<int>, int);
//In-Line Functions
inline int grid_size(int, int);
template <typename T> __host__ __device__ void minmax(T &a, T &b);
//Device Functions
__global__ void propagation(kernel_ptr<int> const, kernel_ptr<int> const, kernel_ptr<float> const, kernel_ptr<float>, int, int, int);
__global__ void propagation_at_corners(kernel_ptr<float>, int);
__global__ void initial_signal(kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>,kernel_ptr<float>,int);
/***************MAIN PROGRAM***************/
int main(int argc, char **argv)
{
//Command Line Argument Processing
if (argc != 3) {
cerr << "Usage: " << argv[0] << " <fo filename> <sensor group size>\n\n";
exit(1);
}
string fo_filename = argv[1];
int group_size = stoi(argv[2]);
if (count(fo_filename.begin(), fo_filename.end(), '.') != 1) {
cerr << "Error: '" << fo_filename << "' should have only one period.\n"
<< " It should be in the current directory "
<< "and have only one filetype extension.\n\n";
exit(1);
}
// Time Measuring Variables
int ti = 0, tf = 0;
// set floating-point precision on stdout and stderr
cout << fixed << setprecision(10);
cerr << fixed << setprecision(10);
cerr << "Ultrasonic Tomography Running:\n\n";
//Initial time
ti = clock();
cerr << "ti = " << ti << "\n";
Ultrasonic_Tomography(fo_filename, group_size, ti);
cudaDeviceReset();
//Calculate total time
tf = clock();
cerr << "tf = " << tf << "\n"
<< "tt = " << tf - ti << "\n"
<< "Total Seconds = " << (float)(tf - ti) / CLOCKS_PER_SEC << "\n";
}
/**********HOST FUNCTION DEFINITIONS**********/
void Ultrasonic_Tomography(const string &fo_filename, int group_size, int ti)
{
// environment initialization
// fo(i, j) =
// ground truth value at pos (i, j) of field
host_ptr<float> fo(NX, NY);
device_ptr<float> dev_fo(NX, NY);
{
ifstream fo_in(fo_filename);
if (!fo_in) {
cerr << "Error: '" + fo_filename + "' file not found in current directory.\n\n";
return;
}
read(fo_in, fo);
copy(dev_fo, fo);
}
// Position of the transducers
host_ptr<int> ii(NS);
host_ptr<int> jj(NS);
device_ptr<int> dev_ii(NS);
device_ptr<int> dev_jj(NS);
Position_Transducers(ii, jj, NS);
// copy from host to device
copy(dev_ii, ii);
copy(dev_jj, jj);
// Ng = number of sensor groups that will be launched in parallel
int Ng = NS / group_size;
// u(i, j, k, g) =
// wave propagation at pos (i, j) of field, at time k, from sensor group g
device_ptr<float> dev_u(NX, NY, NT, Ng);
dev_u.set(0.f);
// kernel launch parameters for propagation
dim3 threads_propagation(NX, 1, 1);
dim3 grid_propagation(
grid_size(NX, threads_propagation.x),
grid_size(NY, threads_propagation.y),
grid_size(Ng, threads_propagation.z));
// kernel launch parameters for propagation_at_corners
dim3 threads_prop_corners(NT, 1);
dim3 grid_prop_corners(
grid_size(NT, threads_prop_corners.x),
grid_size(Ng, threads_prop_corners.y));
// initial wave propagation over fo
for (int k = 1; k < NT - 1; ++k)
propagation<<<grid_propagation, threads_propagation>>>(dev_ii, dev_jj, dev_fo, dev_u, k, group_size, Ng);
propagation_at_corners<<<grid_prop_corners, threads_prop_corners>>>(dev_u, Ng);
 // g_xxx(i, k, g) =
 // initial signal at pos i in row/column xxx
 // at time k, from sensor group g
 // e.g. g_bottom stores the bottom row,
// g_right stores the right column
device_ptr<float> dev_g_bottom(NX, NT, Ng);
device_ptr<float> dev_g_right(NY, NT, Ng);
device_ptr<float> dev_g_top(NX, NT, Ng);
device_ptr<float> dev_g_left(NY, NT, Ng);
dev_g_bottom.set(0.f);
dev_g_right.set(0.f);
dev_g_top.set(0.f);
dev_g_left.set(0.f);
// kernel launch parameters for initial_signal
dim3 threads_signal(NX, 1, 1);
dim3 grid_signal(
grid_size(NX, threads_signal.x),
grid_size(NT, threads_signal.y),
grid_size(Ng, threads_signal.z));
// store initial signal of wave at sensor positions of u in g
initial_signal<<<grid_signal, threads_signal>>>(dev_u, dev_g_bottom, dev_g_right, dev_g_top, dev_g_left, Ng);
host_ptr<float> g_bottom(NX, NT, Ng);
host_ptr<float> g_right(NY, NT, Ng);
host_ptr<float> g_top(NX, NT, Ng);
host_ptr<float> g_left(NY, NT, Ng);
copy(g_bottom, dev_g_bottom);
copy(g_right, dev_g_right);
copy(g_top, dev_g_top);
copy(g_left, dev_g_left);
{
auto idx = fo_filename.find_first_of('.');
string prefix = fo_filename.substr(0, idx) + "-data-";
string suffix = "-" + to_string(group_size) + ".txt";
string gb_name = prefix + "bottom" + suffix;
string gr_name = prefix + "right" + suffix;
string gt_name = prefix + "top" + suffix;
string gl_name = prefix + "left" + suffix;
ofstream gb_out(gb_name);
ofstream gr_out(gr_name);
ofstream gt_out(gt_name);
ofstream gl_out(gl_name);
cerr << "writing to '" << gb_name << "'...\n\n";
write(gb_out, g_bottom);
cerr << "writing to '" << gr_name << "'...\n\n";
write(gr_out, g_right);
cerr << "writing to '" << gt_name << "'...\n\n";
write(gt_out, g_top);
cerr << "writing to '" << gl_name << "'...\n\n";
write(gl_out, g_left);
}
}
void Position_Transducers(host_ptr<int> ii, host_ptr<int> jj, int num)
{
 // returns the (ii, jj) grid coordinates of the num transducers, placed around a square ring (grid indices 21..181)
int p = 0;
for(p = 0; p < 160; p++)
{
ii(p) = 21 + (p + 1);
jj(p) = 181;
}
for(p = 160; p < 320; p++)
{
ii(p) = 181;
jj(p) = 181 - ((p + 1) - 160);
}
for(p = 320; p < 480; p++)
{
ii(p) = 181 - ((p + 1) - 320);
jj(p) = 21;
}
for(p = 480; p < num; p++)
{
ii(p) = 21;
jj(p) = 21 + ((p + 1) - 480);
}
}
/**********DEVICE FUNCTION DEFINITIONS***********/
__global__ void propagation(
kernel_ptr<int> const ii,
kernel_ptr<int> const jj,
kernel_ptr<float> const f,
kernel_ptr<float> u,
int k, int group_size, int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if(i < NX && j < NY && g < Ng) {
float v = 1500.f * sqrtf(1.f + f(i, j));
float r = v * DT / HX;
float s = 2.f - 4.f * r * r;
float val; // will hold new u at (i, j, k + 1)
// not at boundary
if (i != 0 && i != NX - 1 && j != 0 && j != NY - 1) {
val =
r * r *
(u(i+1, j, k, g) +
u(i-1, j, k, g) +
u(i, j-1, k, g) +
u(i, j+1, k, g)) +
s * u(i, j, k, g) -
u(i, j, k-1, g);
int p = g * group_size;
int jp1 = jj(p);
int jp2 = jj(p + group_size - 1);
int ip1 = ii(p);
int ip2 = ii(p + group_size - 1);
minmax(jp1, jp2);
minmax(ip1, ip2);
// at sensor, k <= 24
if (j + 1 >= jp1 && j + 1 <= jp2 && i + 1 >= ip1 && i + 1 <= ip2 && k + 1 <= 24) {
float t = k * DT - TT;
// add wave value
val +=
v * v * DT * DT *
cosf(OMEGAC * t) *
expf(-(t * t) / (2.f * TAO * TAO));
}
}
// at boundary
else {
// boundary booleans
bool top = (j == 0);
bool bottom = (j == NY - 1);
bool left = (i == 0);
bool right = (i == NX - 1);
// index variables for different boundary cases
int ja = top ? (j + 1) : bottom ? (j - 1) : j;
int jb = top ? (j + 2) : bottom ? (j - 2) : j;
int ia = left ? (i + 1) : right ? (i - 1) : i;
int ib = left ? (i + 2) : right ? (i - 2) : i;
val =
(2.f - 2.f * r - r * r) * u(i, j, k, g) +
2.f * r * (1.f + r) * u(ia, ja, k, g) -
r * r * u(ib, jb, k, g) +
(2.f * r - 1.f) * u(i, j, k-1, g) -
2.f * r * u(ia, ja, k-1, g);
}
u(i, j, k+1, g) = val;
}
}
__global__ void propagation_at_corners(
kernel_ptr<float> u,
int Ng)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
int g = threadIdx.y + blockIdx.y * blockDim.y;
if (k < NT && g < Ng) {
u(0, 0, k, g) =
1.f / 2.f * (u(0, 1, k, g) + u(1, 0, k, g));
u(NX-1, 0, k, g) =
1.f / 2.f * (u(NX-2, 0, k, g) + u(NX-1, 1, k, g));
u(0, NY-1, k, g) =
1.f / 2.f * (u(0, NY-2, k, g) + u(1, NY-1, k, g));
u(NX-1, NY-1, k, g) =
1.f / 2.f * (u(NX-2, NY-1, k, g) + u(NX-1, NY-2, k, g));
}
}
__global__ void initial_signal(
kernel_ptr<float> const u,
kernel_ptr<float> g_bottom,
kernel_ptr<float> g_right,
kernel_ptr<float> g_top,
kernel_ptr<float> g_left,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (i > 20 && i < 180 && k > 1 && k < NT && g < Ng) {
// store values at bottom sensor row of u
g_bottom(i, k, g) =
u(i, 180, k, g);
// store values at top sensor row of u
g_top(i, k, g) =
u(i, 20, k, g);
// store values at right sensor column of u
g_right(i, k, g) =
u(180, i, k, g);
// store values at left sensor column of u
g_left(i, k, g) =
u(20, i, k, g);
}
}
/**********INLINE FUNCTION DEFINITIONS**********/
inline int grid_size(int n, int threads)
{
return ceil(float(n) / threads);
}
// POST-CONDITION: a <= b
template <typename T>
__host__ __device__
void minmax(T &a, T &b)
{
if (a > b) {
 T t = a;
a = b;
b = t;
}
}
|
eba2ac0c57905a70ae1cab4fe9064b288aadaa2f.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include "cuZFP.h"
#include "encode1.cuh"
#include "encode2.cuh"
#include "encode3.cuh"
#include "decode1.cuh"
#include "decode2.cuh"
#include "decode3.cuh"
#include "ErrorCheck.h"
#include "pointers.cuh"
#include "type_info.cuh"
#include <iostream>
#include <assert.h>
// we need to know about bitstream, but we don't
// want duplicate symbols.
#ifndef inline_
#define inline_ inline
#endif
#include "../inline/bitstream.c"
namespace internal
{
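// The is_contigous* helpers decide whether a strided traversal of the field
// covers a dense, gap-free block of memory: they compute the smallest and
// largest linear index reachable and compare that span with the element count.
// 'offset' is set to the (possibly negative) index of the first element so the
// base pointer can be rebased before the data is copied to the device.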
bool is_contigous3d(const uint dims[3], const int3 &stride, long long int &offset)
{
typedef long long int int64;
int64 idims[3];
idims[0] = dims[0];
idims[1] = dims[1];
idims[2] = dims[2];
int64 imin = ::min(stride.x,0) * (idims[0] - 1) +
::min(stride.y,0) * (idims[1] - 1) +
::min(stride.z,0) * (idims[2] - 1);
int64 imax = ::max(stride.x,0) * (idims[0] - 1) +
::max(stride.y,0) * (idims[1] - 1) +
::max(stride.z,0) * (idims[2] - 1);
offset = imin;
int64 ns = idims[0] * idims[1] * idims[2];
return (imax - imin + 1 == ns);
}
bool is_contigous2d(const uint dims[3], const int3 &stride, long long int &offset)
{
typedef long long int int64;
int64 idims[2];
idims[0] = dims[0];
idims[1] = dims[1];
int64 imin = ::min(stride.x,0) * (idims[0] - 1) +
::min(stride.y,0) * (idims[1] - 1);
int64 imax = ::max(stride.x,0) * (idims[0] - 1) +
::max(stride.y,0) * (idims[1] - 1);
offset = imin;
return (imax - imin + 1) == (idims[0] * idims[1]);
}
bool is_contigous1d(uint dim, const int &stride, long long int &offset)
{
offset = 0;
if(stride < 0) offset = stride * (int(dim) - 1);
return std::abs(stride) == 1;
}
bool is_contigous(const uint dims[3], const int3 &stride, long long int &offset)
{
int d = 0;
if(dims[0] != 0) d++;
if(dims[1] != 0) d++;
if(dims[2] != 0) d++;
if(d == 3)
{
return is_contigous3d(dims, stride, offset);
}
else if(d == 2)
{
return is_contigous2d(dims, stride, offset);
}
else
{
return is_contigous1d(dims[0], stride.x, offset);
}
}
//
// encode expects device pointers
//
template<typename T>
size_t encode(uint dims[3], int3 stride, int bits_per_block, T *d_data, Word *d_stream)
{
int d = 0;
size_t len = 1;
for(int i = 0; i < 3; ++i)
{
if(dims[i] != 0)
{
d++;
len *= dims[i];
}
}
ErrorCheck errors;
size_t stream_size = 0;
if(d == 1)
{
int dim = dims[0];
int sx = stride.x;
stream_size = cuZFP::encode1<T>(dim, sx, d_data, d_stream, bits_per_block);
}
else if(d == 2)
{
uint2 ndims = make_uint2(dims[0], dims[1]);
int2 s;
s.x = stride.x;
s.y = stride.y;
stream_size = cuZFP::encode2<T>(ndims, s, d_data, d_stream, bits_per_block);
}
else if(d == 3)
{
int3 s;
s.x = stride.x;
s.y = stride.y;
s.z = stride.z;
uint3 ndims = make_uint3(dims[0], dims[1], dims[2]);
stream_size = cuZFP::encode<T>(ndims, s, d_data, d_stream, bits_per_block);
}
errors.chk("Encode");
return stream_size;
}
template<typename T>
size_t decode(uint ndims[3], int3 stride, int bits_per_block, Word *stream, T *out)
{
int d = 0;
size_t out_size = 1;
size_t stream_bytes = 0;
for(int i = 0; i < 3; ++i)
{
if(ndims[i] != 0)
{
d++;
out_size *= ndims[i];
}
}
if(d == 3)
{
uint3 dims = make_uint3(ndims[0], ndims[1], ndims[2]);
int3 s;
s.x = stride.x;
s.y = stride.y;
s.z = stride.z;
stream_bytes = cuZFP::decode3<T>(dims, s, stream, out, bits_per_block);
}
else if(d == 1)
{
uint dim = ndims[0];
int sx = stride.x;
stream_bytes = cuZFP::decode1<T>(dim, sx, stream, out, bits_per_block);
}
else if(d == 2)
{
uint2 dims;
dims.x = ndims[0];
dims.y = ndims[1];
int2 s;
s.x = stride.x;
s.y = stride.y;
stream_bytes = cuZFP::decode2<T>(dims, s, stream, out, bits_per_block);
}
else std::cerr<<" d == "<<d<<" not implemented\n";
return stream_bytes;
}
Word *setup_device_stream(zfp_stream *stream,const zfp_field *field)
{
bool stream_device = cuZFP::is_gpu_ptr(stream->stream->begin);
 assert(sizeof(word) == sizeof(Word)); // "CUDA version currently only supports 64bit words"
if(stream_device)
{
return (Word*) stream->stream->begin;
}
Word *d_stream = NULL;
 // TODO: when we have a real stream we can just ask it how big it is
size_t max_size = zfp_stream_maximum_size(stream, field);
hipMalloc(&d_stream, max_size);
hipMemcpy(d_stream, stream->stream->begin, max_size, hipMemcpyHostToDevice);
return d_stream;
}
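// offset_void: advance 'ptr' by 'offset' elements of the given zfp scalar type
// (void* arithmetic has to account for the element size explicitly).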
void * offset_void(zfp_type type, void *ptr, long long int offset)
{
void * offset_ptr = NULL;
if(type == zfp_type_float)
{
float* data = (float*) ptr;
offset_ptr = (void*)(&data[offset]);
}
else if(type == zfp_type_double)
{
double* data = (double*) ptr;
offset_ptr = (void*)(&data[offset]);
}
else if(type == zfp_type_int32)
{
int * data = (int*) ptr;
offset_ptr = (void*)(&data[offset]);
}
else if(type == zfp_type_int64)
{
long long int * data = (long long int*) ptr;
offset_ptr = (void*)(&data[offset]);
}
return offset_ptr;
}
void *setup_device_field(const zfp_field *field, const int3 &stride, long long int &offset)
{
bool field_device = cuZFP::is_gpu_ptr(field->data);
if(field_device)
{
offset = 0;
return field->data;
}
uint dims[3];
dims[0] = field->nx;
dims[1] = field->ny;
dims[2] = field->nz;
size_t type_size = zfp_type_size(field->type);
size_t field_size = 1;
for(int i = 0; i < 3; ++i)
{
if(dims[i] != 0)
{
field_size *= dims[i];
}
}
bool contig = internal::is_contigous(dims, stride, offset);
 void * host_ptr = offset_void(field->type, field->data, offset);
void *d_data = NULL;
if(contig)
{
size_t field_bytes = type_size * field_size;
hipMalloc(&d_data, field_bytes);
hipMemcpy(d_data, host_ptr, field_bytes, hipMemcpyHostToDevice);
}
return offset_void(field->type, d_data, -offset);
}
void cleanup_device_ptr(void *orig_ptr, void *d_ptr, size_t bytes, long long int offset, zfp_type type)
{
bool device = cuZFP::is_gpu_ptr(orig_ptr);
if(device)
{
return;
}
// from whence it came
void *d_offset_ptr = offset_void(type, d_ptr, offset);
void *h_offset_ptr = offset_void(type, orig_ptr, offset);
if(bytes > 0)
{
hipMemcpy(h_offset_ptr, d_offset_ptr, bytes, hipMemcpyDeviceToHost);
}
hipFree(d_offset_ptr);
}
} // namespace internal
size_t
cuda_compress(zfp_stream *stream, const zfp_field *field)
{
uint dims[3];
dims[0] = field->nx;
dims[1] = field->ny;
dims[2] = field->nz;
int3 stride;
stride.x = field->sx ? field->sx : 1;
stride.y = field->sy ? field->sy : field->nx;
stride.z = field->sz ? field->sz : field->nx * field->ny;
size_t stream_bytes = 0;
long long int offset = 0;
void *d_data = internal::setup_device_field(field, stride, offset);
if(d_data == NULL)
{
// null means the array is non-contiguous host mem which is not supported
return 0;
}
Word *d_stream = internal::setup_device_stream(stream, field);
if(field->type == zfp_type_float)
{
float* data = (float*) d_data;
stream_bytes = internal::encode<float>(dims, stride, (int)stream->maxbits, data, d_stream);
}
else if(field->type == zfp_type_double)
{
double* data = (double*) d_data;
stream_bytes = internal::encode<double>(dims, stride, (int)stream->maxbits, data, d_stream);
}
else if(field->type == zfp_type_int32)
{
int * data = (int*) d_data;
stream_bytes = internal::encode<int>(dims, stride, (int)stream->maxbits, data, d_stream);
}
else if(field->type == zfp_type_int64)
{
long long int * data = (long long int*) d_data;
stream_bytes = internal::encode<long long int>(dims, stride, (int)stream->maxbits, data, d_stream);
}
internal::cleanup_device_ptr(stream->stream->begin, d_stream, stream_bytes, 0, field->type);
internal::cleanup_device_ptr(field->data, d_data, 0, offset, field->type);
// zfp wants to flush the stream.
// set bits to wsize because we already did that.
size_t compressed_size = stream_bytes / sizeof(Word);
stream->stream->bits = wsize;
// set stream pointer to end of stream
stream->stream->ptr = stream->stream->begin + compressed_size;
return stream_bytes;
}
void
cuda_decompress(zfp_stream *stream, zfp_field *field)
{
uint dims[3];
dims[0] = field->nx;
dims[1] = field->ny;
dims[2] = field->nz;
int3 stride;
stride.x = field->sx ? field->sx : 1;
stride.y = field->sy ? field->sy : field->nx;
stride.z = field->sz ? field->sz : field->nx * field->ny;
size_t decoded_bytes = 0;
long long int offset = 0;
void *d_data = internal::setup_device_field(field, stride, offset);
if(d_data == NULL)
{
// null means the array is non-contiguous host mem which is not supported
return;
}
Word *d_stream = internal::setup_device_stream(stream, field);
if(field->type == zfp_type_float)
{
float *data = (float*) d_data;
decoded_bytes = internal::decode(dims, stride, (int)stream->maxbits, d_stream, data);
d_data = (void*) data;
}
else if(field->type == zfp_type_double)
{
double *data = (double*) d_data;
decoded_bytes = internal::decode(dims, stride, (int)stream->maxbits, d_stream, data);
d_data = (void*) data;
}
else if(field->type == zfp_type_int32)
{
int *data = (int*) d_data;
decoded_bytes = internal::decode(dims, stride, (int)stream->maxbits, d_stream, data);
d_data = (void*) data;
}
else if(field->type == zfp_type_int64)
{
long long int *data = (long long int*) d_data;
decoded_bytes = internal::decode(dims, stride, (int)stream->maxbits, d_stream, data);
d_data = (void*) data;
}
else
{
std::cerr<<"Cannot decompress: type unknown\n";
}
size_t type_size = zfp_type_size(field->type);
size_t field_size = 1;
for(int i = 0; i < 3; ++i)
{
if(dims[i] != 0)
{
field_size *= dims[i];
}
}
size_t bytes = type_size * field_size;
internal::cleanup_device_ptr(stream->stream->begin, d_stream, 0, 0, field->type);
internal::cleanup_device_ptr(field->data, d_data, bytes, offset, field->type);
// this is how zfp determines if this was a success
size_t words_read = decoded_bytes / sizeof(Word);
stream->stream->bits = wsize;
// set stream pointer to end of stream
stream->stream->ptr = stream->stream->begin + words_read;
}
| eba2ac0c57905a70ae1cab4fe9064b288aadaa2f.cu | #include <assert.h>
#include "cuZFP.h"
#include "encode1.cuh"
#include "encode2.cuh"
#include "encode3.cuh"
#include "decode1.cuh"
#include "decode2.cuh"
#include "decode3.cuh"
#include "ErrorCheck.h"
#include "pointers.cuh"
#include "type_info.cuh"
#include <iostream>
#include <assert.h>
// we need to know about bitstream, but we don't
// want duplicate symbols.
#ifndef inline_
#define inline_ inline
#endif
#include "../inline/bitstream.c"
namespace internal
{
bool is_contigous3d(const uint dims[3], const int3 &stride, long long int &offset)
{
typedef long long int int64;
int64 idims[3];
idims[0] = dims[0];
idims[1] = dims[1];
idims[2] = dims[2];
int64 imin = std::min(stride.x,0) * (idims[0] - 1) +
std::min(stride.y,0) * (idims[1] - 1) +
std::min(stride.z,0) * (idims[2] - 1);
int64 imax = std::max(stride.x,0) * (idims[0] - 1) +
std::max(stride.y,0) * (idims[1] - 1) +
std::max(stride.z,0) * (idims[2] - 1);
offset = imin;
int64 ns = idims[0] * idims[1] * idims[2];
return (imax - imin + 1 == ns);
}
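/*
 * Worked example (editor's note, not in the original): dims = {4,4,4} with
 * stride = {1,4,16} gives imin = 0, imax = 3*1 + 3*4 + 3*16 = 63 and ns = 64,
 * so the field is contiguous with offset 0. With stride.x = -1 instead,
 * imin = -3 and imax = 60; imax - imin + 1 == 64 == ns, so the field is still
 * contiguous, but its lowest-addressed element sits at offset -3 from the
 * base pointer.
 */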
bool is_contigous2d(const uint dims[3], const int3 &stride, long long int &offset)
{
typedef long long int int64;
int64 idims[2];
idims[0] = dims[0];
idims[1] = dims[1];
int64 imin = std::min(stride.x,0) * (idims[0] - 1) +
std::min(stride.y,0) * (idims[1] - 1);
int64 imax = std::max(stride.x,0) * (idims[0] - 1) +
std::max(stride.y,0) * (idims[1] - 1);
offset = imin;
return (imax - imin + 1) == (idims[0] * idims[1]);
}
bool is_contigous1d(uint dim, const int &stride, long long int &offset)
{
offset = 0;
if(stride < 0) offset = stride * (int(dim) - 1);
return std::abs(stride) == 1;
}
bool is_contigous(const uint dims[3], const int3 &stride, long long int &offset)
{
int d = 0;
if(dims[0] != 0) d++;
if(dims[1] != 0) d++;
if(dims[2] != 0) d++;
if(d == 3)
{
return is_contigous3d(dims, stride, offset);
}
else if(d == 2)
{
return is_contigous2d(dims, stride, offset);
}
else
{
return is_contigous1d(dims[0], stride.x, offset);
}
}
//
// encode expects device pointers
//
template<typename T>
size_t encode(uint dims[3], int3 stride, int bits_per_block, T *d_data, Word *d_stream)
{
int d = 0;
size_t len = 1;
for(int i = 0; i < 3; ++i)
{
if(dims[i] != 0)
{
d++;
len *= dims[i];
}
}
ErrorCheck errors;
size_t stream_size = 0;
if(d == 1)
{
int dim = dims[0];
int sx = stride.x;
stream_size = cuZFP::encode1<T>(dim, sx, d_data, d_stream, bits_per_block);
}
else if(d == 2)
{
uint2 ndims = make_uint2(dims[0], dims[1]);
int2 s;
s.x = stride.x;
s.y = stride.y;
stream_size = cuZFP::encode2<T>(ndims, s, d_data, d_stream, bits_per_block);
}
else if(d == 3)
{
int3 s;
s.x = stride.x;
s.y = stride.y;
s.z = stride.z;
uint3 ndims = make_uint3(dims[0], dims[1], dims[2]);
stream_size = cuZFP::encode<T>(ndims, s, d_data, d_stream, bits_per_block);
}
errors.chk("Encode");
return stream_size;
}
template<typename T>
size_t decode(uint ndims[3], int3 stride, int bits_per_block, Word *stream, T *out)
{
int d = 0;
size_t out_size = 1;
size_t stream_bytes = 0;
for(int i = 0; i < 3; ++i)
{
if(ndims[i] != 0)
{
d++;
out_size *= ndims[i];
}
}
if(d == 3)
{
uint3 dims = make_uint3(ndims[0], ndims[1], ndims[2]);
int3 s;
s.x = stride.x;
s.y = stride.y;
s.z = stride.z;
stream_bytes = cuZFP::decode3<T>(dims, s, stream, out, bits_per_block);
}
else if(d == 1)
{
uint dim = ndims[0];
int sx = stride.x;
stream_bytes = cuZFP::decode1<T>(dim, sx, stream, out, bits_per_block);
}
else if(d == 2)
{
uint2 dims;
dims.x = ndims[0];
dims.y = ndims[1];
int2 s;
s.x = stride.x;
s.y = stride.y;
stream_bytes = cuZFP::decode2<T>(dims, s, stream, out, bits_per_block);
}
else std::cerr<<" d == "<<d<<" not implemented\n";
return stream_bytes;
}
Word *setup_device_stream(zfp_stream *stream,const zfp_field *field)
{
bool stream_device = cuZFP::is_gpu_ptr(stream->stream->begin);
assert(sizeof(word) == sizeof(Word)); // CUDA version currently only supports 64-bit words
if(stream_device)
{
return (Word*) stream->stream->begin;
}
Word *d_stream = NULL;
// TODO: if we have a real stream we can just ask it how big it is
size_t max_size = zfp_stream_maximum_size(stream, field);
cudaMalloc(&d_stream, max_size);
cudaMemcpy(d_stream, stream->stream->begin, max_size, cudaMemcpyHostToDevice);
return d_stream;
}
void * offset_void(zfp_type type, void *ptr, long long int offset)
{
void * offset_ptr = NULL;
if(type == zfp_type_float)
{
float* data = (float*) ptr;
offset_ptr = (void*)(&data[offset]);
}
else if(type == zfp_type_double)
{
double* data = (double*) ptr;
offset_ptr = (void*)(&data[offset]);
}
else if(type == zfp_type_int32)
{
int * data = (int*) ptr;
offset_ptr = (void*)(&data[offset]);
}
else if(type == zfp_type_int64)
{
long long int * data = (long long int*) ptr;
offset_ptr = (void*)(&data[offset]);
}
return offset_ptr;
}
void *setup_device_field(const zfp_field *field, const int3 &stride, long long int &offset)
{
bool field_device = cuZFP::is_gpu_ptr(field->data);
if(field_device)
{
offset = 0;
return field->data;
}
uint dims[3];
dims[0] = field->nx;
dims[1] = field->ny;
dims[2] = field->nz;
size_t type_size = zfp_type_size(field->type);
size_t field_size = 1;
for(int i = 0; i < 3; ++i)
{
if(dims[i] != 0)
{
field_size *= dims[i];
}
}
bool contig = internal::is_contigous(dims, stride, offset);
void * host_ptr = offset_void(field->type, field->data, offset);
void *d_data = NULL;
if(contig)
{
size_t field_bytes = type_size * field_size;
cudaMalloc(&d_data, field_bytes);
cudaMemcpy(d_data, host_ptr, field_bytes, cudaMemcpyHostToDevice);
}
return offset_void(field->type, d_data, -offset);
}
void cleanup_device_ptr(void *orig_ptr, void *d_ptr, size_t bytes, long long int offset, zfp_type type)
{
bool device = cuZFP::is_gpu_ptr(orig_ptr);
if(device)
{
return;
}
// from whence it came
void *d_offset_ptr = offset_void(type, d_ptr, offset);
void *h_offset_ptr = offset_void(type, orig_ptr, offset);
if(bytes > 0)
{
cudaMemcpy(h_offset_ptr, d_offset_ptr, bytes, cudaMemcpyDeviceToHost);
}
cudaFree(d_offset_ptr);
}
} // namespace internal
size_t
cuda_compress(zfp_stream *stream, const zfp_field *field)
{
uint dims[3];
dims[0] = field->nx;
dims[1] = field->ny;
dims[2] = field->nz;
int3 stride;
stride.x = field->sx ? field->sx : 1;
stride.y = field->sy ? field->sy : field->nx;
stride.z = field->sz ? field->sz : field->nx * field->ny;
size_t stream_bytes = 0;
long long int offset = 0;
void *d_data = internal::setup_device_field(field, stride, offset);
if(d_data == NULL)
{
// null means the array is non-contiguous host mem which is not supported
return 0;
}
Word *d_stream = internal::setup_device_stream(stream, field);
if(field->type == zfp_type_float)
{
float* data = (float*) d_data;
stream_bytes = internal::encode<float>(dims, stride, (int)stream->maxbits, data, d_stream);
}
else if(field->type == zfp_type_double)
{
double* data = (double*) d_data;
stream_bytes = internal::encode<double>(dims, stride, (int)stream->maxbits, data, d_stream);
}
else if(field->type == zfp_type_int32)
{
int * data = (int*) d_data;
stream_bytes = internal::encode<int>(dims, stride, (int)stream->maxbits, data, d_stream);
}
else if(field->type == zfp_type_int64)
{
long long int * data = (long long int*) d_data;
stream_bytes = internal::encode<long long int>(dims, stride, (int)stream->maxbits, data, d_stream);
}
internal::cleanup_device_ptr(stream->stream->begin, d_stream, stream_bytes, 0, field->type);
internal::cleanup_device_ptr(field->data, d_data, 0, offset, field->type);
// zfp wants to flush the stream.
// set bits to wsize because we already did that.
size_t compressed_size = stream_bytes / sizeof(Word);
stream->stream->bits = wsize;
// set stream pointer to end of stream
stream->stream->ptr = stream->stream->begin + compressed_size;
return stream_bytes;
}
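/*
 * Hedged usage sketch (editor's addition, not part of the original file):
 * cuda_compress() is normally reached through the public zfp C API rather
 * than called directly. A typical fixed-rate setup might look like the
 * following; exact calls depend on the zfp version in use, and the buffer
 * handling here is illustrative only.
 *
 *   zfp_field* field = zfp_field_3d(data, zfp_type_float, nx, ny, nz);
 *   zfp_stream* zfp = zfp_stream_open(NULL);
 *   zfp_stream_set_rate(zfp, 8.0, zfp_type_float, 3, 0);
 *   size_t bufsize = zfp_stream_maximum_size(zfp, field);
 *   bitstream* bs = stream_open(malloc(bufsize), bufsize);
 *   zfp_stream_set_bit_stream(zfp, bs);
 *   zfp_stream_set_execution(zfp, zfp_exec_cuda);
 *   size_t compressed_bytes = zfp_compress(zfp, field);
 */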
void
cuda_decompress(zfp_stream *stream, zfp_field *field)
{
uint dims[3];
dims[0] = field->nx;
dims[1] = field->ny;
dims[2] = field->nz;
int3 stride;
stride.x = field->sx ? field->sx : 1;
stride.y = field->sy ? field->sy : field->nx;
stride.z = field->sz ? field->sz : field->nx * field->ny;
size_t decoded_bytes = 0;
long long int offset = 0;
void *d_data = internal::setup_device_field(field, stride, offset);
if(d_data == NULL)
{
// null means the array is non-contiguous host mem which is not supported
return;
}
Word *d_stream = internal::setup_device_stream(stream, field);
if(field->type == zfp_type_float)
{
float *data = (float*) d_data;
decoded_bytes = internal::decode(dims, stride, (int)stream->maxbits, d_stream, data);
d_data = (void*) data;
}
else if(field->type == zfp_type_double)
{
double *data = (double*) d_data;
decoded_bytes = internal::decode(dims, stride, (int)stream->maxbits, d_stream, data);
d_data = (void*) data;
}
else if(field->type == zfp_type_int32)
{
int *data = (int*) d_data;
decoded_bytes = internal::decode(dims, stride, (int)stream->maxbits, d_stream, data);
d_data = (void*) data;
}
else if(field->type == zfp_type_int64)
{
long long int *data = (long long int*) d_data;
decoded_bytes = internal::decode(dims, stride, (int)stream->maxbits, d_stream, data);
d_data = (void*) data;
}
else
{
std::cerr<<"Cannot decompress: type unknown\n";
}
size_t type_size = zfp_type_size(field->type);
size_t field_size = 1;
for(int i = 0; i < 3; ++i)
{
if(dims[i] != 0)
{
field_size *= dims[i];
}
}
size_t bytes = type_size * field_size;
internal::cleanup_device_ptr(stream->stream->begin, d_stream, 0, 0, field->type);
internal::cleanup_device_ptr(field->data, d_data, bytes, offset, field->type);
// this is how zfp determines if this was a success
size_t words_read = decoded_bytes / sizeof(Word);
stream->stream->bits = wsize;
// set stream pointer to end of stream
stream->stream->ptr = stream->stream->begin + words_read;
}
|
048218497102aeb1df13d8cba0b6a6603d2fe447.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Rectangular matrix multiplication
* A[M][K] * B[k][N] = C[M][N]
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/timeb.h>
#include <string.h>
/* read timer in second */
double read_timer() {
struct timeb tm;
ftime(&tm);
return (double) tm.time + (double) tm.millitm / 1000.0;
}
/* read timer in ms */
double read_timer_ms() {
struct timeb tm;
ftime(&tm);
return (double) tm.time * 1000.0 + (double) tm.millitm;
}
#define REAL float
void init(int M, int N, REAL * A) {
int i, j;
for (i = 0; i < M; i++) {
for (j = 0; j < N; j++) {
A[i*N+j] = (REAL) drand48();
}
}
}
double maxerror(int M, int N, REAL * A, REAL *B) {
int i, j;
double error = 0.0;
for (i = 0; i < M; i++) {
for (j = 0; j < N; j++) {
double diff = (A[i*N+j] - B[i*N+j]) / A[i*N+j];
if (diff < 0)
diff = -diff;
if (diff > error)
error = diff;
}
}
return error;
}
void matmul_base(int N, REAL *A, REAL * B, REAL *C);
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks);
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);
int main(int argc, char *argv[]) {
int N;
int num_tasks = 5; /* 5 is default number of tasks */
double elapsed_base, elapsed_openmp, elapsed_cuda_v1, elapsed_cuda_v2, elapsed_cuda_v3; /* for timing */
if (argc < 2) {
fprintf(stderr, "Usage: matmul <n> [<#tasks(%d)>]\n", num_tasks);
exit(1);
}
N = atoi(argv[1]);
if (argc > 2) num_tasks = atoi(argv[2]);
REAL * heap_buffer = (REAL*)malloc(sizeof(REAL)*N*N*4); /* we use 4 matrices (A, B, C_base, C_openmp) in this example */
/* below, pointers into the buffer are used as 2-d row-major arrays */
REAL *A = heap_buffer;
REAL *B = &heap_buffer[N*N];
REAL *C_base = &heap_buffer[2*N*N];
REAL *C_openmp = &heap_buffer[3*N*N];
srand48((1 << 12));
init(N, N, A);
init(N, N, B);
/* example run */
elapsed_base = read_timer();
matmul_base(N, A, B, C_base);
elapsed_base = (read_timer() - elapsed_base);
elapsed_openmp = read_timer();
matmul_openmp(N, A, B, C_openmp, num_tasks);
elapsed_openmp = (read_timer() - elapsed_openmp);
/* call and timing for the three CUDA versions */
/* there are three devices you can use on gpu.secs.oakland.edu, 0, 2, 3.
* device 1 is a graphics card with less compute capability.
*/
hipSetDevice(0);
//call and time for matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
//call and time for matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
//call and time for matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);
printf("======================================================================================================\n");
printf("Matrix Multiplication: A[M][K] * B[k][N] = C[M][N], M=K=N=%d, %d threads/tasks\n", N, num_tasks);
printf("------------------------------------------------------------------------------------------------------\n");
printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n");
printf("------------------------------------------------------------------------------------------------------\n");
printf("matmul_base:\t\t%4f\t%4f \t\t%g\n", elapsed_base * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_base)), maxerror(N, N, C_base, C_base));
printf("matmul_openmp:\t\t%4f\t%4f \t\t%g\n", elapsed_openmp * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_openmp)), maxerror(N, N, C_base, C_openmp));
/* put other printf statements for outputting results for GPU execution */
free(heap_buffer);
return 0;
}
void matmul_base(int N, REAL *A, REAL * B, REAL *C) {
int i, j, k;
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
REAL temp = 0.0;
for (k = 0; k < N; k++) {
temp += A[i*N+k] * B[k*N+j];
}
C[i*N+j] = temp;
}
}
}
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks) {
int i, j, k;
#pragma omp parallel for shared(N,A,B,C,num_tasks) private(i,j,k) num_threads(num_tasks)
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
REAL temp = 0.0;
for (k = 0; k < N; k++) {
temp += A[i*N+k] * B[k*N+j];
}
C[i*N+j] = temp;
}
}
}
/*
* call to kernel that uses GPU global memory
*/
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C) {
}
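/*
 * Hedged sketch (editor's addition, not part of the original assignment): once
 * a global-memory kernel exists (say matmul_kernel_global, taking N, d_A, d_B,
 * d_C), the HIP-style launch this stub would issue looks like:
 *
 *   dim3 block(16, 16);
 *   dim3 grid((N + 15) / 16, (N + 15) / 16);
 *   hipLaunchKernelGGL(matmul_kernel_global, grid, block, 0, 0, N, d_A, d_B, d_C);
 *
 * Kernel and device-buffer names are illustrative only.
 */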
/*
* call to kernel that uses GPU shared memory
*/
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C) {
}
/*
* call to sgemm of cublas library
*/
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C) {
}
| 048218497102aeb1df13d8cba0b6a6603d2fe447.cu | /*
* Rectangular matrix multiplication
* A[M][K] * B[k][N] = C[M][N]
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/timeb.h>
#include <string.h>
/* read timer in second */
double read_timer() {
struct timeb tm;
ftime(&tm);
return (double) tm.time + (double) tm.millitm / 1000.0;
}
/* read timer in ms */
double read_timer_ms() {
struct timeb tm;
ftime(&tm);
return (double) tm.time * 1000.0 + (double) tm.millitm;
}
#define REAL float
void init(int M, int N, REAL * A) {
int i, j;
for (i = 0; i < M; i++) {
for (j = 0; j < N; j++) {
A[i*N+j] = (REAL) drand48();
}
}
}
double maxerror(int M, int N, REAL * A, REAL *B) {
int i, j;
double error = 0.0;
for (i = 0; i < M; i++) {
for (j = 0; j < N; j++) {
double diff = (A[i*N+j] - B[i*N+j]) / A[i*N+j];
if (diff < 0)
diff = -diff;
if (diff > error)
error = diff;
}
}
return error;
}
void matmul_base(int N, REAL *A, REAL * B, REAL *C);
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks);
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);
int main(int argc, char *argv[]) {
int N;
int num_tasks = 5; /* 5 is default number of tasks */
double elapsed_base, elapsed_openmp, elapsed_cuda_v1, elapsed_cuda_v2, elapsed_cuda_v3; /* for timing */
if (argc < 2) {
fprintf(stderr, "Usage: matmul <n> [<#tasks(%d)>]\n", num_tasks);
exit(1);
}
N = atoi(argv[1]);
if (argc > 2) num_tasks = atoi(argv[2]);
REAL * heap_buffer = (REAL*)malloc(sizeof(REAL)*N*N*4); /* we use 4 matrices (A, B, C_base, C_openmp) in this example */
/* below, pointers into the buffer are used as 2-d row-major arrays */
REAL *A = heap_buffer;
REAL *B = &heap_buffer[N*N];
REAL *C_base = &heap_buffer[2*N*N];
REAL *C_openmp = &heap_buffer[3*N*N];
srand48((1 << 12));
init(N, N, A);
init(N, N, B);
/* example run */
elapsed_base = read_timer();
matmul_base(N, A, B, C_base);
elapsed_base = (read_timer() - elapsed_base);
elapsed_openmp = read_timer();
matmul_openmp(N, A, B, C_openmp, num_tasks);
elapsed_openmp = (read_timer() - elapsed_openmp);
/* call and timing for the three CUDA versions */
/* there are three devices you can use on gpu.secs.oakland.edu, 0, 2, 3.
* device 1 is a graphics card with less compute capability.
*/
cudaSetDevice(0);
//call and time for matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
//call and time for matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
//call and time for matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);
printf("======================================================================================================\n");
printf("Matrix Multiplication: A[M][K] * B[k][N] = C[M][N], M=K=N=%d, %d threads/tasks\n", N, num_tasks);
printf("------------------------------------------------------------------------------------------------------\n");
printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n");
printf("------------------------------------------------------------------------------------------------------\n");
printf("matmul_base:\t\t%4f\t%4f \t\t%g\n", elapsed_base * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_base)), maxerror(N, N, C_base, C_base));
printf("matmul_openmp:\t\t%4f\t%4f \t\t%g\n", elapsed_openmp * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_openmp)), maxerror(N, N, C_base, C_openmp));
/* put other printf statements for outputting results for GPU execution */
free(heap_buffer);
return 0;
}
void matmul_base(int N, REAL *A, REAL * B, REAL *C) {
int i, j, k;
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
REAL temp = 0.0;
for (k = 0; k < N; k++) {
temp += A[i*N+k] * B[k*N+j];
}
C[i*N+j] = temp;
}
}
}
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks) {
int i, j, k;
#pragma omp parallel for shared(N,A,B,C,num_tasks) private(i,j,k) num_threads(num_tasks)
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
REAL temp = 0.0;
for (k = 0; k < N; k++) {
temp += A[i*N+k] * B[k*N+j];
}
C[i*N+j] = temp;
}
}
}
/*
* call to kernel that uses GPU global memory
*/
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C) {
}
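/*
 * Hedged sketch (editor's addition, not part of the original assignment): a
 * straightforward global-memory kernel that matmul_cuda_v1_vanilla could
 * launch after copying A and B to the device. The kernel name and the 16x16
 * launch geometry are assumptions for illustration only.
 */
__global__ void matmul_kernel_global_sketch(int N, REAL *A, REAL *B, REAL *C) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < N && col < N) {
        REAL temp = 0.0;
        for (int k = 0; k < N; k++)
            temp += A[row * N + k] * B[k * N + col]; /* same dot product as matmul_base */
        C[row * N + col] = temp;
    }
}
/* Possible launch from the host wrapper:
 *   dim3 block(16, 16); dim3 grid((N + 15) / 16, (N + 15) / 16);
 *   matmul_kernel_global_sketch<<<grid, block>>>(N, d_A, d_B, d_C);
 */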
/*
* call to kernel that uses GPU shared memory
*/
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C) {
}
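/*
 * Hedged sketch (editor's addition): a tiled kernel using shared memory that
 * matmul_cuda_v1_shmem could launch with TILE_SZ x TILE_SZ thread blocks.
 * TILE_SZ and the kernel name are assumptions for illustration only.
 */
#define TILE_SZ 16
__global__ void matmul_kernel_shmem_sketch(int N, REAL *A, REAL *B, REAL *C) {
    __shared__ REAL sA[TILE_SZ][TILE_SZ];
    __shared__ REAL sB[TILE_SZ][TILE_SZ];
    int row = blockIdx.y * TILE_SZ + threadIdx.y;
    int col = blockIdx.x * TILE_SZ + threadIdx.x;
    REAL temp = 0.0;
    for (int t = 0; t < (N + TILE_SZ - 1) / TILE_SZ; t++) {
        int aCol = t * TILE_SZ + threadIdx.x; /* column of A loaded by this thread */
        int bRow = t * TILE_SZ + threadIdx.y; /* row of B loaded by this thread */
        sA[threadIdx.y][threadIdx.x] = (row < N && aCol < N) ? A[row * N + aCol] : 0.0;
        sB[threadIdx.y][threadIdx.x] = (bRow < N && col < N) ? B[bRow * N + col] : 0.0;
        __syncthreads();
        for (int k = 0; k < TILE_SZ; k++)
            temp += sA[threadIdx.y][k] * sB[k][threadIdx.x];
        __syncthreads();
    }
    if (row < N && col < N)
        C[row * N + col] = temp;
}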
/*
* call to sgemm of cublas library
*/
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C) {
}
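/*
 * Hedged sketch (editor's addition, not part of the original assignment): one
 * way this stub could wrap cublasSgemm. Row-major C = A*B is obtained as
 * column-major C^T = B^T * A^T by swapping the A and B arguments. Assumes
 * #include <cublas_v2.h> and device buffers d_A, d_B, d_C already populated;
 * names are illustrative only.
 *
 *   cublasHandle_t handle;
 *   cublasCreate(&handle);
 *   const float alpha = 1.0f, beta = 0.0f;
 *   cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N,
 *               &alpha, d_B, N, d_A, N, &beta, d_C, N);
 *   cublasDestroy(handle);
 */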
|
850ceb8cca9268a14a374711ab57495413ceffd7.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2020 Xiaomi Corporation (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cmath>
#include <random>
#include <type_traits>
#ifdef K2_WITH_CUDA
#include "hiprand/hiprand.h" // NOLINT
#include "hiprand/hiprand_kernel.h" // NOLINT
#endif
#include "k2/csrc/rand.h"
namespace k2 {
namespace {
// when calling hiprand_init() in kernels, its arguments
// seed and offset are from this struct. All kernels
// share the same seed and offset.
struct CudaRandState {
// the default value for seed is from
// https://github.com/pytorch/pytorch/blob/master/c10/core/GeneratorImpl.h#L56
//
// It has a good distribution of 0s and 1s in bit representation.
uint64_t seed = 67280421310721u;
uint64_t offset = 0;
};
struct CpuRandState {
uint64_t seed = std::mt19937::default_seed;
std::mt19937 generator;
};
static CudaRandState &GetCudaRandState(ContextPtr context) {
int32_t device_id = context->GetDeviceId();
K2_CHECK_LT(device_id, kMaxNumGpus);
static CudaRandState rand_states[kMaxNumGpus];
return rand_states[device_id];
}
static CpuRandState &GetCpuRandState() {
static thread_local CpuRandState state;
return state;
}
template <typename T, typename Distribution>
static void RandCpu(int32_t dim, T low, T high, T *out) {
Distribution distribution(low, high);
auto &generator = GetCpuRandState().generator;
for (int32_t i = 0; i != dim; ++i) {
out[i] = distribution(generator);
}
}
} // namespace
uint64_t GetSeed(ContextPtr context) {
DeviceType device_type = context->GetDeviceType();
if (device_type == kCuda) return GetCudaRandState(context).seed;
K2_CHECK_EQ(device_type, kCpu);
return GetCpuRandState().seed;
}
void SetSeed(ContextPtr context, uint64_t seed) {
DeviceType device_type = context->GetDeviceType();
if (device_type == kCuda) {
// TODO(fangjun): we may need a lock here
CudaRandState &state = GetCudaRandState(context);
state.seed = seed;
state.offset = 0;
return;
}
K2_CHECK_EQ(device_type, kCpu);
CpuRandState &state = GetCpuRandState();
state.seed = seed;
state.generator.seed(seed);
}
template <>
void Rand<float>(ContextPtr context, float low, float high, int32_t dim,
float *array_data) {
K2_CHECK_LT(low, high);
if (dim == 0) return;
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) {
RandCpu<float, std::uniform_real_distribution<float>>(dim, low, high,
array_data);
return;
}
K2_CHECK_EQ(device_type, kCuda);
#ifdef K2_WITH_CUDA
CudaRandState &state = GetCudaRandState(context);
float range = high - low;
auto generate_rand_lambda_float = [=] __device__(int32_t i) {
hiprandStatePhilox4_32_10_t philox_state;
hiprand_init(state.seed,
i, // sequence
state.offset, &philox_state);
float4 r = hiprand_uniform4(&philox_state);
// hiprand_uniform4() returns a number in (0, 1],
// we want to transform it to [0, 1)
//
// CAUTION: `1 - r.x` is not used here as it may be rounded up to 1
// when `r.x` is close to 0
float t = (r.x == 1.0f) ? 0.0f : r.x;
array_data[i] = t * range + low;
};
EvalDevice(context, dim, generate_rand_lambda_float);
state.offset += 4;
#else
K2_LOG(FATAL) << "Unreachable code";
#endif
}
template <>
void Rand<double>(ContextPtr context, double low, double high, int32_t dim,
double *array_data) {
K2_CHECK_LT(low, high);
if (dim == 0) return;
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) {
RandCpu<double, std::uniform_real_distribution<double>>(dim, low, high,
array_data);
return;
}
#ifdef K2_WITH_CUDA
K2_CHECK_EQ(device_type, kCuda);
CudaRandState &state = GetCudaRandState(context);
double range = high - low;
auto generate_rand_lambda_double = [=] __device__(int32_t i) {
hiprandStatePhilox4_32_10_t philox_state;
hiprand_init(state.seed,
i, // sequence
state.offset, &philox_state);
double2 r = hiprand_uniform2_double(&philox_state);
double t = (r.x == 1.0) ? 0.0 : r.x;
array_data[i] = t * range + low;
};
EvalDevice(context, dim, generate_rand_lambda_double);
state.offset += 4;
#else
K2_LOG(FATAL) << "Unreachable code.";
#endif
}
template <>
void Rand<int32_t>(ContextPtr context, int32_t low, int32_t high, int32_t dim,
int32_t *array_data) {
K2_CHECK_LT(low, high);
if (dim == 0) return;
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) {
RandCpu<int32_t, std::uniform_int_distribution<int32_t>>(
dim, low, high - 1, // -1 since high is to be excluded
array_data);
return;
}
#ifdef K2_WITH_CUDA
K2_CHECK_EQ(device_type, kCuda);
CudaRandState &state = GetCudaRandState(context);
uint32_t range = high - low;
auto generate_rand_lambda_int32 = [=] __device__(int32_t i) {
hiprandStatePhilox4_32_10_t philox_state;
hiprand_init(state.seed,
i, // sequence
state.offset, &philox_state);
uint4 r = hiprand4(&philox_state);
int32_t t = static_cast<int32_t>(r.x % range + low);
array_data[i] = t;
};
EvalDevice(context, dim, generate_rand_lambda_int32);
state.offset += 4;
#else
K2_LOG(FATAL) << "Unreachable code.";
#endif
}
} // namespace k2
| 850ceb8cca9268a14a374711ab57495413ceffd7.cu | /**
* Copyright 2020 Xiaomi Corporation (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cmath>
#include <random>
#include <type_traits>
#ifdef K2_WITH_CUDA
#include "curand.h" // NOLINT
#include "curand_kernel.h" // NOLINT
#endif
#include "k2/csrc/rand.h"
namespace k2 {
namespace {
// when calling curand_init() in kernels, its arguments
// seed and offset are from this struct. All kernels
// share the same seed and offset.
struct CudaRandState {
// the default value for seed is from
// https://github.com/pytorch/pytorch/blob/master/c10/core/GeneratorImpl.h#L56
//
// It has a good distribution of 0s and 1s in bit representation.
uint64_t seed = 67280421310721u;
uint64_t offset = 0;
};
struct CpuRandState {
uint64_t seed = std::mt19937::default_seed;
std::mt19937 generator;
};
static CudaRandState &GetCudaRandState(ContextPtr context) {
int32_t device_id = context->GetDeviceId();
K2_CHECK_LT(device_id, kMaxNumGpus);
static CudaRandState rand_states[kMaxNumGpus];
return rand_states[device_id];
}
static CpuRandState &GetCpuRandState() {
static thread_local CpuRandState state;
return state;
}
template <typename T, typename Distribution>
static void RandCpu(int32_t dim, T low, T high, T *out) {
Distribution distribution(low, high);
auto &generator = GetCpuRandState().generator;
for (int32_t i = 0; i != dim; ++i) {
out[i] = distribution(generator);
}
}
} // namespace
uint64_t GetSeed(ContextPtr context) {
DeviceType device_type = context->GetDeviceType();
if (device_type == kCuda) return GetCudaRandState(context).seed;
K2_CHECK_EQ(device_type, kCpu);
return GetCpuRandState().seed;
}
void SetSeed(ContextPtr context, uint64_t seed) {
DeviceType device_type = context->GetDeviceType();
if (device_type == kCuda) {
// TODO(fangjun): we may need a lock here
CudaRandState &state = GetCudaRandState(context);
state.seed = seed;
state.offset = 0;
return;
}
K2_CHECK_EQ(device_type, kCpu);
CpuRandState &state = GetCpuRandState();
state.seed = seed;
state.generator.seed(seed);
}
template <>
void Rand<float>(ContextPtr context, float low, float high, int32_t dim,
float *array_data) {
K2_CHECK_LT(low, high);
if (dim == 0) return;
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) {
RandCpu<float, std::uniform_real_distribution<float>>(dim, low, high,
array_data);
return;
}
K2_CHECK_EQ(device_type, kCuda);
#ifdef K2_WITH_CUDA
CudaRandState &state = GetCudaRandState(context);
float range = high - low;
auto generate_rand_lambda_float = [=] __device__(int32_t i) {
curandStatePhilox4_32_10_t philox_state;
curand_init(state.seed,
i, // sequence
state.offset, &philox_state);
float4 r = curand_uniform4(&philox_state);
// curand_uniform4() returns a number in (0, 1],
// we want to transform it to [0, 1)
//
// CAUTION: `1 - r.x` is not used here as it may be rounded up to 1
// when `r.x` is close to 0
float t = (r.x == 1.0f) ? 0.0f : r.x;
array_data[i] = t * range + low;
};
EvalDevice(context, dim, generate_rand_lambda_float);
state.offset += 4;
#else
K2_LOG(FATAL) << "Unreachable code";
#endif
}
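/*
 * Hedged usage sketch (editor's addition, not part of the original file): how
 * a caller might seed and fill a host buffer with uniform floats in [0, 1).
 * Assumes k2's GetCpuContext() from context.h; the buffer name is
 * illustrative only.
 *
 *   ContextPtr c = GetCpuContext();
 *   SetSeed(c, 20210418);
 *   std::vector<float> buf(1000);
 *   Rand<float>(c, 0.0f, 1.0f, static_cast<int32_t>(buf.size()), buf.data());
 */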
template <>
void Rand<double>(ContextPtr context, double low, double high, int32_t dim,
double *array_data) {
K2_CHECK_LT(low, high);
if (dim == 0) return;
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) {
RandCpu<double, std::uniform_real_distribution<double>>(dim, low, high,
array_data);
return;
}
#ifdef K2_WITH_CUDA
K2_CHECK_EQ(device_type, kCuda);
CudaRandState &state = GetCudaRandState(context);
double range = high - low;
auto generate_rand_lambda_double = [=] __device__(int32_t i) {
curandStatePhilox4_32_10_t philox_state;
curand_init(state.seed,
i, // sequence
state.offset, &philox_state);
double2 r = curand_uniform2_double(&philox_state);
double t = (r.x == 1.0) ? 0.0 : r.x;
array_data[i] = t * range + low;
};
EvalDevice(context, dim, generate_rand_lambda_double);
state.offset += 4;
#else
K2_LOG(FATAL) << "Unreachable code.";
#endif
}
template <>
void Rand<int32_t>(ContextPtr context, int32_t low, int32_t high, int32_t dim,
int32_t *array_data) {
K2_CHECK_LT(low, high);
if (dim == 0) return;
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) {
RandCpu<int32_t, std::uniform_int_distribution<int32_t>>(
dim, low, high - 1, // -1 since high is to be excluded
array_data);
return;
}
#ifdef K2_WITH_CUDA
K2_CHECK_EQ(device_type, kCuda);
CudaRandState &state = GetCudaRandState(context);
uint32_t range = high - low;
auto generate_rand_lambda_int32 = [=] __device__(int32_t i) {
curandStatePhilox4_32_10_t philox_state;
curand_init(state.seed,
i, // sequence
state.offset, &philox_state);
uint4 r = curand4(&philox_state);
int32_t t = static_cast<int32_t>(r.x % range + low);
array_data[i] = t;
};
EvalDevice(context, dim, generate_rand_lambda_int32);
state.offset += 4;
#else
K2_LOG(FATAL) << "Unreachable code.";
#endif
}
} // namespace k2
|
473407504a3b07d17dbc7e018b12b3e99c550aed.hip | // !!! This is a file automatically generated by hipify!!!
#include "src/randomCudaScripts/DeleteFromArray.h"
#include <cmath>
#include <hip/hip_runtime.h>
#include <iostream>
#include "device_launch_parameters.h"
__global__ void gpu_add_block_sums(unsigned int* const d_out,
const unsigned int* const d_in,
unsigned int* const d_block_sums,
const size_t numElems)
{
//unsigned int glbl_t_idx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int d_block_sum_val = d_block_sums[blockIdx.x];
//unsigned int d_in_val_0 = 0;
//unsigned int d_in_val_1 = 0;
// Simple implementation's performance is not significantly (if at all)
// better than previous verbose implementation
unsigned int cpy_idx = 2 * blockIdx.x * blockDim.x + threadIdx.x;
if (cpy_idx < numElems)
{
d_out[cpy_idx] = d_in[cpy_idx] + d_block_sum_val;
if (cpy_idx + blockDim.x < numElems)
d_out[cpy_idx + blockDim.x] = d_in[cpy_idx + blockDim.x] + d_block_sum_val;
}
//if (2 * glbl_t_idx < numElems)
//{
// d_out[2 * glbl_t_idx] = d_in[2 * glbl_t_idx] + d_block_sum_val;
// if (2 * glbl_t_idx + 1 < numElems)
// d_out[2 * glbl_t_idx + 1] = d_in[2 * glbl_t_idx + 1] + d_block_sum_val;
//}
//if (2 * glbl_t_idx < numElems)
//{
// d_in_val_0 = d_in[2 * glbl_t_idx];
// if (2 * glbl_t_idx + 1 < numElems)
// d_in_val_1 = d_in[2 * glbl_t_idx + 1];
//}
//else
// return;
//__syncthreads();
//d_out[2 * glbl_t_idx] = d_in_val_0 + d_block_sum_val;
//if (2 * glbl_t_idx + 1 < numElems)
// d_out[2 * glbl_t_idx + 1] = d_in_val_1 + d_block_sum_val;
}
// Modified version of Mark Harris' implementation of the Blelloch scan
// according to https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf
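// Worked example (editor's note, not in the original): an exclusive scan of
// [3, 1, 7, 0, 4, 1, 6, 3] produces [0, 3, 4, 11, 11, 15, 16, 22]; entry i
// holds the sum of everything before i, and the block's grand total (25) is
// stored in d_block_sums[blockIdx.x] before being cleared for the downsweep.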
__global__ void gpu_prescan(unsigned int* const d_out,
const unsigned int* const d_in,
unsigned int* const d_block_sums,
const unsigned int len,
const unsigned int shmem_sz,
const unsigned int max_elems_per_block)
{
// Allocated on invocation
extern __shared__ unsigned int s_out[];
int thid = threadIdx.x;
int ai = thid;
int bi = thid + blockDim.x;
// Zero out the shared memory
// Helpful especially when input size is not power of two
s_out[thid] = 0;
s_out[thid + blockDim.x] = 0;
// If CONFLICT_FREE_OFFSET is used, shared memory
// must be a few more than 2 * blockDim.x
if (thid + max_elems_per_block < shmem_sz)
s_out[thid + max_elems_per_block] = 0;
__syncthreads();
// Copy d_in to shared memory
// Note that d_in's elements are scattered into shared memory
// in light of avoiding bank conflicts
unsigned int cpy_idx = max_elems_per_block * blockIdx.x + threadIdx.x;
if (cpy_idx < len)
{
s_out[ai + CONFLICT_FREE_OFFSET(ai)] = d_in[cpy_idx];
if (cpy_idx + blockDim.x < len)
s_out[bi + CONFLICT_FREE_OFFSET(bi)] = d_in[cpy_idx + blockDim.x];
}
// For both upsweep and downsweep:
// Sequential indices with conflict free padding
// Amount of padding = target index / num banks
// This "shifts" the target indices by one every multiple
// of the num banks
// offset controls the stride and starting index of
// target elems at every iteration
// d just controls which threads are active
// Sweeps are pivoted on the last element of shared memory
// Upsweep/Reduce step
int offset = 1;
for (int d = max_elems_per_block >> 1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_out[bi] += s_out[ai];
}
offset <<= 1;
}
// Save the total sum on the global block sums array
// Then clear the last element on the shared memory
if (thid == 0)
{
d_block_sums[blockIdx.x] = s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)];
s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)] = 0;
}
// Downsweep step
for (int d = 1; d < max_elems_per_block; d <<= 1)
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
unsigned int temp = s_out[ai];
s_out[ai] = s_out[bi];
s_out[bi] += temp;
}
}
__syncthreads();
// Copy contents of shared memory to global memory
if (cpy_idx < len)
{
d_out[cpy_idx] = s_out[ai + CONFLICT_FREE_OFFSET(ai)];
if (cpy_idx + blockDim.x < len)
d_out[cpy_idx + blockDim.x] = s_out[bi + CONFLICT_FREE_OFFSET(bi)];
}
}
// Modified version of Mark Harris' implementation of the Blelloch scan
// according to https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf
__global__ void gpu_prescan(unsigned int* const d_out,
bool* d_in,
unsigned int* const d_block_sums,
const unsigned int len,
const unsigned int shmem_sz,
const unsigned int max_elems_per_block, bool inverted)
{
// Allocated on invocation
extern __shared__ unsigned int s_out[];
int thid = threadIdx.x;
int ai = thid;
int bi = thid + blockDim.x;
// Zero out the shared memory
// Helpful especially when input size is not power of two
s_out[thid] = 0;
s_out[thid + blockDim.x] = 0;
// If CONFLICT_FREE_OFFSET is used, shared memory
// must be a few more than 2 * blockDim.x
if (thid + max_elems_per_block < shmem_sz)
s_out[thid + max_elems_per_block] = 0;
__syncthreads();
// Copy d_in to shared memory
// Note that d_in's elements are scattered into shared memory
// in light of avoiding bank conflicts
unsigned int cpy_idx = max_elems_per_block * blockIdx.x + threadIdx.x;
if (cpy_idx < len)
{
bool a = d_in[cpy_idx] ^ inverted;
s_out[ai + CONFLICT_FREE_OFFSET(ai)] = a;
if (cpy_idx + blockDim.x < len)
s_out[bi + CONFLICT_FREE_OFFSET(bi)] = d_in[cpy_idx + blockDim.x] ^ inverted;
}
// For both upsweep and downsweep:
// Sequential indices with conflict free padding
// Amount of padding = target index / num banks
// This "shifts" the target indices by one every multiple
// of the num banks
// offset controls the stride and starting index of
// target elems at every iteration
// d just controls which threads are active
// Sweeps are pivoted on the last element of shared memory
// Upsweep/Reduce step
int offset = 1;
for (int d = max_elems_per_block >> 1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_out[bi] += s_out[ai];
}
offset <<= 1;
}
// Save the total sum on the global block sums array
// Then clear the last element on the shared memory
if (thid == 0)
{
d_block_sums[blockIdx.x] = s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)];
s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)] = 0;
}
// Downsweep step
for (int d = 1; d < max_elems_per_block; d <<= 1)
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
unsigned int temp = s_out[ai];
s_out[ai] = s_out[bi];
s_out[bi] += temp;
}
}
__syncthreads();
// Copy contents of shared memory to global memory
if (cpy_idx < len)
{
d_out[cpy_idx] = s_out[ai + CONFLICT_FREE_OFFSET(ai)];
if (cpy_idx + blockDim.x < len)
d_out[cpy_idx + blockDim.x] = s_out[bi + CONFLICT_FREE_OFFSET(bi)];
}
}
void sum_scan_blelloch(hipStream_t stream,
unsigned int* const d_out,
const unsigned int* d_in,
const size_t numElems)
{
// Zero out d_out
checkCudaErrors(hipMemsetAsync(d_out, 0, numElems * sizeof(unsigned int), stream));
// Set up number of threads and blocks
unsigned int block_sz = MAX_BLOCK_SZ / 2;
unsigned int max_elems_per_block = 2 * block_sz; // due to binary tree nature of algorithm
// If input size is not power of two, the remainder will still need a whole block
// Thus, number of blocks must be the ceiling of input size / max elems that a block can handle
//unsigned int grid_sz = (unsigned int) ::ceil((double) numElems / (double) max_elems_per_block);
// UPDATE: Instead of using ceiling and risking miscalculation due to precision, just automatically
// add 1 to the grid size when the input size cannot be divided cleanly by the block's capacity
unsigned int grid_sz = numElems / max_elems_per_block;
// Take advantage of the fact that integer division drops the decimals
if (numElems % max_elems_per_block != 0)
grid_sz += 1;
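// (Editor's note) e.g. with max_elems_per_block = 1024 and numElems = 3000,
// the integer division gives grid_sz = 2 and the non-zero remainder bumps it to 3.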
// Conflict free padding requires that shared memory be more than 2 * block_sz
unsigned int shmem_sz = max_elems_per_block + ((max_elems_per_block - 1) >> LOG_NUM_BANKS);
// Allocate memory for array of total sums produced by each block
// Array length must be the same as number of blocks
unsigned int* d_block_sums;
checkCudaErrors(hipMalloc(&d_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(hipMemsetAsync(d_block_sums, 0, sizeof(unsigned int) * grid_sz, stream));
// Sum scan data allocated to each block
//gpu_sum_scan_blelloch<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block >>>(d_out, d_in, d_block_sums, numElems);
hipLaunchKernelGGL(( gpu_prescan), dim3(grid_sz), dim3(block_sz), sizeof(unsigned int) * shmem_sz, stream, d_out,
d_in,
d_block_sums,
numElems,
shmem_sz,
max_elems_per_block);
// Sum scan total sums produced by each block
// Use basic implementation if number of total sums is <= 2 * block_sz
// (This requires only one block to do the scan)
if (grid_sz <= max_elems_per_block)
{
unsigned int* d_dummy_blocks_sums;
checkCudaErrors(hipMalloc(&d_dummy_blocks_sums, sizeof(unsigned int)));
checkCudaErrors(hipMemsetAsync(d_dummy_blocks_sums, 0, sizeof(unsigned int), stream));
//gpu_sum_scan_blelloch<<<1, block_sz, sizeof(unsigned int) * max_elems_per_block>>>(d_block_sums, d_block_sums, d_dummy_blocks_sums, grid_sz);
hipLaunchKernelGGL(( gpu_prescan), dim3(1), dim3(block_sz), sizeof(unsigned int) * shmem_sz, stream, d_block_sums,
d_block_sums,
d_dummy_blocks_sums,
grid_sz,
shmem_sz,
max_elems_per_block);
checkCudaErrors(hipFree(d_dummy_blocks_sums));
}
// Else, recurse on this same function as you'll need the full-blown scan
// for the block sums
else
{
unsigned int* d_in_block_sums;
checkCudaErrors(hipMalloc(&d_in_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(hipMemcpyAsync(d_in_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, hipMemcpyDeviceToDevice, stream));
sum_scan_blelloch(stream, d_block_sums, d_in_block_sums, grid_sz);
checkCudaErrors(hipFree(d_in_block_sums));
}
//// Uncomment to examine block sums
//unsigned int* h_block_sums = new unsigned int[grid_sz];
//checkCudaErrors(hipMemcpy(h_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, hipMemcpyDeviceToHost));
//std::cout << "Block sums: ";
//for (int i = 0; i < grid_sz; ++i)
//{
// std::cout << h_block_sums[i] << ", ";
//}
//std::cout << std::endl;
//std::cout << "Block sums length: " << grid_sz << std::endl;
//delete[] h_block_sums;
// Add each block's total sum to its scan output
// in order to get the final, global scanned array
hipLaunchKernelGGL(( gpu_add_block_sums), dim3(grid_sz), dim3(block_sz), 0, stream, d_out, d_out, d_block_sums, numElems);
checkCudaErrors(hipFree(d_block_sums));
}
void sum_scan_blelloch_managed(hipStream_t stream, hipStream_t stream_preprocess,
unsigned int* const d_out,
const unsigned int* d_in,
const size_t numElems)
{
// Zero out d_out
checkCudaErrors(hipMemPrefetchAsync(d_in, numElems * sizeof(unsigned int),0, stream_preprocess));
checkCudaErrors(hipMemPrefetchAsync(d_out, numElems * sizeof(unsigned int),0, stream_preprocess));
checkCudaErrors(hipMemsetAsync(d_out, 0, numElems * sizeof(unsigned int), stream_preprocess));
// Set up number of threads and blocks
unsigned int block_sz = MAX_BLOCK_SZ / 2;
unsigned int max_elems_per_block = 2 * block_sz; // due to binary tree nature of algorithm
// If input size is not power of two, the remainder will still need a whole block
// Thus, number of blocks must be the ceiling of input size / max elems that a block can handle
//unsigned int grid_sz = (unsigned int) ::ceil((double) numElems / (double) max_elems_per_block);
// UPDATE: Instead of using ceiling and risking miscalculation due to precision, just automatically
// add 1 to the grid size when the input size cannot be divided cleanly by the block's capacity
unsigned int grid_sz = numElems / max_elems_per_block;
// Take advantage of the fact that integer division drops the decimals
if (numElems % max_elems_per_block != 0)
grid_sz += 1;
// Conflict free padding requires that shared memory be more than 2 * block_sz
unsigned int shmem_sz = max_elems_per_block + ((max_elems_per_block - 1) >> LOG_NUM_BANKS);
// Allocate memory for array of total sums produced by each block
// Array length must be the same as number of blocks
unsigned int* d_block_sums;
checkCudaErrors(hipMallocManaged(&d_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(hipMemPrefetchAsync(d_block_sums, sizeof(unsigned int) * grid_sz,0, stream_preprocess));
checkCudaErrors(hipMemsetAsync(d_block_sums, 0, sizeof(unsigned int) * grid_sz, stream_preprocess));
//checkCudaErrors(hipStreamSynchronize(stream_preprocess));
// Sum scan data allocated to each block
//gpu_sum_scan_blelloch<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block >>>(d_out, d_in, d_block_sums, numElems);
hipLaunchKernelGGL(( gpu_prescan), dim3(grid_sz), dim3(block_sz), sizeof(unsigned int) * shmem_sz, stream, d_out,
d_in,
d_block_sums,
numElems,
shmem_sz,
max_elems_per_block);
// Sum scan total sums produced by each block
// Use basic implementation if number of total sums is <= 2 * block_sz
// (This requires only one block to do the scan)
if (grid_sz <= max_elems_per_block)
{
unsigned int* d_dummy_blocks_sums;
checkCudaErrors(hipMallocManaged(&d_dummy_blocks_sums, sizeof(unsigned int)));
checkCudaErrors(hipMemPrefetchAsync(d_dummy_blocks_sums, sizeof(unsigned int), 0, stream_preprocess));
checkCudaErrors(hipMemsetAsync(d_dummy_blocks_sums, 0, sizeof(unsigned int), stream_preprocess));
//checkCudaErrors(hipStreamSynchronize(stream_preprocess));
hipLaunchKernelGGL(( gpu_prescan), dim3(1), dim3(block_sz), sizeof(unsigned int) * shmem_sz, stream, d_block_sums,
d_block_sums,
d_dummy_blocks_sums,
grid_sz,
shmem_sz,
max_elems_per_block);
checkCudaErrors(hipFree(d_dummy_blocks_sums));
}
// Else, recurse on this same function as you'll need the full-blown scan
// for the block sums
else
{
unsigned int* d_in_block_sums;
checkCudaErrors(hipMallocManaged(&d_in_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(hipMemPrefetchAsync(d_in_block_sums, sizeof(unsigned int) * grid_sz, 0, stream_preprocess));
checkCudaErrors(hipMemcpyAsync(d_in_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, hipMemcpyDeviceToDevice, stream));
sum_scan_blelloch_managed(stream, stream_preprocess, d_block_sums, d_in_block_sums, grid_sz);
checkCudaErrors(hipFree(d_in_block_sums));
}
// Add each block's total sum to its scan output
// in order to get the final, global scanned array
hipLaunchKernelGGL(( gpu_add_block_sums), dim3(grid_sz), dim3(block_sz), 0, stream, d_out, d_out, d_block_sums, numElems);
checkCudaErrors(hipFree(d_block_sums));
}
// This is Mikkel and Jonas' work.
__global__ void gpuDeleteFromArrayOld(float* d_outData,
const unsigned int* d_delete_array,
const float* d_data,
const size_t numElements,
const unsigned int dimensions,
const unsigned int numberOfThreadsPrPoint){
extern __shared__ float temp[];
unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int point = i/numberOfThreadsPrPoint;
unsigned int pointOffset = i%numberOfThreadsPrPoint;
unsigned int dim = ceilf((float)dimensions/(float)numberOfThreadsPrPoint); // the number of dims this thread is responsible for
unsigned int dimOffset = pointOffset*dim;
unsigned int dim2 = dim;
if(dimensions-dimOffset < dim){
dim2 = dimensions-dimOffset;
}
/*if(i < 12){
printf("i %u, pointNr; %u, pointOffset %u, dim %u, dimOffset %u, dim2 %u \n", i, point, pointOffset, dim, dimOffset, dim2);
}*/
if(point < numElements){
unsigned int offset = d_delete_array[point];
unsigned int nextPrefix = d_delete_array[point+1];
for(int j = 0; j < dim2; j++){
assert(threadIdx.x*dim+j < 48000/4);
assert(dimOffset < dimensions);
assert(point*dimensions+dimOffset+j < numElements*dimensions);
temp[threadIdx.x*dim+j] = d_data[point*dimensions+dimOffset+j];
}
// Make sure data is done being written into shared memory
__syncthreads();
if(offset == nextPrefix){
assert(point >= offset);
offset = point-offset;
for(int j = 0; j < dim2; j++){
d_outData[offset*dimensions+dimOffset+j] = temp[threadIdx.x*dim+j];
}
}
// make sure everyone is done reading before overwriting
__syncthreads();
}
}
template<typename T>
__global__ void gpuDeleteFromArray(T* d_outData,
const unsigned int* d_delete_array,
const T* d_data,
const size_t numElements,
const unsigned int dimensions){
const size_t idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < numElements*dimensions){
const size_t pointIdex = idx/(size_t)dimensions;
const size_t dimIndex = idx%dimensions;
const size_t offSet = (size_t)d_delete_array[pointIdex];
const size_t nextOffSet = (size_t)d_delete_array[pointIdex+1];
const size_t newIndex = (pointIdex-offSet)*(size_t)dimensions+(size_t)dimIndex;
const T theData = d_data[idx];
if(offSet == nextOffSet){
// if(newIndex >= (size_t)(numElements*(size_t)dimensions)){
// printf("pointIdex %llu dimIndex %llu nextOffSet %llu\n", pointIdex, dimIndex, nextOffSet);
// //printf("newIndex %llu %llu %u %llu\n",newIndex, numElements,dimensions, (size_t)numElements*(size_t)dimensions);
// }
assert(newIndex < numElements*dimensions);
d_outData[newIndex] = theData;
}
}
}
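/*
 * Worked example (editor's note, not in the original): with numElements = 4,
 * dimensions = 2 and point 1 marked for deletion, d_delete_array holds the
 * exclusive prefix sum [0, 0, 1, 1, 1]. Point i survives when
 * d_delete_array[i] == d_delete_array[i+1] and is written to row i - offset,
 * so points 0, 2 and 3 land in rows 0, 1 and 2 of d_outData.
 */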
void deleteFromArrayWrapper(hipStream_t stream,
float* data,
unsigned int* prefixSum,
unsigned int numberOfPoints,
unsigned int dim,
float* output){
hipLaunchKernelGGL(( gpuDeleteFromArray), dim3(ceilf((float)(numberOfPoints*dim)/1024)),dim3(1024),0,stream, output, prefixSum, data, numberOfPoints, dim);
};
__global__ void gpuDeleteFromArraySpeical(float* d_outData,
const unsigned int* d_delete_array,
const float* d_data,
const size_t numElements,
const unsigned int dimensions){
const size_t idx = blockIdx.x*blockDim.x+threadIdx.x;
for(size_t howLongOnTheData = 0 ; howLongOnTheData < numElements*dimensions ; howLongOnTheData+=4*blockDim.x*gridDim.x){
const size_t advIdex1 = idx+howLongOnTheData;
const size_t advIdex2 = idx+howLongOnTheData+blockDim.x*gridDim.x;
const size_t advIdex3 = idx+howLongOnTheData+2*blockDim.x*gridDim.x;
const size_t advIdex4 = idx+howLongOnTheData+3*blockDim.x*gridDim.x;
float theData1;
float theData2;
float theData3;
float theData4;
if(advIdex1 < numElements*dimensions){
theData1 = d_data[advIdex1];
if(advIdex2 < numElements*dimensions){
theData2 = d_data[advIdex2];
if(advIdex3 < numElements*dimensions){
theData3 = d_data[advIdex3];
if(advIdex4 < numElements*dimensions){
theData4 = d_data[advIdex4];
}
}
}
}
if(advIdex1 < numElements*dimensions){
{
const size_t pointIdex = advIdex1/dimensions;
const size_t dimIndex = advIdex1%dimensions;
const size_t offSet = d_delete_array[pointIdex];
const size_t nextOffSet = d_delete_array[pointIdex+1];
const size_t newIndex = (pointIdex-offSet)*dimensions+dimIndex;
if(offSet == nextOffSet){
d_outData[newIndex] = theData1;
}
}
if(advIdex2 < numElements*dimensions){
{
const size_t pointIdex = advIdex2/dimensions;
const size_t dimIndex = advIdex2%dimensions;
const size_t offSet = d_delete_array[pointIdex];
const size_t nextOffSet = d_delete_array[pointIdex+1];
const size_t newIndex = (pointIdex-offSet)*dimensions+dimIndex;
if(offSet == nextOffSet){
d_outData[newIndex] = theData2;
}
}
if(advIdex3 < numElements*dimensions){
{
const size_t pointIdex = advIdex3/dimensions;
const size_t dimIndex = advIdex3%dimensions;
const size_t offSet = d_delete_array[pointIdex];
const size_t nextOffSet = d_delete_array[pointIdex+1];
const size_t newIndex = (pointIdex-offSet)*dimensions+dimIndex;
if(offSet == nextOffSet){
d_outData[newIndex] = theData3;
}
}
if(advIdex4 < numElements*dimensions){
{
const size_t pointIdex = advIdex4/dimensions;
const size_t dimIndex = advIdex4%dimensions;
const size_t offSet = d_delete_array[pointIdex];
const size_t nextOffSet = d_delete_array[pointIdex+1];
const size_t newIndex = (pointIdex-offSet)*dimensions+dimIndex;
if(offSet == nextOffSet){
d_outData[newIndex] = theData4;
}
}
}
}
}
}
}
}
void deleteFromArraySpecialWrapper(hipStream_t stream,
float* data,
unsigned int* prefixSum,
unsigned int numberOfPoints,
unsigned int dim,
float* output){
hipLaunchKernelGGL(( gpuDeleteFromArraySpeical), dim3(ceilf((float)((numberOfPoints*dim)/4)/1024)),dim3(1024),0,stream, output, prefixSum, data, numberOfPoints, dim);
};
template<typename T>
__global__ void gpuDeleteFromArrayTrasformed(T* d_outData,
const unsigned int* d_delete_array,
const T* d_data,
const size_t numElements,
const unsigned int dimensions){
const size_t idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < numElements*dimensions){
const T theData = d_data[idx];
const unsigned int whatPoint = idx%numElements;
const unsigned int whatDim = idx/numElements;
const unsigned int offSet = d_delete_array[whatPoint];
const unsigned int nextOffSet = d_delete_array[whatPoint+1];
const unsigned int maxOffSet = d_delete_array[numElements];
const unsigned int newIndex = whatDim*(numElements-maxOffSet)+whatPoint-offSet;
if(offSet == nextOffSet){
d_outData[newIndex] = theData;
}
//printf(" theData: %f \n whatPoint %u \n whatDim %u \n offSet %u \n nextOffSet %u \n maxOffSet %u \n newIndex %u \n",theData,whatPoint,whatDim,offSet,nextOffSet,maxOffSet,newIndex);
}
}
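/*
 * Worked example (editor's note, not in the original): here the data is laid
 * out dimension-major (all values of dim 0, then all of dim 1, ...). With
 * numElements = 4, point 1 deleted and maxOffSet = 1, each dimension block
 * shrinks from numElements to numElements - maxOffSet entries, so the value
 * at (dim d, point 2) moves from index d*4 + 2 to index d*3 + 1.
 */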
void sum_scan_blelloch(hipStream_t stream,
unsigned int* const d_out,
bool* d_in,
const size_t numElems,
bool inverted)
{
// Zero out d_out
checkCudaErrors(hipMemsetAsync(d_out, 0, numElems * sizeof(unsigned int), stream));
// Set up number of threads and blocks
unsigned int block_sz = MAX_BLOCK_SZ / 2;
unsigned int max_elems_per_block = 2 * block_sz; // due to binary tree nature of algorithm
// If input size is not power of two, the remainder will still need a whole block
// Thus, number of blocks must be the ceiling of input size / max elems that a block can handle
//unsigned int grid_sz = (unsigned int) ::ceil((double) numElems / (double) max_elems_per_block);
// UPDATE: Instead of using ceiling and risking miscalculation due to precision, just automatically
// add 1 to the grid size when the input size cannot be divided cleanly by the block's capacity
unsigned int grid_sz = numElems / max_elems_per_block;
// Take advantage of the fact that integer division drops the decimals
if (numElems % max_elems_per_block != 0)
grid_sz += 1;
// Conflict free padding requires that shared memory be more than 2 * block_sz
unsigned int shmem_sz = max_elems_per_block + ((max_elems_per_block - 1) >> LOG_NUM_BANKS);
// Allocate memory for array of total sums produced by each block
// Array length must be the same as number of blocks
unsigned int* d_block_sums;
checkCudaErrors(hipMalloc(&d_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(hipMemsetAsync(d_block_sums, 0, sizeof(unsigned int) * grid_sz, stream));
// Sum scan data allocated to each block
//gpu_sum_scan_blelloch<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block >>>(d_out, d_in, d_block_sums, numElems);
hipLaunchKernelGGL(( gpu_prescan), dim3(grid_sz), dim3(block_sz), sizeof(unsigned int) * shmem_sz, stream, d_out,
d_in,
d_block_sums,
numElems,
shmem_sz,
max_elems_per_block, inverted);
// Sum scan total sums produced by each block
// Use basic implementation if number of total sums is <= 2 * block_sz
// (This requires only one block to do the scan)
if (grid_sz <= max_elems_per_block)
{
unsigned int* d_dummy_blocks_sums;
checkCudaErrors(hipMalloc(&d_dummy_blocks_sums, sizeof(unsigned int)));
checkCudaErrors(hipMemsetAsync(d_dummy_blocks_sums, 0, sizeof(unsigned int), stream));
//gpu_sum_scan_blelloch<<<1, block_sz, sizeof(unsigned int) * max_elems_per_block>>>(d_block_sums, d_block_sums, d_dummy_blocks_sums, grid_sz);
hipLaunchKernelGGL(( gpu_prescan), dim3(1), dim3(block_sz), sizeof(unsigned int) * shmem_sz, stream, d_block_sums,
d_block_sums,
d_dummy_blocks_sums,
grid_sz,
shmem_sz,
max_elems_per_block);
checkCudaErrors(hipFree(d_dummy_blocks_sums));
}
// Else, recurse on this same function as you'll need the full-blown scan
// for the block sums
else
{
unsigned int* d_in_block_sums;
checkCudaErrors(hipMalloc(&d_in_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(hipMemcpyAsync(d_in_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, hipMemcpyDeviceToDevice, stream));
sum_scan_blelloch(stream, d_block_sums, d_in_block_sums, grid_sz);
checkCudaErrors(hipFree(d_in_block_sums));
}
//// Uncomment to examine block sums
//unsigned int* h_block_sums = new unsigned int[grid_sz];
//checkCudaErrors(hipMemcpy(h_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, hipMemcpyDeviceToHost));
//std::cout << "Block sums: ";
//for (int i = 0; i < grid_sz; ++i)
//{
// std::cout << h_block_sums[i] << ", ";
//}
//std::cout << std::endl;
//std::cout << "Block sums length: " << grid_sz << std::endl;
//delete[] h_block_sums;
// Add each block's total sum to its scan output
// in order to get the final, global scanned array
hipLaunchKernelGGL(( gpu_add_block_sums), dim3(grid_sz), dim3(block_sz), 0, stream, d_out, d_out, d_block_sums, numElems);
checkCudaErrors(hipFree(d_block_sums));
}
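/*
 * Editor's note (illustrative, not part of the original file): the scan above is exclusive,
 * so for flags {1,0,1,1,0} with inverted == false the output is {0,1,1,2,3}, and with the
 * extra trailing element the callers append, the final entry holds the total number of set
 * flags (3 here). With inverted == true each flag is XORed with 1 first, so the same input
 * behaves like {0,1,0,0,1} and scans to {0,0,1,1,1}.
 */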
void sum_scan_blelloch_managed(hipStream_t stream, hipStream_t stream_preprocess,
unsigned int* const d_out,
bool* d_in,
const size_t numElems,
bool inverted)
{
// Zero out d_out
// std::cout << " numElems * sizeof(bool): " << numElems * sizeof(bool) << " " <<d_in<< std::endl;
// for(int i = 0; i < numElems-5; i++){
// std::cout << d_in[i] << " " << std::endl;
// }
// std::cout << std::endl;
checkCudaErrors(hipMemPrefetchAsync(d_in, numElems * sizeof(bool),0, stream_preprocess));
checkCudaErrors(hipMemPrefetchAsync(d_out, numElems * sizeof(unsigned int),0, stream_preprocess));
checkCudaErrors(hipMemsetAsync(d_out, 0, numElems * sizeof(unsigned int), stream_preprocess));
// Set up number of threads and blocks
unsigned int block_sz = MAX_BLOCK_SZ / 2;
unsigned int max_elems_per_block = 2 * block_sz; // due to binary tree nature of algorithm
// If input size is not power of two, the remainder will still need a whole block
// Thus, number of blocks must be the ceiling of input size / max elems that a block can handle
//unsigned int grid_sz = (unsigned int) ::ceil((double) numElems / (double) max_elems_per_block);
// UPDATE: Instead of using ceiling and risking miscalculation due to precision, just automatically
// add 1 to the grid size when the input size cannot be divided cleanly by the block's capacity
unsigned int grid_sz = numElems / max_elems_per_block;
// Take advantage of the fact that integer division drops the decimals
if (numElems % max_elems_per_block != 0)
grid_sz += 1;
// Conflict free padding requires that shared memory be more than 2 * block_sz
unsigned int shmem_sz = max_elems_per_block + ((max_elems_per_block - 1) >> LOG_NUM_BANKS);
// Allocate memory for array of total sums produced by each block
// Array length must be the same as number of blocks
unsigned int* d_block_sums;
checkCudaErrors(hipMallocManaged(&d_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(hipMemPrefetchAsync(d_block_sums, sizeof(unsigned int) * grid_sz, 0, stream_preprocess));
checkCudaErrors(hipMemsetAsync(d_block_sums, 0, sizeof(unsigned int) * grid_sz, stream_preprocess));
// checkCudaErrors(hipStreamSynchronize(stream_preprocess));
// Sum scan data allocated to each block
//gpu_sum_scan_blelloch<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block >>>(d_out, d_in, d_block_sums, numElems);
hipLaunchKernelGGL(( gpu_prescan), dim3(grid_sz), dim3(block_sz), sizeof(unsigned int) * shmem_sz, stream, d_out,
d_in,
d_block_sums,
numElems,
shmem_sz,
max_elems_per_block, inverted);
// Sum scan total sums produced by each block
// Use basic implementation if number of total sums is <= 2 * block_sz
// (This requires only one block to do the scan)
if (grid_sz <= max_elems_per_block)
{
unsigned int* d_dummy_blocks_sums;
checkCudaErrors(hipMallocManaged(&d_dummy_blocks_sums, sizeof(unsigned int)));
checkCudaErrors(hipMemPrefetchAsync(d_dummy_blocks_sums, sizeof(unsigned int), 0, stream_preprocess));
checkCudaErrors(hipMemsetAsync(d_dummy_blocks_sums, 0, sizeof(unsigned int), stream_preprocess));
// checkCudaErrors(hipStreamSynchronize(stream_preprocess));
hipLaunchKernelGGL(( gpu_prescan), dim3(1), dim3(block_sz), sizeof(unsigned int) * shmem_sz, stream, d_block_sums,
d_block_sums,
d_dummy_blocks_sums,
grid_sz,
shmem_sz,
max_elems_per_block);
checkCudaErrors(hipFree(d_dummy_blocks_sums));
}
// Else, recurse on this same function as you'll need the full-blown scan
// for the block sums
else
{
unsigned int* d_in_block_sums;
checkCudaErrors(hipMallocManaged(&d_in_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(hipMemPrefetchAsync(d_in_block_sums, sizeof(unsigned int) * grid_sz, 0, stream_preprocess));
checkCudaErrors(hipMemcpyAsync(d_in_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, hipMemcpyDeviceToDevice, stream));
sum_scan_blelloch_managed(stream, stream_preprocess, d_block_sums, d_in_block_sums, grid_sz);
checkCudaErrors(hipFree(d_in_block_sums));
}
// Add each block's total sum to its scan output
// in order to get the final, global scanned array
hipLaunchKernelGGL(( gpu_add_block_sums), dim3(grid_sz), dim3(block_sz), 0, stream, d_out, d_out, d_block_sums, numElems);
checkCudaErrors(hipFree(d_block_sums));
}
void cpu_sum_scan(unsigned int* const h_out,
const bool* const h_in,
const size_t numElems)
{
unsigned int run_sum = 0;
for (int i = 0; i < numElems; ++i)
{
h_out[i] = run_sum;
run_sum = run_sum + h_in[i];
}
}
void cpu_sum_scan(unsigned int* const h_out,
const unsigned int* const h_in,
const size_t numElems)
{
unsigned int run_sum = 0;
for (int i = 0; i < numElems; ++i)
{
h_out[i] = run_sum;
run_sum = run_sum + h_in[i];
}
}
void cpuDeleteFromArray(float* const h_outData,
const bool* h_delete_array,
const float* data,
const size_t numElements,
unsigned int dimension){
unsigned int ammountNotDeleted = 0;
for(unsigned int i = 0 ; i < numElements ; ++i){
if(!h_delete_array[i]){
for(unsigned int dimIndex = 0 ; dimIndex < dimension ; ++dimIndex){
h_outData[ammountNotDeleted+dimIndex] = data[i*dimension+dimIndex];
}
ammountNotDeleted += dimension;
}
}
}
/*
 * This function takes the data and an array of bools that is one element longer than the data; the last bool is not used.
 * It returns the list in which entry i of the data is removed when the corresponding flag (XORed with `inverted`) is set.
 * It also deletes from the indexes that keep track of what is what,
 * but it does not resize the index array, so part of it can be garbage.
 */
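/*
 * Editor's note (illustrative, not part of the original file): with flags {0,1,0} and
 * inverted == false the exclusive prefix sum over the padded array is {0,0,1,1}. Point 0
 * (offset 0 == next offset 0) and point 2 (offset 1 == next offset 1) are kept and land at
 * output rows 0-0 = 0 and 2-1 = 1; point 1 (offset 0 != next offset 1) is dropped.
 */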
void deleteFromArrayOld(hipStream_t stream,
float* d_outData,
bool* d_delete_array,
const float* d_data,
const unsigned long numElements,
const unsigned int dimension,
bool inverted,
float* time){
// Set up device-side memory for output
unsigned int* d_out_blelloch;
checkCudaErrors(hipMalloc(&d_out_blelloch, sizeof(unsigned int) * (numElements+1)));
sum_scan_blelloch(stream, d_out_blelloch,d_delete_array,(numElements+1), inverted);
//unsigned int* const d_outData, const unsigned int* delete_array, const float* data,unsigned int* indexes , const size_t numElements
const unsigned int threadsUsed = 1024;
unsigned int numberOfvaluesPrThread = 10; // hardcoded, could be up to 11,... it is bounded by the size of shared memory
unsigned int numberOfThreadsPrPoint = ceilf((float)dimension/(float)numberOfvaluesPrThread);
unsigned int smem = threadsUsed*sizeof(float)*numberOfvaluesPrThread;
unsigned int blocksNeccesary = (numElements*numberOfThreadsPrPoint)/threadsUsed;
if((numElements*numberOfThreadsPrPoint)%1024 != 0){
blocksNeccesary++;
}
hipEvent_t start, stop;
if(time != nullptr){
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
}
hipLaunchKernelGGL(( gpuDeleteFromArrayOld), dim3(blocksNeccesary),dim3(threadsUsed),smem, stream, d_outData,d_out_blelloch,d_data,numElements,dimension,numberOfThreadsPrPoint);
hipFree(d_out_blelloch);
if(time != nullptr){
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(time, start, stop);
}
}
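/*
 * Editor's note (illustrative, not part of the original file): in deleteFromArrayOld each
 * point is split across numberOfThreadsPrPoint = ceil(dimension/10) threads, and a block
 * stages up to 1024*10 floats (about 40 KB) in shared memory, which is why
 * numberOfvaluesPrThread is capped around 10-11 (the kernel's assert checks against 48 KB).
 * For dimension = 25, for example, three threads cooperate per point, handling 9, 9 and 7
 * values respectively.
 */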
/*
 * This is take two of the function; different versions are kept to track performance changes.
 * It takes the data and an array of bools that is one element longer than the data; the last bool is not used.
 * It returns the list in which entry i of the data is removed when the corresponding flag (XORed with `inverted`) is set.
 * It also deletes from the indexes that keep track of what is what,
 * but it does not resize the index array, so part of it can be garbage.
 */
void deleteFromArray(hipStream_t stream,
float* d_outData,
bool* d_delete_array,
const float* d_data,
const unsigned long numElements,
const unsigned int dimension,
const bool inverted,
float* time){
const unsigned int threadsUsed = 1024;
// Set up device-side memory for output
unsigned int* d_out_blelloch;
checkCudaErrors(hipMalloc(&d_out_blelloch, sizeof(unsigned int) * (numElements+1)));
sum_scan_blelloch(stream, d_out_blelloch,d_delete_array,(numElements+1), inverted);
unsigned int blocksToUse = numElements*dimension/threadsUsed;
if((numElements*dimension)%threadsUsed!=0){
blocksToUse++;
}
hipEvent_t start, stop;
if(time != nullptr){
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
}
hipLaunchKernelGGL(( gpuDeleteFromArray), dim3(blocksToUse),dim3(threadsUsed),0, stream, d_outData,d_out_blelloch,d_data,numElements,dimension);
checkCudaErrors(hipFree(d_out_blelloch));
if(time != nullptr){
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(time, start, stop);
}
};
void deleteFromArray_managed(hipStream_t stream,
float* d_outData,
bool* d_delete_array,
const float* d_data,
const unsigned long numElements,
const unsigned int dimension,
const bool inverted,
float* time){
const unsigned int threadsUsed = 1024;
// Set up device-side memory for output
unsigned int* d_out_blelloch;
checkCudaErrors(hipMallocManaged(&d_out_blelloch, sizeof(unsigned int) * (numElements+1)));
// std::cout << "(numElements+1): " << (numElements+1) << " "<< d_delete_array << std::endl;
sum_scan_blelloch_managed(stream, stream, d_out_blelloch,d_delete_array,(numElements+1), inverted);
unsigned int blocksToUse = numElements*dimension/threadsUsed;
if((numElements*dimension)%threadsUsed!=0){
blocksToUse++;
}
hipLaunchKernelGGL(( gpuDeleteFromArray), dim3(blocksToUse),dim3(threadsUsed),0, stream, d_outData,d_out_blelloch,d_data,numElements,dimension);
checkCudaErrors(hipFree(d_out_blelloch));
};
void deleteFromArrayWrapper(unsigned int dimGrid, unsigned int dimBlock, hipStream_t stream,
float* data, unsigned int* prefixSum, unsigned int numberOfElements,
unsigned int dim, float* output){
hipLaunchKernelGGL(( gpuDeleteFromArray), dim3(dimGrid), dim3(dimBlock), 0, stream, output,
prefixSum,
data,
numberOfElements, dim);
};
void deleteFromArrayWrapper(unsigned int dimGrid, unsigned int dimBlock, hipStream_t stream,
unsigned int* data, unsigned int* prefixSum, unsigned int numberOfElements,
unsigned int dim, unsigned* output){
hipLaunchKernelGGL(( gpuDeleteFromArray), dim3(dimGrid), dim3(dimBlock), 0, stream, output,
prefixSum,
data,
numberOfElements, dim);
};
/*
 * This is take two of the function; different versions are kept to track performance changes.
 * It takes the data and an array of bools that is one element longer than the data; the last bool is not used.
 * It returns the list in which entry i of the data is removed when the corresponding flag (XORed with `inverted`) is set.
 * It also deletes from the indexes that keep track of what is what,
 * but it does not resize the index array, so part of it can be garbage.
 */
void deleteFromArraySpecial(hipStream_t stream,
float* d_outData,
bool* d_delete_array,
const float* d_data,
const unsigned long numElements,
const unsigned int dimension,
const bool inverted,
float* time){
const unsigned int threadsUsed = 1024;
// Set up device-side memory for output
unsigned int* d_out_blelloch;
checkCudaErrors(hipMalloc(&d_out_blelloch, sizeof(unsigned int) * (numElements+1)));
sum_scan_blelloch(stream, d_out_blelloch,d_delete_array,(numElements+1), inverted);
unsigned int blocksToUse = 1;
hipEvent_t start, stop;
if(time != nullptr){
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
}
hipLaunchKernelGGL(( gpuDeleteFromArraySpeical), dim3(blocksToUse),dim3(threadsUsed),0, stream, d_outData,d_out_blelloch,d_data,numElements,dimension);
checkCudaErrors(hipFree(d_out_blelloch));
if(time != nullptr){
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(time, start, stop);
}
}
/*
 * This function does the same as the non-transformed version; it just receives the data in a transposed (dimension-major) layout.
 * This is take two of the function; different versions are kept to track performance changes.
 * It takes the data and an array of bools that is one element longer than the data; the last bool is not used.
 * It returns the list in which entry i of the data is removed when the corresponding flag (XORed with `inverted`) is set.
 * It also deletes from the indexes that keep track of what is what,
 * but it does not resize the index array, so part of it can be garbage.
 */
void deleteFromArrayTrasformedData(hipStream_t stream,
float* d_outData,
bool* d_delete_array,
const float* d_data,
const unsigned long numElements,
const unsigned int dimension,
const bool inverted,
float* time){
const unsigned int threadsUsed = 1024;
// Set up device-side memory for output
unsigned int* d_out_blelloch;
checkCudaErrors(hipMalloc(&d_out_blelloch, sizeof(unsigned int) * (numElements+1)));
sum_scan_blelloch(stream, d_out_blelloch,d_delete_array,(numElements+1), inverted);
unsigned int blocksToUse = numElements*dimension/threadsUsed;
if((numElements*dimension)%threadsUsed!=0){
blocksToUse++;
}
hipEvent_t start, stop;
if(time != nullptr){
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
}
hipLaunchKernelGGL(( gpuDeleteFromArrayTrasformed<float>), dim3(blocksToUse),dim3(threadsUsed),0, stream, d_outData,d_out_blelloch,d_data,numElements,dimension);
if(time != nullptr){
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(time, start, stop);
}
checkCudaErrors(hipFree(d_out_blelloch));
};
void deleteFromArrayTransfomedDataWrapper(unsigned int dimGrid, unsigned int dimBlock, hipStream_t stream,
float* data, unsigned int* prefixSum, unsigned int numberOfElements,
unsigned int dim, float* output){
hipLaunchKernelGGL(( gpuDeleteFromArrayTrasformed<float>), dim3(dimGrid), dim3(dimBlock), 0, stream, output, prefixSum, data, numberOfElements, dim);
};
void deleteFromArrayTransfomedDataWrapper(unsigned int dimGrid, unsigned int dimBlock, hipStream_t stream,
unsigned int* data, unsigned int* prefixSum, unsigned int numberOfElements,
unsigned int dim, unsigned int* output){
hipLaunchKernelGGL(( gpuDeleteFromArrayTrasformed<unsigned int>), dim3(dimGrid), dim3(dimBlock), 0, stream, output, prefixSum, data, numberOfElements, dim);
};
void deleteFromArray(float* d_outData,
bool* d_delete_array,
const float* d_data,
const unsigned long numElements,
const unsigned int dimension,
bool inverted,
float* time){
hipStream_t stream;
checkCudaErrors(hipStreamCreate(&stream));
deleteFromArray(stream, d_outData, d_delete_array, d_data, numElements, dimension, inverted,time);
checkCudaErrors(hipStreamDestroy(stream));
};
| 473407504a3b07d17dbc7e018b12b3e99c550aed.cu |
#include "src/randomCudaScripts/DeleteFromArray.h"
#include <cmath>
#include <cuda.h>
#include <iostream>
#include "device_launch_parameters.h"
__global__ void gpu_add_block_sums(unsigned int* const d_out,
const unsigned int* const d_in,
unsigned int* const d_block_sums,
const size_t numElems)
{
//unsigned int glbl_t_idx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int d_block_sum_val = d_block_sums[blockIdx.x];
//unsigned int d_in_val_0 = 0;
//unsigned int d_in_val_1 = 0;
// Simple implementation's performance is not significantly (if at all)
// better than previous verbose implementation
unsigned int cpy_idx = 2 * blockIdx.x * blockDim.x + threadIdx.x;
if (cpy_idx < numElems)
{
d_out[cpy_idx] = d_in[cpy_idx] + d_block_sum_val;
if (cpy_idx + blockDim.x < numElems)
d_out[cpy_idx + blockDim.x] = d_in[cpy_idx + blockDim.x] + d_block_sum_val;
}
//if (2 * glbl_t_idx < numElems)
//{
// d_out[2 * glbl_t_idx] = d_in[2 * glbl_t_idx] + d_block_sum_val;
// if (2 * glbl_t_idx + 1 < numElems)
// d_out[2 * glbl_t_idx + 1] = d_in[2 * glbl_t_idx + 1] + d_block_sum_val;
//}
//if (2 * glbl_t_idx < numElems)
//{
// d_in_val_0 = d_in[2 * glbl_t_idx];
// if (2 * glbl_t_idx + 1 < numElems)
// d_in_val_1 = d_in[2 * glbl_t_idx + 1];
//}
//else
// return;
//__syncthreads();
//d_out[2 * glbl_t_idx] = d_in_val_0 + d_block_sum_val;
//if (2 * glbl_t_idx + 1 < numElems)
// d_out[2 * glbl_t_idx + 1] = d_in_val_1 + d_block_sum_val;
}
// Modified version of Mark Harris' implementation of the Blelloch scan
// according to https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf
__global__ void gpu_prescan(unsigned int* const d_out,
const unsigned int* const d_in,
unsigned int* const d_block_sums,
const unsigned int len,
const unsigned int shmem_sz,
const unsigned int max_elems_per_block)
{
// Allocated on invocation
extern __shared__ unsigned int s_out[];
int thid = threadIdx.x;
int ai = thid;
int bi = thid + blockDim.x;
// Zero out the shared memory
// Helpful especially when input size is not power of two
s_out[thid] = 0;
s_out[thid + blockDim.x] = 0;
// If CONFLICT_FREE_OFFSET is used, shared memory
// must be a few more than 2 * blockDim.x
if (thid + max_elems_per_block < shmem_sz)
s_out[thid + max_elems_per_block] = 0;
__syncthreads();
// Copy d_in to shared memory
// Note that d_in's elements are scattered into shared memory
// in light of avoiding bank conflicts
unsigned int cpy_idx = max_elems_per_block * blockIdx.x + threadIdx.x;
if (cpy_idx < len)
{
s_out[ai + CONFLICT_FREE_OFFSET(ai)] = d_in[cpy_idx];
if (cpy_idx + blockDim.x < len)
s_out[bi + CONFLICT_FREE_OFFSET(bi)] = d_in[cpy_idx + blockDim.x];
}
// For both upsweep and downsweep:
// Sequential indices with conflict free padding
// Amount of padding = target index / num banks
// This "shifts" the target indices by one every multiple
// of the num banks
// offset controls the stride and starting index of
// target elems at every iteration
// d just controls which threads are active
// Sweeps are pivoted on the last element of shared memory
// Upsweep/Reduce step
int offset = 1;
for (int d = max_elems_per_block >> 1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_out[bi] += s_out[ai];
}
offset <<= 1;
}
// Save the total sum on the global block sums array
// Then clear the last element on the shared memory
if (thid == 0)
{
d_block_sums[blockIdx.x] = s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)];
s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)] = 0;
}
// Downsweep step
for (int d = 1; d < max_elems_per_block; d <<= 1)
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
unsigned int temp = s_out[ai];
s_out[ai] = s_out[bi];
s_out[bi] += temp;
}
}
__syncthreads();
// Copy contents of shared memory to global memory
if (cpy_idx < len)
{
d_out[cpy_idx] = s_out[ai + CONFLICT_FREE_OFFSET(ai)];
if (cpy_idx + blockDim.x < len)
d_out[cpy_idx + blockDim.x] = s_out[bi + CONFLICT_FREE_OFFSET(bi)];
}
}
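// Editor's note (illustrative, not part of the original file): CONFLICT_FREE_OFFSET pads the
// shared-memory index so that consecutive tree nodes fall into different banks; with the usual
// definition ((n) >> LOG_NUM_BANKS) and 32 banks, index 32 is shifted to 33, index 64 to 66,
// and so on, which is also why shmem_sz is slightly larger than 2*block_sz.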
// Modified version of Mark Harris' implementation of the Blelloch scan
// according to https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf
__global__ void gpu_prescan(unsigned int* const d_out,
bool* d_in,
unsigned int* const d_block_sums,
const unsigned int len,
const unsigned int shmem_sz,
const unsigned int max_elems_per_block, bool inverted)
{
// Allocated on invocation
extern __shared__ unsigned int s_out[];
int thid = threadIdx.x;
int ai = thid;
int bi = thid + blockDim.x;
// Zero out the shared memory
// Helpful especially when input size is not power of two
s_out[thid] = 0;
s_out[thid + blockDim.x] = 0;
// If CONFLICT_FREE_OFFSET is used, shared memory
// must be a few more than 2 * blockDim.x
if (thid + max_elems_per_block < shmem_sz)
s_out[thid + max_elems_per_block] = 0;
__syncthreads();
// Copy d_in to shared memory
// Note that d_in's elements are scattered into shared memory
// in light of avoiding bank conflicts
unsigned int cpy_idx = max_elems_per_block * blockIdx.x + threadIdx.x;
if (cpy_idx < len)
{
bool a = d_in[cpy_idx] ^ inverted;
s_out[ai + CONFLICT_FREE_OFFSET(ai)] = a;
if (cpy_idx + blockDim.x < len)
s_out[bi + CONFLICT_FREE_OFFSET(bi)] = d_in[cpy_idx + blockDim.x] ^ inverted;
}
// For both upsweep and downsweep:
// Sequential indices with conflict free padding
// Amount of padding = target index / num banks
// This "shifts" the target indices by one every multiple
// of the num banks
// offset controls the stride and starting index of
// target elems at every iteration
// d just controls which threads are active
// Sweeps are pivoted on the last element of shared memory
// Upsweep/Reduce step
int offset = 1;
for (int d = max_elems_per_block >> 1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_out[bi] += s_out[ai];
}
offset <<= 1;
}
// Save the total sum on the global block sums array
// Then clear the last element on the shared memory
if (thid == 0)
{
d_block_sums[blockIdx.x] = s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)];
s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)] = 0;
}
// Downsweep step
for (int d = 1; d < max_elems_per_block; d <<= 1)
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
unsigned int temp = s_out[ai];
s_out[ai] = s_out[bi];
s_out[bi] += temp;
}
}
__syncthreads();
// Copy contents of shared memory to global memory
if (cpy_idx < len)
{
d_out[cpy_idx] = s_out[ai + CONFLICT_FREE_OFFSET(ai)];
if (cpy_idx + blockDim.x < len)
d_out[cpy_idx + blockDim.x] = s_out[bi + CONFLICT_FREE_OFFSET(bi)];
}
}
void sum_scan_blelloch(cudaStream_t stream,
unsigned int* const d_out,
const unsigned int* d_in,
const size_t numElems)
{
// Zero out d_out
checkCudaErrors(cudaMemsetAsync(d_out, 0, numElems * sizeof(unsigned int), stream));
// Set up number of threads and blocks
unsigned int block_sz = MAX_BLOCK_SZ / 2;
unsigned int max_elems_per_block = 2 * block_sz; // due to binary tree nature of algorithm
// If input size is not power of two, the remainder will still need a whole block
// Thus, number of blocks must be the ceiling of input size / max elems that a block can handle
//unsigned int grid_sz = (unsigned int) std::ceil((double) numElems / (double) max_elems_per_block);
// UPDATE: Instead of using ceiling and risking miscalculation due to precision, just automatically
// add 1 to the grid size when the input size cannot be divided cleanly by the block's capacity
unsigned int grid_sz = numElems / max_elems_per_block;
// Take advantage of the fact that integer division drops the decimals
if (numElems % max_elems_per_block != 0)
grid_sz += 1;
// Conflict free padding requires that shared memory be more than 2 * block_sz
unsigned int shmem_sz = max_elems_per_block + ((max_elems_per_block - 1) >> LOG_NUM_BANKS);
// Allocate memory for array of total sums produced by each block
// Array length must be the same as number of blocks
unsigned int* d_block_sums;
checkCudaErrors(cudaMalloc(&d_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(cudaMemsetAsync(d_block_sums, 0, sizeof(unsigned int) * grid_sz, stream));
// Sum scan data allocated to each block
//gpu_sum_scan_blelloch<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block >>>(d_out, d_in, d_block_sums, numElems);
gpu_prescan<<<grid_sz, block_sz, sizeof(unsigned int) * shmem_sz, stream>>>(d_out,
d_in,
d_block_sums,
numElems,
shmem_sz,
max_elems_per_block);
// Sum scan total sums produced by each block
// Use basic implementation if number of total sums is <= 2 * block_sz
// (This requires only one block to do the scan)
if (grid_sz <= max_elems_per_block)
{
unsigned int* d_dummy_blocks_sums;
checkCudaErrors(cudaMalloc(&d_dummy_blocks_sums, sizeof(unsigned int)));
checkCudaErrors(cudaMemsetAsync(d_dummy_blocks_sums, 0, sizeof(unsigned int), stream));
//gpu_sum_scan_blelloch<<<1, block_sz, sizeof(unsigned int) * max_elems_per_block>>>(d_block_sums, d_block_sums, d_dummy_blocks_sums, grid_sz);
gpu_prescan<<<1, block_sz, sizeof(unsigned int) * shmem_sz, stream>>>(d_block_sums,
d_block_sums,
d_dummy_blocks_sums,
grid_sz,
shmem_sz,
max_elems_per_block);
checkCudaErrors(cudaFree(d_dummy_blocks_sums));
}
// Else, recurse on this same function as you'll need the full-blown scan
// for the block sums
else
{
unsigned int* d_in_block_sums;
checkCudaErrors(cudaMalloc(&d_in_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(cudaMemcpyAsync(d_in_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, cudaMemcpyDeviceToDevice, stream));
sum_scan_blelloch(stream, d_block_sums, d_in_block_sums, grid_sz);
checkCudaErrors(cudaFree(d_in_block_sums));
}
//// Uncomment to examine block sums
//unsigned int* h_block_sums = new unsigned int[grid_sz];
//checkCudaErrors(cudaMemcpy(h_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, cudaMemcpyDeviceToHost));
//std::cout << "Block sums: ";
//for (int i = 0; i < grid_sz; ++i)
//{
// std::cout << h_block_sums[i] << ", ";
//}
//std::cout << std::endl;
//std::cout << "Block sums length: " << grid_sz << std::endl;
//delete[] h_block_sums;
// Add each block's total sum to its scan output
// in order to get the final, global scanned array
gpu_add_block_sums<<<grid_sz, block_sz, 0, stream>>>(d_out, d_out, d_block_sums, numElems);
checkCudaErrors(cudaFree(d_block_sums));
}
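// Editor's note: a minimal, hypothetical usage sketch of the scan above (not part of the
// original file); d_flags and d_scan are assumed to be device buffers of n unsigned ints.
//
//   cudaStream_t s;
//   checkCudaErrors(cudaStreamCreate(&s));
//   sum_scan_blelloch(s, d_scan, d_flags, n);   // exclusive prefix sum of d_flags into d_scan
//   checkCudaErrors(cudaStreamSynchronize(s));
//   checkCudaErrors(cudaStreamDestroy(s));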
void sum_scan_blelloch_managed(cudaStream_t stream, cudaStream_t stream_preprocess,
unsigned int* const d_out,
const unsigned int* d_in,
const size_t numElems)
{
// Zero out d_out
checkCudaErrors(cudaMemPrefetchAsync(d_in, numElems * sizeof(unsigned int),0, stream_preprocess));
checkCudaErrors(cudaMemPrefetchAsync(d_out, numElems * sizeof(unsigned int),0, stream_preprocess));
checkCudaErrors(cudaMemsetAsync(d_out, 0, numElems * sizeof(unsigned int), stream_preprocess));
// Set up number of threads and blocks
unsigned int block_sz = MAX_BLOCK_SZ / 2;
unsigned int max_elems_per_block = 2 * block_sz; // due to binary tree nature of algorithm
// If input size is not power of two, the remainder will still need a whole block
// Thus, number of blocks must be the ceiling of input size / max elems that a block can handle
//unsigned int grid_sz = (unsigned int) std::ceil((double) numElems / (double) max_elems_per_block);
// UPDATE: Instead of using ceiling and risking miscalculation due to precision, just automatically
// add 1 to the grid size when the input size cannot be divided cleanly by the block's capacity
unsigned int grid_sz = numElems / max_elems_per_block;
// Take advantage of the fact that integer division drops the decimals
if (numElems % max_elems_per_block != 0)
grid_sz += 1;
// Conflict free padding requires that shared memory be more than 2 * block_sz
unsigned int shmem_sz = max_elems_per_block + ((max_elems_per_block - 1) >> LOG_NUM_BANKS);
// Allocate memory for array of total sums produced by each block
// Array length must be the same as number of blocks
unsigned int* d_block_sums;
checkCudaErrors(cudaMallocManaged(&d_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(cudaMemPrefetchAsync(d_block_sums, sizeof(unsigned int) * grid_sz,0, stream_preprocess));
checkCudaErrors(cudaMemsetAsync(d_block_sums, 0, sizeof(unsigned int) * grid_sz, stream_preprocess));
//checkCudaErrors(cudaStreamSynchronize(stream_preprocess));
// Sum scan data allocated to each block
//gpu_sum_scan_blelloch<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block >>>(d_out, d_in, d_block_sums, numElems);
gpu_prescan<<<grid_sz, block_sz, sizeof(unsigned int) * shmem_sz, stream>>>(d_out,
d_in,
d_block_sums,
numElems,
shmem_sz,
max_elems_per_block);
// Sum scan total sums produced by each block
// Use basic implementation if number of total sums is <= 2 * block_sz
// (This requires only one block to do the scan)
if (grid_sz <= max_elems_per_block)
{
unsigned int* d_dummy_blocks_sums;
checkCudaErrors(cudaMallocManaged(&d_dummy_blocks_sums, sizeof(unsigned int)));
checkCudaErrors(cudaMemPrefetchAsync(d_dummy_blocks_sums, sizeof(unsigned int), 0, stream_preprocess));
checkCudaErrors(cudaMemsetAsync(d_dummy_blocks_sums, 0, sizeof(unsigned int), stream_preprocess));
//checkCudaErrors(cudaStreamSynchronize(stream_preprocess));
gpu_prescan<<<1, block_sz, sizeof(unsigned int) * shmem_sz, stream>>>(d_block_sums,
d_block_sums,
d_dummy_blocks_sums,
grid_sz,
shmem_sz,
max_elems_per_block);
checkCudaErrors(cudaFree(d_dummy_blocks_sums));
}
// Else, recurse on this same function as you'll need the full-blown scan
// for the block sums
else
{
unsigned int* d_in_block_sums;
checkCudaErrors(cudaMallocManaged(&d_in_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(cudaMemPrefetchAsync(d_in_block_sums, sizeof(unsigned int) * grid_sz, 0, stream_preprocess));
checkCudaErrors(cudaMemcpyAsync(d_in_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, cudaMemcpyDeviceToDevice, stream));
sum_scan_blelloch_managed(stream, stream_preprocess, d_block_sums, d_in_block_sums, grid_sz);
checkCudaErrors(cudaFree(d_in_block_sums));
}
// Add each block's total sum to its scan output
// in order to get the final, global scanned array
gpu_add_block_sums<<<grid_sz, block_sz, 0, stream>>>(d_out, d_out, d_block_sums, numElems);
checkCudaErrors(cudaFree(d_block_sums));
}
//This is Mikkel and Jonas' work.
__global__ void gpuDeleteFromArrayOld(float* d_outData,
const unsigned int* d_delete_array,
const float* d_data,
const size_t numElements,
const unsigned int dimensions,
const unsigned int numberOfThreadsPrPoint){
extern __shared__ float temp[];
unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int point = i/numberOfThreadsPrPoint;
unsigned int pointOffset = i%numberOfThreadsPrPoint;
unsigned int dim = ceilf((float)dimensions/(float)numberOfThreadsPrPoint); // the amount of dims i am responsible for
unsigned int dimOffset = pointOffset*dim;
unsigned int dim2 = dim;
if(dimensions-dimOffset < dim){
dim2 = dimensions-dimOffset;
}
/*if(i < 12){
printf("i %u, pointNr; %u, pointOffset %u, dim %u, dimOffset %u, dim2 %u \n", i, point, pointOffset, dim, dimOffset, dim2);
}*/
if(point < numElements){
unsigned int offset = d_delete_array[point];
unsigned int nextPrefix = d_delete_array[point+1];
for(int j = 0; j < dim2; j++){
assert(threadIdx.x*dim+j < 48000/4);
assert(dimOffset < dimensions);
assert(point*dimensions+dimOffset+j < numElements*dimensions);
temp[threadIdx.x*dim+j] = d_data[point*dimensions+dimOffset+j];
}
// Make sure data is sone written into shared memory
__syncthreads();
if(offset == nextPrefix){
assert(point >= offset);
offset = point-offset;
for(int j = 0; j < dim2; j++){
d_outData[offset*dimensions+dimOffset+j] = temp[threadIdx.x*dim+j];
}
}
// make sure everyone is done reading before overwriding
__syncthreads();
}
}
template<typename T>
__global__ void gpuDeleteFromArray(T* d_outData,
const unsigned int* d_delete_array,
const T* d_data,
const size_t numElements,
const unsigned int dimensions){
const size_t idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < numElements*dimensions){
const size_t pointIdex = idx/(size_t)dimensions;
const size_t dimIndex = idx%dimensions;
const size_t offSet = (size_t)d_delete_array[pointIdex];
const size_t nextOffSet = (size_t)d_delete_array[pointIdex+1];
const size_t newIndex = (pointIdex-offSet)*(size_t)dimensions+(size_t)dimIndex;
const T theData = d_data[idx];
if(offSet == nextOffSet){
// if(newIndex >= (size_t)(numElements*(size_t)dimensions)){
// printf("pointIdex %llu dimIndex %llu nextOffSet %llu\n", pointIdex, dimIndex, nextOffSet);
// //printf("newIndex %llu %llu %u %llu\n",newIndex, numElements,dimensions, (size_t)numElements*(size_t)dimensions);
// }
assert(newIndex < numElements*dimensions);
d_outData[newIndex] = theData;
}
}
}
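// Editor's note (illustrative, not part of the original file): for a kept point the kernel
// above maps element (point p, dim d) from index p*dimensions+d to
// (p - prefix[p])*dimensions + d, where prefix[p] counts how many earlier points were deleted.
// E.g. with dimensions = 3 and prefix = {0,1,1,1}, point 2's coordinates move from
// indices 6..8 to indices 3..5.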
void deleteFromArrayWrapper(cudaStream_t stream,
float* data,
unsigned int* prefixSum,
unsigned int numberOfPoints,
unsigned int dim,
float* output){
gpuDeleteFromArray<<<ceilf((float)(numberOfPoints*dim)/1024),1024,0,stream>>>(output, prefixSum, data, numberOfPoints, dim);
};
__global__ void gpuDeleteFromArraySpeical(float* d_outData,
const unsigned int* d_delete_array,
const float* d_data,
const size_t numElements,
const unsigned int dimensions){
const size_t idx = blockIdx.x*blockDim.x+threadIdx.x;
for(size_t howLongOnTheData = 0 ; howLongOnTheData < numElements*dimensions ; howLongOnTheData+=4*blockDim.x*gridDim.x){
const size_t advIdex1 = idx+howLongOnTheData;
const size_t advIdex2 = idx+howLongOnTheData+blockDim.x*gridDim.x;
const size_t advIdex3 = idx+howLongOnTheData+2*blockDim.x*gridDim.x;
const size_t advIdex4 = idx+howLongOnTheData+3*blockDim.x*gridDim.x;
float theData1;
float theData2;
float theData3;
float theData4;
if(advIdex1 < numElements*dimensions){
theData1 = d_data[advIdex1];
if(advIdex2 < numElements*dimensions){
theData2 = d_data[advIdex2];
if(advIdex3 < numElements*dimensions){
theData3 = d_data[advIdex3];
if(advIdex4 < numElements*dimensions){
theData4 = d_data[advIdex4];
}
}
}
}
if(advIdex1 < numElements*dimensions){
{
const size_t pointIdex = advIdex1/dimensions;
const size_t dimIndex = advIdex1%dimensions;
const size_t offSet = d_delete_array[pointIdex];
const size_t nextOffSet = d_delete_array[pointIdex+1];
const size_t newIndex = (pointIdex-offSet)*dimensions+dimIndex;
if(offSet == nextOffSet){
d_outData[newIndex] = theData1;
}
}
if(advIdex2 < numElements*dimensions){
{
const size_t pointIdex = advIdex2/dimensions;
const size_t dimIndex = advIdex2%dimensions;
const size_t offSet = d_delete_array[pointIdex];
const size_t nextOffSet = d_delete_array[pointIdex+1];
const size_t newIndex = (pointIdex-offSet)*dimensions+dimIndex;
if(offSet == nextOffSet){
d_outData[newIndex] = theData2;
}
}
if(advIdex3 < numElements*dimensions){
{
const size_t pointIdex = advIdex3/dimensions;
const size_t dimIndex = advIdex3%dimensions;
const size_t offSet = d_delete_array[pointIdex];
const size_t nextOffSet = d_delete_array[pointIdex+1];
const size_t newIndex = (pointIdex-offSet)*dimensions+dimIndex;
if(offSet == nextOffSet){
d_outData[newIndex] = theData3;
}
}
if(advIdex4 < numElements*dimensions){
{
const size_t pointIdex = advIdex4/dimensions;
const size_t dimIndex = advIdex4%dimensions;
const size_t offSet = d_delete_array[pointIdex];
const size_t nextOffSet = d_delete_array[pointIdex+1];
const size_t newIndex = (pointIdex-offSet)*dimensions+dimIndex;
if(offSet == nextOffSet){
d_outData[newIndex] = theData4;
}
}
}
}
}
}
}
}
void deleteFromArraySpecialWrapper(cudaStream_t stream,
float* data,
unsigned int* prefixSum,
unsigned int numberOfPoints,
unsigned int dim,
float* output){
gpuDeleteFromArraySpeical<<<ceilf((float)((numberOfPoints*dim)/4)/1024),1024,0,stream>>>(output, prefixSum, data, numberOfPoints, dim);
};
template<typename T>
__global__ void gpuDeleteFromArrayTrasformed(T* d_outData,
const unsigned int* d_delete_array,
const T* d_data,
const size_t numElements,
const unsigned int dimensions){
const size_t idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < numElements*dimensions){
const T theData = d_data[idx];
const unsigned int whatPoint = idx%numElements;
const unsigned int whatDim = idx/numElements;
const unsigned int offSet = d_delete_array[whatPoint];
const unsigned int nextOffSet = d_delete_array[whatPoint+1];
const unsigned int maxOffSet = d_delete_array[numElements];
const unsigned int newIndex = whatDim*(numElements-maxOffSet)+whatPoint-offSet;
if(offSet == nextOffSet){
d_outData[newIndex] = theData;
}
//printf(" theData: %f \n whatPoint %u \n whatDim %u \n offSet %u \n nextOffSet %u \n maxOffSet %u \n newIndex %u \n",theData,whatPoint,whatDim,offSet,nextOffSet,maxOffSet,newIndex);
}
}
void sum_scan_blelloch(cudaStream_t stream,
unsigned int* const d_out,
bool* d_in,
const size_t numElems,
bool inverted)
{
// Zero out d_out
checkCudaErrors(cudaMemsetAsync(d_out, 0, numElems * sizeof(unsigned int), stream));
// Set up number of threads and blocks
unsigned int block_sz = MAX_BLOCK_SZ / 2;
unsigned int max_elems_per_block = 2 * block_sz; // due to binary tree nature of algorithm
// If input size is not power of two, the remainder will still need a whole block
// Thus, number of blocks must be the ceiling of input size / max elems that a block can handle
//unsigned int grid_sz = (unsigned int) std::ceil((double) numElems / (double) max_elems_per_block);
// UPDATE: Instead of using ceiling and risking miscalculation due to precision, just automatically
// add 1 to the grid size when the input size cannot be divided cleanly by the block's capacity
unsigned int grid_sz = numElems / max_elems_per_block;
// Take advantage of the fact that integer division drops the decimals
if (numElems % max_elems_per_block != 0)
grid_sz += 1;
// Conflict free padding requires that shared memory be more than 2 * block_sz
unsigned int shmem_sz = max_elems_per_block + ((max_elems_per_block - 1) >> LOG_NUM_BANKS);
// Allocate memory for array of total sums produced by each block
// Array length must be the same as number of blocks
unsigned int* d_block_sums;
checkCudaErrors(cudaMalloc(&d_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(cudaMemsetAsync(d_block_sums, 0, sizeof(unsigned int) * grid_sz, stream));
// Sum scan data allocated to each block
//gpu_sum_scan_blelloch<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block >>>(d_out, d_in, d_block_sums, numElems);
gpu_prescan<<<grid_sz, block_sz, sizeof(unsigned int) * shmem_sz, stream>>>(d_out,
d_in,
d_block_sums,
numElems,
shmem_sz,
max_elems_per_block, inverted);
// Sum scan total sums produced by each block
// Use basic implementation if number of total sums is <= 2 * block_sz
// (This requires only one block to do the scan)
if (grid_sz <= max_elems_per_block)
{
unsigned int* d_dummy_blocks_sums;
checkCudaErrors(cudaMalloc(&d_dummy_blocks_sums, sizeof(unsigned int)));
checkCudaErrors(cudaMemsetAsync(d_dummy_blocks_sums, 0, sizeof(unsigned int), stream));
//gpu_sum_scan_blelloch<<<1, block_sz, sizeof(unsigned int) * max_elems_per_block>>>(d_block_sums, d_block_sums, d_dummy_blocks_sums, grid_sz);
gpu_prescan<<<1, block_sz, sizeof(unsigned int) * shmem_sz, stream>>>(d_block_sums,
d_block_sums,
d_dummy_blocks_sums,
grid_sz,
shmem_sz,
max_elems_per_block);
checkCudaErrors(cudaFree(d_dummy_blocks_sums));
}
// Else, recurse on this same function as you'll need the full-blown scan
// for the block sums
else
{
unsigned int* d_in_block_sums;
checkCudaErrors(cudaMalloc(&d_in_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(cudaMemcpyAsync(d_in_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, cudaMemcpyDeviceToDevice, stream));
sum_scan_blelloch(stream, d_block_sums, d_in_block_sums, grid_sz);
checkCudaErrors(cudaFree(d_in_block_sums));
}
//// Uncomment to examine block sums
//unsigned int* h_block_sums = new unsigned int[grid_sz];
//checkCudaErrors(cudaMemcpy(h_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, cudaMemcpyDeviceToHost));
//std::cout << "Block sums: ";
//for (int i = 0; i < grid_sz; ++i)
//{
// std::cout << h_block_sums[i] << ", ";
//}
//std::cout << std::endl;
//std::cout << "Block sums length: " << grid_sz << std::endl;
//delete[] h_block_sums;
// Add each block's total sum to its scan output
// in order to get the final, global scanned array
gpu_add_block_sums<<<grid_sz, block_sz, 0, stream>>>(d_out, d_out, d_block_sums, numElems);
checkCudaErrors(cudaFree(d_block_sums));
}
void sum_scan_blelloch_managed(cudaStream_t stream, cudaStream_t stream_preprocess,
unsigned int* const d_out,
bool* d_in,
const size_t numElems,
bool inverted)
{
// Zero out d_out
// std::cout << " numElems * sizeof(bool): " << numElems * sizeof(bool) << " " <<d_in<< std::endl;
// for(int i = 0; i < numElems-5; i++){
// std::cout << d_in[i] << " " << std::endl;
// }
// std::cout << std::endl;
checkCudaErrors(cudaMemPrefetchAsync(d_in, numElems * sizeof(bool),0, stream_preprocess));
checkCudaErrors(cudaMemPrefetchAsync(d_out, numElems * sizeof(unsigned int),0, stream_preprocess));
checkCudaErrors(cudaMemsetAsync(d_out, 0, numElems * sizeof(unsigned int), stream_preprocess));
// Set up number of threads and blocks
unsigned int block_sz = MAX_BLOCK_SZ / 2;
unsigned int max_elems_per_block = 2 * block_sz; // due to binary tree nature of algorithm
// If input size is not power of two, the remainder will still need a whole block
// Thus, number of blocks must be the ceiling of input size / max elems that a block can handle
//unsigned int grid_sz = (unsigned int) std::ceil((double) numElems / (double) max_elems_per_block);
// UPDATE: Instead of using ceiling and risking miscalculation due to precision, just automatically
// add 1 to the grid size when the input size cannot be divided cleanly by the block's capacity
unsigned int grid_sz = numElems / max_elems_per_block;
// Take advantage of the fact that integer division drops the decimals
if (numElems % max_elems_per_block != 0)
grid_sz += 1;
// Conflict free padding requires that shared memory be more than 2 * block_sz
unsigned int shmem_sz = max_elems_per_block + ((max_elems_per_block - 1) >> LOG_NUM_BANKS);
// Allocate memory for array of total sums produced by each block
// Array length must be the same as number of blocks
unsigned int* d_block_sums;
checkCudaErrors(cudaMallocManaged(&d_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(cudaMemPrefetchAsync(d_block_sums, sizeof(unsigned int) * grid_sz, 0, stream_preprocess));
checkCudaErrors(cudaMemsetAsync(d_block_sums, 0, sizeof(unsigned int) * grid_sz, stream_preprocess));
// checkCudaErrors(cudaStreamSynchronize(stream_preprocess));
// Sum scan data allocated to each block
//gpu_sum_scan_blelloch<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block >>>(d_out, d_in, d_block_sums, numElems);
gpu_prescan<<<grid_sz, block_sz, sizeof(unsigned int) * shmem_sz, stream>>>(d_out,
d_in,
d_block_sums,
numElems,
shmem_sz,
max_elems_per_block, inverted);
// Sum scan total sums produced by each block
// Use basic implementation if number of total sums is <= 2 * block_sz
// (This requires only one block to do the scan)
if (grid_sz <= max_elems_per_block)
{
unsigned int* d_dummy_blocks_sums;
checkCudaErrors(cudaMallocManaged(&d_dummy_blocks_sums, sizeof(unsigned int)));
checkCudaErrors(cudaMemPrefetchAsync(d_dummy_blocks_sums, sizeof(unsigned int), 0, stream_preprocess));
checkCudaErrors(cudaMemsetAsync(d_dummy_blocks_sums, 0, sizeof(unsigned int), stream_preprocess));
// checkCudaErrors(cudaStreamSynchronize(stream_preprocess));
gpu_prescan<<<1, block_sz, sizeof(unsigned int) * shmem_sz, stream>>>(d_block_sums,
d_block_sums,
d_dummy_blocks_sums,
grid_sz,
shmem_sz,
max_elems_per_block);
checkCudaErrors(cudaFree(d_dummy_blocks_sums));
}
// Else, recurse on this same function as you'll need the full-blown scan
// for the block sums
else
{
unsigned int* d_in_block_sums;
checkCudaErrors(cudaMallocManaged(&d_in_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(cudaMemPrefetchAsync(d_in_block_sums, sizeof(unsigned int) * grid_sz, 0, stream_preprocess));
checkCudaErrors(cudaMemcpyAsync(d_in_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, cudaMemcpyDeviceToDevice, stream));
sum_scan_blelloch_managed(stream, stream_preprocess, d_block_sums, d_in_block_sums, grid_sz);
checkCudaErrors(cudaFree(d_in_block_sums));
}
// Add each block's total sum to its scan output
// in order to get the final, global scanned array
gpu_add_block_sums<<<grid_sz, block_sz, 0, stream>>>(d_out, d_out, d_block_sums, numElems);
checkCudaErrors(cudaFree(d_block_sums));
}
void cpu_sum_scan(unsigned int* const h_out,
const bool* const h_in,
const size_t numElems)
{
unsigned int run_sum = 0;
for (int i = 0; i < numElems; ++i)
{
h_out[i] = run_sum;
run_sum = run_sum + h_in[i];
}
}
void cpu_sum_scan(unsigned int* const h_out,
const unsigned int* const h_in,
const size_t numElems)
{
unsigned int run_sum = 0;
for (int i = 0; i < numElems; ++i)
{
h_out[i] = run_sum;
run_sum = run_sum + h_in[i];
}
}
void cpuDeleteFromArray(float* const h_outData,
const bool* h_delete_array,
const float* data,
const size_t numElements,
unsigned int dimension){
unsigned int ammountNotDeleted = 0;
for(unsigned int i = 0 ; i < numElements ; ++i){
if(!h_delete_array[i]){
for(unsigned int dimIndex = 0 ; dimIndex < dimension ; ++dimIndex){
h_outData[ammountNotDeleted+dimIndex] = data[i*dimension+dimIndex];
}
ammountNotDeleted += dimension;
}
}
}
/*
 * This function takes the data and an array of bools that is one element longer than the data; the last bool is not used.
 * It returns the list in which entry i of the data is removed when the corresponding flag (XORed with `inverted`) is set.
 * It also deletes from the indexes that keep track of what is what,
 * but it does not resize the index array, so part of it can be garbage.
 */
void deleteFromArrayOld(cudaStream_t stream,
float* d_outData,
bool* d_delete_array,
const float* d_data,
const unsigned long numElements,
const unsigned int dimension,
bool inverted,
float* time){
// Set up device-side memory for output
unsigned int* d_out_blelloch;
checkCudaErrors(cudaMalloc(&d_out_blelloch, sizeof(unsigned int) * (numElements+1)));
sum_scan_blelloch(stream, d_out_blelloch,d_delete_array,(numElements+1), inverted);
//unsigned int* const d_outData, const unsigned int* delete_array, const float* data,unsigned int* indexes , const size_t numElements
const unsigned int threadsUsed = 1024;
unsigned int numberOfvaluesPrThread = 10; // hardcoded, could be up to 11,... it is bounded by the size of shared memory
unsigned int numberOfThreadsPrPoint = ceilf((float)dimension/(float)numberOfvaluesPrThread);
unsigned int smem = threadsUsed*sizeof(float)*numberOfvaluesPrThread;
unsigned int blocksNeccesary = (numElements*numberOfThreadsPrPoint)/threadsUsed;
if((numElements*numberOfThreadsPrPoint)%1024 != 0){
blocksNeccesary++;
}
cudaEvent_t start, stop;
if(time != nullptr){
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
}
gpuDeleteFromArrayOld<<<blocksNeccesary,threadsUsed,smem, stream>>>(d_outData,d_out_blelloch,d_data,numElements,dimension,numberOfThreadsPrPoint);
cudaFree(d_out_blelloch);
if(time != nullptr){
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(time, start, stop);
}
}
/*
 * This is take two of the function; different versions are kept to track performance changes.
 * It takes the data and an array of bools that is one element longer than the data; the last bool is not used.
 * It returns the list in which entry i of the data is removed when the corresponding flag (XORed with `inverted`) is set.
 * It also deletes from the indexes that keep track of what is what,
 * but it does not resize the index array, so part of it can be garbage.
 */
void deleteFromArray(cudaStream_t stream,
float* d_outData,
bool* d_delete_array,
const float* d_data,
const unsigned long numElements,
const unsigned int dimension,
const bool inverted,
float* time){
const unsigned int threadsUsed = 1024;
// Set up device-side memory for output
unsigned int* d_out_blelloch;
checkCudaErrors(cudaMalloc(&d_out_blelloch, sizeof(unsigned int) * (numElements+1)));
sum_scan_blelloch(stream, d_out_blelloch,d_delete_array,(numElements+1), inverted);
unsigned int blocksToUse = numElements*dimension/threadsUsed;
if((numElements*dimension)%threadsUsed!=0){
blocksToUse++;
}
cudaEvent_t start, stop;
if(time != nullptr){
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
}
gpuDeleteFromArray<<<blocksToUse,threadsUsed,0, stream>>>(d_outData,d_out_blelloch,d_data,numElements,dimension);
checkCudaErrors(cudaFree(d_out_blelloch));
if(time != nullptr){
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(time, start, stop);
}
};
void deleteFromArray_managed(cudaStream_t stream,
float* d_outData,
bool* d_delete_array,
const float* d_data,
const unsigned long numElements,
const unsigned int dimension,
const bool inverted,
float* time){
const unsigned int threadsUsed = 1024;
// Set up device-side memory for output
unsigned int* d_out_blelloch;
checkCudaErrors(cudaMallocManaged(&d_out_blelloch, sizeof(unsigned int) * (numElements+1)));
// std::cout << "(numElements+1): " << (numElements+1) << " "<< d_delete_array << std::endl;
sum_scan_blelloch_managed(stream, stream, d_out_blelloch,d_delete_array,(numElements+1), inverted);
unsigned int blocksToUse = numElements*dimension/threadsUsed;
if((numElements*dimension)%threadsUsed!=0){
blocksToUse++;
}
gpuDeleteFromArray<<<blocksToUse,threadsUsed,0, stream>>>(d_outData,d_out_blelloch,d_data,numElements,dimension);
checkCudaErrors(cudaFree(d_out_blelloch));
};
void deleteFromArrayWrapper(unsigned int dimGrid, unsigned int dimBlock, cudaStream_t stream,
float* data, unsigned int* prefixSum, unsigned int numberOfElements,
unsigned int dim, float* output){
gpuDeleteFromArray<<<dimGrid, dimBlock, 0, stream>>>(output,
prefixSum,
data,
numberOfElements, dim);
};
void deleteFromArrayWrapper(unsigned int dimGrid, unsigned int dimBlock, cudaStream_t stream,
unsigned int* data, unsigned int* prefixSum, unsigned int numberOfElements,
unsigned int dim, unsigned* output){
gpuDeleteFromArray<<<dimGrid, dimBlock, 0, stream>>>(output,
prefixSum,
data,
numberOfElements, dim);
};
/*
 * This is take two of the function; different versions are kept to track performance changes.
 * It takes the data and an array of bools that is one element longer than the data; the last bool is not used.
 * It returns the list in which entry i of the data is removed when the corresponding flag (XORed with `inverted`) is set.
 * It also deletes from the indexes that keep track of what is what,
 * but it does not resize the index array, so part of it can be garbage.
 */
void deleteFromArraySpecial(cudaStream_t stream,
float* d_outData,
bool* d_delete_array,
const float* d_data,
const unsigned long numElements,
const unsigned int dimension,
const bool inverted,
float* time){
const unsigned int threadsUsed = 1024;
// Set up device-side memory for output
unsigned int* d_out_blelloch;
checkCudaErrors(cudaMalloc(&d_out_blelloch, sizeof(unsigned int) * (numElements+1)));
sum_scan_blelloch(stream, d_out_blelloch,d_delete_array,(numElements+1), inverted);
unsigned int blocksToUse = 1;
cudaEvent_t start, stop;
if(time != nullptr){
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
}
gpuDeleteFromArraySpeical<<<blocksToUse,threadsUsed,0, stream>>>(d_outData,d_out_blelloch,d_data,numElements,dimension);
checkCudaErrors(cudaFree(d_out_blelloch));
if(time != nullptr){
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(time, start, stop);
}
}
/*
 * This function does the same as the non-transformed version; it just receives the data in a transposed (dimension-major) layout.
 * This is take two of the function; different versions are kept to track performance changes.
 * It takes the data and an array of bools that is one element longer than the data; the last bool is not used.
 * It returns the list in which entry i of the data is removed when the corresponding flag (XORed with `inverted`) is set.
 * It also deletes from the indexes that keep track of what is what,
 * but it does not resize the index array, so part of it can be garbage.
 */
void deleteFromArrayTrasformedData(cudaStream_t stream,
float* d_outData,
bool* d_delete_array,
const float* d_data,
const unsigned long numElements,
const unsigned int dimension,
const bool inverted,
float* time){
const unsigned int threadsUsed = 1024;
// Set up device-side memory for output
unsigned int* d_out_blelloch;
checkCudaErrors(cudaMalloc(&d_out_blelloch, sizeof(unsigned int) * (numElements+1)));
sum_scan_blelloch(stream, d_out_blelloch,d_delete_array,(numElements+1), inverted);
unsigned int blocksToUse = numElements*dimension/threadsUsed;
if((numElements*dimension)%threadsUsed!=0){
blocksToUse++;
}
cudaEvent_t start, stop;
if(time != nullptr){
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
}
gpuDeleteFromArrayTrasformed<float><<<blocksToUse,threadsUsed,0, stream>>>(d_outData,d_out_blelloch,d_data,numElements,dimension);
if(time != nullptr){
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(time, start, stop);
}
checkCudaErrors(cudaFree(d_out_blelloch));
};
void deleteFromArrayTransfomedDataWrapper(unsigned int dimGrid, unsigned int dimBlock, cudaStream_t stream,
float* data, unsigned int* prefixSum, unsigned int numberOfElements,
unsigned int dim, float* output){
gpuDeleteFromArrayTrasformed<float><<<dimGrid, dimBlock, 0, stream>>>(output, prefixSum, data, numberOfElements, dim);
};
void deleteFromArrayTransfomedDataWrapper(unsigned int dimGrid, unsigned int dimBlock, cudaStream_t stream,
unsigned int* data, unsigned int* prefixSum, unsigned int numberOfElements,
unsigned int dim, unsigned int* output){
gpuDeleteFromArrayTrasformed<unsigned int><<<dimGrid, dimBlock, 0, stream>>>(output, prefixSum, data, numberOfElements, dim);
};
void deleteFromArray(float* d_outData,
bool* d_delete_array,
const float* d_data,
const unsigned long numElements,
const unsigned int dimension,
bool inverted,
float* time){
cudaStream_t stream;
checkCudaErrors(cudaStreamCreate(&stream));
deleteFromArray(stream, d_outData, d_delete_array, d_data, numElements, dimension, inverted,time);
checkCudaErrors(cudaStreamDestroy(stream));
};
|
3fc2a643b6fbec8a1ae2f32c2318cc7cddd79bab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*$Id: MarsLib.cu 755 2009-11-18 13:22:54Z wenbinor $*/
/**
*This is the source code for Mars, a MapReduce framework on graphics
*processors.
*Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia)
*Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com).
*If you have any question on the code, please contact us at
* [email protected] or [email protected]
*
*The license is a free non-exclusive, non-transferable license to reproduce,
*use, modify and display the source code version of the Software, with or
*without modifications solely for non-commercial research, educational or
*evaluation purposes. The license does not entitle Licensee to technical support,
*telephone assistance, enhancements or updates to the Software. All rights, title
*to and ownership interest in Mars, including all intellectual property rights
*therein shall remain in HKUST.
*/
#ifndef __MRLIB_CU__
#define __MRLIB_CU__
#include "MarsInc.h"
#include "map.cu"
#include "reduce.cu"
//----------------------------------------------
//Get default runtime configuration
//
//return: default spec
//----------------------------------------------
Spec_t *GetDefaultSpec()
{
Spec_t *spec = (Spec_t*)malloc(sizeof(Spec_t));
if (NULL == spec) exit(-1);
memset(spec, 0, sizeof(Spec_t));
return spec;
}
//--------------------------------------------------------
//Initiate map reduce spec
//--------------------------------------------------------
void InitMapReduce(Spec_t* spec)
{
Spec_t* g_spec = spec;
if (g_spec->dimBlockMap <= 0)
g_spec->dimBlockMap = DEFAULT_DIMBLOCK;
if (g_spec->dimBlockReduce <= 0)
g_spec->dimBlockReduce = DEFAULT_DIMBLOCK;
if (g_spec->numRecTaskReduce <= 0)
g_spec->numRecTaskReduce = DEFAULT_NUMTASK;
if (g_spec->numRecTaskMap <= 0)
g_spec->numRecTaskMap = DEFAULT_NUMTASK;
if (g_spec->workflow <= 0)
g_spec->workflow = MAP_ONLY;
}
//--------------------------------------------------
//Add a map input record
//
//param : spec
//param : key -- a pointer to a buffer
//param : val -- a pointer to a buffer
//param : keySize
//param : valSize
//--------------------------------------------------
void AddMapInputRecord(Spec_t* spec,
void* key,
void* val,
int keySize,
int valSize)
{
assert(NULL != spec);
static int2 curOffset;
static int3 curChunkNum;
int index = spec->inputRecordCount;
const int dataChunkSize = 1024*1024*256;
if (spec->inputRecordCount > 0)
{
if (dataChunkSize*curChunkNum.x < (curOffset.x + keySize))
spec->inputKeys = (char*)realloc(spec->inputKeys, (++curChunkNum.x)*dataChunkSize);
memcpy(spec->inputKeys+curOffset.x, key, keySize);
if (dataChunkSize*curChunkNum.y < (curOffset.y + valSize))
spec->inputVals = (char*)realloc(spec->inputVals, (++curChunkNum.y)*dataChunkSize);
memcpy(spec->inputVals+curOffset.y, val, valSize);
if (dataChunkSize*curChunkNum.z < (spec->inputRecordCount+1)*sizeof(int4))
spec->inputOffsetSizes = (int4*)realloc(spec->inputOffsetSizes,
(++curChunkNum.z)*dataChunkSize);
}
else
{
spec->inputKeys = (char*)malloc(dataChunkSize);
if (NULL == spec->inputKeys) exit(-1);
memcpy(spec->inputKeys, key, keySize);
spec->inputVals = (char*)malloc(dataChunkSize);
if (NULL == spec->inputVals) exit(-1);
memcpy(spec->inputVals, val, valSize);
spec->inputOffsetSizes = (int4*)malloc(dataChunkSize);
curChunkNum.x++;
curChunkNum.y++;
curChunkNum.z++;
}
spec->inputOffsetSizes[index].x = curOffset.x;
spec->inputOffsetSizes[index].y = keySize;
spec->inputOffsetSizes[index].z = curOffset.y;
spec->inputOffsetSizes[index].w = valSize;
curOffset.x += keySize;
curOffset.y += valSize;
spec->inputRecordCount++;
}
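//--------------------------------------------------
//Usage sketch (editor's addition, not in the original Mars sources): a
//typical host driver builds a Spec_t, feeds records and runs the job; the
//record layout below is hypothetical, and map()/map_count() (plus
//reduce()/reduce_count() when a reduce workflow is selected) come from the
//user-supplied map.cu/reduce.cu.
//
//  Spec_t* spec = GetDefaultSpec();
//  spec->workflow = MAP_ONLY;   // or MAP_GROUP; other fields get defaults
//  spec->outputToHost = 1;      // copy results back to the host afterwards
//  for (int i = 0; i < numRecords; ++i)
//      AddMapInputRecord(spec, &keys[i], &vals[i], sizeof(keys[i]), sizeof(vals[i]));
//  MapReduce(spec);
//  // consume spec->outputKeys / spec->outputVals / spec->outputOffsetSizes
//  FinishMapReduce(spec);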
//-------------------------------------------------
//Called by user defined map_count function
//
//param : keySize
//param : valSize
//param : interKeysSizePerTask
//param : interValsSizePerTask
//param : interCountPerTask
//-------------------------------------------------
__device__ void EmitInterCount(int keySize,
int valSize,
int* interKeysSizePerTask,
int* interValsSizePerTask,
int* interCountPerTask)
{
int index = TID;
interKeysSizePerTask[index] += keySize;
interValsSizePerTask[index] += valSize;
interCountPerTask[index]++;
}
//-------------------------------------------------
//called by user defined map function
//
//-------------------------------------------------
__device__ void EmitIntermediate(void* key,
void* val,
int keySize,
int valSize,
int* psKeySizes,
int* psValSizes,
int* psCounts,
int2* keyValOffsets,
char* interKeys,
char* interVals,
int4* interOffsetSizes,
int* curIndex)
{
#ifndef __DEVICE_EMULATION__
__syncthreads();
#endif
int index = TID;
int2 l_keyValOffsets = keyValOffsets[index];
char *pKeySet = (char*)(interKeys + psKeySizes[index] + l_keyValOffsets.x);
char *pValSet = (char*)(interVals + psValSizes[index] + l_keyValOffsets.y);
char* sKey = (char*)key;
char* sVal = (char*)val;
for (int i = 0; i < keySize; ++i)
pKeySet[i] = sKey[i];
for (int i = 0; i < valSize; ++i)
pValSet[i] = sVal[i];
l_keyValOffsets.x += keySize;
l_keyValOffsets.y += valSize;
keyValOffsets[index] = l_keyValOffsets;
int l_curIndex = curIndex[index];
int l_psCounts = psCounts[index];
int l_curPs = l_curIndex + l_psCounts;
int4 l_interOffsetSizes1 = interOffsetSizes[l_curPs];
int4 l_interOffsetSizes2 = interOffsetSizes[l_curPs-1];
if (l_curIndex != 0)
{
l_interOffsetSizes1.x = (l_interOffsetSizes2.x + l_interOffsetSizes2.y);
l_interOffsetSizes1.z = (l_interOffsetSizes2.z + l_interOffsetSizes2.w);
}
l_interOffsetSizes1.y = keySize;
l_interOffsetSizes1.w = valSize;
interOffsetSizes[l_curPs] = l_interOffsetSizes1;
++l_curIndex;
curIndex[index] = l_curIndex;
}
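//-------------------------------------------------
//Example (editor's sketch of the user side in map.cu, not original code):
//an identity mapper forwards each record through the two emit helpers
//above; the parameter lists simply mirror the calls made by MapperCount
//and Mapper below.
//
//  __device__ void map_count(void* key, void* val, int keySize, int valSize,
//                            int* interKeysSizePerTask, int* interValsSizePerTask,
//                            int* interCountPerTask)
//  {
//      EmitInterCount(keySize, valSize, interKeysSizePerTask,
//                     interValsSizePerTask, interCountPerTask);
//  }
//
//  __device__ void map(void* key, void* val, int keySize, int valSize,
//                      int* psKeySizes, int* psValSizes, int* psCounts,
//                      int2* keyValOffsets, char* interKeys, char* interVals,
//                      int4* interOffsetSizes, int* curIndex)
//  {
//      EmitIntermediate(key, val, keySize, valSize, psKeySizes, psValSizes,
//                       psCounts, keyValOffsets, interKeys, interVals,
//                       interOffsetSizes, curIndex);
//  }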
//-------------------------------------------------
//Calculate intermediate data's size
//
//param : inputKeys
//param : inputVals
//param : inputOffsetSizes
//param : interKeysSizePerTask
//param : interValsSizePerTask
//param : interCountPerTask
//param : recordNum -- total number of records
//param : recordsPerTask
//-------------------------------------------------
__global__ void MapperCount(char* inputKeys,
char* inputVals,
int4* inputOffsetSizes,
int* interKeysSizePerTask,
int* interValsSizePerTask,
int* interCountPerTask,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int4 offsetSize = inputOffsetSizes[cindex];
char *key = inputKeys + offsetSize.x;
char *val = inputVals + offsetSize.z;
map_count(key,
val,
offsetSize.y,
offsetSize.w,
interKeysSizePerTask,
interValsSizePerTask,
interCountPerTask);
}
}
//--------------------------------------------------
//mapper
//--------------------------------------------------
__global__ void Mapper(char* inputKeys,
char* inputVals,
int4* inputOffsetSizes,
int* psKeySizes,
int* psValSizes,
int* psCounts,
int2* keyValOffsets,
char* interKeys,
char* interVals,
int4* interOffsetSizes,
int* curIndex,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
int l_psCounts = psCounts[index];
int4 l_interOffsetSizes = interOffsetSizes[l_psCounts];
l_interOffsetSizes.x = psKeySizes[index];
l_interOffsetSizes.z = psValSizes[index];
interOffsetSizes[l_psCounts] = l_interOffsetSizes;
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int4 offsetSize = inputOffsetSizes[cindex];
char *key = inputKeys + offsetSize.x;
char *val = inputVals + offsetSize.z;
map(key,
val,
offsetSize.y,
offsetSize.w,
psKeySizes,
psValSizes,
psCounts,
keyValOffsets,
interKeys,
interVals,
interOffsetSizes,
curIndex);
}
}
//--------------------------------------------------
//start map
//
//1, get map input data on host
//2, upload map input data to device memory
// (keys, vals, keyOffsets, valOffsets, keySizes, valSizes)
//3, determine the number of threads to run
//4, calculate intermediate data keys' buf size
// and values' buf size
//5, do prefix sum on--
// i) d_interKeysSizePerTask
// ii) d_interValsSizePerTask
// iii) d_interCountPerTask
//6, allocate intermediate memory on device memory
//7, start map
//8, free allocated memory
//--------------------------------------------------
int startMap(Spec_t* spec)
{
Spec_t* g_spec = spec;
if (g_spec->inputKeys == NULL) { DoLog("Error: no any input keys"); exit(0);}
if (g_spec->inputVals == NULL) { DoLog("Error: no any input values"); exit(0); }
if (g_spec->inputOffsetSizes == NULL) { DoLog( "Error: no any input pointer info"); exit(0); }
if (g_spec->inputRecordCount == 0) {DoLog( "Error: invalid input record count"); exit(0);}
//-------------------------------------------------------
//1, get map input data on host
//-------------------------------------------------------
int h_inputRecordCount = g_spec->inputRecordCount;
int h_inputKeysBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].x +
g_spec->inputOffsetSizes[h_inputRecordCount-1].y;
int h_inputValsBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].z +
g_spec->inputOffsetSizes[h_inputRecordCount-1].w;
char* h_inputKeys = g_spec->inputKeys;
char* h_inputVals = g_spec->inputVals;
int4* h_inputOffsetSizes = g_spec->inputOffsetSizes;
DoLog( "** Map Input: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records",
h_inputKeysBufSize, h_inputValsBufSize, sizeof(int4)*h_inputRecordCount, h_inputRecordCount);
//-------------------------------------------------------
//2, upload map input data onto device memory
//-------------------------------------------------------
DoLog( "** Upload map input data onto device memory");
TimeVal_t uploadTv;
startTimer(&uploadTv);
char* d_inputKeys = NULL;
char* d_inputVals = NULL;
int4* d_inputOffsetSizes = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_inputKeys, h_inputKeysBufSize));
CUDA_SAFE_CALL(hipMemcpy(d_inputKeys, h_inputKeys, h_inputKeysBufSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMalloc((void**)&d_inputVals, h_inputValsBufSize));
CUDA_SAFE_CALL(hipMemcpy(d_inputVals, h_inputVals, h_inputValsBufSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMalloc((void**)&d_inputOffsetSizes, sizeof(int4)*h_inputRecordCount));
hipMemcpy(d_inputOffsetSizes, h_inputOffsetSizes, sizeof(int4)*h_inputRecordCount, hipMemcpyHostToDevice);
endTimer("PCI-E I/O", &uploadTv);
//----------------------------------------------
//3, determine the number of threads to run
//----------------------------------------------
dim3 h_dimBlock(g_spec->dimBlockMap,1,1);
dim3 h_dimGrid(1,1,1);
int h_recordsPerTask = g_spec->numRecTaskMap;
int numBlocks = CEIL(CEIL(h_inputRecordCount, h_recordsPerTask), h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y;
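// Editor's note (hypothetical figures, assuming CEIL() is the usual ceiling
// division): with 1,000,000 input records, numRecTaskMap = 1 and
// dimBlockMap = 256, CEIL(1000000,1) = 1000000 tasks and CEIL(1000000,256) =
// 3907 blocks; THREAD_CONF may fold those blocks into a 2-D grid, so
// h_actualNumThreads comes out as at least 3907 * 256 = 1000192 threads.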
TimeVal_t mapTimer;
startTimer(&mapTimer);
//----------------------------------------------
//4, calculate intermediate data keys' buf size
// and values' buf size
//----------------------------------------------
DoLog( "** MapCount");
int* d_interKeysSizePerTask = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_interKeysSizePerTask, sizeof(int)*h_actualNumThreads));
hipMemset(d_interKeysSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_interValsSizePerTask = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_interValsSizePerTask, sizeof(int)*h_actualNumThreads));
hipMemset(d_interValsSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_interCountPerTask = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_interCountPerTask, sizeof(int)*h_actualNumThreads));
hipMemset(d_interCountPerTask, 0, sizeof(int)*h_actualNumThreads);
hipLaunchKernelGGL(( MapperCount), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_inputKeys,
d_inputVals,
d_inputOffsetSizes,
d_interKeysSizePerTask,
d_interValsSizePerTask,
d_interCountPerTask,
h_inputRecordCount,
h_recordsPerTask,
h_actualNumThreads);
hipDeviceSynchronize();
//-----------------------------------------------
//5, do prefix sum on--
// i) d_interKeysSizePerTask
// ii) d_interValsSizePerTask
// iii) d_interCountPerTask
//-----------------------------------------------
DoLog( "** Do prefix sum on intermediate data's size\n");
int *d_psKeySizes = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads));
int h_allKeySize = prefexSum((int*)d_interKeysSizePerTask, (int*)d_psKeySizes, h_actualNumThreads);
int *d_psValSizes = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads));
int h_allValSize = prefexSum((int*)d_interValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads);
int *d_psCounts = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads));
int h_allCounts = prefexSum((int*)d_interCountPerTask, (int*)d_psCounts, h_actualNumThreads);
//fprintf(stderr,"all count = %d",h_allCounts);
DoLog( "** Map Output: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records",
h_allKeySize, h_allValSize, h_allCounts * sizeof(int4), h_allCounts);
if (h_allCounts == 0)
{
DoLog( "** No output.");
hipFree(d_inputKeys);
hipFree(d_inputVals);
hipFree(d_inputOffsetSizes);
hipFree(d_psKeySizes);
hipFree(d_psValSizes);
hipFree(d_psCounts);
endTimer("Map", &mapTimer);
return 1;
}
//-----------------------------------------------
//6, allocate intermediate memory on device memory
//-----------------------------------------------
DoLog( "** Allocate intermediate memory on device memory");
char* d_interKeys = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_interKeys, h_allKeySize));
hipMemset(d_interKeys, 0, h_allKeySize);
char* d_interVals = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_interVals, h_allValSize));
hipMemset(d_interVals, 0, h_allValSize);
int4* d_interOffsetSizes = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_interOffsetSizes, sizeof(int4)*h_allCounts));
hipMemset(d_interOffsetSizes, 0, sizeof(int4)*h_allCounts);
//--------------------------------------------------
//7, start map
//--------------------------------------------------
DoLog( "** Map");
int2* d_keyValOffsets = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads));
hipMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads);
int* d_curIndex = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads));
hipMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads);
//h_dimBlock.x =3;
int sizeSmem = h_dimBlock.x * sizeof(int) * 5;
hipLaunchKernelGGL(( Mapper), dim3(h_dimGrid), dim3(h_dimBlock), sizeSmem, 0, d_inputKeys,
d_inputVals,
d_inputOffsetSizes,
d_psKeySizes,
d_psValSizes,
d_psCounts,
d_keyValOffsets,
d_interKeys,
d_interVals,
d_interOffsetSizes,
d_curIndex,
h_inputRecordCount,
h_recordsPerTask,
h_actualNumThreads);
hipDeviceSynchronize();
g_spec->interKeys = d_interKeys;
g_spec->interVals = d_interVals;
g_spec->interOffsetSizes = d_interOffsetSizes;
//fprintf(stderr,"record count = %d",h_allCounts);
g_spec->interRecordCount = h_allCounts;
g_spec->interDiffKeyCount = h_allCounts;
g_spec->interAllKeySize = h_allKeySize;
g_spec->interAllValSize = h_allValSize;
//----------------------------------------------
//8, free
//----------------------------------------------
hipFree(d_interKeysSizePerTask);
hipFree(d_interValsSizePerTask);
hipFree(d_interCountPerTask);
hipFree(d_keyValOffsets);
hipFree(d_curIndex);
hipFree(d_inputKeys);
hipFree(d_inputVals);
hipFree(d_inputOffsetSizes);
hipFree(d_psKeySizes);
hipFree(d_psValSizes);
hipFree(d_psCounts);
endTimer("Map", &mapTimer);
return 0;
}
void startGroup(Spec_t* spec)
{
Spec_t* g_spec = spec;
int interDiffKeyCount = 0;
char* d_outputKeys = NULL;
char* d_outputVals = NULL;
int4* d_outputOffsetSizes = NULL;
int2** h_outputKeyListRange = NULL;
DoLog( "** Sort for group");
CUDA_SAFE_CALL(hipMalloc((void**)&d_outputKeys, g_spec->interAllKeySize));
CUDA_SAFE_CALL(hipMalloc((void**)&d_outputVals, g_spec->interAllValSize));
CUDA_SAFE_CALL(hipMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*g_spec->interRecordCount));
h_outputKeyListRange = (int2**)malloc(sizeof(int2*));
saven_initialPrefixSum(g_spec->interRecordCount);
interDiffKeyCount =
sort_GPU (g_spec->interKeys,
g_spec->interAllKeySize,
g_spec->interVals,
g_spec->interAllValSize,
g_spec->interOffsetSizes,
g_spec->interRecordCount,
d_outputKeys,
d_outputVals,
d_outputOffsetSizes,
h_outputKeyListRange);
DoLog( "** InterRecordCount:%d, number of groups: %d", g_spec->interRecordCount, interDiffKeyCount);
//fprintf(stderr,"** InterRecordCount:%d, number of groups: %d", g_spec->interRecordCount, interDiffKeyCount);
g_spec->interKeys = d_outputKeys;
g_spec->interVals = d_outputVals;
g_spec->interOffsetSizes = d_outputOffsetSizes;
g_spec->interDiffKeyCount = interDiffKeyCount;
int keyListRangeSize = g_spec->interDiffKeyCount * sizeof(int2);
CUDA_SAFE_CALL(hipMalloc((void**)&g_spec->interKeyListRange, keyListRangeSize));
CUDA_SAFE_CALL(hipMemcpy(g_spec->interKeyListRange, *h_outputKeyListRange, keyListRangeSize, hipMemcpyHostToDevice));
free(*h_outputKeyListRange);
free(h_outputKeyListRange);
}
//--------------------------------------------------------
//get a value from the value list of the same key
//
//param : vals
//param : interOffsetSizes
//param : keyIndex
//param : valStartIndex
//return: the wanted value
//--------------------------------------------------------
__device__ void *GetVal(void *vals, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
int4 offset = interOffsetSizes[valStartIndex];
return (void*)((char*)vals + keyIndex * offset.w);
}
__device__ void *GetKey(void *key, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
int4 offset = interOffsetSizes[valStartIndex];
return (void*)((char*)key + keyIndex * offset.y);
}
//---------------------------------------------------------
//called by user defined reduce_count function
//---------------------------------------------------------
__device__ void EmitCount(int keySize,
int valSize,
int* outputKeysSizePerTask,
int* outputValsSizePerTask,
int* outputCountPerTask)
{
int index = TID;
outputKeysSizePerTask[index] += keySize;
outputValsSizePerTask[index] += valSize;
outputCountPerTask[index]++;
}
//---------------------------------------------------------
//called by user defined reduce function
//---------------------------------------------------------
__device__ void Emit (char* key,
char* val,
int keySize,
int valSize,
int* psKeySizes,
int* psValSizes,
int* psCounts,
int2* keyValOffsets,
char* outputKeys,
char* outputVals,
int4* outputOffsetSizes,
int* curIndex)
{
#ifndef __DEVICE_EMULATION__
__syncthreads();
#endif
int index = TID;
char *pKeySet = (char*)(outputKeys + psKeySizes[index] + keyValOffsets[index].x);
char *pValSet = (char*)(outputVals + psValSizes[index] + keyValOffsets[index].y);
for (int i = 0; i < keySize; i++)
pKeySet[i] = key[i];
for (int i = 0; i < valSize; i++)
pValSet[i] = val[i];
keyValOffsets[index].x += keySize;
keyValOffsets[index].y += valSize;
if (curIndex[index] != 0)
{
outputOffsetSizes[psCounts[index] + curIndex[index]].x =
(outputOffsetSizes[psCounts[index] + curIndex[index] - 1].x +
outputOffsetSizes[psCounts[index] + curIndex[index] - 1].y);
outputOffsetSizes[psCounts[index] + curIndex[index]].z =
(outputOffsetSizes[psCounts[index] + curIndex[index] - 1].z +
outputOffsetSizes[psCounts[index] + curIndex[index] - 1].w);
}
outputOffsetSizes[psCounts[index] + curIndex[index]].y = keySize;
outputOffsetSizes[psCounts[index] + curIndex[index]].w = valSize;
curIndex[index]++;
}
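//---------------------------------------------------------
//Example (editor's sketch of the user side in reduce.cu, not original code):
//a reducer that sums fixed-size int values for one key walks the value list
//with GetVal() and emits a single record via Emit(); the parameter order
//mirrors the call made by the Reducer kernel below. A matching reduce_count()
//would report keySize and sizeof(int) through EmitCount() above.
//
//  __device__ void reduce(void* key, void* vals, int keySize, int valCount,
//                         int* psKeySizes, int* psValSizes, int* psCounts,
//                         int2* keyValOffsets, int4* interOffsetSizes,
//                         char* outputKeys, char* outputVals,
//                         int4* outputOffsetSizes, int* curIndex, int valStartIndex)
//  {
//      int sum = 0;
//      for (int i = 0; i < valCount; ++i)
//          sum += *(int*)GetVal(vals, interOffsetSizes, i, valStartIndex);
//      Emit((char*)key, (char*)&sum, keySize, sizeof(int),
//           psKeySizes, psValSizes, psCounts, keyValOffsets,
//           outputKeys, outputVals, outputOffsetSizes, curIndex);
//  }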
//-------------------------------------------------------
//calculate output data's size
//-------------------------------------------------------
__global__ void ReducerCount(char* interKeys,
char* interVals,
int4* interOffsetSizes,
int2* interKeyListRange,
int* outputKeysSizePerTask,
int* outputValsSizePerTask,
int* outputCountPerTask,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
//for (int i = 0; i <= recordsPerTask; i++)
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int valStartIndex = interKeyListRange[cindex].x;
int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x;
int keySize = interOffsetSizes[interKeyListRange[cindex].x].y;
char *key = interKeys + interOffsetSizes[valStartIndex].x;
char *vals = interVals + interOffsetSizes[valStartIndex].z;
reduce_count(key,
vals,
keySize,
valCount,
interOffsetSizes,
outputKeysSizePerTask,
outputValsSizePerTask,
outputCountPerTask);
}
}
//-------------------------------------------------------
//Reducer
//
//-------------------------------------------------------
__global__ void Reducer(char* interKeys,
char* interVals,
int4* interOffsetSizes,
int2* interKeyListRange,
int* psKeySizes,
int* psValSizes,
int* psCounts,
char* outputKeys,
char* outputVals,
int4* outputOffsetSizes,
int2* keyValOffsets,
int* curIndex,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
outputOffsetSizes[psCounts[index]].x = psKeySizes[index];
outputOffsetSizes[psCounts[index]].z = psValSizes[index];
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int valStartIndex = interKeyListRange[cindex].x;
int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x;
int keySize = interOffsetSizes[interKeyListRange[cindex].x].y;
char *key = interKeys + interOffsetSizes[valStartIndex].x;
char *vals = interVals + interOffsetSizes[valStartIndex].z;
reduce(key,
vals,
keySize,
valCount,
psKeySizes,
psValSizes,
psCounts,
keyValOffsets,
interOffsetSizes,
outputKeys,
outputVals,
outputOffsetSizes,
curIndex,
valStartIndex);
}
}
//----------------------------------------------
//start reduce
//
//1, if there is no reduce phase, just return
// the user then uses spec->interKeys/spec->interVals
// for further processing
//2, get reduce input data on host
//3, upload reduce input data onto device memory
//4, determine the number of threads to run
//5, calculate output data keys' buf size
// and values' buf size
//6, do prefix sum on--
// i) d_outputKeysSizePerTask
// ii) d_outputValsSizePerTask
// iii) d_outputCountPerTask
//7, allocate output memory on device memory
//8, start reduce
//9, copy output data to Spec_t structure
//10,free allocated memory
//----------------------------------------------
void startReduce(Spec_t* spec)
{
Spec_t* g_spec = spec;
if (g_spec->interKeys == NULL) {DoLog( "Error: no any intermediate keys"); exit(0);}
if (g_spec->interVals == NULL) {DoLog( "Error: no any intermediate values"); exit(0);}
if (g_spec->interOffsetSizes == NULL) {DoLog( "Error: no any intermediate pointer info");exit(0);}
if (g_spec->interRecordCount == 0) {DoLog( "Error: invalid intermediate record count");exit(0);}
if (g_spec->interKeyListRange == NULL) { DoLog( "Error: no any key list range");exit(0);}
if (g_spec->interDiffKeyCount == 0) { DoLog( "Error: invalid intermediate diff key count");exit(0);}
//-------------------------------------------------------
//2, get reduce input data on host
//-------------------------------------------------------
int h_interDiffKeyCount = g_spec->interDiffKeyCount;
char* d_interKeys = g_spec->interKeys;
char* d_interVals = g_spec->interVals;
int4* d_interOffsetSizes = g_spec->interOffsetSizes;
int2* d_interKeyListRange = g_spec->interKeyListRange;
//----------------------------------------------
//4, determine the number of threads to run
//----------------------------------------------
dim3 h_dimBlock(g_spec->dimBlockReduce,1,1);
dim3 h_dimGrid(1,1,1);
int h_recordsPerTask = g_spec->numRecTaskReduce;
int numBlocks = CEIL(CEIL(h_interDiffKeyCount, h_recordsPerTask), h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y;
//----------------------------------------------
//5, calculate output data keys' buf size
// and values' buf size
//----------------------------------------------
DoLog( "** ReduceCount");
int* d_outputKeysSizePerTask = NULL;
hipMalloc((void**)&d_outputKeysSizePerTask, sizeof(int)*h_actualNumThreads);
hipMemset(d_outputKeysSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_outputValsSizePerTask = NULL;
hipMalloc((void**)&d_outputValsSizePerTask, sizeof(int)*h_actualNumThreads);
hipMemset(d_outputValsSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_outputCountPerTask = NULL;
hipMalloc((void**)&d_outputCountPerTask, sizeof(int)*h_actualNumThreads);
hipMemset(d_outputCountPerTask, 0, sizeof(int)*h_actualNumThreads);
hipLaunchKernelGGL(( ReducerCount), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_interKeys,
d_interVals,
d_interOffsetSizes,
d_interKeyListRange,
d_outputKeysSizePerTask,
d_outputValsSizePerTask,
d_outputCountPerTask,
h_interDiffKeyCount,
h_recordsPerTask,
h_actualNumThreads);
hipDeviceSynchronize();
//-----------------------------------------------
//6, do prefix sum on--
// i) d_outputKeysSizePerTask
// ii) d_outputValsSizePerTask
// iii) d_outputCountPerTask
//-----------------------------------------------
DoLog( "** Do prefix sum on output data's size");
int *d_psKeySizes = NULL;
hipMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads);
hipMemset(d_psKeySizes, 0, sizeof(int)*h_actualNumThreads);
int h_allKeySize = prefexSum((int*)d_outputKeysSizePerTask, (int*)d_psKeySizes, h_actualNumThreads);
int *d_psValSizes = NULL;
hipMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads);
hipMemset(d_psValSizes, 0, sizeof(int)*h_actualNumThreads);
int h_allValSize = prefexSum((int*)d_outputValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads);
int *d_psCounts = NULL;
hipMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads);
hipMemset(d_psCounts, 0, sizeof(int)*h_actualNumThreads);
int h_allCounts = prefexSum((int*)d_outputCountPerTask, (int*)d_psCounts, h_actualNumThreads);
DoLog("** Reduce Output: key buf size %d bytes, val buf size %d bytes, index buf size %d bytes, %d records",
h_allKeySize, h_allValSize, h_allCounts*sizeof(int4),h_allCounts);
//-----------------------------------------------
//7, allocate output memory on device memory
//-----------------------------------------------
DoLog( "** Allocate intermediate memory on device memory");
char* d_outputKeys = NULL;
hipMalloc((void**)&d_outputKeys, h_allKeySize);
char* d_outputVals = NULL;
hipMalloc((void**)&d_outputVals, h_allValSize);
int4* d_outputOffsetSizes = NULL;
hipMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*h_allCounts);
//--------------------------------------------------
//8, start reduce
//--------------------------------------------------
DoLog( "** Reduce");
int2* d_keyValOffsets = NULL;
hipMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads);
hipMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads);
int* d_curIndex = NULL;
hipMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads);
hipMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads);
int sizeSmem = h_dimBlock.x * sizeof(int) * 5;
hipLaunchKernelGGL(( Reducer), dim3(h_dimGrid), dim3(h_dimBlock), sizeSmem, 0, d_interKeys,
d_interVals,
d_interOffsetSizes,
d_interKeyListRange,
d_psKeySizes,
d_psValSizes,
d_psCounts,
d_outputKeys,
d_outputVals,
d_outputOffsetSizes,
d_keyValOffsets,
d_curIndex,
h_interDiffKeyCount,
h_recordsPerTask,
h_actualNumThreads);
hipDeviceSynchronize();
//-------------------------------------------------------
//9, copy output data to Spec_t structure
//-------------------------------------------------------
g_spec->outputKeys = d_outputKeys;
g_spec->outputVals = d_outputVals;
g_spec->outputOffsetSizes = d_outputOffsetSizes;
g_spec->outputRecordCount = h_allCounts;
g_spec->outputAllKeySize = h_allKeySize;
g_spec->outputAllValSize = h_allValSize;
//----------------------------------------------
//10, free allocated memory
//----------------------------------------------
hipFree(d_interKeys);
hipFree(d_interVals);
hipFree(d_interOffsetSizes);
hipFree(d_outputKeysSizePerTask);
hipFree(d_outputValsSizePerTask);
hipFree(d_outputCountPerTask);
hipFree(d_psKeySizes);
hipFree(d_psValSizes);
hipFree(d_psCounts);
hipFree(d_keyValOffsets);
hipFree(d_curIndex);
}
//----------------------------------------------
//start main map reduce procedure
//1, init device
//2, start map
//3, start reduce
//
//param : spec
//----------------------------------------------
void MapReduce(Spec_t *spec)
{
assert(NULL != spec);
Spec_t* g_spec = spec;
DoLog( "=====start map/reduce=====");
//-------------------------------------------
//1, init device
//-------------------------------------------
//CUT_DEVICE_INIT();
DoLog( "** init GPU");
InitMapReduce(spec);
//-------------------------------------------
//2, start map
//-------------------------------------------
DoLog( "----------start map-----------");
if (startMap(spec))
{
printf("** No output.");
return;
}
if (g_spec->workflow == MAP_ONLY)
{
g_spec->outputKeys = g_spec->interKeys;
g_spec->outputVals = g_spec->interVals;
g_spec->outputOffsetSizes = g_spec->interOffsetSizes;
g_spec->outputRecordCount = g_spec->interRecordCount;
g_spec->outputAllKeySize = g_spec->interAllKeySize;
g_spec->outputAllValSize = g_spec->interAllValSize;
goto EXIT_MR;
}
//-------------------------------------------
//3, start group
//-------------------------------------------
DoLog( "----------start group-----------");
TimeVal_t groupTimer;
startTimer(&groupTimer);
startGroup(spec);
endTimer("Group", &groupTimer);
if (g_spec->workflow == MAP_GROUP)
{
g_spec->outputKeys = g_spec->interKeys;
g_spec->outputVals = g_spec->interVals;
g_spec->outputOffsetSizes = g_spec->interOffsetSizes;
g_spec->outputRecordCount = g_spec->interRecordCount;
g_spec->outputAllKeySize = g_spec->interAllKeySize;
g_spec->outputAllValSize = g_spec->interAllValSize;
g_spec->outputDiffKeyCount = g_spec->interDiffKeyCount;
if (g_spec->outputToHost == 1)
{
g_spec->outputKeyListRange = (int2*)malloc(sizeof(int2)*g_spec->outputDiffKeyCount);
CUDA_SAFE_CALL(hipMemcpy(g_spec->outputKeyListRange, g_spec->interKeyListRange, sizeof(int2)*g_spec->outputDiffKeyCount, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(g_spec->interKeyListRange));
}
goto EXIT_MR;
}
//-------------------------------------------
//4, start reduce
//-------------------------------------------
DoLog( "----------start reduce--------");
TimeVal_t reduceTimer;
startTimer(&reduceTimer);
startReduce(spec);
endTimer("Reduce", &reduceTimer);
EXIT_MR:
if (g_spec->outputToHost == 1)
{
int indexSize = g_spec->outputRecordCount * sizeof(int4);
char* h_outputKeys = (char*)malloc(g_spec->outputAllKeySize);
if (h_outputKeys == NULL) exit(0);
char* h_outputVals = (char*)malloc(g_spec->outputAllValSize);
if (h_outputVals == NULL) exit(0);
int4* h_outputOffsetSizes = (int4*)malloc(indexSize);
if (h_outputOffsetSizes == NULL) exit(0);
CUDA_SAFE_CALL(hipMemcpy(h_outputKeys, g_spec->outputKeys, g_spec->outputAllKeySize, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(h_outputVals, g_spec->outputVals, g_spec->outputAllValSize, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(h_outputOffsetSizes, g_spec->outputOffsetSizes, indexSize, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(g_spec->outputKeys));
CUDA_SAFE_CALL(hipFree(g_spec->outputVals));
CUDA_SAFE_CALL(hipFree(g_spec->outputOffsetSizes));
g_spec->outputKeys = h_outputKeys;
g_spec->outputVals = h_outputVals;
g_spec->outputOffsetSizes = h_outputOffsetSizes;
}
}
//------------------------------------------
//the last step
//
//1, free global variables' memory
//2, close log file's file pointer
//------------------------------------------
void FinishMapReduce(Spec_t* spec)
{
Spec_t* g_spec = spec;
//-------------------------------------------
//1, free global variables' memory
//-------------------------------------------
free(g_spec->inputKeys);
free(g_spec->inputVals);
free(g_spec->inputOffsetSizes);
if (g_spec->outputToHost == 1)
{
free(g_spec->outputKeys);
free(g_spec->outputVals);
free(g_spec->outputOffsetSizes);
if (g_spec->workflow == MAP_GROUP)
free(g_spec->outputKeyListRange);
}
else
{
hipFree(g_spec->outputKeys);
hipFree(g_spec->outputVals);
hipFree(g_spec->outputOffsetSizes);
if (g_spec->workflow == MAP_GROUP)
hipFree(g_spec->outputKeyListRange);
}
free(g_spec);
DoLog( "=====finish map/reduce=====");
}
#endif //__MRLIB_CU__
| 3fc2a643b6fbec8a1ae2f32c2318cc7cddd79bab.cu | /*$Id: MarsLib.cu 755 2009-11-18 13:22:54Z wenbinor $*/
/**
*This is the source code for Mars, a MapReduce framework on graphics
*processors.
*Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia)
*Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com).
*If you have any question on the code, please contact us at
* [email protected] or [email protected]
*
*The license is a free non-exclusive, non-transferable license to reproduce,
*use, modify and display the source code version of the Software, with or
*without modifications solely for non-commercial research, educational or
*evaluation purposes. The license does not entitle Licensee to technical support,
*telephone assistance, enhancements or updates to the Software. All rights, title
*to and ownership interest in Mars, including all intellectual property rights
*therein shall remain in HKUST.
*/
#ifndef __MRLIB_CU__
#define __MRLIB_CU__
#include "MarsInc.h"
#include "map.cu"
#include "reduce.cu"
//----------------------------------------------
//Get default runtime configuration
//
//return: default spec
//----------------------------------------------
Spec_t *GetDefaultSpec()
{
Spec_t *spec = (Spec_t*)malloc(sizeof(Spec_t));
if (NULL == spec) exit(-1);
memset(spec, 0, sizeof(Spec_t));
return spec;
}
//--------------------------------------------------------
//Initiate map reduce spec
//--------------------------------------------------------
void InitMapReduce(Spec_t* spec)
{
Spec_t* g_spec = spec;
if (g_spec->dimBlockMap <= 0)
g_spec->dimBlockMap = DEFAULT_DIMBLOCK;
if (g_spec->dimBlockReduce <= 0)
g_spec->dimBlockReduce = DEFAULT_DIMBLOCK;
if (g_spec->numRecTaskReduce <= 0)
g_spec->numRecTaskReduce = DEFAULT_NUMTASK;
if (g_spec->numRecTaskMap <= 0)
g_spec->numRecTaskMap = DEFAULT_NUMTASK;
if (g_spec->workflow <= 0)
g_spec->workflow = MAP_ONLY;
}
//--------------------------------------------------
//Add a map input record
//
//param : spec
//param : key -- a pointer to a buffer
//param : val -- a pointer to a buffer
//param : keySize
//param : valSize
//--------------------------------------------------
void AddMapInputRecord(Spec_t* spec,
void* key,
void* val,
int keySize,
int valSize)
{
assert(NULL != spec);
static int2 curOffset;
static int3 curChunkNum;
int index = spec->inputRecordCount;
const int dataChunkSize = 1024*1024*256;
if (spec->inputRecordCount > 0)
{
if (dataChunkSize*curChunkNum.x < (curOffset.x + keySize))
spec->inputKeys = (char*)realloc(spec->inputKeys, (++curChunkNum.x)*dataChunkSize);
memcpy(spec->inputKeys+curOffset.x, key, keySize);
if (dataChunkSize*curChunkNum.y < (curOffset.y + valSize))
spec->inputVals = (char*)realloc(spec->inputVals, (++curChunkNum.y)*dataChunkSize);
memcpy(spec->inputVals+curOffset.y, val, valSize);
if (dataChunkSize*curChunkNum.z < (spec->inputRecordCount+1)*sizeof(int4))
spec->inputOffsetSizes = (int4*)realloc(spec->inputOffsetSizes,
(++curChunkNum.z)*dataChunkSize);
}
else
{
spec->inputKeys = (char*)malloc(dataChunkSize);
if (NULL == spec->inputKeys) exit(-1);
memcpy(spec->inputKeys, key, keySize);
spec->inputVals = (char*)malloc(dataChunkSize);
if (NULL == spec->inputVals) exit(-1);
memcpy(spec->inputVals, val, valSize);
spec->inputOffsetSizes = (int4*)malloc(dataChunkSize);
curChunkNum.x++;
curChunkNum.y++;
curChunkNum.z++;
}
spec->inputOffsetSizes[index].x = curOffset.x;
spec->inputOffsetSizes[index].y = keySize;
spec->inputOffsetSizes[index].z = curOffset.y;
spec->inputOffsetSizes[index].w = valSize;
curOffset.x += keySize;
curOffset.y += valSize;
spec->inputRecordCount++;
}
//-------------------------------------------------
//Called by user defined map_count function
//
//param : keySize
//param : valSize
//param : interKeysSizePerTask
//param : interValsSizePerTask
//param : interCountPerTask
//-------------------------------------------------
__device__ void EmitInterCount(int keySize,
int valSize,
int* interKeysSizePerTask,
int* interValsSizePerTask,
int* interCountPerTask)
{
int index = TID;
interKeysSizePerTask[index] += keySize;
interValsSizePerTask[index] += valSize;
interCountPerTask[index]++;
}
//-------------------------------------------------
//called by user defined map function
//
//-------------------------------------------------
__device__ void EmitIntermediate(void* key,
void* val,
int keySize,
int valSize,
int* psKeySizes,
int* psValSizes,
int* psCounts,
int2* keyValOffsets,
char* interKeys,
char* interVals,
int4* interOffsetSizes,
int* curIndex)
{
#ifndef __DEVICE_EMULATION__
__syncthreads();
#endif
int index = TID;
int2 l_keyValOffsets = keyValOffsets[index];
char *pKeySet = (char*)(interKeys + psKeySizes[index] + l_keyValOffsets.x);
char *pValSet = (char*)(interVals + psValSizes[index] + l_keyValOffsets.y);
char* sKey = (char*)key;
char* sVal = (char*)val;
for (int i = 0; i < keySize; ++i)
pKeySet[i] = sKey[i];
for (int i = 0; i < valSize; ++i)
pValSet[i] = sVal[i];
l_keyValOffsets.x += keySize;
l_keyValOffsets.y += valSize;
keyValOffsets[index] = l_keyValOffsets;
int l_curIndex = curIndex[index];
int l_psCounts = psCounts[index];
int l_curPs = l_curIndex + l_psCounts;
int4 l_interOffsetSizes1 = interOffsetSizes[l_curPs];
int4 l_interOffsetSizes2 = interOffsetSizes[l_curPs-1];
if (l_curIndex != 0)
{
l_interOffsetSizes1.x = (l_interOffsetSizes2.x + l_interOffsetSizes2.y);
l_interOffsetSizes1.z = (l_interOffsetSizes2.z + l_interOffsetSizes2.w);
}
l_interOffsetSizes1.y = keySize;
l_interOffsetSizes1.w = valSize;
interOffsetSizes[l_curPs] = l_interOffsetSizes1;
++l_curIndex;
curIndex[index] = l_curIndex;
}
//-------------------------------------------------
//Calculate intermediate data's size
//
//param : inputKeys
//param : inputVals
//param : inputOffsetSizes
//param : interKeysSizePerTask
//param : interValsSizePerTask
//param : interCountPerTask
//param : recordNum -- total number of records
//param : recordsPerTask
//-------------------------------------------------
__global__ void MapperCount(char* inputKeys,
char* inputVals,
int4* inputOffsetSizes,
int* interKeysSizePerTask,
int* interValsSizePerTask,
int* interCountPerTask,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int4 offsetSize = inputOffsetSizes[cindex];
char *key = inputKeys + offsetSize.x;
char *val = inputVals + offsetSize.z;
map_count(key,
val,
offsetSize.y,
offsetSize.w,
interKeysSizePerTask,
interValsSizePerTask,
interCountPerTask);
}
}
//--------------------------------------------------
//mapper
//--------------------------------------------------
__global__ void Mapper(char* inputKeys,
char* inputVals,
int4* inputOffsetSizes,
int* psKeySizes,
int* psValSizes,
int* psCounts,
int2* keyValOffsets,
char* interKeys,
char* interVals,
int4* interOffsetSizes,
int* curIndex,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
int l_psCounts = psCounts[index];
int4 l_interOffsetSizes = interOffsetSizes[l_psCounts];
l_interOffsetSizes.x = psKeySizes[index];
l_interOffsetSizes.z = psValSizes[index];
interOffsetSizes[l_psCounts] = l_interOffsetSizes;
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int4 offsetSize = inputOffsetSizes[cindex];
char *key = inputKeys + offsetSize.x;
char *val = inputVals + offsetSize.z;
map(key,
val,
offsetSize.y,
offsetSize.w,
psKeySizes,
psValSizes,
psCounts,
keyValOffsets,
interKeys,
interVals,
interOffsetSizes,
curIndex);
}
}
//--------------------------------------------------
//start map
//
//1, get map input data on host
//2, upload map input data to device memory
// (keys, vals, keyOffsets, valOffsets, keySizes, valSizes)
//3, determine the number of threads to run
//4, calculate intermediate data keys' buf size
// and values' buf size
//5, do prefix sum on--
// i) d_interKeysSizePerTask
// ii) d_interValsSizePerTask
// iii) d_interCountPerTask
//6, allocate intermediate memory on device memory
//7, start map
//8, free allocated memory
//--------------------------------------------------
int startMap(Spec_t* spec)
{
Spec_t* g_spec = spec;
if (g_spec->inputKeys == NULL) { DoLog("Error: no any input keys"); exit(0);}
if (g_spec->inputVals == NULL) { DoLog("Error: no any input values"); exit(0); }
if (g_spec->inputOffsetSizes == NULL) { DoLog( "Error: no any input pointer info"); exit(0); }
if (g_spec->inputRecordCount == 0) {DoLog( "Error: invalid input record count"); exit(0);}
//-------------------------------------------------------
//1, get map input data on host
//-------------------------------------------------------
int h_inputRecordCount = g_spec->inputRecordCount;
int h_inputKeysBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].x +
g_spec->inputOffsetSizes[h_inputRecordCount-1].y;
int h_inputValsBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].z +
g_spec->inputOffsetSizes[h_inputRecordCount-1].w;
char* h_inputKeys = g_spec->inputKeys;
char* h_inputVals = g_spec->inputVals;
int4* h_inputOffsetSizes = g_spec->inputOffsetSizes;
DoLog( "** Map Input: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records",
h_inputKeysBufSize, h_inputValsBufSize, sizeof(int4)*h_inputRecordCount, h_inputRecordCount);
//-------------------------------------------------------
//2, upload map input data onto device memory
//-------------------------------------------------------
DoLog( "** Upload map input data onto device memory");
TimeVal_t uploadTv;
startTimer(&uploadTv);
char* d_inputKeys = NULL;
char* d_inputVals = NULL;
int4* d_inputOffsetSizes = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_inputKeys, h_inputKeysBufSize));
CUDA_SAFE_CALL(cudaMemcpy(d_inputKeys, h_inputKeys, h_inputKeysBufSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_inputVals, h_inputValsBufSize));
CUDA_SAFE_CALL(cudaMemcpy(d_inputVals, h_inputVals, h_inputValsBufSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_inputOffsetSizes, sizeof(int4)*h_inputRecordCount));
cudaMemcpy(d_inputOffsetSizes, h_inputOffsetSizes, sizeof(int4)*h_inputRecordCount, cudaMemcpyHostToDevice);
endTimer("PCI-E I/O", &uploadTv);
//----------------------------------------------
//3, determine the number of threads to run
//----------------------------------------------
dim3 h_dimBlock(g_spec->dimBlockMap,1,1);
dim3 h_dimGrid(1,1,1);
int h_recordsPerTask = g_spec->numRecTaskMap;
int numBlocks = CEIL(CEIL(h_inputRecordCount, h_recordsPerTask), h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y;
TimeVal_t mapTimer;
startTimer(&mapTimer);
//----------------------------------------------
//4, calculate intermediate data keys' buf size
// and values' buf size
//----------------------------------------------
DoLog( "** MapCount");
int* d_interKeysSizePerTask = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_interKeysSizePerTask, sizeof(int)*h_actualNumThreads));
cudaMemset(d_interKeysSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_interValsSizePerTask = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_interValsSizePerTask, sizeof(int)*h_actualNumThreads));
cudaMemset(d_interValsSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_interCountPerTask = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_interCountPerTask, sizeof(int)*h_actualNumThreads));
cudaMemset(d_interCountPerTask, 0, sizeof(int)*h_actualNumThreads);
MapperCount<<<h_dimGrid, h_dimBlock>>>(d_inputKeys,
d_inputVals,
d_inputOffsetSizes,
d_interKeysSizePerTask,
d_interValsSizePerTask,
d_interCountPerTask,
h_inputRecordCount,
h_recordsPerTask,
h_actualNumThreads);
cudaThreadSynchronize();
//-----------------------------------------------
//5, do prefix sum on--
// i) d_interKeysSizePerTask
// ii) d_interValsSizePerTask
// iii) d_interCountPerTask
//-----------------------------------------------
DoLog( "** Do prefix sum on intermediate data's size\n");
int *d_psKeySizes = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads));
int h_allKeySize = prefexSum((int*)d_interKeysSizePerTask, (int*)d_psKeySizes, h_actualNumThreads);
int *d_psValSizes = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads));
int h_allValSize = prefexSum((int*)d_interValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads);
int *d_psCounts = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads));
int h_allCounts = prefexSum((int*)d_interCountPerTask, (int*)d_psCounts, h_actualNumThreads);
//fprintf(stderr,"all count = %d",h_allCounts);
DoLog( "** Map Output: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records",
h_allKeySize, h_allValSize, h_allCounts * sizeof(int4), h_allCounts);
if (h_allCounts == 0)
{
DoLog( "** No output.");
cudaFree(d_inputKeys);
cudaFree(d_inputVals);
cudaFree(d_inputOffsetSizes);
cudaFree(d_psKeySizes);
cudaFree(d_psValSizes);
cudaFree(d_psCounts);
endTimer("Map", &mapTimer);
return 1;
}
//-----------------------------------------------
//6, allocate intermediate memory on device memory
//-----------------------------------------------
DoLog( "** Allocate intermediate memory on device memory");
char* d_interKeys = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_interKeys, h_allKeySize));
cudaMemset(d_interKeys, 0, h_allKeySize);
char* d_interVals = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_interVals, h_allValSize));
cudaMemset(d_interVals, 0, h_allValSize);
int4* d_interOffsetSizes = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_interOffsetSizes, sizeof(int4)*h_allCounts));
cudaMemset(d_interOffsetSizes, 0, sizeof(int4)*h_allCounts);
//--------------------------------------------------
//7, start map
//--------------------------------------------------
DoLog( "** Map");
int2* d_keyValOffsets = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads));
cudaMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads);
int* d_curIndex = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads));
cudaMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads);
//h_dimBlock.x =3;
int sizeSmem = h_dimBlock.x * sizeof(int) * 5;
Mapper<<<h_dimGrid, h_dimBlock, sizeSmem>>>(d_inputKeys,
d_inputVals,
d_inputOffsetSizes,
d_psKeySizes,
d_psValSizes,
d_psCounts,
d_keyValOffsets,
d_interKeys,
d_interVals,
d_interOffsetSizes,
d_curIndex,
h_inputRecordCount,
h_recordsPerTask,
h_actualNumThreads);
cudaThreadSynchronize();
g_spec->interKeys = d_interKeys;
g_spec->interVals = d_interVals;
g_spec->interOffsetSizes = d_interOffsetSizes;
//fprintf(stderr,"record count = %d",h_allCounts);
g_spec->interRecordCount = h_allCounts;
g_spec->interDiffKeyCount = h_allCounts;
g_spec->interAllKeySize = h_allKeySize;
g_spec->interAllValSize = h_allValSize;
//----------------------------------------------
//8, free
//----------------------------------------------
cudaFree(d_interKeysSizePerTask);
cudaFree(d_interValsSizePerTask);
cudaFree(d_interCountPerTask);
cudaFree(d_keyValOffsets);
cudaFree(d_curIndex);
cudaFree(d_inputKeys);
cudaFree(d_inputVals);
cudaFree(d_inputOffsetSizes);
cudaFree(d_psKeySizes);
cudaFree(d_psValSizes);
cudaFree(d_psCounts);
endTimer("Map", &mapTimer);
return 0;
}
void startGroup(Spec_t* spec)
{
Spec_t* g_spec = spec;
int interDiffKeyCount = 0;
char* d_outputKeys = NULL;
char* d_outputVals = NULL;
int4* d_outputOffsetSizes = NULL;
int2** h_outputKeyListRange = NULL;
DoLog( "** Sort for group");
CUDA_SAFE_CALL(cudaMalloc((void**)&d_outputKeys, g_spec->interAllKeySize));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_outputVals, g_spec->interAllValSize));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*g_spec->interRecordCount));
h_outputKeyListRange = (int2**)malloc(sizeof(int2*));
saven_initialPrefixSum(g_spec->interRecordCount);
interDiffKeyCount =
sort_GPU (g_spec->interKeys,
g_spec->interAllKeySize,
g_spec->interVals,
g_spec->interAllValSize,
g_spec->interOffsetSizes,
g_spec->interRecordCount,
d_outputKeys,
d_outputVals,
d_outputOffsetSizes,
h_outputKeyListRange);
DoLog( "** InterRecordCount:%d, number of groups: %d", g_spec->interRecordCount, interDiffKeyCount);
//fprintf(stderr,"** InterRecordCount:%d, number of groups: %d", g_spec->interRecordCount, interDiffKeyCount);
g_spec->interKeys = d_outputKeys;
g_spec->interVals = d_outputVals;
g_spec->interOffsetSizes = d_outputOffsetSizes;
g_spec->interDiffKeyCount = interDiffKeyCount;
int keyListRangeSize = g_spec->interDiffKeyCount * sizeof(int2);
CUDA_SAFE_CALL(cudaMalloc((void**)&g_spec->interKeyListRange, keyListRangeSize));
CUDA_SAFE_CALL(cudaMemcpy(g_spec->interKeyListRange, *h_outputKeyListRange, keyListRangeSize, cudaMemcpyHostToDevice));
free(*h_outputKeyListRange);
free(h_outputKeyListRange);
}
//--------------------------------------------------------
//get a value from the value list of the same key
//
//param : vals
//param : interOffsetSizes
//param : keyIndex
//param : valStartIndex
//return: the wanted value
//--------------------------------------------------------
__device__ void *GetVal(void *vals, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
int4 offset = interOffsetSizes[valStartIndex];
return (void*)((char*)vals + keyIndex * offset.w);
}
__device__ void *GetKey(void *key, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
int4 offset = interOffsetSizes[valStartIndex];
return (void*)((char*)key + keyIndex * offset.y);
}
//---------------------------------------------------------
//called by user defined reduce_count function
//---------------------------------------------------------
__device__ void EmitCount(int keySize,
int valSize,
int* outputKeysSizePerTask,
int* outputValsSizePerTask,
int* outputCountPerTask)
{
int index = TID;
outputKeysSizePerTask[index] += keySize;
outputValsSizePerTask[index] += valSize;
outputCountPerTask[index]++;
}
//---------------------------------------------------------
//called by user defined reduce function
//---------------------------------------------------------
__device__ void Emit (char* key,
char* val,
int keySize,
int valSize,
int* psKeySizes,
int* psValSizes,
int* psCounts,
int2* keyValOffsets,
char* outputKeys,
char* outputVals,
int4* outputOffsetSizes,
int* curIndex)
{
#ifndef __DEVICE_EMULATION__
__syncthreads();
#endif
int index = TID;
char *pKeySet = (char*)(outputKeys + psKeySizes[index] + keyValOffsets[index].x);
char *pValSet = (char*)(outputVals + psValSizes[index] + keyValOffsets[index].y);
for (int i = 0; i < keySize; i++)
pKeySet[i] = key[i];
for (int i = 0; i < valSize; i++)
pValSet[i] = val[i];
keyValOffsets[index].x += keySize;
keyValOffsets[index].y += valSize;
if (curIndex[index] != 0)
{
outputOffsetSizes[psCounts[index] + curIndex[index]].x =
(outputOffsetSizes[psCounts[index] + curIndex[index] - 1].x +
outputOffsetSizes[psCounts[index] + curIndex[index] - 1].y);
outputOffsetSizes[psCounts[index] + curIndex[index]].z =
(outputOffsetSizes[psCounts[index] + curIndex[index] - 1].z +
outputOffsetSizes[psCounts[index] + curIndex[index] - 1].w);
}
outputOffsetSizes[psCounts[index] + curIndex[index]].y = keySize;
outputOffsetSizes[psCounts[index] + curIndex[index]].w = valSize;
curIndex[index]++;
}
//-------------------------------------------------------
//calculate output data's size
//-------------------------------------------------------
__global__ void ReducerCount(char* interKeys,
char* interVals,
int4* interOffsetSizes,
int2* interKeyListRange,
int* outputKeysSizePerTask,
int* outputValsSizePerTask,
int* outputCountPerTask,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
//for (int i = 0; i <= recordsPerTask; i++)
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int valStartIndex = interKeyListRange[cindex].x;
int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x;
int keySize = interOffsetSizes[interKeyListRange[cindex].x].y;
char *key = interKeys + interOffsetSizes[valStartIndex].x;
char *vals = interVals + interOffsetSizes[valStartIndex].z;
reduce_count(key,
vals,
keySize,
valCount,
interOffsetSizes,
outputKeysSizePerTask,
outputValsSizePerTask,
outputCountPerTask);
}
}
//-------------------------------------------------------
//Reducer
//
//-------------------------------------------------------
__global__ void Reducer(char* interKeys,
char* interVals,
int4* interOffsetSizes,
int2* interKeyListRange,
int* psKeySizes,
int* psValSizes,
int* psCounts,
char* outputKeys,
char* outputVals,
int4* outputOffsetSizes,
int2* keyValOffsets,
int* curIndex,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
outputOffsetSizes[psCounts[index]].x = psKeySizes[index];
outputOffsetSizes[psCounts[index]].z = psValSizes[index];
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int valStartIndex = interKeyListRange[cindex].x;
int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x;
int keySize = interOffsetSizes[interKeyListRange[cindex].x].y;
char *key = interKeys + interOffsetSizes[valStartIndex].x;
char *vals = interVals + interOffsetSizes[valStartIndex].z;
reduce(key,
vals,
keySize,
valCount,
psKeySizes,
psValSizes,
psCounts,
keyValOffsets,
interOffsetSizes,
outputKeys,
outputVals,
outputOffsetSizes,
curIndex,
valStartIndex);
}
}
//----------------------------------------------
//start reduce
//
//1, if there is no reduce phase, just return
// the user then uses spec->interKeys/spec->interVals
// for further processing
//2, get reduce input data on host
//3, upload reduce input data onto device memory
//4, determine the number of threads to run
//5, calculate output data keys' buf size
// and values' buf size
//6, do prefix sum on--
// i) d_outputKeysSizePerTask
// ii) d_outputValsSizePerTask
// iii) d_outputCountPerTask
//7, allocate output memory on device memory
//8, start reduce
//9, copy output data to Spec_t structure
//10,free allocated memory
//----------------------------------------------
void startReduce(Spec_t* spec)
{
Spec_t* g_spec = spec;
if (g_spec->interKeys == NULL) {DoLog( "Error: no any intermediate keys"); exit(0);}
if (g_spec->interVals == NULL) {DoLog( "Error: no any intermediate values"); exit(0);}
if (g_spec->interOffsetSizes == NULL) {DoLog( "Error: no any intermediate pointer info");exit(0);}
if (g_spec->interRecordCount == 0) {DoLog( "Error: invalid intermediate record count");exit(0);}
if (g_spec->interKeyListRange == NULL) { DoLog( "Error: no any key list range");exit(0);}
if (g_spec->interDiffKeyCount == 0) { DoLog( "Error: invalid intermediate diff key count");exit(0);}
//-------------------------------------------------------
//2, get reduce input data on host
//-------------------------------------------------------
int h_interDiffKeyCount = g_spec->interDiffKeyCount;
char* d_interKeys = g_spec->interKeys;
char* d_interVals = g_spec->interVals;
int4* d_interOffsetSizes = g_spec->interOffsetSizes;
int2* d_interKeyListRange = g_spec->interKeyListRange;
//----------------------------------------------
//4, determine the number of threads to run
//----------------------------------------------
dim3 h_dimBlock(g_spec->dimBlockReduce,1,1);
dim3 h_dimGrid(1,1,1);
int h_recordsPerTask = g_spec->numRecTaskReduce;
int numBlocks = CEIL(CEIL(h_interDiffKeyCount, h_recordsPerTask), h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y;
//----------------------------------------------
//5, calculate output data keys' buf size
// and values' buf size
//----------------------------------------------
DoLog( "** ReduceCount");
int* d_outputKeysSizePerTask = NULL;
cudaMalloc((void**)&d_outputKeysSizePerTask, sizeof(int)*h_actualNumThreads);
cudaMemset(d_outputKeysSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_outputValsSizePerTask = NULL;
cudaMalloc((void**)&d_outputValsSizePerTask, sizeof(int)*h_actualNumThreads);
cudaMemset(d_outputValsSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_outputCountPerTask = NULL;
cudaMalloc((void**)&d_outputCountPerTask, sizeof(int)*h_actualNumThreads);
cudaMemset(d_outputCountPerTask, 0, sizeof(int)*h_actualNumThreads);
ReducerCount<<<h_dimGrid, h_dimBlock>>>(d_interKeys,
d_interVals,
d_interOffsetSizes,
d_interKeyListRange,
d_outputKeysSizePerTask,
d_outputValsSizePerTask,
d_outputCountPerTask,
h_interDiffKeyCount,
h_recordsPerTask,
h_actualNumThreads);
cudaThreadSynchronize();
//-----------------------------------------------
//6, do prefix sum on--
// i) d_outputKeysSizePerTask
// ii) d_outputValsSizePerTask
// iii) d_outputCountPerTask
//-----------------------------------------------
DoLog( "** Do prefix sum on output data's size");
int *d_psKeySizes = NULL;
cudaMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads);
cudaMemset(d_psKeySizes, 0, sizeof(int)*h_actualNumThreads);
int h_allKeySize = prefexSum((int*)d_outputKeysSizePerTask, (int*)d_psKeySizes, h_actualNumThreads);
int *d_psValSizes = NULL;
cudaMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads);
cudaMemset(d_psValSizes, 0, sizeof(int)*h_actualNumThreads);
int h_allValSize = prefexSum((int*)d_outputValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads);
int *d_psCounts = NULL;
cudaMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads);
cudaMemset(d_psCounts, 0, sizeof(int)*h_actualNumThreads);
int h_allCounts = prefexSum((int*)d_outputCountPerTask, (int*)d_psCounts, h_actualNumThreads);
DoLog("** Reduce Output: key buf size %d bytes, val buf size %d bytes, index buf size %d bytes, %d records",
h_allKeySize, h_allValSize, h_allCounts*sizeof(int4),h_allCounts);
//-----------------------------------------------
//7, allocate output memory on device memory
//-----------------------------------------------
DoLog( "** Allocate intermediate memory on device memory");
char* d_outputKeys = NULL;
cudaMalloc((void**)&d_outputKeys, h_allKeySize);
char* d_outputVals = NULL;
cudaMalloc((void**)&d_outputVals, h_allValSize);
int4* d_outputOffsetSizes = NULL;
cudaMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*h_allCounts);
//--------------------------------------------------
//8, start reduce
//--------------------------------------------------
DoLog( "** Reduce");
int2* d_keyValOffsets = NULL;
cudaMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads);
cudaMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads);
int* d_curIndex = NULL;
cudaMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads);
cudaMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads);
int sizeSmem = h_dimBlock.x * sizeof(int) * 5;
Reducer<<<h_dimGrid, h_dimBlock, sizeSmem>>>(d_interKeys,
d_interVals,
d_interOffsetSizes,
d_interKeyListRange,
d_psKeySizes,
d_psValSizes,
d_psCounts,
d_outputKeys,
d_outputVals,
d_outputOffsetSizes,
d_keyValOffsets,
d_curIndex,
h_interDiffKeyCount,
h_recordsPerTask,
h_actualNumThreads);
cudaThreadSynchronize();
//-------------------------------------------------------
//9, copy output data to Spec_t structure
//-------------------------------------------------------
g_spec->outputKeys = d_outputKeys;
g_spec->outputVals = d_outputVals;
g_spec->outputOffsetSizes = d_outputOffsetSizes;
g_spec->outputRecordCount = h_allCounts;
g_spec->outputAllKeySize = h_allKeySize;
g_spec->outputAllValSize = h_allValSize;
//----------------------------------------------
//10, free allocated memory
//----------------------------------------------
cudaFree(d_interKeys);
cudaFree(d_interVals);
cudaFree(d_interOffsetSizes);
cudaFree(d_outputKeysSizePerTask);
cudaFree(d_outputValsSizePerTask);
cudaFree(d_outputCountPerTask);
cudaFree(d_psKeySizes);
cudaFree(d_psValSizes);
cudaFree(d_psCounts);
cudaFree(d_keyValOffsets);
cudaFree(d_curIndex);
}
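//----------------------------------------------
//NOTE (editorial sketch, not part of the original code): the sizing
//logic above is the usual two-pass GPU emit scheme -- a "count" kernel
//records per-thread output sizes, an exclusive prefix sum turns those
//sizes into write offsets plus a grand total for allocation, and the
//real kernel then writes at its own offset without contention.
//A minimal host-side analogue of the same idea (hypothetical numbers):
//
//	int sizes[4] = {3, 0, 5, 2};   // bytes each task will emit
//	int offsets[4];
//	int total = 0;
//	for (int i = 0; i < 4; ++i) { offsets[i] = total; total += sizes[i]; }
//	// total == 10; task i writes its records at outputBuf + offsets[i]
//----------------------------------------------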
//----------------------------------------------
//start main map reduce procedure
//1, init device
//2, start map
//3, start reduce
//
//param : spec
//----------------------------------------------
void MapReduce(Spec_t *spec)
{
assert(NULL != spec);
Spec_t* g_spec = spec;
DoLog( "=====start map/reduce=====");
//-------------------------------------------
//1, init device
//-------------------------------------------
//CUT_DEVICE_INIT();
DoLog( "** init GPU");
InitMapReduce(spec);
//-------------------------------------------
//2, start map
//-------------------------------------------
DoLog( "----------start map-----------");
if (startMap(spec))
{
printf("** No output.");
return;
}
if (g_spec->workflow == MAP_ONLY)
{
g_spec->outputKeys = g_spec->interKeys;
g_spec->outputVals = g_spec->interVals;
g_spec->outputOffsetSizes = g_spec->interOffsetSizes;
g_spec->outputRecordCount = g_spec->interRecordCount;
g_spec->outputAllKeySize = g_spec->interAllKeySize;
g_spec->outputAllValSize = g_spec->interAllValSize;
goto EXIT_MR;
}
//-------------------------------------------
//3, start group
//-------------------------------------------
DoLog( "----------start group-----------");
TimeVal_t groupTimer;
startTimer(&groupTimer);
startGroup(spec);
endTimer("Group", &groupTimer);
if (g_spec->workflow == MAP_GROUP)
{
g_spec->outputKeys = g_spec->interKeys;
g_spec->outputVals = g_spec->interVals;
g_spec->outputOffsetSizes = g_spec->interOffsetSizes;
g_spec->outputRecordCount = g_spec->interRecordCount;
g_spec->outputAllKeySize = g_spec->interAllKeySize;
g_spec->outputAllValSize = g_spec->interAllValSize;
g_spec->outputDiffKeyCount = g_spec->interDiffKeyCount;
if (g_spec->outputToHost == 1)
{
g_spec->outputKeyListRange = (int2*)malloc(sizeof(int2)*g_spec->outputDiffKeyCount);
CUDA_SAFE_CALL(cudaMemcpy(g_spec->outputKeyListRange, g_spec->interKeyListRange, sizeof(int2)*g_spec->outputDiffKeyCount, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(g_spec->interKeyListRange));
}
goto EXIT_MR;
}
//-------------------------------------------
//4, start reduce
//-------------------------------------------
DoLog( "----------start reduce--------");
TimeVal_t reduceTimer;
startTimer(&reduceTimer);
startReduce(spec);
endTimer("Reduce", &reduceTimer);
EXIT_MR:
if (g_spec->outputToHost == 1)
{
int indexSize = g_spec->outputRecordCount * sizeof(int4);
char* h_outputKeys = (char*)malloc(g_spec->outputAllKeySize);
if (h_outputKeys == NULL) exit(0);
char* h_outputVals = (char*)malloc(g_spec->outputAllValSize);
if (h_outputVals == NULL) exit(0);
int4* h_outputOffsetSizes = (int4*)malloc(indexSize);
if (h_outputOffsetSizes == NULL) exit(0);
CUDA_SAFE_CALL(cudaMemcpy(h_outputKeys, g_spec->outputKeys, g_spec->outputAllKeySize, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(h_outputVals, g_spec->outputVals, g_spec->outputAllValSize, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(h_outputOffsetSizes, g_spec->outputOffsetSizes, indexSize, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(g_spec->outputKeys));
CUDA_SAFE_CALL(cudaFree(g_spec->outputVals));
CUDA_SAFE_CALL(cudaFree(g_spec->outputOffsetSizes));
g_spec->outputKeys = h_outputKeys;
g_spec->outputVals = h_outputVals;
g_spec->outputOffsetSizes = h_outputOffsetSizes;
}
}
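//----------------------------------------------
//NOTE (editorial sketch, not part of the original code): a typical
//call sequence for this entry point. The setup helpers named below
//(GetDefaultConfiguration, AddMapInputRecord) are assumed to be
//provided elsewhere in the framework and their exact signatures may
//differ.
//
//	Spec_t* spec = GetDefaultConfiguration();
//	spec->workflow = MAP_REDUCE;          // or MAP_ONLY / MAP_GROUP
//	spec->outputToHost = 1;               // copy results back to host
//	AddMapInputRecord(spec, key, val, keySize, valSize);
//	MapReduce(spec);                      // map -> group -> reduce
//	// ...consume spec->outputKeys / outputVals / outputOffsetSizes...
//	FinishMapReduce(spec);
//----------------------------------------------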
//------------------------------------------
//the last step
//
//1, free global variables' memory
//2, close log file's file pointer
//------------------------------------------
void FinishMapReduce(Spec_t* spec)
{
Spec_t* g_spec = spec;
//-------------------------------------------
//1, free global variables' memory
//-------------------------------------------
free(g_spec->inputKeys);
free(g_spec->inputVals);
free(g_spec->inputOffsetSizes);
if (g_spec->outputToHost == 1)
{
free(g_spec->outputKeys);
free(g_spec->outputVals);
free(g_spec->outputOffsetSizes);
if (g_spec->workflow == MAP_GROUP)
free(g_spec->outputKeyListRange);
}
else
{
cudaFree(g_spec->outputKeys);
cudaFree(g_spec->outputVals);
cudaFree(g_spec->outputOffsetSizes);
if (g_spec->workflow == MAP_GROUP)
cudaFree(g_spec->outputKeyListRange);
}
free(g_spec);
DoLog( "=====finish map/reduce=====");
}
#endif //__MRLIB_CU__
|
e972f56b6dae9e346b429dd2c6c9379d9e1c6ad4.hip | // !!! This is a file automatically generated by hipify!!!
/* memManager_gpu.cu
Moises Hernandez-Fernandez - FMRIB Image Analysis Group
Copyright (C) 2015 University of Oxford */
/* Part of FSL - FMRIB's Software Library
http://www.fmrib.ox.ac.uk/fsl
[email protected]
Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance
Imaging of the Brain), Department of Clinical Neurology, Oxford
University, Oxford, UK
LICENCE
FMRIB Software Library, Release 6.0 (c) 2018, The University of
Oxford (the "Software")
The Software remains the property of the Oxford University Innovation
("the University").
The Software is distributed "AS IS" under this Licence solely for
non-commercial use in the hope that it will be useful, but in order
that the University as a charitable foundation protects its assets for
the benefit of its educational and research purposes, the University
makes clear that no condition is made or to be implied, nor is any
warranty given or to be implied, as to the accuracy of the Software,
or that it will be suitable for any particular purpose or for use
under any specific conditions. Furthermore, the University disclaims
all responsibility for the use which is made of the Software. It
further disclaims any liability for the outcomes arising from using
the Software.
The Licensee agrees to indemnify the University and hold the
University harmless from and against any and all claims, damages and
liabilities asserted by third parties (including claims for
negligence) which arise directly or indirectly from the use of the
Software or the sale of any products based on the Software.
No part of the Software may be reproduced, modified, transmitted or
transferred in any form or by any means, electronic or mechanical,
without the express permission of the University. The permission of
the University is not required if the said reproduction, modification,
transmission or transference is done without financial return, the
conditions of this Licence are imposed upon the receiver of the
product, and all original and amended source code is included in any
transmitted product. You may be held legally responsible for any
copyright infringement that is caused or encouraged by your failure to
abide by these terms and conditions.
You are not permitted under this Licence to use this Software
commercially. Use for which any financial return is received shall be
defined as commercial use, and includes (1) integration of all or part
of the source code or the Software into a product for sale or license
by or on behalf of Licensee to third parties or (2) use of the
Software or any derivative of it for research with the final aim of
developing software products for sale or license to a third party or
(3) use of the Software or any derivative of it for research with the
final aim of developing non-software products for sale or license to a
third party, or (4) use of the Software to provide any service to an
external organisation for which payment is received. If you are
interested in using the Software commercially, please contact Oxford
University Innovation ("OUI"), the technology transfer company of the
University, to negotiate a licence. Contact details are:
[email protected] quoting Reference Project 9564, FSL.*/
hipError_t checkCuda(hipError_t result){
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
hipGetErrorString(result));
exit(1);
}
return result;
}
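// Editorial sketch (not part of the original code): every runtime call
// below is wrapped in checkCuda() so a failed allocation or copy aborts
// immediately with the error string. Hypothetical example:
//
//	float* d_buf = NULL;
//	checkCuda(hipMalloc((void**)&d_buf, 1024*sizeof(float)));
//	checkCuda(hipMemset(d_buf, 0, 1024*sizeof(float)));
//	checkCuda(hipFree(d_buf));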
void init_gpu(){
int *q;
checkCuda(hipMalloc((void **)&q, sizeof(int)));
checkCuda(hipFree(q));
int device;
checkCuda(hipGetDevice(&device));
printf ("\n...................Allocated GPU %d...................\n", device);
checkCuda(hipDeviceSetCacheConfig(hipFuncCachePreferShared));
}
void allocate_host_mem(
// Input
tractographyData& data_host,
int& MAX_SLs, // MAX streamlines - calculated here
int& THREADS_STREAM, // calculated here
// Input - Output allocated mem
int** lengths_host,
float** paths_host,
float** mprob_host,
float** mprob2_host,
float** mlocaldir_host,
//float** targvalues_host,
//float** targvaluesB_host,
float3** mat_crossed_host,
int** mat_numcrossed_host,
long long& size_mat_cross,
int& max_per_jump_mat,
float3** lrmat_crossed_host,
int** lrmat_numcrossed_host,
long long& size_lrmat_cross,
int& max_per_jump_lrmat)
{
probtrackxOptions& opts=probtrackxOptions::getInstance();
// calculate the maximum number of streamlines that can be executed in parallel
size_t free,total;
hipMemGetInfo(&free,&total); // in bytes
int bytes_per_sl_STREAM=0; // needed for each STREAM (twice)
int bytes_per_sl_COMMON=0; // needed in common to all STREAMS
if(!opts.save_paths.value()){
// only for threads in a STREAM (can discard the coordinates of finished streamlines)
bytes_per_sl_COMMON+= data_host.nsteps*3*sizeof(float); // paths_gpu (3 floats per step - MAX Nsteps)
}else{
// for all the streamlines allocated
bytes_per_sl_STREAM+= data_host.nsteps*3*sizeof(float); // paths_gpu (3 floats per step - MAX Nsteps)
}
bytes_per_sl_STREAM+= 2*sizeof(int); // lengths_gpu (2 directions)
bytes_per_sl_STREAM+= sizeof(hiprandState_t); // random seed
if(opts.simpleout.value()){
free=free-data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float); // m_prob
bytes_per_sl_COMMON+= (data_host.nsteps)*sizeof(int); // beenhere
}
if(opts.omeanpathlength.value()&&opts.simpleout.value()){
free=free-data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float); // m_prob2
}
if(opts.opathdir.value()){
free=free-data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*6*sizeof(float); // m_localdir
}
if(opts.network.value()){
int nROIS=data_host.network.NVols+data_host.network.NSurfs;
free=free-(nROIS*nROIS*sizeof(float)); //ConNet
if(opts.omeanpathlength.value()){
free=free-(nROIS*nROIS*sizeof(float)); //ConNetb
}
if(nROIS>maxNetsInShared){
// Cannot use Shared Memory if too many ROIS, need Global memory for flags
bytes_per_sl_COMMON+=(2*nROIS)*sizeof(float);
}
}
if(opts.s2tout.value()){
int nROIS=data_host.targets.NVols+data_host.targets.NSurfs;
long total_s2targets=data_host.nseeds*nROIS;
free=free-(total_s2targets*sizeof(float)); //matrix with results
if(opts.omeanpathlength.value()){
free=free-(total_s2targets*sizeof(float)); //s2targetsb
}
if(nROIS>maxTargsInShared){
// Cannot use Shared Memory if too many ROIS, need Global memory for flags
bytes_per_sl_COMMON+=(nROIS)*sizeof(float);
}
}
if(opts.loopcheck.value()){
bytes_per_sl_COMMON+= (data_host.nsteps/5)*sizeof(int); // loopcheckkeys_gpu
bytes_per_sl_COMMON+= (data_host.nsteps/5)*sizeof(float3); // loopcheckdirs_gpu
}
if(opts.matrix3out.value()){
bytes_per_sl_STREAM+= 3*data_host.nsteps*sizeof(float3); // mat_crossed_gpu
//max is 3 per step (3*nsteps in total) ... but that will never happen
bytes_per_sl_STREAM+= sizeof(int); // mat_numcrossed_gpu
if(opts.lrmask3.value()!=""){
bytes_per_sl_STREAM+= 3*data_host.nsteps*sizeof(float3); // lrmat_crossed_gpu
//3 per jump -> ... will never happen
bytes_per_sl_STREAM+= sizeof(int); // lrmat_numcrossed_gpu
}
}else if(opts.matrix1out.value()||opts.matrix2out.value()){
bytes_per_sl_STREAM+= 3*data_host.nsteps*sizeof(float3); // lrmat_crossed_gpu
//3
bytes_per_sl_STREAM+= sizeof(int); // lrmat_numcrossed_gpu
}
free=free*FREEPERCENTAGE; // 80% defined in options.h
MAX_SLs=free/(bytes_per_sl_STREAM+(bytes_per_sl_COMMON/NSTREAMS));
if(MAX_SLs%2) MAX_SLs++;
unsigned long long totalSLs = (unsigned long long)data_host.nseeds*data_host.nparticles;
if(totalSLs<MAX_SLs){
MAX_SLs=totalSLs;
}
printf("Running %i streamlines in parallel using 2 STREAMS\n",MAX_SLs);
THREADS_STREAM=MAX_SLs/NSTREAMS; // paths_gpu only needs to be a single structure if save_paths is off (take a look !!)
// Allocate in HOST
checkCuda(hipHostMalloc((void**)lengths_host,2*THREADS_STREAM*sizeof(float))); // 2 paths per sample
if(opts.save_paths.value()){ // if not.. discard it when finished streamline
checkCuda(hipHostMalloc((void**)paths_host,THREADS_STREAM*data_host.nsteps*3*sizeof(float)));
}
if(opts.simpleout.value())
checkCuda(hipHostMalloc((void**)mprob_host,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
if(opts.omeanpathlength.value()&&opts.simpleout.value())
checkCuda(hipHostMalloc((void**)mprob2_host,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
if(opts.opathdir.value())
checkCuda(hipHostMalloc((void**)mlocaldir_host,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*6*sizeof(float)));
if(opts.matrix3out.value()){
// If volumes overlap, it is possible to have more than 1 crossing voxel per jump
if(data_host.matrix3.NSurfs){
size_mat_cross=3*THREADS_STREAM*data_host.nsteps; // 3 vertices per jump (is this the maximum?)
max_per_jump_mat=3;
}else{
//size_mat_cross=THREADS_STREAM*data_host.nsteps;
//max_per_jump_mat=1;
size_mat_cross=3*THREADS_STREAM*data_host.nsteps;
max_per_jump_mat=3;
}
checkCuda(hipHostMalloc((void**)mat_crossed_host,size_mat_cross*sizeof(float3)));
checkCuda(hipHostMalloc((void**)mat_numcrossed_host,THREADS_STREAM*sizeof(int)));
if(opts.lrmask3.value()!=""){
if(data_host.matrix3.NSurfs){
size_lrmat_cross=3*THREADS_STREAM*data_host.nsteps; // 3 vertices per jump (is this the maximum?)
max_per_jump_lrmat=3;
}else{
//size_lrmat_cross=THREADS_STREAM*data_host.nsteps;
//max_per_jump_lrmat=1;
size_lrmat_cross=3*THREADS_STREAM*data_host.nsteps;
max_per_jump_lrmat=3;
}
checkCuda(hipHostMalloc((void**)lrmat_crossed_host,size_lrmat_cross*sizeof(float3)));
checkCuda(hipHostMalloc((void**)lrmat_numcrossed_host,THREADS_STREAM*sizeof(int)));
}
}else if(opts.matrix1out.value()||opts.matrix2out.value()){
if(data_host.lrmatrix1.NSurfs){
size_lrmat_cross=3*THREADS_STREAM*data_host.nsteps; // 3 vertices per jump (is this the maximum?)
max_per_jump_lrmat=3;
}else{
//size_lrmat_cross=THREADS_STREAM*data_host.nsteps;
//max_per_jump_lrmat=1;
size_lrmat_cross=3*THREADS_STREAM*data_host.nsteps;
max_per_jump_lrmat=3;
}
checkCuda(hipHostMalloc((void**)lrmat_crossed_host,size_lrmat_cross*sizeof(float3)));
checkCuda(hipHostMalloc((void**)lrmat_numcrossed_host,THREADS_STREAM*sizeof(int)));
}
}
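// Editorial sketch (not part of the original code): the core of the
// sizing logic above, with hypothetical numbers. Per-streamline costs
// are split into STREAM bytes (allocated once per stream) and COMMON
// bytes (shared by both streams); the free-memory budget then sets the
// batch size:
//
//	size_t freeB, totalB;
//	hipMemGetInfo(&freeB, &totalB);
//	freeB = (size_t)(freeB * FREEPERCENTAGE);               // e.g. 80%
//	size_t perSL = bytes_per_sl_STREAM + bytes_per_sl_COMMON/NSTREAMS;
//	int maxSL = (int)(freeB / perSL);
//	if (maxSL % 2) maxSL++;                                 // even split over 2 streams
//	// finally capped at nseeds*nparticles, as in the code above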
void allocate_gpu_mem(tractographyData& data_host,
int& MAX_SLs,
int THREADS_STREAM,
// Output
float** mprob_gpu,
float** mprob2_gpu,
float** mlocaldir_gpu,
int** beenhere_gpu,
float** ConNet_gpu,
float** ConNetb_gpu,
bool& net_flags_in_shared,
float** net_flags_gpu,
float** net_values_gpu,
float** s2targets_gpu,
float** s2targetsb_gpu,
bool& targ_flags_in_shared,
float** targ_flags_gpu,
float** paths_gpu,
int** lengths_gpu,
// Loopcheck
int** loopcheckkeys_gpu,
float3** loopcheckdirs_gpu,
// Matrix
float3** mat_crossed_gpu,
int** mat_numcrossed_gpu,
int size_mat_cross,
float3** lrmat_crossed_gpu,
int** lrmat_numcrossed_gpu,
int size_lrmat_cross)
{
probtrackxOptions& opts =probtrackxOptions::getInstance();
int nsteps=opts.nsteps.value();
// coordinate visited
long long nbytes;
if(!opts.save_paths.value()){
// only for threads in a STREAM (can discard the coordinates of finished streamlines)
nbytes=THREADS_STREAM*data_host.nsteps;
nbytes*=3;
nbytes*=sizeof(float);
checkCuda(hipMalloc((void**)paths_gpu,nbytes));
}else{
// for all the streamlines allocated
nbytes=MAX_SLs*data_host.nsteps;
nbytes*=3;
nbytes*=sizeof(float);
checkCuda(hipMalloc((void**)paths_gpu,nbytes));
}
// path lengths
checkCuda(hipMalloc((void**)lengths_gpu,MAX_SLs*2*sizeof(int)));
// Map probabilities
if(opts.simpleout.value()){
checkCuda(hipMalloc((void**)mprob_gpu,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(hipMemset(*mprob_gpu,0,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
// beenhere: to avoid 2 updates in same voxel
long long size_beenhere = THREADS_STREAM;
size_beenhere*=data_host.nsteps;
checkCuda(hipMalloc((void**)beenhere_gpu,size_beenhere*sizeof(int)));
}
if(opts.omeanpathlength.value()&&opts.simpleout.value()){
checkCuda(hipMalloc((void**)mprob2_gpu,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(hipMemset(*mprob2_gpu,0,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
}
// Map with average local tract orientations
if(opts.opathdir.value()){
checkCuda(hipMalloc((void**)mlocaldir_gpu,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*6*sizeof(float)));
checkCuda(hipMemset(*mlocaldir_gpu,0,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*6*sizeof(float)));
}
if(opts.network.value()){
// Network Matrix
int nROIS=data_host.network.NVols+data_host.network.NSurfs;
checkCuda(hipMalloc((void**)ConNet_gpu,nROIS*nROIS*sizeof(float)));
checkCuda(hipMemset(*ConNet_gpu,0,nROIS*nROIS*sizeof(float)));
if(opts.omeanpathlength.value()){
checkCuda(hipMalloc((void**)ConNetb_gpu,nROIS*nROIS*sizeof(float)));
checkCuda(hipMemset(*ConNetb_gpu,0,nROIS*nROIS*sizeof(float)));
}
// int maxNetsInShared= (24576-(6*THREADS_BLOCK)*sizeof(float))/(THREADS_BLOCK*2*sizeof(float));
// [24 KBytes (out of 48 KB); 6 floats per thread already allocated (coordinates); 2 arrays (values & flags)]
// set to 8 in options.h
if(nROIS>maxNetsInShared){
net_flags_in_shared=false;
// Flags for each thread to check if visited
checkCuda(hipMalloc((void**)net_flags_gpu,THREADS_STREAM*nROIS*sizeof(float)));
checkCuda(hipMalloc((void**)net_values_gpu,THREADS_STREAM*nROIS*sizeof(float)));
checkCuda(hipMemset(*net_flags_gpu,0,THREADS_STREAM*nROIS*sizeof(float)));
checkCuda(hipMemset(*net_values_gpu,0,THREADS_STREAM*nROIS*sizeof(float)));
}else{
net_flags_in_shared=true;
}
}
// Seed to targets: this is for s2astext
if(opts.s2tout.value()){
int nROIS=data_host.targets.NVols+data_host.targets.NSurfs;
long total_s2targets=data_host.nseeds*nROIS;
checkCuda(hipMalloc((void**)s2targets_gpu,total_s2targets*sizeof(float)));
checkCuda(hipMemset(*s2targets_gpu,0,total_s2targets*sizeof(float)));
if(opts.omeanpathlength.value()){
checkCuda(hipMalloc((void**)s2targetsb_gpu,total_s2targets*sizeof(float)));
checkCuda(hipMemset(*s2targetsb_gpu,0,total_s2targets*sizeof(float)));
}
if(nROIS>maxTargsInShared){
targ_flags_in_shared=false;
// Flags for each thread to check if visited
checkCuda(hipMalloc((void**)targ_flags_gpu,THREADS_STREAM*nROIS*sizeof(float)));
checkCuda(hipMemset(*targ_flags_gpu,0,THREADS_STREAM*nROIS*sizeof(float)));
}else{
targ_flags_in_shared=true;
}
}
if(opts.loopcheck.value()){
checkCuda(hipMalloc((void**)loopcheckkeys_gpu,(THREADS_STREAM*nsteps/5)*sizeof(int)));
checkCuda(hipMalloc((void**)loopcheckdirs_gpu,(THREADS_STREAM*nsteps/5)*sizeof(float3)));
}
// Connectivity Matrices
if(opts.matrix3out.value()){
checkCuda(hipMalloc((void**)mat_crossed_gpu,NSTREAMS*size_mat_cross*sizeof(float3)));
checkCuda(hipMalloc((void**)mat_numcrossed_gpu,MAX_SLs*sizeof(int)));
if(opts.lrmask3.value()!=""){
checkCuda(hipMalloc((void**)lrmat_crossed_gpu,NSTREAMS*size_lrmat_cross*sizeof(float3)));
checkCuda(hipMalloc((void**)lrmat_numcrossed_gpu,MAX_SLs*sizeof(int)));
}
}else if(opts.matrix1out.value()||opts.matrix2out.value()){
checkCuda(hipMalloc((void**)lrmat_crossed_gpu,NSTREAMS*size_lrmat_cross*sizeof(float3)));
checkCuda(hipMalloc((void**)lrmat_numcrossed_gpu,MAX_SLs*sizeof(int)));
}
}
void copy_ToConstantMemory(tractographyData& data_host)
{
probtrackxOptions& opts=probtrackxOptions::getInstance();
checkCuda(hipMemcpyToSymbol(C_vox2mm,data_host.vox2mm,12*sizeof(float)));
checkCuda(hipMemcpyToSymbol(C_steplength,&(data_host.steplength),sizeof(float)));
checkCuda(hipMemcpyToSymbol(C_distthresh,&(data_host.distthresh),sizeof(float)));
checkCuda(hipMemcpyToSymbol(C_curv_thr,&(data_host.curv_thr),sizeof(float)));
//hipMemcpyToSymbol(C_fibthresh,&(data_host.fibthresh),sizeof(float));
checkCuda(hipMemcpyToSymbol(C_Sdims,data_host.Sdims,3*sizeof(float)));
checkCuda(hipMemcpyToSymbol(C_Ddims,data_host.Ddims,3*sizeof(float)));
checkCuda(hipMemcpyToSymbol(C_Wsampling_S2D_I,data_host.Wsampling_S2D_I,3*sizeof(float)));
checkCuda(hipMemcpyToSymbol(C_Wsampling_D2S_I,data_host.Wsampling_D2S_I,3*sizeof(float)));
checkCuda(hipMemcpyToSymbol(C_SsamplingI,data_host.SsamplingI,3*sizeof(float)));
checkCuda(hipMemcpyToSymbol(C_DsamplingI,data_host.DsamplingI,3*sizeof(float)));
checkCuda(hipMemcpyToSymbol(C_Seeds_to_DTI,data_host.Seeds_to_DTI,12*sizeof(float)));
checkCuda(hipMemcpyToSymbol(C_DTI_to_Seeds,data_host.DTI_to_Seeds,12*sizeof(float)));
//checkCuda(hipMemcpyToSymbol(C_Seeds_to_M2,data_host.Seeds_to_M2,12*sizeof(float)));
checkCuda(hipMemcpyToSymbol(C_Ssizes,data_host.Ssizes,3*sizeof(int)));
checkCuda(hipMemcpyToSymbol(C_Dsizes,data_host.Dsizes,3*sizeof(int)));
//checkCuda(hipMemcpyToSymbol(C_M2sizes,data_host.M2sizes,3*sizeof(int)));
checkCuda(hipMemcpyToSymbol(C_Warp_S2D_sizes,data_host.Warp_S2D_sizes,3*sizeof(int)));
checkCuda(hipMemcpyToSymbol(C_Warp_D2S_sizes,data_host.Warp_D2S_sizes,3*sizeof(int)));
if(data_host.lrmatrix1.NVols){
if(opts.matrix2out.value()){
checkCuda(hipMemcpyToSymbol(C_Seeds_to_M2,data_host.Seeds_to_M2,12*sizeof(float)));
checkCuda(hipMemcpyToSymbol(C_M2sizes,data_host.M2sizes,3*sizeof(int)));
}
}
}
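// Editorial sketch (not part of the original code): the constant-memory
// pattern used above. C_example is a hypothetical symbol, not one of
// the real C_* arrays declared elsewhere in this project:
//
//	__constant__ float C_example[3];
//	float h_example[3] = {1.0f, 2.0f, 3.0f};
//	checkCuda(hipMemcpyToSymbol(C_example, h_example, 3*sizeof(float)));
//	// kernels read C_example[] directly; the values are cached and
//	// broadcast to all threads of a warp on access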
void copy_ToTextureMemory( tractographyData& data_host)
{
probtrackxOptions& opts=probtrackxOptions::getInstance();
hipArray *d_volumeArray1,*d_volumeArray2,*d_volumeArray3;
hipArray *d_volumeArray4,*d_volumeArray5,*d_volumeArray6;
if(opts.seeds_to_dti.value()!="" && fsl_imageexists(opts.seeds_to_dti.value())){
long size_warp=data_host.Warp_S2D_sizes[0]*data_host.Warp_S2D_sizes[1]*data_host.Warp_S2D_sizes[2];
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32,0,0,0,hipChannelFormatKindFloat);
const hipExtent volumeSize= make_hipExtent(data_host.Warp_S2D_sizes[0],data_host.Warp_S2D_sizes[1],data_host.Warp_S2D_sizes[2]);
checkCuda(hipMalloc3DArray(&d_volumeArray1,&channelDesc,volumeSize));
checkCuda(hipMalloc3DArray(&d_volumeArray2,&channelDesc,volumeSize));
checkCuda(hipMalloc3DArray(&d_volumeArray3,&channelDesc,volumeSize));
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr((void*)data_host.SeedDTIwarp, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray1;
copyParams.extent = volumeSize;
copyParams.kind = hipMemcpyHostToDevice;
checkCuda(hipMemcpy3D(©Params));
// default addressMode clamp
// T_SeedDTIwarp1.filterMode=hipFilterModeLinear;
// trilinear interpolation....not good precision
checkCuda(hipBindTextureToArray(T_SeedDTIwarp1,d_volumeArray1,channelDesc));
copyParams.srcPtr = make_hipPitchedPtr((void*)&data_host.SeedDTIwarp[size_warp], volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray2;
checkCuda(hipMemcpy3D(©Params));
checkCuda(hipBindTextureToArray(T_SeedDTIwarp2,d_volumeArray2,channelDesc));
copyParams.srcPtr = make_hipPitchedPtr((void*)&data_host.SeedDTIwarp[2*size_warp], volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray3;
checkCuda(hipMemcpy3D(©Params));
checkCuda(hipBindTextureToArray(T_SeedDTIwarp3,d_volumeArray3,channelDesc));
}
if(opts.dti_to_seeds.value()!="" && fsl_imageexists(opts.dti_to_seeds.value())){
long size_warp=data_host.Warp_D2S_sizes[0]*data_host.Warp_D2S_sizes[1]*data_host.Warp_D2S_sizes[2];
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32,0,0,0,hipChannelFormatKindFloat);
const hipExtent volumeSize2= make_hipExtent(data_host.Warp_D2S_sizes[0],data_host.Warp_D2S_sizes[1],data_host.Warp_D2S_sizes[2]);
checkCuda(hipMalloc3DArray(&d_volumeArray4,&channelDesc,volumeSize2));
checkCuda(hipMalloc3DArray(&d_volumeArray5,&channelDesc,volumeSize2));
checkCuda(hipMalloc3DArray(&d_volumeArray6,&channelDesc,volumeSize2));
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr((void*)data_host.DTISeedwarp, volumeSize2.width*sizeof(float), volumeSize2.width, volumeSize2.height);
copyParams.dstArray = d_volumeArray4;
copyParams.extent = volumeSize2;
checkCuda(hipMemcpy3D(©Params));
checkCuda(hipBindTextureToArray(T_DTISeedwarp1,d_volumeArray4,channelDesc));
copyParams.srcPtr = make_hipPitchedPtr((void*)&data_host.DTISeedwarp[size_warp], volumeSize2.width*sizeof(float), volumeSize2.width, volumeSize2.height);
copyParams.dstArray = d_volumeArray5;
checkCuda(hipMemcpy3D(©Params));
checkCuda(hipBindTextureToArray(T_DTISeedwarp2,d_volumeArray5,channelDesc));
copyParams.srcPtr = make_hipPitchedPtr((void*)&data_host.DTISeedwarp[2*size_warp], volumeSize2.width*sizeof(float), volumeSize2.width, volumeSize2.height);
copyParams.dstArray = d_volumeArray6;
checkCuda(hipMemcpy3D(©Params));
checkCuda(hipBindTextureToArray(T_DTISeedwarp3,d_volumeArray6,channelDesc));
}
}
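// Editorial sketch (not part of the original code): the 3D-array +
// texture pattern used above, reduced to one hypothetical channel
// (T_example would be a file-scope texture<float,3,hipReadModeElementType>):
//
//	hipChannelFormatDesc cd = hipCreateChannelDesc(32,0,0,0,hipChannelFormatKindFloat);
//	hipExtent ext = make_hipExtent(nx, ny, nz);
//	hipArray* arr = NULL;
//	checkCuda(hipMalloc3DArray(&arr, &cd, ext));
//	hipMemcpy3DParms p = {0};
//	p.srcPtr   = make_hipPitchedPtr((void*)h_data, nx*sizeof(float), nx, ny);
//	p.dstArray = arr;
//	p.extent   = ext;
//	p.kind     = hipMemcpyHostToDevice;
//	checkCuda(hipMemcpy3D(&p));
//	checkCuda(hipBindTextureToArray(T_example, arr, cd));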
void copy_to_gpu( tractographyData& data_host,
tractographyData*& data_gpu)
{
probtrackxOptions& opts =probtrackxOptions::getInstance();
checkCuda(hipMalloc((void**)&data_gpu,sizeof(tractographyData)));
checkCuda(hipMemcpy(data_gpu,&data_host,sizeof(tractographyData),hipMemcpyHostToDevice));
int* auxI;
float* auxF;
// sizes and dims .... now in Constant memory
// seeds
checkCuda(hipMalloc((void**)&auxF,data_host.nseeds*3*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.seeds,data_host.nseeds*3*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->seeds,&auxF,sizeof(float*),hipMemcpyHostToDevice));
if(opts.network.value()){
checkCuda(hipMalloc((void**)&auxF,data_host.nseeds*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.seeds_ROI,data_host.nseeds*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->seeds_ROI,&auxF,sizeof(float*),hipMemcpyHostToDevice));
}
// mask
checkCuda(hipMalloc((void**)&auxF,data_host.Dsizes[0]*data_host.Dsizes[1]*data_host.Dsizes[2]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.mask,data_host.Dsizes[0]*data_host.Dsizes[1]*data_host.Dsizes[2]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->mask,&auxF,sizeof(float*),hipMemcpyHostToDevice));
// th_samples
checkCuda(hipMalloc((void**)&auxF,data_host.nfibres*data_host.nsamples*data_host.nvoxels*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.thsamples,data_host.nfibres*data_host.nsamples*data_host.nvoxels*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->thsamples,&auxF,sizeof(float*),hipMemcpyHostToDevice));
// ph_samples
checkCuda(hipMalloc((void**)&auxF,data_host.nfibres*data_host.nsamples*data_host.nvoxels*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.phsamples,data_host.nfibres*data_host.nsamples*data_host.nvoxels*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->phsamples,&auxF,sizeof(float*),hipMemcpyHostToDevice));
// f_samples
checkCuda(hipMalloc((void**)&auxF,data_host.nfibres*data_host.nsamples*data_host.nvoxels*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.fsamples,data_host.nfibres*data_host.nsamples*data_host.nvoxels*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->fsamples,&auxF,sizeof(float*),hipMemcpyHostToDevice));
// lut_vol2mat
checkCuda(hipMalloc((void**)&auxI,data_host.Dsizes[0]*data_host.Dsizes[1]*data_host.Dsizes[2]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.lut_vol2mat,data_host.Dsizes[0]*data_host.Dsizes[1]*data_host.Dsizes[2]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->lut_vol2mat,&auxI,sizeof(int*),hipMemcpyHostToDevice));
//Seeds_to_DTI...... now in Constant memory
//DTI_to_Seeds...... now in Constant memory
//VOX2MM...... now in Constant memory
//NON-LINEAR ...... now in Constant memory and Texture Memory
//Warp sizes.... now in constant memory
//Sampling Inverse.... now in constant memory
//Avoid mask
if(data_host.avoid.NVols){
checkCuda(hipMalloc((void**)&auxF,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.avoid.volume,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->avoid.volume,&auxF,sizeof(float*),hipMemcpyHostToDevice));
}
if(data_host.avoid.NSurfs){
//hipMalloc((void**)&auxI,data_host.avoid.sizesStr[0]*sizeof(int));
//hipMemcpy(auxI,data_host.avoid.locs,data_host.avoid.sizesStr[0]*sizeof(int),hipMemcpyHostToDevice);
//hipMemcpy(&data_gpu->avoid.locs,&auxI,sizeof(int*),hipMemcpyHostToDevice);
// no need locs
checkCuda(hipMalloc((void**)&auxF,data_host.avoid.sizesStr[1]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.avoid.vertices,data_host.avoid.sizesStr[1]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->avoid.vertices,&auxF,sizeof(float*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.avoid.sizesStr[2]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.avoid.faces,data_host.avoid.sizesStr[2]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->avoid.faces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.avoid.sizesStr[3]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.avoid.VoxFaces,data_host.avoid.sizesStr[3]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->avoid.VoxFaces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.avoid.VoxFacesIndex,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->avoid.VoxFacesIndex,&auxI,sizeof(int*),hipMemcpyHostToDevice));
}
// Stop mask
if(data_host.stop.NVols){
checkCuda(hipMalloc((void**)&auxF,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.stop.volume,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->stop.volume,&auxF,sizeof(float*),hipMemcpyHostToDevice));
}
if(data_host.stop.NSurfs){
//hipMalloc((void**)&auxI,data_host.stop.sizesStr[0]*sizeof(int));
//hipMemcpy(auxI,data_host.stop.locs,data_host.stop.sizesStr[0]*sizeof(int),hipMemcpyHostToDevice);
//hipMemcpy(&data_gpu->stop.locs,&auxI,sizeof(int*),hipMemcpyHostToDevice);
// no need locs
checkCuda(hipMalloc((void**)&auxF,data_host.stop.sizesStr[1]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.stop.vertices,data_host.stop.sizesStr[1]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->stop.vertices,&auxF,sizeof(float*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.stop.sizesStr[2]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.stop.faces,data_host.stop.sizesStr[2]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->stop.faces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.stop.sizesStr[3]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.stop.VoxFaces,data_host.stop.sizesStr[3]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->stop.VoxFaces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.stop.VoxFacesIndex,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->stop.VoxFacesIndex,&auxI,sizeof(int*),hipMemcpyHostToDevice));
}
// Wtstop mask
if(data_host.wtstop.NVols){
checkCuda(hipMalloc((void**)&auxF,data_host.wtstop.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.wtstop.volume,data_host.wtstop.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->wtstop.volume,&auxF,sizeof(float*),hipMemcpyHostToDevice));
}
if(data_host.wtstop.NSurfs){
//hipMalloc((void**)&auxI,data_host.wtstop.sizesStr[0]*sizeof(int));
//hipMemcpy(auxI,data_host.wtstop.locs,data_host.wtstop.sizesStr[0]*sizeof(int),hipMemcpyHostToDevice);
//hipMemcpy(&data_gpu->wtstop.locs,&auxI,sizeof(int*),hipMemcpyHostToDevice);
// no need locs
checkCuda(hipMalloc((void**)&auxF,data_host.wtstop.sizesStr[1]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.wtstop.vertices,data_host.wtstop.sizesStr[1]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->wtstop.vertices,&auxF,sizeof(float*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.wtstop.sizesStr[2]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.wtstop.faces,data_host.wtstop.sizesStr[2]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->wtstop.faces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.wtstop.sizesStr[3]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.wtstop.VoxFaces,data_host.wtstop.sizesStr[3]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->wtstop.VoxFaces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,(data_host.wtstop.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.wtstop.VoxFacesIndex,(data_host.wtstop.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->wtstop.VoxFacesIndex,&auxI,sizeof(int*),hipMemcpyHostToDevice));
}
// Network mask
if(data_host.network.NVols){
checkCuda(hipMalloc((void**)&auxF,data_host.network.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.network.volume,data_host.network.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->network.volume,&auxF,sizeof(float*),hipMemcpyHostToDevice));
}
if(data_host.network.NSurfs){
//hipMalloc((void**)&auxI,data_host.network.sizesStr[0]*sizeof(int));
//hipMemcpy(auxI,data_host.network.locs,data_host.network.sizesStr[0]*sizeof(int),hipMemcpyHostToDevice);
//hipMemcpy(&data_gpu->network.locs,&auxI,sizeof(int*),hipMemcpyHostToDevice);
// no locs
checkCuda(hipMalloc((void**)&auxF,data_host.network.sizesStr[1]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.network.vertices,data_host.network.sizesStr[1]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->network.vertices,&auxF,sizeof(float*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.network.sizesStr[2]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.network.faces,data_host.network.sizesStr[2]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->network.faces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.network.sizesStr[3]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.network.VoxFaces,data_host.network.sizesStr[3]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->network.VoxFaces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,(data_host.network.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.network.VoxFacesIndex,(data_host.network.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->network.VoxFacesIndex,&auxI,sizeof(int*),hipMemcpyHostToDevice));
}
if(data_host.network.NVols||data_host.network.NSurfs){
int totalrois=data_host.network.NVols+data_host.network.NSurfs;
checkCuda(hipMalloc((void**)&auxI,totalrois*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.network.IndexRoi,totalrois*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->network.IndexRoi,&auxI,sizeof(int*),hipMemcpyHostToDevice));
}
// Reference Network mask
if(data_host.networkREF.NVols){
checkCuda(hipMalloc((void**)&auxF,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.networkREF.volume,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->networkREF.volume,&auxF,sizeof(float*),hipMemcpyHostToDevice));
}
if(data_host.networkREF.NSurfs){
checkCuda(hipMalloc((void**)&auxF,data_host.networkREF.sizesStr[1]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.networkREF.vertices,data_host.networkREF.sizesStr[1]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->networkREF.vertices,&auxF,sizeof(float*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.networkREF.sizesStr[2]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.networkREF.faces,data_host.networkREF.sizesStr[2]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->networkREF.faces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.networkREF.sizesStr[3]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.networkREF.VoxFaces,data_host.networkREF.sizesStr[3]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->networkREF.VoxFaces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.networkREF.VoxFacesIndex,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->networkREF.VoxFacesIndex,&auxI,sizeof(int*),hipMemcpyHostToDevice));
}
// Waypoints mask
if(data_host.waypoint.NVols){
checkCuda(hipMalloc((void**)&auxF,data_host.waypoint.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.waypoint.volume,data_host.waypoint.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->waypoint.volume,&auxF,sizeof(float*),hipMemcpyHostToDevice));
}
if(data_host.waypoint.NSurfs){
//hipMalloc((void**)&auxI,data_host.waypoint.sizesStr[0]*sizeof(int));
//hipMemcpy(auxI,data_host.waypoint.locs,data_host.waypoint.sizesStr[0]*sizeof(int),hipMemcpyHostToDevice);
//hipMemcpy(&data_gpu->waypoint.locs,&auxI,sizeof(int*),hipMemcpyHostToDevice);
checkCuda(hipMalloc((void**)&auxF,data_host.waypoint.sizesStr[1]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.waypoint.vertices,data_host.waypoint.sizesStr[1]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->waypoint.vertices,&auxF,sizeof(float*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.waypoint.sizesStr[2]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.waypoint.faces,data_host.waypoint.sizesStr[2]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->waypoint.faces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.waypoint.sizesStr[3]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.waypoint.VoxFaces,data_host.waypoint.sizesStr[3]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->waypoint.VoxFaces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,(data_host.waypoint.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.waypoint.VoxFacesIndex,(data_host.waypoint.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->waypoint.VoxFacesIndex,&auxI,sizeof(int*),hipMemcpyHostToDevice));
}
if(data_host.waypoint.NVols||data_host.waypoint.NSurfs){
int totalrois=data_host.waypoint.NVols+data_host.waypoint.NSurfs;
checkCuda(hipMalloc((void**)&auxI,totalrois*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.waypoint.IndexRoi,totalrois*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->waypoint.IndexRoi,&auxI,sizeof(int*),hipMemcpyHostToDevice));
}
// Target mask
if(data_host.targets.NVols){
checkCuda(hipMalloc((void**)&auxF,data_host.targets.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.targets.volume,data_host.targets.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->targets.volume,&auxF,sizeof(float*),hipMemcpyHostToDevice));
}
if(data_host.targets.NSurfs){
checkCuda(hipMalloc((void**)&auxF,data_host.targets.sizesStr[1]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.targets.vertices,data_host.targets.sizesStr[1]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->targets.vertices,&auxF,sizeof(float*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.targets.sizesStr[2]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.targets.faces,data_host.targets.sizesStr[2]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->targets.faces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.targets.sizesStr[3]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.targets.VoxFaces,data_host.targets.sizesStr[3]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->targets.VoxFaces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,(data_host.targets.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.targets.VoxFacesIndex,(data_host.targets.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->targets.VoxFacesIndex,&auxI,sizeof(int*),hipMemcpyHostToDevice));
}
if(data_host.targets.NVols||data_host.targets.NSurfs){
int totalrois=data_host.targets.NVols+data_host.targets.NSurfs;
checkCuda(hipMalloc((void**)&auxI,totalrois*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.targets.IndexRoi,totalrois*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->targets.IndexRoi,&auxI,sizeof(int*),hipMemcpyHostToDevice));
}
// Reference Targets mask
if(data_host.targetsREF.NVols){
checkCuda(hipMalloc((void**)&auxF,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.targetsREF.volume,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->targetsREF.volume,&auxF,sizeof(float*),hipMemcpyHostToDevice));
}
if(data_host.targetsREF.NSurfs){
checkCuda(hipMalloc((void**)&auxF,data_host.targetsREF.sizesStr[1]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.targetsREF.vertices,data_host.targetsREF.sizesStr[1]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->targetsREF.vertices,&auxF,sizeof(float*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.targetsREF.sizesStr[2]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.targetsREF.faces,data_host.targetsREF.sizesStr[2]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->targetsREF.faces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.targetsREF.sizesStr[3]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.targetsREF.VoxFaces,data_host.targetsREF.sizesStr[3]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->targetsREF.VoxFaces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.targetsREF.VoxFacesIndex,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->targetsREF.VoxFacesIndex,&auxI,sizeof(int*),hipMemcpyHostToDevice));
}
// Matrix 1
// LRMatrix 1
if(data_host.lrmatrix1.NVols){
if(opts.matrix2out.value()){
checkCuda(hipMalloc((void**)&auxF,data_host.lrmatrix1.NVols*data_host.M2sizes[0]*data_host.M2sizes[1]*data_host.M2sizes[2]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.lrmatrix1.volume,data_host.lrmatrix1.NVols*data_host.M2sizes[0]*data_host.M2sizes[1]*data_host.M2sizes[2]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->lrmatrix1.volume,&auxF,sizeof(float*),hipMemcpyHostToDevice));
}else{
checkCuda(hipMalloc((void**)&auxF,data_host.lrmatrix1.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.lrmatrix1.volume,data_host.lrmatrix1.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->lrmatrix1.volume,&auxF,sizeof(float*),hipMemcpyHostToDevice));
}
}
if(data_host.lrmatrix1.NSurfs){
checkCuda(hipMalloc((void**)&auxI,data_host.lrmatrix1.sizesStr[0]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.lrmatrix1.locs,data_host.lrmatrix1.sizesStr[0]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->lrmatrix1.locs,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxF,data_host.lrmatrix1.sizesStr[1]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.lrmatrix1.vertices,data_host.lrmatrix1.sizesStr[1]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->lrmatrix1.vertices,&auxF,sizeof(float*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.lrmatrix1.sizesStr[2]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.lrmatrix1.faces,data_host.lrmatrix1.sizesStr[2]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->lrmatrix1.faces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.lrmatrix1.sizesStr[3]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.lrmatrix1.VoxFaces,data_host.lrmatrix1.sizesStr[3]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->lrmatrix1.VoxFaces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,(data_host.lrmatrix1.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.lrmatrix1.VoxFacesIndex,(data_host.lrmatrix1.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->lrmatrix1.VoxFacesIndex,&auxI,sizeof(int*),hipMemcpyHostToDevice));
//hipMalloc((void**)&auxI,data_host.lrmatrix1.sizesStr[4]*sizeof(int));
//hipMemcpy(auxI,data_host.lrmatrix1.IndexRoi,data_host.lrmatrix1.sizesStr[4]*sizeof(int),hipMemcpyHostToDevice);
//hipMemcpy(&data_gpu->lrmatrix1.IndexRoi,&auxI,sizeof(int*),hipMemcpyHostToDevice);
}
// Matrix 3
if(data_host.matrix3.NVols){
checkCuda(hipMalloc((void**)&auxF,data_host.matrix3.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.matrix3.volume,data_host.matrix3.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->matrix3.volume,&auxF,sizeof(float*),hipMemcpyHostToDevice));
}
if(data_host.matrix3.NSurfs){
checkCuda(hipMalloc((void**)&auxI,data_host.matrix3.sizesStr[0]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.matrix3.locs,data_host.matrix3.sizesStr[0]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->matrix3.locs,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxF,data_host.matrix3.sizesStr[1]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.matrix3.vertices,data_host.matrix3.sizesStr[1]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->matrix3.vertices,&auxF,sizeof(float*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.matrix3.sizesStr[2]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.matrix3.faces,data_host.matrix3.sizesStr[2]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->matrix3.faces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.matrix3.sizesStr[3]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.matrix3.VoxFaces,data_host.matrix3.sizesStr[3]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->matrix3.VoxFaces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,(data_host.matrix3.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.matrix3.VoxFacesIndex,(data_host.matrix3.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->matrix3.VoxFacesIndex,&auxI,sizeof(int*),hipMemcpyHostToDevice));
//hipMalloc((void**)&auxI,data_host.matrix3.sizesStr[4]*sizeof(int));
//hipMemcpy(auxI,data_host.matrix3.IndexRoi,data_host.matrix3.sizesStr[4]*sizeof(int),hipMemcpyHostToDevice);
//hipMemcpy(&data_gpu->matrix3.IndexRoi,&auxI,sizeof(int*),hipMemcpyHostToDevice);
}
// LRMatrix 3
if(data_host.lrmatrix3.NVols){
checkCuda(hipMalloc((void**)&auxF,data_host.lrmatrix3.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.lrmatrix3.volume,data_host.lrmatrix3.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->lrmatrix3.volume,&auxF,sizeof(float*),hipMemcpyHostToDevice));
}
if(data_host.lrmatrix3.NSurfs){
checkCuda(hipMalloc((void**)&auxI,data_host.lrmatrix3.sizesStr[0]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.lrmatrix3.locs,data_host.lrmatrix3.sizesStr[0]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->lrmatrix3.locs,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxF,data_host.lrmatrix3.sizesStr[1]*sizeof(float)));
checkCuda(hipMemcpy(auxF,data_host.lrmatrix3.vertices,data_host.lrmatrix3.sizesStr[1]*sizeof(float),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->lrmatrix3.vertices,&auxF,sizeof(float*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.lrmatrix3.sizesStr[2]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.lrmatrix3.faces,data_host.lrmatrix3.sizesStr[2]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->lrmatrix3.faces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,data_host.lrmatrix3.sizesStr[3]*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.lrmatrix3.VoxFaces,data_host.lrmatrix3.sizesStr[3]*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->lrmatrix3.VoxFaces,&auxI,sizeof(int*),hipMemcpyHostToDevice));
checkCuda(hipMalloc((void**)&auxI,(data_host.lrmatrix3.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(hipMemcpy(auxI,data_host.lrmatrix3.VoxFacesIndex,(data_host.lrmatrix3.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(&data_gpu->lrmatrix3.VoxFacesIndex,&auxI,sizeof(int*),hipMemcpyHostToDevice));
//hipMalloc((void**)&auxI,data_host.lrmatrix3.sizesStr[4]*sizeof(int));
//hipMemcpy(auxI,data_host.lrmatrix3.IndexRoi,data_host.lrmatrix3.sizesStr[4]*sizeof(int),hipMemcpyHostToDevice);
//hipMemcpy(&data_gpu->lrmatrix3.IndexRoi,&auxI,sizeof(int*),hipMemcpyHostToDevice);
}
}
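// Editorial sketch (not part of the original code): the deep-copy
// pattern repeated throughout copy_to_gpu() -- allocate the member
// buffer, copy its contents, then patch the pointer field inside the
// device-side struct. MyStruct / d_obj / h_data are hypothetical names:
//
//	struct MyStruct { float* data; };
//	MyStruct* d_obj = NULL;
//	float*    d_data = NULL;
//	checkCuda(hipMalloc((void**)&d_obj, sizeof(MyStruct)));
//	checkCuda(hipMalloc((void**)&d_data, n*sizeof(float)));
//	checkCuda(hipMemcpy(d_data, h_data, n*sizeof(float), hipMemcpyHostToDevice));
//	checkCuda(hipMemcpy(&(d_obj->data), &d_data, sizeof(float*), hipMemcpyHostToDevice));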
| e972f56b6dae9e346b429dd2c6c9379d9e1c6ad4.cu | /* memManager_gpu.cu
Moises Hernandez-Fernandez - FMRIB Image Analysis Group
Copyright (C) 2015 University of Oxford */
/* Part of FSL - FMRIB's Software Library
http://www.fmrib.ox.ac.uk/fsl
[email protected]
Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance
Imaging of the Brain), Department of Clinical Neurology, Oxford
University, Oxford, UK
LICENCE
FMRIB Software Library, Release 6.0 (c) 2018, The University of
Oxford (the "Software")
The Software remains the property of the Oxford University Innovation
("the University").
The Software is distributed "AS IS" under this Licence solely for
non-commercial use in the hope that it will be useful, but in order
that the University as a charitable foundation protects its assets for
the benefit of its educational and research purposes, the University
makes clear that no condition is made or to be implied, nor is any
warranty given or to be implied, as to the accuracy of the Software,
or that it will be suitable for any particular purpose or for use
under any specific conditions. Furthermore, the University disclaims
all responsibility for the use which is made of the Software. It
further disclaims any liability for the outcomes arising from using
the Software.
The Licensee agrees to indemnify the University and hold the
University harmless from and against any and all claims, damages and
liabilities asserted by third parties (including claims for
negligence) which arise directly or indirectly from the use of the
Software or the sale of any products based on the Software.
No part of the Software may be reproduced, modified, transmitted or
transferred in any form or by any means, electronic or mechanical,
without the express permission of the University. The permission of
the University is not required if the said reproduction, modification,
transmission or transference is done without financial return, the
conditions of this Licence are imposed upon the receiver of the
product, and all original and amended source code is included in any
transmitted product. You may be held legally responsible for any
copyright infringement that is caused or encouraged by your failure to
abide by these terms and conditions.
You are not permitted under this Licence to use this Software
commercially. Use for which any financial return is received shall be
defined as commercial use, and includes (1) integration of all or part
of the source code or the Software into a product for sale or license
by or on behalf of Licensee to third parties or (2) use of the
Software or any derivative of it for research with the final aim of
developing software products for sale or license to a third party or
(3) use of the Software or any derivative of it for research with the
final aim of developing non-software products for sale or license to a
third party, or (4) use of the Software to provide any service to an
external organisation for which payment is received. If you are
interested in using the Software commercially, please contact Oxford
University Innovation ("OUI"), the technology transfer company of the
University, to negotiate a licence. Contact details are:
[email protected] quoting Reference Project 9564, FSL.*/
cudaError_t checkCuda(cudaError_t result){
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
exit(1);
}
return result;
}
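// init_gpu: a dummy cudaMalloc/cudaFree forces the CUDA context to be created up front,
// the active device is reported, and the cache is configured to prefer shared memory.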
void init_gpu(){
int *q;
checkCuda(cudaMalloc((void **)&q, sizeof(int)));
checkCuda(cudaFree(q));
int device;
checkCuda(cudaGetDevice(&device));
printf ("\n...................Allocated GPU %d...................\n", device);
checkCuda(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared));
}
void allocate_host_mem(
// Input
tractographyData& data_host,
int& MAX_SLs, // MAX streamlines - calculated here
int& THREADS_STREAM, // calculated here
// Input - Output allocated mem
int** lengths_host,
float** paths_host,
float** mprob_host,
float** mprob2_host,
float** mlocaldir_host,
//float** targvalues_host,
//float** targvaluesB_host,
float3** mat_crossed_host,
int** mat_numcrossed_host,
long long& size_mat_cross,
int& max_per_jump_mat,
float3** lrmat_crossed_host,
int** lrmat_numcrossed_host,
long long& size_lrmat_cross,
int& max_per_jump_lrmat)
{
probtrackxOptions& opts=probtrackxOptions::getInstance();
// calculate the maximum number of streamlines that can be executed in parallel
size_t free,total;
cuMemGetInfo(&free,&total); // in bytes
int bytes_per_sl_STREAM=0; // needed for each STREAM (twice)
int bytes_per_sl_COMMON=0; // needed in common to all STREAMS
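// Budget bookkeeping: bytes_per_sl_STREAM counts buffers that every streamline in the batch
// needs for itself, while bytes_per_sl_COMMON counts buffers sized for a single stream's
// streamlines only (e.g. beenhere, loopcheck), hence the division by NSTREAMS when MAX_SLs
// is derived from the free memory below.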
if(!opts.save_paths.value()){
// only for threads in a STREAM (can discard the coordinates of finished streamlines)
bytes_per_sl_COMMON+= data_host.nsteps*3*sizeof(float); // paths_gpu (3 floats per step - MAX Nsteps)
}else{
// for all the streamlines allocated
bytes_per_sl_STREAM+= data_host.nsteps*3*sizeof(float); // paths_gpu (3 floats per step - MAX Nsteps)
}
bytes_per_sl_STREAM+= 2*sizeof(int); // lengths_gpu (2 directions)
bytes_per_sl_STREAM+= sizeof(curandState); // random seed
if(opts.simpleout.value()){
free=free-data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float); // m_prob
bytes_per_sl_COMMON+= (data_host.nsteps)*sizeof(int); // beenhere
}
if(opts.omeanpathlength.value()&opts.simpleout.value()){
free=free-data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float); // m_prob2
}
if(opts.opathdir.value()){
free=free-data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*6*sizeof(float); // m_localdir
}
if(opts.network.value()){
int nROIS=data_host.network.NVols+data_host.network.NSurfs;
free=free-(nROIS*nROIS*sizeof(float)); //ConNet
if(opts.omeanpathlength.value()){
free=free-(nROIS*nROIS*sizeof(float)); //ConNetb
}
if(nROIS>maxNetsInShared){
// Cannot use Shared Memory if too many ROIS, need Global memory for flags
bytes_per_sl_COMMON+=(2*nROIS)*sizeof(float);
}
}
if(opts.s2tout.value()){
int nROIS=data_host.targets.NVols+data_host.targets.NSurfs;
long total_s2targets=data_host.nseeds*nROIS;
free=free-(total_s2targets*sizeof(float)); //matrix with results
if(opts.omeanpathlength.value()){
free=free-(total_s2targets*sizeof(float)); //s2targetsb
}
if(nROIS>maxTargsInShared){
// Cannot use Shared Memory if too many ROIS, need Global memory for flags
bytes_per_sl_COMMON+=(nROIS)*sizeof(float);
}
}
if(opts.loopcheck.value()){
bytes_per_sl_COMMON+= (data_host.nsteps/5)*sizeof(int); // loopcheckkeys_gpu
bytes_per_sl_COMMON+= (data_host.nsteps/5)*sizeof(float3); // loopcheckdirs_gpu
}
if(opts.matrix3out.value()){
bytes_per_sl_STREAM+= 3*data_host.nsteps*sizeof(float3); // mat_crossed_gpu
//worst case: 3 crossings per step (3*nsteps), although in practice this never happens
bytes_per_sl_STREAM+= sizeof(int); // mat_numcrossed_gpu
if(opts.lrmask3.value()!=""){
bytes_per_sl_STREAM+= 3*data_host.nsteps*sizeof(float3); // lrmat_crossed_gpu
//again up to 3 crossings per step in the worst case, which never happens in practice
bytes_per_sl_STREAM+= sizeof(int); // lrmat_numcrossed_gpu
}
}else if(opts.matrix1out.value()||opts.matrix2out.value()){
bytes_per_sl_STREAM+= 3*data_host.nsteps*sizeof(float3); // lrmat_crossed_gpu
//up to 3 crossings per step in the worst case
bytes_per_sl_STREAM+= sizeof(int); // lrmat_numcrossed_gpu
}
free=free*FREEPERCENTAGE; // 80% defined in options.h
MAX_SLs=free/(bytes_per_sl_STREAM+(bytes_per_sl_COMMON/NSTREAMS));
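// keep MAX_SLs even so the batch splits exactly across the NSTREAMS (=2) streams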
if(MAX_SLs%2) MAX_SLs++;
unsigned long long totalSLs = (unsigned long long)data_host.nseeds*data_host.nparticles;
if(totalSLs<MAX_SLs){
MAX_SLs=totalSLs;
}
printf("Running %i streamlines in parallel using 2 STREAMS\n",MAX_SLs);
THREADS_STREAM=MAX_SLs/NSTREAMS; // paths_gpu only needs to hold one stream's worth of coordinates when save_paths is off (see allocate_gpu_mem)
// Allocate in HOST
checkCuda(cudaMallocHost((void**)lengths_host,2*THREADS_STREAM*sizeof(float))); // 2 paths per sample
if(opts.save_paths.value()){ // if not.. discard it when finished streamline
checkCuda(cudaMallocHost((void**)paths_host,THREADS_STREAM*data_host.nsteps*3*sizeof(float)));
}
if(opts.simpleout.value())
checkCuda(cudaMallocHost((void**)mprob_host,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
if(opts.omeanpathlength.value()&opts.simpleout.value())
checkCuda(cudaMallocHost((void**)mprob2_host,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
if(opts.opathdir.value())
checkCuda(cudaMallocHost((void**)mlocaldir_host,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*6*sizeof(float)));
if(opts.matrix3out.value()){
// If volumes overlap, it is possible to have more than 1 crossing voxel per jump
if(data_host.matrix3.NSurfs){
size_mat_cross=3*THREADS_STREAM*data_host.nsteps; // 3 vertices per jump (is this the maximum?)
max_per_jump_mat=3;
}else{
//size_mat_cross=THREADS_STREAM*data_host.nsteps;
//max_per_jump_mat=1;
size_mat_cross=3*THREADS_STREAM*data_host.nsteps;
max_per_jump_mat=3;
}
checkCuda(cudaMallocHost((void**)mat_crossed_host,size_mat_cross*sizeof(float3)));
checkCuda(cudaMallocHost((void**)mat_numcrossed_host,THREADS_STREAM*sizeof(int)));
if(opts.lrmask3.value()!=""){
if(data_host.matrix3.NSurfs){
size_lrmat_cross=3*THREADS_STREAM*data_host.nsteps; // 3 vertices per jump (is this the maximum?)
max_per_jump_lrmat=3;
}else{
//size_lrmat_cross=THREADS_STREAM*data_host.nsteps;
//max_per_jump_lrmat=1;
size_lrmat_cross=3*THREADS_STREAM*data_host.nsteps;
max_per_jump_lrmat=3;
}
checkCuda(cudaMallocHost((void**)lrmat_crossed_host,size_lrmat_cross*sizeof(float3)));
checkCuda(cudaMallocHost((void**)lrmat_numcrossed_host,THREADS_STREAM*sizeof(int)));
}
}else if(opts.matrix1out.value()||opts.matrix2out.value()){
if(data_host.lrmatrix1.NSurfs){
size_lrmat_cross=3*THREADS_STREAM*data_host.nsteps; // 3 vertices per jump (is this the maximum?)
max_per_jump_lrmat=3;
}else{
//size_lrmat_cross=THREADS_STREAM*data_host.nsteps;
//max_per_jump_lrmat=1;
size_lrmat_cross=3*THREADS_STREAM*data_host.nsteps;
max_per_jump_lrmat=3;
}
checkCuda(cudaMallocHost((void**)lrmat_crossed_host,size_lrmat_cross*sizeof(float3)));
checkCuda(cudaMallocHost((void**)lrmat_numcrossed_host,THREADS_STREAM*sizeof(int)));
}
}
void allocate_gpu_mem(tractographyData& data_host,
int& MAX_SLs,
int THREADS_STREAM,
// Output
float** mprob_gpu,
float** mprob2_gpu,
float** mlocaldir_gpu,
int** beenhere_gpu,
float** ConNet_gpu,
float** ConNetb_gpu,
bool& net_flags_in_shared,
float** net_flags_gpu,
float** net_values_gpu,
float** s2targets_gpu,
float** s2targetsb_gpu,
bool& targ_flags_in_shared,
float** targ_flags_gpu,
float** paths_gpu,
int** lengths_gpu,
// Loopcheck
int** loopcheckkeys_gpu,
float3** loopcheckdirs_gpu,
// Matrix
float3** mat_crossed_gpu,
int** mat_numcrossed_gpu,
int size_mat_cross,
float3** lrmat_crossed_gpu,
int** lrmat_numcrossed_gpu,
int size_lrmat_cross)
{
probtrackxOptions& opts =probtrackxOptions::getInstance();
int nsteps=opts.nsteps.value();
// coordinate visited
long long nbytes;
if(!opts.save_paths.value()){
// only for threads in a STREAM (can discard the coordinates of finished streamlines)
nbytes=THREADS_STREAM*data_host.nsteps;
nbytes*=3;
nbytes*=sizeof(float);
checkCuda(cudaMalloc((void**)paths_gpu,nbytes));
}else{
// for all the streamlines allocated
nbytes=MAX_SLs*data_host.nsteps;
nbytes*=3;
nbytes*=sizeof(float);
checkCuda(cudaMalloc((void**)paths_gpu,nbytes));
}
// path lengths
checkCuda(cudaMalloc((void**)lengths_gpu,MAX_SLs*2*sizeof(int)));
// Map probabilities
if(opts.simpleout.value()){
checkCuda(cudaMalloc((void**)mprob_gpu,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(cudaMemset(*mprob_gpu,0,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
// beenhere: to avoid 2 updates in same voxel
long long size_beenhere = THREADS_STREAM;
size_beenhere*=data_host.nsteps;
checkCuda(cudaMalloc((void**)beenhere_gpu,size_beenhere*sizeof(int)));
}
if(opts.omeanpathlength.value()&&opts.simpleout.value()){
checkCuda(cudaMalloc((void**)mprob2_gpu,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(cudaMemset(*mprob2_gpu,0,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
}
// Map with average local tract orientations
if(opts.opathdir.value()){
checkCuda(cudaMalloc((void**)mlocaldir_gpu,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*6*sizeof(float)));
checkCuda(cudaMemset(*mlocaldir_gpu,0,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*6*sizeof(float)));
}
if(opts.network.value()){
// Network Matrix
int nROIS=data_host.network.NVols+data_host.network.NSurfs;
checkCuda(cudaMalloc((void**)ConNet_gpu,nROIS*nROIS*sizeof(float)));
checkCuda(cudaMemset(*ConNet_gpu,0,nROIS*nROIS*sizeof(float)));
if(opts.omeanpathlength.value()){
checkCuda(cudaMalloc((void**)ConNetb_gpu,nROIS*nROIS*sizeof(float)));
checkCuda(cudaMemset(*ConNetb_gpu,0,nROIS*nROIS*sizeof(float)));
}
// int maxNetsInShared= (24576-(6*THREADS_BLOCK)*sizeof(float))/(THREADS_BLOCK*2*sizeof(float));
// [24 KB (out of 48 KB of shared memory); 6 floats already allocated (coordinates); 2 arrays (values & flags)]
// set to 8 in options.h
if(nROIS>maxNetsInShared){
net_flags_in_shared=false;
// Flags for each thread to check if visited
checkCuda(cudaMalloc((void**)net_flags_gpu,THREADS_STREAM*nROIS*sizeof(float)));
checkCuda(cudaMalloc((void**)net_values_gpu,THREADS_STREAM*nROIS*sizeof(float)));
checkCuda(cudaMemset(*net_flags_gpu,0,THREADS_STREAM*nROIS*sizeof(float)));
checkCuda(cudaMemset(*net_values_gpu,0,THREADS_STREAM*nROIS*sizeof(float)));
}else{
net_flags_in_shared=true;
}
}
// Seed to targets: this is for s2astext
if(opts.s2tout.value()){
int nROIS=data_host.targets.NVols+data_host.targets.NSurfs;
long total_s2targets=data_host.nseeds*nROIS;
checkCuda(cudaMalloc((void**)s2targets_gpu,total_s2targets*sizeof(float)));
checkCuda(cudaMemset(*s2targets_gpu,0,total_s2targets*sizeof(float)));
if(opts.omeanpathlength.value()){
checkCuda(cudaMalloc((void**)s2targetsb_gpu,total_s2targets*sizeof(float)));
checkCuda(cudaMemset(*s2targetsb_gpu,0,total_s2targets*sizeof(float)));
}
if(nROIS>maxTargsInShared){
targ_flags_in_shared=false;
// Flags for each thread to check if visited
checkCuda(cudaMalloc((void**)targ_flags_gpu,THREADS_STREAM*nROIS*sizeof(float)));
checkCuda(cudaMemset(*targ_flags_gpu,0,THREADS_STREAM*nROIS*sizeof(float)));
}else{
targ_flags_in_shared=true;
}
}
if(opts.loopcheck.value()){
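// the loopcheck history keeps one (key, direction) entry per 5 steps, hence the nsteps/5 sizing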
checkCuda(cudaMalloc((void**)loopcheckkeys_gpu,(THREADS_STREAM*nsteps/5)*sizeof(int)));
checkCuda(cudaMalloc((void**)loopcheckdirs_gpu,(THREADS_STREAM*nsteps/5)*sizeof(float3)));
}
// Connectivity Matrices
if(opts.matrix3out.value()){
checkCuda(cudaMalloc((void**)mat_crossed_gpu,NSTREAMS*size_mat_cross*sizeof(float3)));
checkCuda(cudaMalloc((void**)mat_numcrossed_gpu,MAX_SLs*sizeof(int)));
if(opts.lrmask3.value()!=""){
checkCuda(cudaMalloc((void**)lrmat_crossed_gpu,NSTREAMS*size_lrmat_cross*sizeof(float3)));
checkCuda(cudaMalloc((void**)lrmat_numcrossed_gpu,MAX_SLs*sizeof(int)));
}
}else if(opts.matrix1out.value()||opts.matrix2out.value()){
checkCuda(cudaMalloc((void**)lrmat_crossed_gpu,NSTREAMS*size_lrmat_cross*sizeof(float3)));
checkCuda(cudaMalloc((void**)lrmat_numcrossed_gpu,MAX_SLs*sizeof(int)));
}
}
void copy_ToConstantMemory(tractographyData& data_host)
{
probtrackxOptions& opts=probtrackxOptions::getInstance();
checkCuda(cudaMemcpyToSymbol(C_vox2mm,data_host.vox2mm,12*sizeof(float)));
checkCuda(cudaMemcpyToSymbol(C_steplength,&(data_host.steplength),sizeof(float)));
checkCuda(cudaMemcpyToSymbol(C_distthresh,&(data_host.distthresh),sizeof(float)));
checkCuda(cudaMemcpyToSymbol(C_curv_thr,&(data_host.curv_thr),sizeof(float)));
//cudaMemcpyToSymbol(C_fibthresh,&(data_host.fibthresh),sizeof(float));
checkCuda(cudaMemcpyToSymbol(C_Sdims,data_host.Sdims,3*sizeof(float)));
checkCuda(cudaMemcpyToSymbol(C_Ddims,data_host.Ddims,3*sizeof(float)));
checkCuda(cudaMemcpyToSymbol(C_Wsampling_S2D_I,data_host.Wsampling_S2D_I,3*sizeof(float)));
checkCuda(cudaMemcpyToSymbol(C_Wsampling_D2S_I,data_host.Wsampling_D2S_I,3*sizeof(float)));
checkCuda(cudaMemcpyToSymbol(C_SsamplingI,data_host.SsamplingI,3*sizeof(float)));
checkCuda(cudaMemcpyToSymbol(C_DsamplingI,data_host.DsamplingI,3*sizeof(float)));
checkCuda(cudaMemcpyToSymbol(C_Seeds_to_DTI,data_host.Seeds_to_DTI,12*sizeof(float)));
checkCuda(cudaMemcpyToSymbol(C_DTI_to_Seeds,data_host.DTI_to_Seeds,12*sizeof(float)));
//checkCuda(cudaMemcpyToSymbol(C_Seeds_to_M2,data_host.Seeds_to_M2,12*sizeof(float)));
checkCuda(cudaMemcpyToSymbol(C_Ssizes,data_host.Ssizes,3*sizeof(int)));
checkCuda(cudaMemcpyToSymbol(C_Dsizes,data_host.Dsizes,3*sizeof(int)));
//checkCuda(cudaMemcpyToSymbol(C_M2sizes,data_host.M2sizes,3*sizeof(int)));
checkCuda(cudaMemcpyToSymbol(C_Warp_S2D_sizes,data_host.Warp_S2D_sizes,3*sizeof(int)));
checkCuda(cudaMemcpyToSymbol(C_Warp_D2S_sizes,data_host.Warp_D2S_sizes,3*sizeof(int)));
if(data_host.lrmatrix1.NVols){
if(opts.matrix2out.value()){
checkCuda(cudaMemcpyToSymbol(C_Seeds_to_M2,data_host.Seeds_to_M2,12*sizeof(float)));
checkCuda(cudaMemcpyToSymbol(C_M2sizes,data_host.M2sizes,3*sizeof(int)));
}
}
}
void copy_ToTextureMemory( tractographyData& data_host)
{
probtrackxOptions& opts=probtrackxOptions::getInstance();
cudaArray *d_volumeArray1,*d_volumeArray2,*d_volumeArray3;
cudaArray *d_volumeArray4,*d_volumeArray5,*d_volumeArray6;
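// Each nonlinear warp field is uploaded one component at a time: the x, y and z displacement
// volumes get their own 3D cudaArray and are bound to separate texture references
// (T_SeedDTIwarp1/2/3 and T_DTISeedwarp1/2/3) for cached 3D reads.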
if(opts.seeds_to_dti.value()!="" && fsl_imageexists(opts.seeds_to_dti.value())){
long size_warp=data_host.Warp_S2D_sizes[0]*data_host.Warp_S2D_sizes[1]*data_host.Warp_S2D_sizes[2];
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindFloat);
const cudaExtent volumeSize= make_cudaExtent(data_host.Warp_S2D_sizes[0],data_host.Warp_S2D_sizes[1],data_host.Warp_S2D_sizes[2]);
checkCuda(cudaMalloc3DArray(&d_volumeArray1,&channelDesc,volumeSize));
checkCuda(cudaMalloc3DArray(&d_volumeArray2,&channelDesc,volumeSize));
checkCuda(cudaMalloc3DArray(&d_volumeArray3,&channelDesc,volumeSize));
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr((void*)data_host.SeedDTIwarp, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray1;
copyParams.extent = volumeSize;
copyParams.kind = cudaMemcpyHostToDevice;
checkCuda(cudaMemcpy3D(©Params));
// default addressMode clamp
// T_SeedDTIwarp1.filterMode=cudaFilterModeLinear;
// trilinear interpolation ... not precise enough, so the default point filtering is kept
checkCuda(cudaBindTextureToArray(T_SeedDTIwarp1,d_volumeArray1,channelDesc));
copyParams.srcPtr = make_cudaPitchedPtr((void*)&data_host.SeedDTIwarp[size_warp], volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray2;
checkCuda(cudaMemcpy3D(©Params));
checkCuda(cudaBindTextureToArray(T_SeedDTIwarp2,d_volumeArray2,channelDesc));
copyParams.srcPtr = make_cudaPitchedPtr((void*)&data_host.SeedDTIwarp[2*size_warp], volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray3;
checkCuda(cudaMemcpy3D(©Params));
checkCuda(cudaBindTextureToArray(T_SeedDTIwarp3,d_volumeArray3,channelDesc));
}
if(opts.dti_to_seeds.value()!="" && fsl_imageexists(opts.dti_to_seeds.value())){
long size_warp=data_host.Warp_D2S_sizes[0]*data_host.Warp_D2S_sizes[1]*data_host.Warp_D2S_sizes[2];
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindFloat);
const cudaExtent volumeSize2= make_cudaExtent(data_host.Warp_D2S_sizes[0],data_host.Warp_D2S_sizes[1],data_host.Warp_D2S_sizes[2]);
checkCuda(cudaMalloc3DArray(&d_volumeArray4,&channelDesc,volumeSize2));
checkCuda(cudaMalloc3DArray(&d_volumeArray5,&channelDesc,volumeSize2));
checkCuda(cudaMalloc3DArray(&d_volumeArray6,&channelDesc,volumeSize2));
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr((void*)data_host.DTISeedwarp, volumeSize2.width*sizeof(float), volumeSize2.width, volumeSize2.height);
copyParams.dstArray = d_volumeArray4;
copyParams.extent = volumeSize2;
checkCuda(cudaMemcpy3D(©Params));
checkCuda(cudaBindTextureToArray(T_DTISeedwarp1,d_volumeArray4,channelDesc));
copyParams.srcPtr = make_cudaPitchedPtr((void*)&data_host.DTISeedwarp[size_warp], volumeSize2.width*sizeof(float), volumeSize2.width, volumeSize2.height);
copyParams.dstArray = d_volumeArray5;
checkCuda(cudaMemcpy3D(©Params));
checkCuda(cudaBindTextureToArray(T_DTISeedwarp2,d_volumeArray5,channelDesc));
copyParams.srcPtr = make_cudaPitchedPtr((void*)&data_host.DTISeedwarp[2*size_warp], volumeSize2.width*sizeof(float), volumeSize2.width, volumeSize2.height);
copyParams.dstArray = d_volumeArray6;
checkCuda(cudaMemcpy3D(©Params));
checkCuda(cudaBindTextureToArray(T_DTISeedwarp3,d_volumeArray6,channelDesc));
}
}
void copy_to_gpu( tractographyData& data_host,
tractographyData*& data_gpu)
{
probtrackxOptions& opts =probtrackxOptions::getInstance();
checkCuda(cudaMalloc((void**)&data_gpu,sizeof(tractographyData)));
checkCuda(cudaMemcpy(data_gpu,&data_host,sizeof(tractographyData),cudaMemcpyHostToDevice));
int* auxI;
float* auxF;
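// Deep-copy pattern used below: allocate a device buffer (auxI/auxF), copy the host array
// into it, then patch the corresponding pointer member of the device-resident
// tractographyData struct with a cudaMemcpy of the pointer value itself.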
// sizes and dims .... now in Constant memory
// seeds
checkCuda(cudaMalloc((void**)&auxF,data_host.nseeds*3*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.seeds,data_host.nseeds*3*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->seeds,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
if(opts.network.value()){
checkCuda(cudaMalloc((void**)&auxF,data_host.nseeds*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.seeds_ROI,data_host.nseeds*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->seeds_ROI,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
}
// mask
checkCuda(cudaMalloc((void**)&auxF,data_host.Dsizes[0]*data_host.Dsizes[1]*data_host.Dsizes[2]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.mask,data_host.Dsizes[0]*data_host.Dsizes[1]*data_host.Dsizes[2]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->mask,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
// th_samples
checkCuda(cudaMalloc((void**)&auxF,data_host.nfibres*data_host.nsamples*data_host.nvoxels*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.thsamples,data_host.nfibres*data_host.nsamples*data_host.nvoxels*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->thsamples,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
// ph_samples
checkCuda(cudaMalloc((void**)&auxF,data_host.nfibres*data_host.nsamples*data_host.nvoxels*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.phsamples,data_host.nfibres*data_host.nsamples*data_host.nvoxels*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->phsamples,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
// f_samples
checkCuda(cudaMalloc((void**)&auxF,data_host.nfibres*data_host.nsamples*data_host.nvoxels*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.fsamples,data_host.nfibres*data_host.nsamples*data_host.nvoxels*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->fsamples,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
// lut_vol2mat
checkCuda(cudaMalloc((void**)&auxI,data_host.Dsizes[0]*data_host.Dsizes[1]*data_host.Dsizes[2]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.lut_vol2mat,data_host.Dsizes[0]*data_host.Dsizes[1]*data_host.Dsizes[2]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->lut_vol2mat,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
//Seeds_to_DTI...... now in Constant memory
//DTI_to_Seeds...... now in Constant memory
//VOX2MM...... now in Constant memory
//NON-LINEAR ...... now in Constant memory and Texture Memory
//Warp sizes.... now in constant memory
//Sampling Inverse.... now in constant memory
//Avoid mask
if(data_host.avoid.NVols){
checkCuda(cudaMalloc((void**)&auxF,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.avoid.volume,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->avoid.volume,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
}
if(data_host.avoid.NSurfs){
//cudaMalloc((void**)&auxI,data_host.avoid.sizesStr[0]*sizeof(int));
//cudaMemcpy(auxI,data_host.avoid.locs,data_host.avoid.sizesStr[0]*sizeof(int),cudaMemcpyHostToDevice);
//cudaMemcpy(&data_gpu->avoid.locs,&auxI,sizeof(int*),cudaMemcpyHostToDevice);
// no need for locs
checkCuda(cudaMalloc((void**)&auxF,data_host.avoid.sizesStr[1]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.avoid.vertices,data_host.avoid.sizesStr[1]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->avoid.vertices,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.avoid.sizesStr[2]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.avoid.faces,data_host.avoid.sizesStr[2]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->avoid.faces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.avoid.sizesStr[3]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.avoid.VoxFaces,data_host.avoid.sizesStr[3]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->avoid.VoxFaces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.avoid.VoxFacesIndex,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->avoid.VoxFacesIndex,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
}
// Stop mask
if(data_host.stop.NVols){
checkCuda(cudaMalloc((void**)&auxF,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.stop.volume,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->stop.volume,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
}
if(data_host.stop.NSurfs){
//cudaMalloc((void**)&auxI,data_host.stop.sizesStr[0]*sizeof(int));
//cudaMemcpy(auxI,data_host.stop.locs,data_host.stop.sizesStr[0]*sizeof(int),cudaMemcpyHostToDevice);
//cudaMemcpy(&data_gpu->stop.locs,&auxI,sizeof(int*),cudaMemcpyHostToDevice);
// no need for locs
checkCuda(cudaMalloc((void**)&auxF,data_host.stop.sizesStr[1]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.stop.vertices,data_host.stop.sizesStr[1]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->stop.vertices,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.stop.sizesStr[2]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.stop.faces,data_host.stop.sizesStr[2]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->stop.faces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.stop.sizesStr[3]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.stop.VoxFaces,data_host.stop.sizesStr[3]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->stop.VoxFaces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.stop.VoxFacesIndex,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->stop.VoxFacesIndex,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
}
// Wtstop mask
if(data_host.wtstop.NVols){
checkCuda(cudaMalloc((void**)&auxF,data_host.wtstop.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.wtstop.volume,data_host.wtstop.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->wtstop.volume,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
}
if(data_host.wtstop.NSurfs){
//cudaMalloc((void**)&auxI,data_host.wtstop.sizesStr[0]*sizeof(int));
//cudaMemcpy(auxI,data_host.wtstop.locs,data_host.wtstop.sizesStr[0]*sizeof(int),cudaMemcpyHostToDevice);
//cudaMemcpy(&data_gpu->wtstop.locs,&auxI,sizeof(int*),cudaMemcpyHostToDevice);
// no need for locs
checkCuda(cudaMalloc((void**)&auxF,data_host.wtstop.sizesStr[1]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.wtstop.vertices,data_host.wtstop.sizesStr[1]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->wtstop.vertices,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.wtstop.sizesStr[2]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.wtstop.faces,data_host.wtstop.sizesStr[2]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->wtstop.faces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.wtstop.sizesStr[3]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.wtstop.VoxFaces,data_host.wtstop.sizesStr[3]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->wtstop.VoxFaces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,(data_host.wtstop.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.wtstop.VoxFacesIndex,(data_host.wtstop.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->wtstop.VoxFacesIndex,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
}
// Network mask
if(data_host.network.NVols){
checkCuda(cudaMalloc((void**)&auxF,data_host.network.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.network.volume,data_host.network.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->network.volume,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
}
if(data_host.network.NSurfs){
//cudaMalloc((void**)&auxI,data_host.network.sizesStr[0]*sizeof(int));
//cudaMemcpy(auxI,data_host.network.locs,data_host.network.sizesStr[0]*sizeof(int),cudaMemcpyHostToDevice);
//cudaMemcpy(&data_gpu->network.locs,&auxI,sizeof(int*),cudaMemcpyHostToDevice);
// no locs
checkCuda(cudaMalloc((void**)&auxF,data_host.network.sizesStr[1]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.network.vertices,data_host.network.sizesStr[1]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->network.vertices,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.network.sizesStr[2]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.network.faces,data_host.network.sizesStr[2]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->network.faces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.network.sizesStr[3]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.network.VoxFaces,data_host.network.sizesStr[3]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->network.VoxFaces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,(data_host.network.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.network.VoxFacesIndex,(data_host.network.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->network.VoxFacesIndex,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
}
if(data_host.network.NVols||data_host.network.NSurfs){
int totalrois=data_host.network.NVols+data_host.network.NSurfs;
checkCuda(cudaMalloc((void**)&auxI,totalrois*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.network.IndexRoi,totalrois*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->network.IndexRoi,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
}
// Reference Network mask
if(data_host.networkREF.NVols){
checkCuda(cudaMalloc((void**)&auxF,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.networkREF.volume,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->networkREF.volume,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
}
if(data_host.networkREF.NSurfs){
checkCuda(cudaMalloc((void**)&auxF,data_host.networkREF.sizesStr[1]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.networkREF.vertices,data_host.networkREF.sizesStr[1]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->networkREF.vertices,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.networkREF.sizesStr[2]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.networkREF.faces,data_host.networkREF.sizesStr[2]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->networkREF.faces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.networkREF.sizesStr[3]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.networkREF.VoxFaces,data_host.networkREF.sizesStr[3]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->networkREF.VoxFaces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.networkREF.VoxFacesIndex,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->networkREF.VoxFacesIndex,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
}
// Waypoints mask
if(data_host.waypoint.NVols){
checkCuda(cudaMalloc((void**)&auxF,data_host.waypoint.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.waypoint.volume,data_host.waypoint.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->waypoint.volume,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
}
if(data_host.waypoint.NSurfs){
//cudaMalloc((void**)&auxI,data_host.waypoint.sizesStr[0]*sizeof(int));
//cudaMemcpy(auxI,data_host.waypoint.locs,data_host.waypoint.sizesStr[0]*sizeof(int),cudaMemcpyHostToDevice);
//cudaMemcpy(&data_gpu->waypoint.locs,&auxI,sizeof(int*),cudaMemcpyHostToDevice);
checkCuda(cudaMalloc((void**)&auxF,data_host.waypoint.sizesStr[1]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.waypoint.vertices,data_host.waypoint.sizesStr[1]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->waypoint.vertices,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.waypoint.sizesStr[2]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.waypoint.faces,data_host.waypoint.sizesStr[2]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->waypoint.faces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.waypoint.sizesStr[3]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.waypoint.VoxFaces,data_host.waypoint.sizesStr[3]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->waypoint.VoxFaces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,(data_host.waypoint.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.waypoint.VoxFacesIndex,(data_host.waypoint.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->waypoint.VoxFacesIndex,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
}
if(data_host.waypoint.NVols||data_host.waypoint.NSurfs){
int totalrois=data_host.waypoint.NVols+data_host.waypoint.NSurfs;
checkCuda(cudaMalloc((void**)&auxI,totalrois*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.waypoint.IndexRoi,totalrois*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->waypoint.IndexRoi,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
}
// Target mask
if(data_host.targets.NVols){
checkCuda(cudaMalloc((void**)&auxF,data_host.targets.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.targets.volume,data_host.targets.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->targets.volume,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
}
if(data_host.targets.NSurfs){
checkCuda(cudaMalloc((void**)&auxF,data_host.targets.sizesStr[1]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.targets.vertices,data_host.targets.sizesStr[1]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->targets.vertices,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.targets.sizesStr[2]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.targets.faces,data_host.targets.sizesStr[2]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->targets.faces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.targets.sizesStr[3]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.targets.VoxFaces,data_host.targets.sizesStr[3]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->targets.VoxFaces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,(data_host.targets.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.targets.VoxFacesIndex,(data_host.targets.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->targets.VoxFacesIndex,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
}
if(data_host.targets.NVols||data_host.targets.NSurfs){
int totalrois=data_host.targets.NVols+data_host.targets.NSurfs;
checkCuda(cudaMalloc((void**)&auxI,totalrois*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.targets.IndexRoi,totalrois*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->targets.IndexRoi,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
}
// Reference Targets mask
if(data_host.targetsREF.NVols){
checkCuda(cudaMalloc((void**)&auxF,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.targetsREF.volume,data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->targetsREF.volume,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
}
if(data_host.targetsREF.NSurfs){
checkCuda(cudaMalloc((void**)&auxF,data_host.targetsREF.sizesStr[1]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.targetsREF.vertices,data_host.targetsREF.sizesStr[1]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->targetsREF.vertices,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.targetsREF.sizesStr[2]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.targetsREF.faces,data_host.targetsREF.sizesStr[2]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->targetsREF.faces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.targetsREF.sizesStr[3]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.targetsREF.VoxFaces,data_host.targetsREF.sizesStr[3]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->targetsREF.VoxFaces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.targetsREF.VoxFacesIndex,(data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->targetsREF.VoxFacesIndex,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
}
// Matrix 1
// LRMatrix 1
if(data_host.lrmatrix1.NVols){
if(opts.matrix2out.value()){
checkCuda(cudaMalloc((void**)&auxF,data_host.lrmatrix1.NVols*data_host.M2sizes[0]*data_host.M2sizes[1]*data_host.M2sizes[2]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.lrmatrix1.volume,data_host.lrmatrix1.NVols*data_host.M2sizes[0]*data_host.M2sizes[1]*data_host.M2sizes[2]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->lrmatrix1.volume,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
}else{
checkCuda(cudaMalloc((void**)&auxF,data_host.lrmatrix1.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.lrmatrix1.volume,data_host.lrmatrix1.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->lrmatrix1.volume,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
}
}
if(data_host.lrmatrix1.NSurfs){
checkCuda(cudaMalloc((void**)&auxI,data_host.lrmatrix1.sizesStr[0]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.lrmatrix1.locs,data_host.lrmatrix1.sizesStr[0]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->lrmatrix1.locs,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxF,data_host.lrmatrix1.sizesStr[1]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.lrmatrix1.vertices,data_host.lrmatrix1.sizesStr[1]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->lrmatrix1.vertices,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.lrmatrix1.sizesStr[2]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.lrmatrix1.faces,data_host.lrmatrix1.sizesStr[2]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->lrmatrix1.faces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.lrmatrix1.sizesStr[3]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.lrmatrix1.VoxFaces,data_host.lrmatrix1.sizesStr[3]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->lrmatrix1.VoxFaces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,(data_host.lrmatrix1.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.lrmatrix1.VoxFacesIndex,(data_host.lrmatrix1.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->lrmatrix1.VoxFacesIndex,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
//cudaMalloc((void**)&auxI,data_host.lrmatrix1.sizesStr[4]*sizeof(int));
//cudaMemcpy(auxI,data_host.lrmatrix1.IndexRoi,data_host.lrmatrix1.sizesStr[4]*sizeof(int),cudaMemcpyHostToDevice);
//cudaMemcpy(&data_gpu->lrmatrix1.IndexRoi,&auxI,sizeof(int*),cudaMemcpyHostToDevice);
}
// Matrix 3
if(data_host.matrix3.NVols){
checkCuda(cudaMalloc((void**)&auxF,data_host.matrix3.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.matrix3.volume,data_host.matrix3.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->matrix3.volume,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
}
if(data_host.matrix3.NSurfs){
checkCuda(cudaMalloc((void**)&auxI,data_host.matrix3.sizesStr[0]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.matrix3.locs,data_host.matrix3.sizesStr[0]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->matrix3.locs,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxF,data_host.matrix3.sizesStr[1]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.matrix3.vertices,data_host.matrix3.sizesStr[1]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->matrix3.vertices,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.matrix3.sizesStr[2]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.matrix3.faces,data_host.matrix3.sizesStr[2]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->matrix3.faces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.matrix3.sizesStr[3]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.matrix3.VoxFaces,data_host.matrix3.sizesStr[3]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->matrix3.VoxFaces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,(data_host.matrix3.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.matrix3.VoxFacesIndex,(data_host.matrix3.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->matrix3.VoxFacesIndex,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
//cudaMalloc((void**)&auxI,data_host.matrix3.sizesStr[4]*sizeof(int));
//cudaMemcpy(auxI,data_host.matrix3.IndexRoi,data_host.matrix3.sizesStr[4]*sizeof(int),cudaMemcpyHostToDevice);
//cudaMemcpy(&data_gpu->matrix3.IndexRoi,&auxI,sizeof(int*),cudaMemcpyHostToDevice);
}
// LRMatrix 3
if(data_host.lrmatrix3.NVols){
checkCuda(cudaMalloc((void**)&auxF,data_host.lrmatrix3.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.lrmatrix3.volume,data_host.lrmatrix3.NVols*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->lrmatrix3.volume,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
}
if(data_host.lrmatrix3.NSurfs){
checkCuda(cudaMalloc((void**)&auxI,data_host.lrmatrix3.sizesStr[0]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.lrmatrix3.locs,data_host.lrmatrix3.sizesStr[0]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->lrmatrix3.locs,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxF,data_host.lrmatrix3.sizesStr[1]*sizeof(float)));
checkCuda(cudaMemcpy(auxF,data_host.lrmatrix3.vertices,data_host.lrmatrix3.sizesStr[1]*sizeof(float),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->lrmatrix3.vertices,&auxF,sizeof(float*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.lrmatrix3.sizesStr[2]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.lrmatrix3.faces,data_host.lrmatrix3.sizesStr[2]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->lrmatrix3.faces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,data_host.lrmatrix3.sizesStr[3]*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.lrmatrix3.VoxFaces,data_host.lrmatrix3.sizesStr[3]*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->lrmatrix3.VoxFaces,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
checkCuda(cudaMalloc((void**)&auxI,(data_host.lrmatrix3.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int)));
checkCuda(cudaMemcpy(auxI,data_host.lrmatrix3.VoxFacesIndex,(data_host.lrmatrix3.NSurfs*data_host.Ssizes[0]*data_host.Ssizes[1]*data_host.Ssizes[2]+1)*sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(&data_gpu->lrmatrix3.VoxFacesIndex,&auxI,sizeof(int*),cudaMemcpyHostToDevice));
//cudaMalloc((void**)&auxI,data_host.lrmatrix3.sizesStr[4]*sizeof(int));
//cudaMemcpy(auxI,data_host.lrmatrix3.IndexRoi,data_host.lrmatrix3.sizesStr[4]*sizeof(int),cudaMemcpyHostToDevice);
//cudaMemcpy(&data_gpu->lrmatrix3.IndexRoi,&auxI,sizeof(int*),cudaMemcpyHostToDevice);
}
}
|
2c93614707edb976e24731af3165d41ffb627269.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file gr_scaling.cpp
* @brief BSSN_GR scaling tests.
* @version 0.1
* @date 2021-07-25
*
* @copyright Copyright (c) 2021
*
*/
#include "gr.h"
#include "grUtils.h"
#include "mpi.h"
#include "TreeNode.h"
#include "mesh.h"
#include <vector>
#include <iostream>
#include "rkBSSN.h"
#include "octUtils.h"
#include "meshUtils.h"
#include "mathUtils.h"
#include <fstream>
#include <ctime>
#include "bssnCtxGPU.cuh"
int bssn_driver(MPI_Comm comm, unsigned int num_step,unsigned int warm_up, std::ostream& outfile,unsigned int ts_mode)
{
int rank, npes;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &npes);
std::vector<ot::TreeNode> tmpNodes;
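// pointwise initial-data functor: fills all BSSN evolution variables with puncture data at (x,y,z)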
std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){bssn::punctureData(x,y,z,var);};
const unsigned int interpVars=bssn::BSSN_NUM_VARS;
unsigned int varIndex[interpVars];
for(unsigned int i=0;i<bssn::BSSN_NUM_VARS;i++)
varIndex[i]=i;
if(false && bssn::BSSN_ENABLE_BLOCK_ADAPTIVITY)
{
if(!rank) std::cout<<YLW<<"Using block adaptive mesh. AMR disabled "<<NRM<<std::endl;
const Point pt_min(bssn::BSSN_BLK_MIN_X,bssn::BSSN_BLK_MIN_Y,bssn::BSSN_BLK_MIN_Z);
const Point pt_max(bssn::BSSN_BLK_MAX_X,bssn::BSSN_BLK_MAX_Y,bssn::BSSN_BLK_MAX_Z);
bssn::blockAdaptiveOctree(tmpNodes,pt_min,pt_max,m_uiMaxDepth-2,m_uiMaxDepth,comm);
}else
{
if(!rank) std::cout<<YLW<<"Using function2Octree. AMR enabled "<<NRM<<std::endl;
const unsigned int f2olmin = ::min(bssn::BSSN_BH1_MAX_LEV,bssn::BSSN_BH2_MAX_LEV);
if(f2olmin < MAXDEAPTH_LEVEL_DIFF + 2)
{
if(!rank)
std::cout<<"BH min level should be larger than "<<(MAXDEAPTH_LEVEL_DIFF+2)<<std::endl;
MPI_Abort(comm,0);
}
function2Octree(f_init,bssn::BSSN_NUM_VARS,varIndex,interpVars,tmpNodes,(f2olmin-MAXDEAPTH_LEVEL_DIFF-2),bssn::BSSN_WAVELET_TOL,bssn::BSSN_ELE_ORDER,comm);
}
//std::vector<ot::TreeNode> f2Octants(tmpNodes);
ot::Mesh * mesh = ot::createMesh(tmpNodes.data(),tmpNodes.size(),bssn::BSSN_ELE_ORDER,comm,1,ot::SM_TYPE::FDM,bssn::BSSN_DENDRO_GRAIN_SZ,bssn::BSSN_LOAD_IMB_TOL,bssn::BSSN_SPLIT_FIX);
mesh->setDomainBounds(Point(bssn::BSSN_GRID_MIN_X,bssn::BSSN_GRID_MIN_Y,bssn::BSSN_GRID_MIN_Z), Point(bssn::BSSN_GRID_MAX_X, bssn::BSSN_GRID_MAX_Y,bssn::BSSN_GRID_MAX_Z));
unsigned int lmin, lmax;
mesh->computeMinMaxLevel(lmin,lmax);
tmpNodes.clear();
if(!rank)
{
std::cout<<"================= Grid Info (Before init grid converge):======================================================="<<std::endl;
std::cout<<"lmin: "<<lmin<<" lmax:"<<lmax<<std::endl;
std::cout<<"dx: "<<((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth))))<<std::endl;
std::cout<<"dt: "<<bssn::BSSN_CFL_FACTOR*((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth))))<<std::endl;
std::cout<<"ts mode: "<<ts_mode<<std::endl;
std::cout<<"==============================================================================================================="<<std::endl;
}
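// CFL-limited time step: CFL factor times the grid spacing at the finest refinement level (lmax)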
bssn::BSSN_RK45_TIME_STEP_SIZE=bssn::BSSN_CFL_FACTOR*((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth))));
tmpNodes.clear();
// enable block adaptivity, disable remeshing.
bssn::BSSN_ENABLE_BLOCK_ADAPTIVITY=1;
ot::Mesh* pMesh = mesh;
if(bssn::BSSN_RESTORE_SOLVER==0)
pMesh = bssn::weakScalingReMesh(mesh,npes);
if(ts_mode == 0)
{
}else if(ts_mode == 1)
{
bssn::BSSNCtxGPU * bssnCtx = new bssn::BSSNCtxGPU(pMesh);
ts::ETS<DendroScalar,bssn::BSSNCtxGPU>* ets = new ts::ETS<DendroScalar,bssn::BSSNCtxGPU>(bssnCtx);
ets->set_evolve_vars(bssnCtx->get_evolution_vars());
if((RKType)bssn::BSSN_RK_TYPE == RKType::RK3)
ets->set_ets_coefficients(ts::ETSType::RK3);
else if((RKType)bssn::BSSN_RK_TYPE == RKType::RK4)
ets->set_ets_coefficients(ts::ETSType::RK4);
else if((RKType)bssn::BSSN_RK_TYPE == RKType::RK45)
ets->set_ets_coefficients(ts::ETSType::RK5);
ets->init();
#if defined __PROFILE_CTX__ && defined __PROFILE_ETS__
std::ofstream outfile;
char fname [256];
sprintf(fname, "bssnCtxGPU_WS_%d.txt",npes);
if(!rank)
{
outfile.open(fname, std::ios_base::app);
time_t now = time(0);
// convert now to string form
char* dt = ctime(&now);
outfile <<"============================================================"<<std::endl;
outfile << "Current time : "<<dt<<" --- "<<std::endl;
outfile <<"============================================================"<<std::endl;
}
ets->init_pt();
bssnCtx->reset_pt();
ets->dump_pt(outfile);
//bssnCtx->dump_pt(outfile);
#endif
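// dedicated stream for asynchronous device-to-host copies of the evolved variables
// (used by the GW extraction / IO path that is currently commented out below)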
hipStream_t s_gw;
hipStreamCreate(&s_gw);
ts::TSInfo ts_gw_output;
ts::TSInfo ts_curr;
bool is_gw_written=false;
bool is_merge_executed =false;
double t1 = MPI_Wtime();
while(ets->curr_time() < bssn::BSSN_RK_TIME_END)
{
const DendroIntL step = ets->curr_step();
const DendroScalar time = ets->curr_time();
bssn::BSSN_CURRENT_RK_COORD_TIME = time;
bssn::BSSN_CURRENT_RK_STEP = step;
// if(time < 200)
// bssn::BSSN_REFINEMENT_MODE = RefinementMode::BH_LOC;
// else
// bssn::BSSN_REFINEMENT_MODE = RefinementMode::WAMR;
const bool isActive = ets->is_active();
const unsigned int rank_global = ets->get_global_rank();
const bool is_merged = bssnCtx->is_bh_merged(0.1);
if(is_merged && !is_merge_executed)
{
bssn::BSSN_REMESH_TEST_FREQ = 3 * bssn::BSSN_REMESH_TEST_FREQ_AFTER_MERGER;
bssn::BSSN_MINDEPTH=5;
bssn::BSSN_GW_EXTRACT_FREQ = bssn::BSSN_GW_EXTRACT_FREQ_AFTER_MERGER;
bssn::BSSN_REFINEMENT_MODE = RefinementMode::WAMR;
}/*else
{
//bssn::BSSN_REFINEMENT_MODE = RefinementMode::BH_LOC;
}*/
/*if((step % bssn::BSSN_GW_EXTRACT_FREQ) == 0 )
{
if(!rank_global)
std::cout<<"[ETS] : Executing step : "<<ets->curr_step()<<"\tcurrent time :"<<ets->curr_time()<<"\t dt:"<<ets->ts_size()<<"\t"<<std::endl;
// 03/23/22 : this works only because we use the default stream for the evolution. The default stream is special and synchronizes with all other streams.
bssnCtx->device_to_host_async(s_gw);
ts_gw_output=bssnCtx->get_ts_info();
is_gw_written=false;
if( (step % bssn::BSSN_REMESH_TEST_FREQ) == 0 )
{
hipStreamSynchronize(s_gw);
bool isRemesh = bssnCtx->is_remesh();
if(isRemesh)
{
if(!rank_global)
std::cout<<"[ETS] : Remesh is triggered. \n";
bssnCtx->remesh_and_gridtransfer(bssn::BSSN_DENDRO_GRAIN_SZ, bssn::BSSN_LOAD_IMB_TOL,bssn::BSSN_SPLIT_FIX);
bssn::deallocate_bssn_deriv_workspace();
bssn::allocate_bssn_deriv_workspace(bssnCtx->get_mesh(),1);
ets->sync_with_mesh();
// correct timestep size
ot::Mesh* pmesh = bssnCtx->get_mesh();
unsigned int lmin, lmax;
pmesh->computeMinMaxLevel(lmin,lmax);
if(!pmesh->getMPIRank())
printf("post merger grid level = (%d, %d)\n",lmin,lmax);
bssn::BSSN_RK45_TIME_STEP_SIZE=bssn::BSSN_CFL_FACTOR*((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth))));
ts::TSInfo ts_in = bssnCtx->get_ts_info();
ts_in._m_uiTh = bssn::BSSN_RK45_TIME_STEP_SIZE;
bssnCtx->set_ts_info(ts_in);
}
}
}
if((step % bssn::BSSN_GW_EXTRACT_FREQ) == (bssn::BSSN_GW_EXTRACT_FREQ-1))
hipStreamSynchronize(s_gw);
if((!is_gw_written) && (hipStreamQuery(s_gw) == hipSuccess))
{
ts_curr = bssnCtx->get_ts_info();
bssnCtx->set_ts_info(ts_gw_output);
bssnCtx->terminal_output();
bssnCtx->write_vtu();
bssnCtx->evolve_bh_loc(bssnCtx->get_evolution_vars_cpu(),ets->ts_size()*bssn::BSSN_GW_EXTRACT_FREQ);
if( (step % bssn::BSSN_CHECKPT_FREQ) == 0 )
bssnCtx->write_checkpt();
bssnCtx->set_ts_info(ts_curr);
is_gw_written=true;
}*/
ets->evolve();
}
#if defined __PROFILE_CTX__ && defined __PROFILE_ETS__
ets->dump_pt(outfile);
//bssnCtx->dump_pt(outfile);
#endif
double t2 = MPI_Wtime()-t1;
double t2_g;
par::Mpi_Allreduce(&t2,&t2_g,1,MPI_MAX,ets->get_global_comm());
if(!(ets->get_global_rank()))
std::cout<<" ETS time (max) : "<<t2_g<<std::endl;
ot::Mesh* tmp_mesh = bssnCtx->get_mesh();
delete bssnCtx;
delete tmp_mesh;
delete ets;
}else
{
if(!rank)
RAISE_ERROR("invalid ts mode : "<<ts_mode<<" specified");
MPI_Abort(comm,0);
}
return 0;
}
int main (int argc, char** argv)
{
// 0- NUTS 1-UTS
unsigned int ts_mode=0;
if(argc<2)
{
std::cout<<"Usage: "<<argv[0]<<"paramFile TSMode(0){0-Spatially Adaptive Time Stepping(SATS, "<<GRN<<"default"<<NRM<<") , 1- Uniform Time Stepping. }"<<std::endl;
return 0;
}
if(argc>2)
ts_mode = std::atoi(argv[2]);
MPI_Init(&argc,&argv);
MPI_Comm comm=MPI_COMM_WORLD;
int rank,npes;
MPI_Comm_rank(comm,&rank);
MPI_Comm_size(comm,&npes);
// Print out CMAKE options
if (!rank) {
#ifdef BSSN_COMPUTE_CONSTRAINTS
std::cout<<GRN<<" Compiled with BSSN_COMPUTE_CONSTRAINTS"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_COMPUTE_CONSTRAINTS"<<NRM<<std::endl;
#endif
#ifdef BSSN_ENABLE_VTU_CONSTRAINT_OUTPUT
std::cout<<GRN<<" Compiled with BSSN_ENABLE_VTU_CONSTRAINT_OUTPUT"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_ENABLE_VTU_CONSTRAINT_OUTPUT"<<NRM<<std::endl;
#endif
#ifdef BSSN_ENABLE_VTU_OUTPUT
std::cout<<GRN<<" Compiled with BSSN_ENABLE_VTU_OUTPUT"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_ENABLE_VTU_OUTPUT"<<NRM<<std::endl;
#endif
#ifdef BSSN_ETA_FUNCTION
std::cout<<GRN<<" Compiled with BSSN_ETA_FUNCTION"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_ETA_FUNCTION"<<NRM<<std::endl;
#endif
#ifdef BSSN_EXTRACT_BH_LOCATIONS
std::cout<<GRN<<" Compiled with BSSN_EXTRACT_BH_LOCATIONS"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_EXTRACT_BH_LOCATIONS"<<NRM<<std::endl;
#endif
#ifdef BSSN_EXTRACT_GRAVITATIONAL_WAVES
std::cout<<GRN<<" Compiled with BSSN_EXTRACT_GRAVITATIONAL_WAVES"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_EXTRACT_GRAVITATIONAL_WAVES"<<NRM<<std::endl;
#endif
#ifdef BSSN_GAUGE_ROCHESTER
std::cout<<GRN<<" Compiled with BSSN_GAUGE_ROCHESTER"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_GAUGE_ROCHESTER"<<NRM<<std::endl;
#endif
#ifdef BSSN_KERR_SCHILD_TEST
std::cout<<GRN<<" Compiled with BSSN_KERR_SCHILD_TEST"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_KERR_SCHILD_TEST"<<NRM<<std::endl;
#endif
#ifdef BSSN_REFINE_BASE_EH
std::cout<<GRN<<" Compiled with BSSN_REFINE_BASE_EH"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_REFINE_BASE_EH"<<NRM<<std::endl;
#endif
#ifdef USE_FD_INTERP_FOR_UNZIP
std::cout<<GRN<<" Compiled with USE_FD_INTERP_FOR_UNZIP"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without USE_FD_INTERP_FOR_UNZIP"<<NRM<<std::endl;
#endif
}
// 1. read the parameter file.
if(!rank) std::cout<<" reading parameter file :"<<argv[1]<<std::endl;
bssn::readParamFile(argv[1],comm);
int root = ::min(1,npes-1);
bssn::dumpParamFile(std::cout,root,comm);
_InitializeHcurve(bssn::BSSN_DIM);
m_uiMaxDepth=bssn::BSSN_MAXDEPTH;
if(bssn::BSSN_NUM_VARS%bssn::BSSN_ASYNC_COMM_K!=0)
{
if(!rank) std::cout<<"[overlap communication error]: total BSSN_NUM_VARS: "<<bssn::BSSN_NUM_VARS<<" is not divisable by BSSN_ASYNC_COMM_K: "<<bssn::BSSN_ASYNC_COMM_K<<std::endl;
MPI_Abort(comm,0);
}
if(bssn::BSSN_GW_EXTRACT_FREQ> bssn::BSSN_IO_OUTPUT_FREQ)
{
if(!rank) std::cout<<" BSSN_GW_EXTRACT_FREQ should be less BSSN_IO_OUTPUT_FREQ "<<std::endl;
MPI_Abort(comm,0);
}
//2. generate the initial grid.
std::vector<ot::TreeNode> tmpNodes;
std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){bssn::punctureData(x,y,z,var);};
std::function<double(double,double,double)> f_init_alpha=[](double x,double y,double z){ double var[24]; bssn::punctureData(x,y,z,var); return var[0];};
//std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){bssn::KerrSchildData(x,y,z,var);};
const unsigned int interpVars=bssn::BSSN_NUM_VARS;
unsigned int varIndex[interpVars];
for(unsigned int i=0;i<bssn::BSSN_NUM_VARS;i++)
varIndex[i]=i;
/*varIndex[0]=bssn::VAR::U_ALPHA;
varIndex[1]=bssn::VAR::U_CHI;*/
DendroIntL localSz,globalSz;
double t_stat;
double t_stat_g[3];
const unsigned int NUM_WARM_UP=2;
const unsigned int NUM_STEPS =1;
std::ofstream outfile;
char fname [256];
sprintf(fname, "bssnCtx_WS_%d.txt",npes);
if(!rank)
{
outfile.open(fname, std::ios_base::app);
time_t now = time(0);
// convert now to string form
char* dt = ctime(&now);
outfile <<"============================================================"<<std::endl;
outfile << "Current time : "<<dt<<" --- "<<std::endl;
outfile <<"============================================================"<<std::endl;
}
bssn_driver(comm,NUM_STEPS,NUM_WARM_UP,outfile,1);
if(!rank)
outfile.close();
#ifdef RUN_WEAK_SCALING
if(!rank) std::cout<<"======================================================================"<<std::endl;
if(!rank) std::cout<<" Weak Scaling Run Begin. "<<std::endl;
if(!rank) std::cout<<"======================================================================"<<std::endl;
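// Weak-scaling setup: split the ranks into disjoint sub-communicators of (roughly) halving sizes npes/2, npes/4, ..., down to min_np, and run the driver on each of them concurrently.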
int proc_group = 0;
int min_np = 2;
for (int i = npes; rank < i && i >= min_np; i = i >> 1) proc_group++;
MPI_Comm comm_ws;
MPI_Comm_split(comm, proc_group, rank, &comm_ws);
MPI_Comm_rank(comm_ws, &rank);
MPI_Comm_size(comm_ws, &npes);
if(!rank) outfile.open(fname, std::ios_base::app);
MPI_Barrier(comm_ws);
bssn_driver(comm_ws,NUM_STEPS,NUM_WARM_UP,outfile,1);
MPI_Barrier(comm_ws);
if(!rank) outfile.close();
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &npes);
MPI_Barrier(comm);
if(!rank) std::cout<<"======================================================================"<<std::endl;
if(!rank) std::cout<<" Weak Scaling Run Complete. "<<std::endl;
if(!rank) std::cout<<"======================================================================"<<std::endl;
#endif
MPI_Finalize();
return 0;
}
| 2c93614707edb976e24731af3165d41ffb627269.cu | /**
* @file gr_scaling.cpp
* @brief BSSN_GR scaling tests.
* @version 0.1
* @date 2021-07-25
*
* @copyright Copyright (c) 2021
*
*/
#include "gr.h"
#include "grUtils.h"
#include "mpi.h"
#include "TreeNode.h"
#include "mesh.h"
#include <vector>
#include <iostream>
#include "rkBSSN.h"
#include "octUtils.h"
#include "meshUtils.h"
#include "mathUtils.h"
#include <fstream>
#include <ctime>
#include "bssnCtxGPU.cuh"
int bssn_driver(MPI_Comm comm, unsigned int num_step,unsigned int warm_up, std::ostream& outfile,unsigned int ts_mode)
{
int rank, npes;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &npes);
std::vector<ot::TreeNode> tmpNodes;
std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){bssn::punctureData(x,y,z,var);};
const unsigned int interpVars=bssn::BSSN_NUM_VARS;
unsigned int varIndex[interpVars];
for(unsigned int i=0;i<bssn::BSSN_NUM_VARS;i++)
varIndex[i]=i;
if(false && bssn::BSSN_ENABLE_BLOCK_ADAPTIVITY)
{
if(!rank) std::cout<<YLW<<"Using block adaptive mesh. AMR disabled "<<NRM<<std::endl;
const Point pt_min(bssn::BSSN_BLK_MIN_X,bssn::BSSN_BLK_MIN_Y,bssn::BSSN_BLK_MIN_Z);
const Point pt_max(bssn::BSSN_BLK_MAX_X,bssn::BSSN_BLK_MAX_Y,bssn::BSSN_BLK_MAX_Z);
bssn::blockAdaptiveOctree(tmpNodes,pt_min,pt_max,m_uiMaxDepth-2,m_uiMaxDepth,comm);
}else
{
if(!rank) std::cout<<YLW<<"Using function2Octree. AMR enabled "<<NRM<<std::endl;
const unsigned int f2olmin = std::min(bssn::BSSN_BH1_MAX_LEV,bssn::BSSN_BH2_MAX_LEV);
if(f2olmin < MAXDEAPTH_LEVEL_DIFF + 2)
{
if(!rank)
std::cout<<"BH min level should be larger than "<<(MAXDEAPTH_LEVEL_DIFF+2)<<std::endl;
MPI_Abort(comm,0);
}
function2Octree(f_init,bssn::BSSN_NUM_VARS,varIndex,interpVars,tmpNodes,(f2olmin-MAXDEAPTH_LEVEL_DIFF-2),bssn::BSSN_WAVELET_TOL,bssn::BSSN_ELE_ORDER,comm);
}
//std::vector<ot::TreeNode> f2Octants(tmpNodes);
ot::Mesh * mesh = ot::createMesh(tmpNodes.data(),tmpNodes.size(),bssn::BSSN_ELE_ORDER,comm,1,ot::SM_TYPE::FDM,bssn::BSSN_DENDRO_GRAIN_SZ,bssn::BSSN_LOAD_IMB_TOL,bssn::BSSN_SPLIT_FIX);
mesh->setDomainBounds(Point(bssn::BSSN_GRID_MIN_X,bssn::BSSN_GRID_MIN_Y,bssn::BSSN_GRID_MIN_Z), Point(bssn::BSSN_GRID_MAX_X, bssn::BSSN_GRID_MAX_Y,bssn::BSSN_GRID_MAX_Z));
unsigned int lmin, lmax;
mesh->computeMinMaxLevel(lmin,lmax);
tmpNodes.clear();
if(!rank)
{
std::cout<<"================= Grid Info (Before init grid converge):======================================================="<<std::endl;
std::cout<<"lmin: "<<lmin<<" lmax:"<<lmax<<std::endl;
std::cout<<"dx: "<<((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth))))<<std::endl;
std::cout<<"dt: "<<bssn::BSSN_CFL_FACTOR*((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth))))<<std::endl;
std::cout<<"ts mode: "<<ts_mode<<std::endl;
std::cout<<"==============================================================================================================="<<std::endl;
}
bssn::BSSN_RK45_TIME_STEP_SIZE=bssn::BSSN_CFL_FACTOR*((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth))));
tmpNodes.clear();
// enable block adaptivity disable remesh.
bssn::BSSN_ENABLE_BLOCK_ADAPTIVITY=1;
ot::Mesh* pMesh = mesh;
if(bssn::BSSN_RESTORE_SOLVER==0)
pMesh = bssn::weakScalingReMesh(mesh,npes);
if(ts_mode == 0)
{
}else if(ts_mode == 1)
{
bssn::BSSNCtxGPU * bssnCtx = new bssn::BSSNCtxGPU(pMesh);
ts::ETS<DendroScalar,bssn::BSSNCtxGPU>* ets = new ts::ETS<DendroScalar,bssn::BSSNCtxGPU>(bssnCtx);
ets->set_evolve_vars(bssnCtx->get_evolution_vars());
if((RKType)bssn::BSSN_RK_TYPE == RKType::RK3)
ets->set_ets_coefficients(ts::ETSType::RK3);
else if((RKType)bssn::BSSN_RK_TYPE == RKType::RK4)
ets->set_ets_coefficients(ts::ETSType::RK4);
else if((RKType)bssn::BSSN_RK_TYPE == RKType::RK45)
ets->set_ets_coefficients(ts::ETSType::RK5);
ets->init();
#if defined __PROFILE_CTX__ && defined __PROFILE_ETS__
std::ofstream outfile;
char fname [256];
sprintf(fname, "bssnCtxGPU_WS_%d.txt",npes);
if(!rank)
{
outfile.open(fname, std::ios_base::app);
time_t now = time(0);
// convert now to string form
char* dt = ctime(&now);
outfile <<"============================================================"<<std::endl;
outfile << "Current time : "<<dt<<" --- "<<std::endl;
outfile <<"============================================================"<<std::endl;
}
ets->init_pt();
bssnCtx->reset_pt();
ets->dump_pt(outfile);
//bssnCtx->dump_pt(outfile);
#endif
cudaStream_t s_gw;
cudaStreamCreate(&s_gw);
ts::TSInfo ts_gw_output;
ts::TSInfo ts_curr;
bool is_gw_written=false;
bool is_merge_executed =false;
double t1 = MPI_Wtime();
while(ets->curr_time() < bssn::BSSN_RK_TIME_END)
{
const DendroIntL step = ets->curr_step();
const DendroScalar time = ets->curr_time();
bssn::BSSN_CURRENT_RK_COORD_TIME = time;
bssn::BSSN_CURRENT_RK_STEP = step;
// if(time < 200)
// bssn::BSSN_REFINEMENT_MODE = RefinementMode::BH_LOC;
// else
// bssn::BSSN_REFINEMENT_MODE = RefinementMode::WAMR;
const bool isActive = ets->is_active();
const unsigned int rank_global = ets->get_global_rank();
const bool is_merged = bssnCtx->is_bh_merged(0.1);
if(is_merged && !is_merge_executed)
{
bssn::BSSN_REMESH_TEST_FREQ = 3 * bssn::BSSN_REMESH_TEST_FREQ_AFTER_MERGER;
bssn::BSSN_MINDEPTH=5;
bssn::BSSN_GW_EXTRACT_FREQ = bssn::BSSN_GW_EXTRACT_FREQ_AFTER_MERGER;
bssn::BSSN_REFINEMENT_MODE = RefinementMode::WAMR;
}/*else
{
//bssn::BSSN_REFINEMENT_MODE = RefinementMode::BH_LOC;
}*/
/*if((step % bssn::BSSN_GW_EXTRACT_FREQ) == 0 )
{
if(!rank_global)
std::cout<<"[ETS] : Executing step : "<<ets->curr_step()<<"\tcurrent time :"<<ets->curr_time()<<"\t dt:"<<ets->ts_size()<<"\t"<<std::endl;
// 03/23/22 : this works only because we use the default stream for the evolution. The default stream is special and synchronizes with all other streams.
bssnCtx->device_to_host_async(s_gw);
ts_gw_output=bssnCtx->get_ts_info();
is_gw_written=false;
if( (step % bssn::BSSN_REMESH_TEST_FREQ) == 0 )
{
cudaStreamSynchronize(s_gw);
bool isRemesh = bssnCtx->is_remesh();
if(isRemesh)
{
if(!rank_global)
std::cout<<"[ETS] : Remesh is triggered. \n";
bssnCtx->remesh_and_gridtransfer(bssn::BSSN_DENDRO_GRAIN_SZ, bssn::BSSN_LOAD_IMB_TOL,bssn::BSSN_SPLIT_FIX);
bssn::deallocate_bssn_deriv_workspace();
bssn::allocate_bssn_deriv_workspace(bssnCtx->get_mesh(),1);
ets->sync_with_mesh();
// correct timestep size
ot::Mesh* pmesh = bssnCtx->get_mesh();
unsigned int lmin, lmax;
pmesh->computeMinMaxLevel(lmin,lmax);
if(!pmesh->getMPIRank())
printf("post merger grid level = (%d, %d)\n",lmin,lmax);
bssn::BSSN_RK45_TIME_STEP_SIZE=bssn::BSSN_CFL_FACTOR*((bssn::BSSN_COMPD_MAX[0]-bssn::BSSN_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) bssn::BSSN_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth))));
ts::TSInfo ts_in = bssnCtx->get_ts_info();
ts_in._m_uiTh = bssn::BSSN_RK45_TIME_STEP_SIZE;
bssnCtx->set_ts_info(ts_in);
}
}
}
if((step % bssn::BSSN_GW_EXTRACT_FREQ) == (bssn::BSSN_GW_EXTRACT_FREQ-1))
cudaStreamSynchronize(s_gw);
if((!is_gw_written) && (cudaStreamQuery(s_gw) == cudaSuccess))
{
ts_curr = bssnCtx->get_ts_info();
bssnCtx->set_ts_info(ts_gw_output);
bssnCtx->terminal_output();
bssnCtx->write_vtu();
bssnCtx->evolve_bh_loc(bssnCtx->get_evolution_vars_cpu(),ets->ts_size()*bssn::BSSN_GW_EXTRACT_FREQ);
if( (step % bssn::BSSN_CHECKPT_FREQ) == 0 )
bssnCtx->write_checkpt();
bssnCtx->set_ts_info(ts_curr);
is_gw_written=true;
}*/
ets->evolve();
}
#if defined __PROFILE_CTX__ && defined __PROFILE_ETS__
ets->dump_pt(outfile);
//bssnCtx->dump_pt(outfile);
#endif
double t2 = MPI_Wtime()-t1;
double t2_g;
par::Mpi_Allreduce(&t2,&t2_g,1,MPI_MAX,ets->get_global_comm());
if(!(ets->get_global_rank()))
std::cout<<" ETS time (max) : "<<t2_g<<std::endl;
ot::Mesh* tmp_mesh = bssnCtx->get_mesh();
delete bssnCtx;
delete tmp_mesh;
delete ets;
}else
{
if(!rank)
RAISE_ERROR("invalid ts mode : "<<ts_mode<<" specified");
MPI_Abort(comm,0);
}
return 0;
}
int main (int argc, char** argv)
{
// 0- NUTS 1-UTS
unsigned int ts_mode=0;
if(argc<2)
{
std::cout<<"Usage: "<<argv[0]<<" paramFile TSMode(0) {0 - Spatially Adaptive Time Stepping (SATS, "<<GRN<<"default"<<NRM<<"), 1 - Uniform Time Stepping.}"<<std::endl;
return 0;
}
if(argc>2)
ts_mode = std::atoi(argv[2]);
MPI_Init(&argc,&argv);
MPI_Comm comm=MPI_COMM_WORLD;
int rank,npes;
MPI_Comm_rank(comm,&rank);
MPI_Comm_size(comm,&npes);
// Print out CMAKE options
if (!rank) {
#ifdef BSSN_COMPUTE_CONSTRAINTS
std::cout<<GRN<<" Compiled with BSSN_COMPUTE_CONSTRAINTS"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_COMPUTE_CONSTRAINTS"<<NRM<<std::endl;
#endif
#ifdef BSSN_ENABLE_VTU_CONSTRAINT_OUTPUT
std::cout<<GRN<<" Compiled with BSSN_ENABLE_VTU_CONSTRAINT_OUTPUT"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_ENABLE_VTU_CONSTRAINT_OUTPUT"<<NRM<<std::endl;
#endif
#ifdef BSSN_ENABLE_VTU_OUTPUT
std::cout<<GRN<<" Compiled with BSSN_ENABLE_VTU_OUTPUT"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_ENABLE_VTU_OUTPUT"<<NRM<<std::endl;
#endif
#ifdef BSSN_ETA_FUNCTION
std::cout<<GRN<<" Compiled with BSSN_ETA_FUNCTION"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_ETA_FUNCTION"<<NRM<<std::endl;
#endif
#ifdef BSSN_EXTRACT_BH_LOCATIONS
std::cout<<GRN<<" Compiled with BSSN_EXTRACT_BH_LOCATIONS"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_EXTRACT_BH_LOCATIONS"<<NRM<<std::endl;
#endif
#ifdef BSSN_EXTRACT_GRAVITATIONAL_WAVES
std::cout<<GRN<<" Compiled with BSSN_EXTRACT_GRAVITATIONAL_WAVES"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_EXTRACT_GRAVITATIONAL_WAVES"<<NRM<<std::endl;
#endif
#ifdef BSSN_GAUGE_ROCHESTER
std::cout<<GRN<<" Compiled with BSSN_GAUGE_ROCHESTER"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_GAUGE_ROCHESTER"<<NRM<<std::endl;
#endif
#ifdef BSSN_KERR_SCHILD_TEST
std::cout<<GRN<<" Compiled with BSSN_KERR_SCHILD_TEST"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_KERR_SCHILD_TEST"<<NRM<<std::endl;
#endif
#ifdef BSSN_REFINE_BASE_EH
std::cout<<GRN<<" Compiled with BSSN_REFINE_BASE_EH"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without BSSN_REFINE_BASE_EH"<<NRM<<std::endl;
#endif
#ifdef USE_FD_INTERP_FOR_UNZIP
std::cout<<GRN<<" Compiled with USE_FD_INTERP_FOR_UNZIP"<<NRM<<std::endl;
#else
std::cout<<RED<<" Compiled without USE_FD_INTERP_FOR_UNZIP"<<NRM<<std::endl;
#endif
}
//1 . read the parameter file.
if(!rank) std::cout<<" reading parameter file :"<<argv[1]<<std::endl;
bssn::readParamFile(argv[1],comm);
int root = std::min(1,npes-1);
bssn::dumpParamFile(std::cout,root,comm);
_InitializeHcurve(bssn::BSSN_DIM);
m_uiMaxDepth=bssn::BSSN_MAXDEPTH;
if(bssn::BSSN_NUM_VARS%bssn::BSSN_ASYNC_COMM_K!=0)
{
if(!rank) std::cout<<"[overlap communication error]: total BSSN_NUM_VARS: "<<bssn::BSSN_NUM_VARS<<" is not divisible by BSSN_ASYNC_COMM_K: "<<bssn::BSSN_ASYNC_COMM_K<<std::endl;
MPI_Abort(comm,0);
}
if(bssn::BSSN_GW_EXTRACT_FREQ> bssn::BSSN_IO_OUTPUT_FREQ)
{
if(!rank) std::cout<<" BSSN_GW_EXTRACT_FREQ should be less than BSSN_IO_OUTPUT_FREQ "<<std::endl;
MPI_Abort(comm,0);
}
//2. generate the initial grid.
std::vector<ot::TreeNode> tmpNodes;
std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){bssn::punctureData(x,y,z,var);};
std::function<double(double,double,double)> f_init_alpha=[](double x,double y,double z){ double var[24]; bssn::punctureData(x,y,z,var); return var[0];};
//std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){bssn::KerrSchildData(x,y,z,var);};
const unsigned int interpVars=bssn::BSSN_NUM_VARS;
unsigned int varIndex[interpVars];
for(unsigned int i=0;i<bssn::BSSN_NUM_VARS;i++)
varIndex[i]=i;
/*varIndex[0]=bssn::VAR::U_ALPHA;
varIndex[1]=bssn::VAR::U_CHI;*/
DendroIntL localSz,globalSz;
double t_stat;
double t_stat_g[3];
const unsigned int NUM_WARM_UP=2;
const unsigned int NUM_STEPS =1;
std::ofstream outfile;
char fname [256];
sprintf(fname, "bssnCtx_WS_%d.txt",npes);
if(!rank)
{
outfile.open(fname, std::ios_base::app);
time_t now = time(0);
// convert now to string form
char* dt = ctime(&now);
outfile <<"============================================================"<<std::endl;
outfile << "Current time : "<<dt<<" --- "<<std::endl;
outfile <<"============================================================"<<std::endl;
}
bssn_driver(comm,NUM_STEPS,NUM_WARM_UP,outfile,1);
if(!rank)
outfile.close();
#ifdef RUN_WEAK_SCALING
if(!rank) std::cout<<"======================================================================"<<std::endl;
if(!rank) std::cout<<" Weak Scaling Run Begin. "<<std::endl;
if(!rank) std::cout<<"======================================================================"<<std::endl;
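// Weak-scaling setup: split the ranks into disjoint sub-communicators of (roughly) halving sizes npes/2, npes/4, ..., down to min_np, and run the driver on each of them concurrently.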
int proc_group = 0;
int min_np = 2;
for (int i = npes; rank < i && i >= min_np; i = i >> 1) proc_group++;
MPI_Comm comm_ws;
MPI_Comm_split(comm, proc_group, rank, &comm_ws);
MPI_Comm_rank(comm_ws, &rank);
MPI_Comm_size(comm_ws, &npes);
if(!rank) outfile.open(fname, std::ios_base::app);
MPI_Barrier(comm_ws);
bssn_driver(comm_ws,NUM_STEPS,NUM_WARM_UP,outfile,1);
MPI_Barrier(comm_ws);
if(!rank) outfile.close();
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &npes);
MPI_Barrier(comm);
if(!rank) std::cout<<"======================================================================"<<std::endl;
if(!rank) std::cout<<" Weak Scaling Run Complete. "<<std::endl;
if(!rank) std::cout<<"======================================================================"<<std::endl;
#endif
MPI_Finalize();
return 0;
}
|
3d2a963d04cfdf85708081320838587fc13a783d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
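// SAXPY kernel: computes y[i] = a*x[i] + y[i] for i < n; each active thread also writes 'C' into ad[0] (bd is unused).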
__global__ void saxpy(int n, float a, float *x, float *y, char *ad, char *bd)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n){ y[i] = a*x[i] + y[i];
ad[0] = 'C';
}
} | 3d2a963d04cfdf85708081320838587fc13a783d.cu | #include "includes.h"
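// SAXPY kernel: computes y[i] = a*x[i] + y[i] for i < n; each active thread also writes 'C' into ad[0] (bd is unused).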
__global__ void saxpy(int n, float a, float *x, float *y, char *ad, char *bd)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n){ y[i] = a*x[i] + y[i];
ad[0] = 'C';
}
} |
8a5c51526d34b2ebdaca3c7261bf2800fec48e8d.hip | // !!! This is a file automatically generated by hipify!!!
#include <h2opusconf.h>
/* skip compilation of this .cu file if H2OPUS is CPU only while PETSc has GPU support */
#if !defined(__HIPCC__) || defined(H2OPUS_USE_GPU)
#include <h2opus.h>
#if defined(H2OPUS_USE_MPI)
#include <h2opus/distributed/distributed_h2opus_handle.h>
#include <h2opus/distributed/distributed_geometric_construction.h>
#include <h2opus/distributed/distributed_hgemv.h>
#include <h2opus/distributed/distributed_horthog.h>
#include <h2opus/distributed/distributed_hcompress.h>
#endif
#include <h2opus/util/boxentrygen.h>
#include <petsc/private/matimpl.h>
#include <petsc/private/vecimpl.h>
#include <petsc/private/deviceimpl.h>
#include <petscsf.h>
/* math2opusutils */
PETSC_INTERN PetscErrorCode PetscSFGetVectorSF(PetscSF,PetscInt,PetscInt,PetscInt,PetscSF*);
PETSC_INTERN PetscErrorCode VecSign(Vec,Vec);
PETSC_INTERN PetscErrorCode VecSetDelta(Vec,PetscInt);
PETSC_INTERN PetscErrorCode MatApproximateNorm_Private(Mat,NormType,PetscInt,PetscReal*);
#define MatH2OpusGetThrustPointer(v) thrust::raw_pointer_cast((v).data())
/* Use GPU only if H2OPUS is configured for GPU */
#if defined(PETSC_HAVE_CUDA) && defined(H2OPUS_USE_GPU)
#define PETSC_H2OPUS_USE_GPU
#endif
#if defined(PETSC_H2OPUS_USE_GPU)
#define MatH2OpusUpdateIfNeeded(A,B) MatBindToCPU(A,(PetscBool)((A)->boundtocpu || (B)))
#else
#define MatH2OpusUpdateIfNeeded(A,B) 0
#endif
// TODO H2OPUS:
// DistributedHMatrix
// unsymmetric ?
// transpose for distributed_hgemv?
// clearData()
// Unify interface for sequential and parallel?
// Reuse geometric construction (almost possible, only the unsymmetric case is explicitly handled)
//
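// Adapter that exposes a copy of the PETSc point coordinates through the H2OpusDataSet interface used by the H2OPUS clustering routines.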
template <class T> class PetscPointCloud : public H2OpusDataSet<T>
{
private:
int dimension;
size_t num_points;
std::vector<T> pts;
public:
PetscPointCloud(int dim, size_t num_pts, const T coords[])
{
this->dimension = dim;
this->num_points = num_pts;
pts.resize(num_pts*dim);
if (coords) {
for (size_t n = 0; n < num_points; n++)
for (int i = 0; i < dim; i++)
pts[n*dim + i] = coords[n*dim + i];
} else {
PetscReal h = 1./(num_points - 1);
for (size_t n = 0; n < num_points; n++)
for (int i = 0; i < dim; i++)
pts[n*dim + i] = i*h;
}
}
PetscPointCloud(const PetscPointCloud<T>& other)
{
size_t N = other.dimension * other.num_points;
this->dimension = other.dimension;
this->num_points = other.num_points;
this->pts.resize(N);
for (size_t i = 0; i < N; i++)
this->pts[i] = other.pts[i];
}
int getDimension() const
{
return dimension;
}
size_t getDataSetSize() const
{
return num_points;
}
T getDataPoint(size_t idx, int dim) const
{
assert(dim < dimension && idx < num_points);
return pts[idx*dimension + dim];
}
void Print(std::ostream& out = std::cout)
{
out << "Dimension: " << dimension << std::endl;
out << "NumPoints: " << num_points << std::endl;
for (size_t n = 0; n < num_points; n++) {
for (int d = 0; d < dimension; d++)
out << pts[n*dimension + d] << " ";
out << std::endl;
}
}
};
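// Functor wrapping the user-provided MatH2OpusKernel callback so that H2OPUS entry generation can evaluate matrix entries from pairs of points.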
template<class T> class PetscFunctionGenerator
{
private:
MatH2OpusKernel k;
int dim;
void *ctx;
public:
PetscFunctionGenerator(MatH2OpusKernel k, int dim, void* ctx) { this->k = k; this->dim = dim; this->ctx = ctx; }
PetscFunctionGenerator(PetscFunctionGenerator& other) { this->k = other.k; this->dim = other.dim; this->ctx = other.ctx; }
T operator()(PetscReal *pt1, PetscReal *pt2)
{
return (T)((*this->k)(this->dim,pt1,pt2,this->ctx));
}
};
#include <../src/mat/impls/h2opus/math2opussampler.hpp>
/* just to not clutter the code */
#if !defined(H2OPUS_USE_GPU)
typedef HMatrix HMatrix_GPU;
#if defined(H2OPUS_USE_MPI)
typedef DistributedHMatrix DistributedHMatrix_GPU;
#endif
#endif
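/* Private data of the MATH2OPUS type: H2OPUS handle(s), sequential/distributed hierarchical matrices on CPU and GPU, the permutation SF and work vectors used by the matrix-vector products, and the construction/sampling parameters. */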
typedef struct {
#if defined(H2OPUS_USE_MPI)
distributedH2OpusHandle_t handle;
#else
h2opusHandle_t handle;
#endif
/* Sequential and parallel matrices are two different classes at the moment */
HMatrix *hmatrix;
#if defined(H2OPUS_USE_MPI)
DistributedHMatrix *dist_hmatrix;
#else
HMatrix *dist_hmatrix; /* just to not clutter the code */
#endif
/* May use permutations */
PetscSF sf;
PetscLayout h2opus_rmap, h2opus_cmap;
IS h2opus_indexmap;
thrust::host_vector<PetscScalar> *xx,*yy;
PetscInt xxs,yys;
PetscBool multsetup;
/* GPU */
HMatrix_GPU *hmatrix_gpu;
#if defined(H2OPUS_USE_MPI)
DistributedHMatrix_GPU *dist_hmatrix_gpu;
#else
HMatrix_GPU *dist_hmatrix_gpu; /* just to not clutter the code */
#endif
#if defined(PETSC_H2OPUS_USE_GPU)
thrust::device_vector<PetscScalar> *xx_gpu,*yy_gpu;
PetscInt xxs_gpu,yys_gpu;
#endif
/* construction from matvecs */
PetscMatrixSampler* sampler;
PetscBool nativemult;
/* Admissibility */
PetscReal eta;
PetscInt leafsize;
/* for dof reordering */
PetscPointCloud<PetscReal> *ptcloud;
/* kernel for generating matrix entries */
PetscFunctionGenerator<PetscScalar> *kernel;
/* basis orthogonalized? */
PetscBool orthogonal;
/* customization */
PetscInt basisord;
PetscInt max_rank;
PetscInt bs;
PetscReal rtol;
PetscInt norm_max_samples;
PetscBool check_construction;
PetscBool hara_verbose;
/* keeps track of MatScale values */
PetscScalar s;
} Mat_H2OPUS;
static PetscErrorCode MatDestroy_H2OPUS(Mat A)
{
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
#if defined(H2OPUS_USE_MPI)
h2opusDestroyDistributedHandle(a->handle);
#else
h2opusDestroyHandle(a->handle);
#endif
delete a->dist_hmatrix;
delete a->hmatrix;
ierr = PetscSFDestroy(&a->sf);CHKERRQ(ierr);
ierr = PetscLayoutDestroy(&a->h2opus_rmap);CHKERRQ(ierr);
ierr = PetscLayoutDestroy(&a->h2opus_cmap);CHKERRQ(ierr);
ierr = ISDestroy(&a->h2opus_indexmap);CHKERRQ(ierr);
delete a->xx;
delete a->yy;
delete a->hmatrix_gpu;
delete a->dist_hmatrix_gpu;
#if defined(PETSC_H2OPUS_USE_GPU)
delete a->xx_gpu;
delete a->yy_gpu;
#endif
delete a->sampler;
delete a->ptcloud;
delete a->kernel;
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_seqdense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_seqdensecuda_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_mpidense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_mpidensecuda_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)A,NULL);CHKERRQ(ierr);
ierr = PetscFree(A->data);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatH2OpusSetNativeMult(Mat A, PetscBool nm)
{
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscBool ish2opus;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidLogicalCollectiveBool(A,nm,2);
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (ish2opus) {
if (a->h2opus_rmap) { /* need to swap layouts for vector creation */
if ((!a->nativemult && nm) || (a->nativemult && !nm)) {
PetscLayout t;
t = A->rmap;
A->rmap = a->h2opus_rmap;
a->h2opus_rmap = t;
t = A->cmap;
A->cmap = a->h2opus_cmap;
a->h2opus_cmap = t;
}
}
a->nativemult = nm;
}
PetscFunctionReturn(0);
}
PetscErrorCode MatH2OpusGetNativeMult(Mat A, PetscBool *nm)
{
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscBool ish2opus;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(nm,2);
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (!ish2opus) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Not for type %s",((PetscObject)A)->type_name);
*nm = a->nativemult;
PetscFunctionReturn(0);
}
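/* Approximate norms via random sampling (MatApproximateNorm_Private); for MATH2OPUS the native (H2OPUS-ordered) MatMult is enabled temporarily, since norms do not depend on the ordering. */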
PETSC_EXTERN PetscErrorCode MatNorm_H2OPUS(Mat A, NormType normtype, PetscReal* n)
{
PetscErrorCode ierr;
PetscBool ish2opus;
PetscInt nmax = PETSC_DECIDE;
Mat_H2OPUS *a = NULL;
PetscBool mult = PETSC_FALSE;
PetscFunctionBegin;
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (ish2opus) { /* set userdefine number of samples and fastpath for mult (norms are order independent) */
a = (Mat_H2OPUS*)A->data;
nmax = a->norm_max_samples;
mult = a->nativemult;
ierr = MatH2OpusSetNativeMult(A,PETSC_TRUE);CHKERRQ(ierr);
} else {
ierr = PetscOptionsGetInt(((PetscObject)A)->options,((PetscObject)A)->prefix,"-mat_approximate_norm_samples",&nmax,NULL);CHKERRQ(ierr);
}
ierr = MatApproximateNorm_Private(A,normtype,nmax,n);CHKERRQ(ierr);
if (a) { ierr = MatH2OpusSetNativeMult(A,mult);CHKERRQ(ierr); }
PetscFunctionReturn(0);
}
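/* Computes C = s * op(A) * B for dense B and C; columns are permuted through PetscSF objects when the H2OPUS index map differs from the PETSc ordering, and the CPU or GPU hierarchical matrix is used depending on whether A is bound to the CPU. */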
static PetscErrorCode MatMultNKernel_H2OPUS(Mat A, PetscBool transA, Mat B, Mat C)
{
Mat_H2OPUS *h2opus = (Mat_H2OPUS*)A->data;
#if defined(H2OPUS_USE_MPI)
h2opusHandle_t handle = h2opus->handle->handle;
#else
h2opusHandle_t handle = h2opus->handle;
#endif
PetscBool boundtocpu = PETSC_TRUE;
PetscScalar *xx,*yy,*uxx,*uyy;
PetscInt blda,clda;
PetscMPIInt size;
PetscSF bsf,csf;
PetscBool usesf = (PetscBool)(h2opus->sf && !h2opus->nativemult);
PetscErrorCode ierr;
PetscFunctionBegin;
HLibProfile::clear();
#if defined(PETSC_H2OPUS_USE_GPU)
boundtocpu = A->boundtocpu;
#endif
ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr);
ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr);
if (usesf) {
PetscInt n;
ierr = PetscSFGetGraph(h2opus->sf,NULL,&n,NULL,NULL);CHKERRQ(ierr);
ierr = PetscObjectQuery((PetscObject)B,"_math2opus_vectorsf",(PetscObject*)&bsf);CHKERRQ(ierr);
if (!bsf) {
ierr = PetscSFGetVectorSF(h2opus->sf,B->cmap->N,blda,PETSC_DECIDE,&bsf);CHKERRQ(ierr);
ierr = PetscObjectCompose((PetscObject)B,"_math2opus_vectorsf",(PetscObject)bsf);CHKERRQ(ierr);
ierr = PetscObjectDereference((PetscObject)bsf);CHKERRQ(ierr);
}
ierr = PetscObjectQuery((PetscObject)C,"_math2opus_vectorsf",(PetscObject*)&csf);CHKERRQ(ierr);
if (!csf) {
ierr = PetscSFGetVectorSF(h2opus->sf,B->cmap->N,clda,PETSC_DECIDE,&csf);CHKERRQ(ierr);
ierr = PetscObjectCompose((PetscObject)C,"_math2opus_vectorsf",(PetscObject)csf);CHKERRQ(ierr);
ierr = PetscObjectDereference((PetscObject)csf);CHKERRQ(ierr);
}
blda = n;
clda = n;
}
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
if (boundtocpu) {
if (usesf) {
PetscInt n;
ierr = PetscSFGetGraph(h2opus->sf,NULL,&n,NULL,NULL);CHKERRQ(ierr);
if (h2opus->xxs < B->cmap->n) { h2opus->xx->resize(n*B->cmap->N); h2opus->xxs = B->cmap->N; }
if (h2opus->yys < B->cmap->n) { h2opus->yy->resize(n*B->cmap->N); h2opus->yys = B->cmap->N; }
}
ierr = MatDenseGetArrayRead(B,(const PetscScalar**)&xx);CHKERRQ(ierr);
ierr = MatDenseGetArrayWrite(C,&yy);CHKERRQ(ierr);
if (usesf) {
uxx = MatH2OpusGetThrustPointer(*h2opus->xx);
uyy = MatH2OpusGetThrustPointer(*h2opus->yy);
ierr = PetscSFBcastBegin(bsf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(bsf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
} else {
uxx = xx;
uyy = yy;
}
if (size > 1) {
if (!h2opus->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing distributed CPU matrix");
if (transA && !A->symmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMultTranspose not yet coded in parallel");
#if defined(H2OPUS_USE_MPI)
distributed_hgemv(/*transA ? H2Opus_Trans : H2Opus_NoTrans, */h2opus->s, *h2opus->dist_hmatrix, uxx, blda, 0.0, uyy, clda, B->cmap->N, h2opus->handle);
#endif
} else {
if (!h2opus->hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
hgemv(transA ? H2Opus_Trans : H2Opus_NoTrans, h2opus->s, *h2opus->hmatrix, uxx, blda, 0.0, uyy, clda, B->cmap->N, handle);
}
ierr = MatDenseRestoreArrayRead(B,(const PetscScalar**)&xx);CHKERRQ(ierr);
if (usesf) {
ierr = PetscSFReduceBegin(csf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFReduceEnd(csf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
}
ierr = MatDenseRestoreArrayWrite(C,&yy);CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
} else {
PetscBool ciscuda,biscuda;
if (usesf) {
PetscInt n;
ierr = PetscSFGetGraph(h2opus->sf,NULL,&n,NULL,NULL);CHKERRQ(ierr);
if (h2opus->xxs_gpu < B->cmap->n) { h2opus->xx_gpu->resize(n*B->cmap->N); h2opus->xxs_gpu = B->cmap->N; }
if (h2opus->yys_gpu < B->cmap->n) { h2opus->yy_gpu->resize(n*B->cmap->N); h2opus->yys_gpu = B->cmap->N; }
}
/* If not of type seqdensecuda, convert on the fly (i.e. allocate GPU memory) */
ierr = PetscObjectTypeCompareAny((PetscObject)B,&biscuda,MATSEQDENSECUDA,MATMPIDENSECUDA,"");CHKERRQ(ierr);
if (!biscuda) {
ierr = MatConvert(B,MATDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
}
ierr = PetscObjectTypeCompareAny((PetscObject)C,&ciscuda,MATSEQDENSECUDA,MATMPIDENSECUDA,"");CHKERRQ(ierr);
if (!ciscuda) {
C->assembled = PETSC_TRUE;
ierr = MatConvert(C,MATDENSECUDA,MAT_INPLACE_MATRIX,&C);CHKERRQ(ierr);
}
ierr = MatDenseCUDAGetArrayRead(B,(const PetscScalar**)&xx);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArrayWrite(C,&yy);CHKERRQ(ierr);
if (usesf) {
uxx = MatH2OpusGetThrustPointer(*h2opus->xx_gpu);
uyy = MatH2OpusGetThrustPointer(*h2opus->yy_gpu);
ierr = PetscSFBcastBegin(bsf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(bsf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
} else {
uxx = xx;
uyy = yy;
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (size > 1) {
if (!h2opus->dist_hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing distributed GPU matrix");
if (transA && !A->symmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMultTranspose not yet coded in parallel");
#if defined(H2OPUS_USE_MPI)
distributed_hgemv(/* transA ? H2Opus_Trans : H2Opus_NoTrans, */h2opus->s, *h2opus->dist_hmatrix_gpu, uxx, blda, 0.0, uyy, clda, B->cmap->N, h2opus->handle);
#endif
} else {
if (!h2opus->hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
hgemv(transA ? H2Opus_Trans : H2Opus_NoTrans, h2opus->s, *h2opus->hmatrix_gpu, uxx, blda, 0.0, uyy, clda, B->cmap->N, handle);
}
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(B,(const PetscScalar**)&xx);CHKERRQ(ierr);
if (usesf) {
ierr = PetscSFReduceBegin(csf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFReduceEnd(csf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
}
ierr = MatDenseCUDARestoreArrayWrite(C,&yy);CHKERRQ(ierr);
if (!biscuda) {
ierr = MatConvert(B,MATDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
}
if (!ciscuda) {
ierr = MatConvert(C,MATDENSE,MAT_INPLACE_MATRIX,&C);CHKERRQ(ierr);
}
#endif
}
{ /* log flops */
double gops,time,perf,dev;
HLibProfile::getHgemvPerf(gops,time,perf,dev);
#if defined(PETSC_H2OPUS_USE_GPU)
if (boundtocpu) {
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
} else {
ierr = PetscLogGpuFlops(1e9*gops);CHKERRQ(ierr);
}
#else
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
#endif
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatProductNumeric_H2OPUS(Mat C)
{
Mat_Product *product = C->product;
PetscErrorCode ierr;
PetscFunctionBegin;
MatCheckProduct(C,1);
switch (product->type) {
case MATPRODUCT_AB:
ierr = MatMultNKernel_H2OPUS(product->A,PETSC_FALSE,product->B,C);CHKERRQ(ierr);
break;
case MATPRODUCT_AtB:
ierr = MatMultNKernel_H2OPUS(product->A,PETSC_TRUE,product->B,C);CHKERRQ(ierr);
break;
default:
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatProduct type %s is not supported",MatProductTypes[product->type]);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatProductSymbolic_H2OPUS(Mat C)
{
PetscErrorCode ierr;
Mat_Product *product = C->product;
PetscBool cisdense;
Mat A,B;
PetscFunctionBegin;
MatCheckProduct(C,1);
A = product->A;
B = product->B;
switch (product->type) {
case MATPRODUCT_AB:
ierr = MatSetSizes(C,A->rmap->n,B->cmap->n,A->rmap->N,B->cmap->N);CHKERRQ(ierr);
ierr = MatSetBlockSizesFromMats(C,product->A,product->B);CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)C,&cisdense,MATSEQDENSE,MATMPIDENSE,MATSEQDENSECUDA,MATMPIDENSECUDA,"");CHKERRQ(ierr);
if (!cisdense) { ierr = MatSetType(C,((PetscObject)product->B)->type_name);CHKERRQ(ierr); }
ierr = MatSetUp(C);CHKERRQ(ierr);
break;
case MATPRODUCT_AtB:
ierr = MatSetSizes(C,A->cmap->n,B->cmap->n,A->cmap->N,B->cmap->N);CHKERRQ(ierr);
ierr = MatSetBlockSizesFromMats(C,product->A,product->B);CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)C,&cisdense,MATSEQDENSE,MATMPIDENSE,MATSEQDENSECUDA,MATMPIDENSECUDA,"");CHKERRQ(ierr);
if (!cisdense) { ierr = MatSetType(C,((PetscObject)product->B)->type_name);CHKERRQ(ierr); }
ierr = MatSetUp(C);CHKERRQ(ierr);
break;
default:
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatProduct type %s is not supported",MatProductTypes[product->type]);
}
C->ops->productsymbolic = NULL;
C->ops->productnumeric = MatProductNumeric_H2OPUS;
PetscFunctionReturn(0);
}
static PetscErrorCode MatProductSetFromOptions_H2OPUS(Mat C)
{
PetscFunctionBegin;
MatCheckProduct(C,1);
if (C->product->type == MATPRODUCT_AB || C->product->type == MATPRODUCT_AtB) {
C->ops->productsymbolic = MatProductSymbolic_H2OPUS;
}
PetscFunctionReturn(0);
}
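/* Computes y = s * op(A) * x + sy * y for a single vector, with the same optional PetscSF permutation and CPU/GPU dispatch as the dense kernel above. */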
static PetscErrorCode MatMultKernel_H2OPUS(Mat A, Vec x, PetscScalar sy, Vec y, PetscBool trans)
{
Mat_H2OPUS *h2opus = (Mat_H2OPUS*)A->data;
#if defined(H2OPUS_USE_MPI)
h2opusHandle_t handle = h2opus->handle->handle;
#else
h2opusHandle_t handle = h2opus->handle;
#endif
PetscBool boundtocpu = PETSC_TRUE;
PetscInt n;
PetscScalar *xx,*yy,*uxx,*uyy;
PetscMPIInt size;
PetscBool usesf = (PetscBool)(h2opus->sf && !h2opus->nativemult);
PetscErrorCode ierr;
PetscFunctionBegin;
HLibProfile::clear();
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
boundtocpu = A->boundtocpu;
#endif
if (usesf) {
ierr = PetscSFGetGraph(h2opus->sf,NULL,&n,NULL,NULL);CHKERRQ(ierr);
} else n = A->rmap->n;
if (boundtocpu) {
ierr = VecGetArrayRead(x,(const PetscScalar**)&xx);CHKERRQ(ierr);
if (sy == 0.0) {
ierr = VecGetArrayWrite(y,&yy);CHKERRQ(ierr);
} else {
ierr = VecGetArray(y,&yy);CHKERRQ(ierr);
}
if (usesf) {
uxx = MatH2OpusGetThrustPointer(*h2opus->xx);
uyy = MatH2OpusGetThrustPointer(*h2opus->yy);
ierr = PetscSFBcastBegin(h2opus->sf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(h2opus->sf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
if (sy != 0.0) {
ierr = PetscSFBcastBegin(h2opus->sf,MPIU_SCALAR,yy,uyy,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(h2opus->sf,MPIU_SCALAR,yy,uyy,MPI_REPLACE);CHKERRQ(ierr);
}
} else {
uxx = xx;
uyy = yy;
}
if (size > 1) {
if (!h2opus->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing distributed CPU matrix");
if (trans && !A->symmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMultTranspose not yet coded in parallel");
#if defined(H2OPUS_USE_MPI)
distributed_hgemv(/*trans ? H2Opus_Trans : H2Opus_NoTrans, */h2opus->s, *h2opus->dist_hmatrix, uxx, n, sy, uyy, n, 1, h2opus->handle);
#endif
} else {
if (!h2opus->hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
hgemv(trans ? H2Opus_Trans : H2Opus_NoTrans, h2opus->s, *h2opus->hmatrix, uxx, n, sy, uyy, n, 1, handle);
}
ierr = VecRestoreArrayRead(x,(const PetscScalar**)&xx);CHKERRQ(ierr);
if (usesf) {
ierr = PetscSFReduceBegin(h2opus->sf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFReduceEnd(h2opus->sf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
}
if (sy == 0.0) {
ierr = VecRestoreArrayWrite(y,&yy);CHKERRQ(ierr);
} else {
ierr = VecRestoreArray(y,&yy);CHKERRQ(ierr);
}
#if defined(PETSC_H2OPUS_USE_GPU)
} else {
ierr = VecCUDAGetArrayRead(x,(const PetscScalar**)&xx);CHKERRQ(ierr);
if (sy == 0.0) {
ierr = VecCUDAGetArrayWrite(y,&yy);CHKERRQ(ierr);
} else {
ierr = VecCUDAGetArray(y,&yy);CHKERRQ(ierr);
}
if (usesf) {
uxx = MatH2OpusGetThrustPointer(*h2opus->xx_gpu);
uyy = MatH2OpusGetThrustPointer(*h2opus->yy_gpu);
ierr = PetscSFBcastBegin(h2opus->sf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(h2opus->sf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
if (sy != 0.0) {
ierr = PetscSFBcastBegin(h2opus->sf,MPIU_SCALAR,yy,uyy,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(h2opus->sf,MPIU_SCALAR,yy,uyy,MPI_REPLACE);CHKERRQ(ierr);
}
} else {
uxx = xx;
uyy = yy;
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (size > 1) {
if (!h2opus->dist_hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing distributed GPU matrix");
if (trans && !A->symmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMultTranspose not yet coded in parallel");
#if defined(H2OPUS_USE_MPI)
distributed_hgemv(/*trans ? H2Opus_Trans : H2Opus_NoTrans, */h2opus->s, *h2opus->dist_hmatrix_gpu, uxx, n, sy, uyy, n, 1, h2opus->handle);
#endif
} else {
if (!h2opus->hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
hgemv(trans ? H2Opus_Trans : H2Opus_NoTrans, h2opus->s, *h2opus->hmatrix_gpu, uxx, n, sy, uyy, n, 1, handle);
}
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(x,(const PetscScalar**)&xx);CHKERRQ(ierr);
if (usesf) {
ierr = PetscSFReduceBegin(h2opus->sf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFReduceEnd(h2opus->sf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
}
if (sy == 0.0) {
ierr = VecCUDARestoreArrayWrite(y,&yy);CHKERRQ(ierr);
} else {
ierr = VecCUDARestoreArray(y,&yy);CHKERRQ(ierr);
}
#endif
}
{ /* log flops */
double gops,time,perf,dev;
HLibProfile::getHgemvPerf(gops,time,perf,dev);
#if defined(PETSC_H2OPUS_USE_GPU)
if (boundtocpu) {
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
} else {
ierr = PetscLogGpuFlops(1e9*gops);CHKERRQ(ierr);
}
#else
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
#endif
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultTranspose_H2OPUS(Mat A, Vec x, Vec y)
{
PetscBool xiscuda,yiscuda;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscObjectTypeCompareAny((PetscObject)x,&xiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)y,&yiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = MatH2OpusUpdateIfNeeded(A,!xiscuda || !yiscuda);CHKERRQ(ierr);
ierr = MatMultKernel_H2OPUS(A,x,0.0,y,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMult_H2OPUS(Mat A, Vec x, Vec y)
{
PetscBool xiscuda,yiscuda;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscObjectTypeCompareAny((PetscObject)x,&xiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)y,&yiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = MatH2OpusUpdateIfNeeded(A,!xiscuda || !yiscuda);CHKERRQ(ierr);
ierr = MatMultKernel_H2OPUS(A,x,0.0,y,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultTransposeAdd_H2OPUS(Mat A, Vec x, Vec y, Vec z)
{
PetscBool xiscuda,ziscuda;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCopy(y,z);CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)x,&xiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)z,&ziscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = MatH2OpusUpdateIfNeeded(A,!xiscuda || !ziscuda);CHKERRQ(ierr);
ierr = MatMultKernel_H2OPUS(A,x,1.0,z,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultAdd_H2OPUS(Mat A, Vec x, Vec y, Vec z)
{
PetscBool xiscuda,ziscuda;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCopy(y,z);CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)x,&xiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)z,&ziscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = MatH2OpusUpdateIfNeeded(A,!xiscuda || !ziscuda);CHKERRQ(ierr);
ierr = MatMultKernel_H2OPUS(A,x,1.0,z,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatScale_H2OPUS(Mat A, PetscScalar s)
{
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscFunctionBegin;
a->s *= s;
PetscFunctionReturn(0);
}
static PetscErrorCode MatSetFromOptions_H2OPUS(PetscOptionItems *PetscOptionsObject,Mat A)
{
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscOptionsHead(PetscOptionsObject,"H2OPUS options");CHKERRQ(ierr);
ierr = PetscOptionsInt("-mat_h2opus_leafsize","Leaf size of cluster tree",NULL,a->leafsize,&a->leafsize,NULL);CHKERRQ(ierr);
ierr = PetscOptionsReal("-mat_h2opus_eta","Admissibility condition tolerance",NULL,a->eta,&a->eta,NULL);CHKERRQ(ierr);
ierr = PetscOptionsInt("-mat_h2opus_order","Basis order for off-diagonal sampling when constructed from kernel",NULL,a->basisord,&a->basisord,NULL);CHKERRQ(ierr);
ierr = PetscOptionsInt("-mat_h2opus_maxrank","Maximum rank when constructed from matvecs",NULL,a->max_rank,&a->max_rank,NULL);CHKERRQ(ierr);
ierr = PetscOptionsInt("-mat_h2opus_samples","Maximum number of samples to be taken concurrently when constructing from matvecs",NULL,a->bs,&a->bs,NULL);CHKERRQ(ierr);
ierr = PetscOptionsInt("-mat_h2opus_normsamples","Maximum number of samples to be taken when estimating norms",NULL,a->norm_max_samples,&a->norm_max_samples,NULL);CHKERRQ(ierr);
ierr = PetscOptionsReal("-mat_h2opus_rtol","Relative tolerance for construction from sampling",NULL,a->rtol,&a->rtol,NULL);CHKERRQ(ierr);
ierr = PetscOptionsBool("-mat_h2opus_check","Check error when constructing from sampling during MatAssemblyEnd()",NULL,a->check_construction,&a->check_construction,NULL);CHKERRQ(ierr);
ierr = PetscOptionsBool("-mat_h2opus_hara_verbose","Verbose output from hara construction",NULL,a->hara_verbose,&a->hara_verbose,NULL);CHKERRQ(ierr);
ierr = PetscOptionsTail();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatH2OpusSetCoords_H2OPUS(Mat,PetscInt,const PetscReal[],PetscBool,MatH2OpusKernel,void*);
static PetscErrorCode MatH2OpusInferCoordinates_Private(Mat A)
{
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
Vec c;
PetscInt spacedim;
const PetscScalar *coords;
PetscErrorCode ierr;
PetscFunctionBegin;
if (a->ptcloud) PetscFunctionReturn(0);
ierr = PetscObjectQuery((PetscObject)A,"__math2opus_coords",(PetscObject*)&c);CHKERRQ(ierr);
if (!c && a->sampler) {
Mat S = a->sampler->GetSamplingMat();
ierr = PetscObjectQuery((PetscObject)S,"__math2opus_coords",(PetscObject*)&c);CHKERRQ(ierr);
}
if (!c) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Missing coordinates");
ierr = VecGetArrayRead(c,&coords);CHKERRQ(ierr);
ierr = VecGetBlockSize(c,&spacedim);CHKERRQ(ierr);
ierr = MatH2OpusSetCoords_H2OPUS(A,spacedim,coords,PETSC_FALSE,NULL,NULL);CHKERRQ(ierr);
ierr = VecRestoreArrayRead(c,&coords);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
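/* Sets up the machinery needed by the matrix-vector products: extracts the H2OPUS cluster-tree index map, creates a PetscSF permuting vectors between the PETSc and H2OPUS orderings (unless the map is the identity), and allocates host/device work vectors. */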
static PetscErrorCode MatSetUpMultiply_H2OPUS(Mat A)
{
MPI_Comm comm;
PetscMPIInt size;
PetscErrorCode ierr;
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscInt n = 0,*idx = NULL;
int *iidx = NULL;
PetscCopyMode own;
PetscBool rid;
PetscFunctionBegin;
if (a->multsetup) PetscFunctionReturn(0);
if (a->sf) { /* MatDuplicate_H2OPUS takes reference to the SF */
ierr = PetscSFGetGraph(a->sf,NULL,&n,NULL,NULL);CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
a->xx_gpu = new thrust::device_vector<PetscScalar>(n);
a->yy_gpu = new thrust::device_vector<PetscScalar>(n);
a->xxs_gpu = 1;
a->yys_gpu = 1;
#endif
a->xx = new thrust::host_vector<PetscScalar>(n);
a->yy = new thrust::host_vector<PetscScalar>(n);
a->xxs = 1;
a->yys = 1;
} else {
IS is;
ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr);
if (!a->h2opus_indexmap) {
if (size > 1) {
if (!a->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing distributed CPU matrix");
#if defined(H2OPUS_USE_MPI)
iidx = MatH2OpusGetThrustPointer(a->dist_hmatrix->basis_tree.basis_branch.index_map);
n = a->dist_hmatrix->basis_tree.basis_branch.index_map.size();
#endif
} else {
iidx = MatH2OpusGetThrustPointer(a->hmatrix->u_basis_tree.index_map);
n = a->hmatrix->u_basis_tree.index_map.size();
}
if (PetscDefined(USE_64BIT_INDICES)) {
PetscInt i;
own = PETSC_OWN_POINTER;
ierr = PetscMalloc1(n,&idx);CHKERRQ(ierr);
for (i=0;i<n;i++) idx[i] = iidx[i];
} else {
own = PETSC_COPY_VALUES;
idx = (PetscInt*)iidx;
}
ierr = ISCreateGeneral(comm,n,idx,own,&is);CHKERRQ(ierr);
ierr = ISSetPermutation(is);CHKERRQ(ierr);
ierr = ISViewFromOptions(is,(PetscObject)A,"-mat_h2opus_indexmap_view");CHKERRQ(ierr);
a->h2opus_indexmap = is;
}
ierr = ISGetLocalSize(a->h2opus_indexmap,&n);CHKERRQ(ierr);
ierr = ISGetIndices(a->h2opus_indexmap,(const PetscInt **)&idx);CHKERRQ(ierr);
rid = (PetscBool)(n == A->rmap->n);
ierr = MPIU_Allreduce(MPI_IN_PLACE,&rid,1,MPIU_BOOL,MPI_LAND,comm);CHKERRMPI(ierr);
if (rid) {
ierr = ISIdentity(a->h2opus_indexmap,&rid);CHKERRQ(ierr);
}
if (!rid) {
if (size > 1) { /* Parallel distribution may be different, save it here for fast path in MatMult (see MatH2OpusSetNativeMult) */
ierr = PetscLayoutCreate(comm,&a->h2opus_rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetLocalSize(a->h2opus_rmap,n);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(a->h2opus_rmap);CHKERRQ(ierr);
ierr = PetscLayoutReference(a->h2opus_rmap,&a->h2opus_cmap);CHKERRQ(ierr);
}
ierr = PetscSFCreate(comm,&a->sf);CHKERRQ(ierr);
ierr = PetscSFSetGraphLayout(a->sf,A->rmap,n,NULL,PETSC_OWN_POINTER,idx);CHKERRQ(ierr);
ierr = PetscSFViewFromOptions(a->sf,(PetscObject)A,"-mat_h2opus_sf_view");CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
a->xx_gpu = new thrust::device_vector<PetscScalar>(n);
a->yy_gpu = new thrust::device_vector<PetscScalar>(n);
a->xxs_gpu = 1;
a->yys_gpu = 1;
#endif
a->xx = new thrust::host_vector<PetscScalar>(n);
a->yy = new thrust::host_vector<PetscScalar>(n);
a->xxs = 1;
a->yys = 1;
}
ierr = ISRestoreIndices(a->h2opus_indexmap,(const PetscInt **)&idx);CHKERRQ(ierr);
}
a->multsetup = PETSC_TRUE;
PetscFunctionReturn(0);
}
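/* Builds the hierarchical matrix: geometric construction from the user kernel when available, otherwise structure-only construction followed by HARA compression sampling the user operator (sequential case only); optionally mirrors the result on the GPU and checks the construction error if requested. */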
static PetscErrorCode MatAssemblyEnd_H2OPUS(Mat A, MatAssemblyType assemblytype)
{
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
#if defined(H2OPUS_USE_MPI)
h2opusHandle_t handle = a->handle->handle;
#else
h2opusHandle_t handle = a->handle;
#endif
PetscBool kernel = PETSC_FALSE;
PetscBool boundtocpu = PETSC_TRUE;
PetscBool samplingdone = PETSC_FALSE;
MPI_Comm comm;
PetscMPIInt size;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Different row and column local sizes are not supported");
if (A->rmap->N != A->cmap->N) SETERRQ(comm,PETSC_ERR_SUP,"Rectangular matrices are not supported");
ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr);
/* TODO REUSABILITY of geometric construction */
delete a->hmatrix;
delete a->dist_hmatrix;
#if defined(PETSC_H2OPUS_USE_GPU)
delete a->hmatrix_gpu;
delete a->dist_hmatrix_gpu;
#endif
a->orthogonal = PETSC_FALSE;
/* TODO: other? */
H2OpusBoxCenterAdmissibility adm(a->eta);
ierr = PetscLogEventBegin(MAT_H2Opus_Build,A,0,0,0);CHKERRQ(ierr);
if (size > 1) {
#if defined(H2OPUS_USE_MPI)
a->dist_hmatrix = new DistributedHMatrix(A->rmap->n/*,A->symmetric*/);
#else
a->dist_hmatrix = NULL;
#endif
} else {
a->hmatrix = new HMatrix(A->rmap->n,A->symmetric);
}
ierr = MatH2OpusInferCoordinates_Private(A);CHKERRQ(ierr);
if (!a->ptcloud) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing pointcloud");
if (a->kernel) {
BoxEntryGen<PetscScalar, H2OPUS_HWTYPE_CPU, PetscFunctionGenerator<PetscScalar>> entry_gen(*a->kernel);
if (size > 1) {
if (!a->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing distributed CPU matrix");
#if defined(H2OPUS_USE_MPI)
buildDistributedHMatrix(*a->dist_hmatrix,a->ptcloud,adm,entry_gen,a->leafsize,a->basisord,a->handle);
#endif
} else {
buildHMatrix(*a->hmatrix,a->ptcloud,adm,entry_gen,a->leafsize,a->basisord);
}
kernel = PETSC_TRUE;
} else {
if (size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Construction from sampling not supported in parallel");
buildHMatrixStructure(*a->hmatrix,a->ptcloud,a->leafsize,adm);
}
ierr = MatSetUpMultiply_H2OPUS(A);CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
boundtocpu = A->boundtocpu;
if (!boundtocpu) {
if (size > 1) {
if (!a->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing distributed CPU matrix");
#if defined(H2OPUS_USE_MPI)
a->dist_hmatrix_gpu = new DistributedHMatrix_GPU(*a->dist_hmatrix);
#endif
} else {
a->hmatrix_gpu = new HMatrix_GPU(*a->hmatrix);
}
}
#endif
if (size == 1) {
if (!kernel && a->sampler && a->sampler->GetSamplingMat()) {
PetscReal Anorm;
bool verbose;
ierr = PetscOptionsGetBool(((PetscObject)A)->options,((PetscObject)A)->prefix,"-mat_h2opus_hara_verbose",&a->hara_verbose,NULL);CHKERRQ(ierr);
verbose = a->hara_verbose;
ierr = MatApproximateNorm_Private(a->sampler->GetSamplingMat(),NORM_2,a->norm_max_samples,&Anorm);CHKERRQ(ierr);
if (a->hara_verbose) { ierr = PetscPrintf(PETSC_COMM_SELF,"Sampling uses max rank %d, tol %g (%g*%g), %s samples %d\n",a->max_rank,a->rtol*Anorm,a->rtol,Anorm,boundtocpu ? "CPU" : "GPU",a->bs);CHKERRQ(ierr); }
if (a->sf && !a->nativemult) {
a->sampler->SetIndexMap(a->hmatrix->u_basis_tree.index_map.size(),a->hmatrix->u_basis_tree.index_map.data());
}
a->sampler->SetStream(handle->getMainStream());
if (boundtocpu) {
a->sampler->SetGPUSampling(false);
hara(a->sampler, *a->hmatrix, a->max_rank, 10 /* TODO */,a->rtol*Anorm,a->bs,handle,verbose);
#if defined(PETSC_H2OPUS_USE_GPU)
} else {
a->sampler->SetGPUSampling(true);
hara(a->sampler, *a->hmatrix_gpu, a->max_rank, 10 /* TODO */,a->rtol*Anorm,a->bs,handle,verbose);
#endif
}
samplingdone = PETSC_TRUE;
}
}
#if defined(PETSC_H2OPUS_USE_GPU)
if (!boundtocpu) {
delete a->hmatrix;
delete a->dist_hmatrix;
a->hmatrix = NULL;
a->dist_hmatrix = NULL;
}
A->offloadmask = boundtocpu ? PETSC_OFFLOAD_CPU : PETSC_OFFLOAD_GPU;
#endif
ierr = PetscLogEventEnd(MAT_H2Opus_Build,A,0,0,0);CHKERRQ(ierr);
if (!a->s) a->s = 1.0;
A->assembled = PETSC_TRUE;
if (samplingdone) {
PetscBool check = a->check_construction;
PetscBool checke = PETSC_FALSE;
ierr = PetscOptionsGetBool(((PetscObject)A)->options,((PetscObject)A)->prefix,"-mat_h2opus_check",&check,NULL);CHKERRQ(ierr);
ierr = PetscOptionsGetBool(((PetscObject)A)->options,((PetscObject)A)->prefix,"-mat_h2opus_check_explicit",&checke,NULL);CHKERRQ(ierr);
if (check) {
Mat E,Ae;
PetscReal n1,ni,n2;
PetscReal n1A,niA,n2A;
void (*normfunc)(void);
Ae = a->sampler->GetSamplingMat();
ierr = MatConvert(A,MATSHELL,MAT_INITIAL_MATRIX,&E);CHKERRQ(ierr);
ierr = MatShellSetOperation(E,MATOP_NORM,(void (*)(void))MatNorm_H2OPUS);CHKERRQ(ierr);
ierr = MatAXPY(E,-1.0,Ae,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
ierr = MatNorm(E,NORM_1,&n1);CHKERRQ(ierr);
ierr = MatNorm(E,NORM_INFINITY,&ni);CHKERRQ(ierr);
ierr = MatNorm(E,NORM_2,&n2);CHKERRQ(ierr);
if (checke) {
Mat eA,eE,eAe;
ierr = MatComputeOperator(A,MATAIJ,&eA);CHKERRQ(ierr);
ierr = MatComputeOperator(E,MATAIJ,&eE);CHKERRQ(ierr);
ierr = MatComputeOperator(Ae,MATAIJ,&eAe);CHKERRQ(ierr);
ierr = MatChop(eA,PETSC_SMALL);CHKERRQ(ierr);
ierr = MatChop(eE,PETSC_SMALL);CHKERRQ(ierr);
ierr = MatChop(eAe,PETSC_SMALL);CHKERRQ(ierr);
ierr = PetscObjectSetName((PetscObject)eA,"H2Mat");CHKERRQ(ierr);
ierr = MatView(eA,NULL);CHKERRQ(ierr);
ierr = PetscObjectSetName((PetscObject)eAe,"S");CHKERRQ(ierr);
ierr = MatView(eAe,NULL);CHKERRQ(ierr);
ierr = PetscObjectSetName((PetscObject)eE,"H2Mat - S");CHKERRQ(ierr);
ierr = MatView(eE,NULL);CHKERRQ(ierr);
ierr = MatDestroy(&eA);CHKERRQ(ierr);
ierr = MatDestroy(&eE);CHKERRQ(ierr);
ierr = MatDestroy(&eAe);CHKERRQ(ierr);
}
ierr = MatGetOperation(Ae,MATOP_NORM,&normfunc);CHKERRQ(ierr);
ierr = MatSetOperation(Ae,MATOP_NORM,(void (*)(void))MatNorm_H2OPUS);CHKERRQ(ierr);
ierr = MatNorm(Ae,NORM_1,&n1A);CHKERRQ(ierr);
ierr = MatNorm(Ae,NORM_INFINITY,&niA);CHKERRQ(ierr);
ierr = MatNorm(Ae,NORM_2,&n2A);CHKERRQ(ierr);
n1A = PetscMax(n1A,PETSC_SMALL);
n2A = PetscMax(n2A,PETSC_SMALL);
niA = PetscMax(niA,PETSC_SMALL);
ierr = MatSetOperation(Ae,MATOP_NORM,normfunc);CHKERRQ(ierr);
ierr = PetscPrintf(PetscObjectComm((PetscObject)A),"MATH2OPUS construction errors: NORM_1 %g, NORM_INFINITY %g, NORM_2 %g (%g %g %g)\n",(double)n1,(double)ni,(double)n2,(double)(n1/n1A),(double)(ni/niA),(double)(n2/n2A));
ierr = MatDestroy(&E);CHKERRQ(ierr);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatZeroEntries_H2OPUS(Mat A)
{
PetscErrorCode ierr;
PetscMPIInt size;
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscFunctionBegin;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
if (size > 1) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Not yet supported");
else {
a->hmatrix->clearData();
#if defined(PETSC_H2OPUS_USE_GPU)
if (a->hmatrix_gpu) a->hmatrix_gpu->clearData();
#endif
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatDuplicate_H2OPUS(Mat B, MatDuplicateOption op, Mat *nA)
{
Mat A;
Mat_H2OPUS *a, *b = (Mat_H2OPUS*)B->data;
#if defined(PETSC_H2OPUS_USE_GPU)
PetscBool iscpu = PETSC_FALSE;
#else
PetscBool iscpu = PETSC_TRUE;
#endif
PetscErrorCode ierr;
MPI_Comm comm;
PetscFunctionBegin;
ierr = PetscObjectGetComm((PetscObject)B,&comm);CHKERRQ(ierr);
ierr = MatCreate(comm,&A);CHKERRQ(ierr);
ierr = MatSetSizes(A,B->rmap->n,B->cmap->n,B->rmap->N,B->cmap->N);CHKERRQ(ierr);
ierr = MatSetType(A,MATH2OPUS);CHKERRQ(ierr);
ierr = MatPropagateSymmetryOptions(B,A);CHKERRQ(ierr);
a = (Mat_H2OPUS*)A->data;
a->eta = b->eta;
a->leafsize = b->leafsize;
a->basisord = b->basisord;
a->max_rank = b->max_rank;
a->bs = b->bs;
a->rtol = b->rtol;
a->norm_max_samples = b->norm_max_samples;
if (op == MAT_COPY_VALUES) a->s = b->s;
a->ptcloud = new PetscPointCloud<PetscReal>(*b->ptcloud);
if (op == MAT_COPY_VALUES && b->kernel) a->kernel = new PetscFunctionGenerator<PetscScalar>(*b->kernel);
#if defined(H2OPUS_USE_MPI)
if (b->dist_hmatrix) { a->dist_hmatrix = new DistributedHMatrix(*b->dist_hmatrix); }
#if defined(PETSC_H2OPUS_USE_GPU)
if (b->dist_hmatrix_gpu) { a->dist_hmatrix_gpu = new DistributedHMatrix_GPU(*b->dist_hmatrix_gpu); }
#endif
#endif
if (b->hmatrix) {
a->hmatrix = new HMatrix(*b->hmatrix);
if (op == MAT_DO_NOT_COPY_VALUES) a->hmatrix->clearData();
}
#if defined(PETSC_H2OPUS_USE_GPU)
if (b->hmatrix_gpu) {
a->hmatrix_gpu = new HMatrix_GPU(*b->hmatrix_gpu);
if (op == MAT_DO_NOT_COPY_VALUES) a->hmatrix_gpu->clearData();
}
#endif
if (b->sf) {
ierr = PetscObjectReference((PetscObject)b->sf);CHKERRQ(ierr);
a->sf = b->sf;
}
if (b->h2opus_indexmap) {
ierr = PetscObjectReference((PetscObject)b->h2opus_indexmap);CHKERRQ(ierr);
a->h2opus_indexmap = b->h2opus_indexmap;
}
ierr = MatSetUp(A);CHKERRQ(ierr);
ierr = MatSetUpMultiply_H2OPUS(A);CHKERRQ(ierr);
if (op == MAT_COPY_VALUES) {
A->assembled = PETSC_TRUE;
a->orthogonal = b->orthogonal;
#if defined(PETSC_H2OPUS_USE_GPU)
A->offloadmask = B->offloadmask;
#endif
}
#if defined(PETSC_H2OPUS_USE_GPU)
iscpu = B->boundtocpu;
#endif
ierr = MatBindToCPU(A,iscpu);CHKERRQ(ierr);
*nA = A;
PetscFunctionReturn(0);
}
static PetscErrorCode MatView_H2OPUS(Mat A, PetscViewer view)
{
Mat_H2OPUS *h2opus = (Mat_H2OPUS*)A->data;
PetscBool isascii;
PetscErrorCode ierr;
PetscMPIInt size;
PetscViewerFormat format;
PetscFunctionBegin;
ierr = PetscObjectTypeCompare((PetscObject)view,PETSCVIEWERASCII,&isascii);CHKERRQ(ierr);
ierr = PetscViewerGetFormat(view,&format);CHKERRQ(ierr);
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
if (isascii) {
if (format == PETSC_VIEWER_ASCII_MATLAB) {
if (size == 1) {
FILE *fp;
ierr = PetscViewerASCIIGetPointer(view,&fp);CHKERRQ(ierr);
dumpHMatrix(*h2opus->hmatrix,6,fp);
}
} else {
ierr = PetscViewerASCIIPrintf(view," H-Matrix constructed from %s\n",h2opus->kernel ? "Kernel" : "Mat");CHKERRQ(ierr);
ierr = PetscViewerASCIIPrintf(view," PointCloud dim %D\n",h2opus->ptcloud ? h2opus->ptcloud->getDimension() : 0);CHKERRQ(ierr);
ierr = PetscViewerASCIIPrintf(view," Admissibility parameters: leaf size %D, eta %g\n",h2opus->leafsize,(double)h2opus->eta);CHKERRQ(ierr);
if (!h2opus->kernel) {
ierr = PetscViewerASCIIPrintf(view," Sampling parameters: max_rank %D, samples %D, tolerance %g\n",h2opus->max_rank,h2opus->bs,(double)h2opus->rtol);CHKERRQ(ierr);
} else {
ierr = PetscViewerASCIIPrintf(view," Offdiagonal blocks approximation order %D\n",h2opus->basisord);CHKERRQ(ierr);
}
ierr = PetscViewerASCIIPrintf(view," Number of samples for norms %D\n",h2opus->norm_max_samples);CHKERRQ(ierr);
if (size == 1) {
double dense_mem_cpu = h2opus->hmatrix ? h2opus->hmatrix->getDenseMemoryUsage() : 0;
double low_rank_cpu = h2opus->hmatrix ? h2opus->hmatrix->getLowRankMemoryUsage() : 0;
#if defined(PETSC_HAVE_CUDA)
double dense_mem_gpu = h2opus->hmatrix_gpu ? h2opus->hmatrix_gpu->getDenseMemoryUsage() : 0;
double low_rank_gpu = h2opus->hmatrix_gpu ? h2opus->hmatrix_gpu->getLowRankMemoryUsage() : 0;
#endif
ierr = PetscViewerASCIIPrintf(view," Memory consumption GB (CPU): %g (dense) %g (low rank) %g (total)\n", dense_mem_cpu, low_rank_cpu, low_rank_cpu + dense_mem_cpu);CHKERRQ(ierr);
#if defined(PETSC_HAVE_CUDA)
ierr = PetscViewerASCIIPrintf(view," Memory consumption GB (GPU): %g (dense) %g (low rank) %g (total)\n", dense_mem_gpu, low_rank_gpu, low_rank_gpu + dense_mem_gpu);CHKERRQ(ierr);
#endif
} else {
#if defined(PETSC_HAVE_CUDA)
double matrix_mem[4] = {0.,0.,0.,0.};
PetscMPIInt rsize = 4;
#else
double matrix_mem[2] = {0.,0.};
PetscMPIInt rsize = 2;
#endif
#if defined(H2OPUS_USE_MPI)
matrix_mem[0] = h2opus->dist_hmatrix ? h2opus->dist_hmatrix->getLocalDenseMemoryUsage() : 0;
matrix_mem[1] = h2opus->dist_hmatrix ? h2opus->dist_hmatrix->getLocalLowRankMemoryUsage() : 0;
#if defined(PETSC_HAVE_CUDA)
matrix_mem[2] = h2opus->dist_hmatrix_gpu ? h2opus->dist_hmatrix_gpu->getLocalDenseMemoryUsage() : 0;
matrix_mem[3] = h2opus->dist_hmatrix_gpu ? h2opus->dist_hmatrix_gpu->getLocalLowRankMemoryUsage() : 0;
#endif
#endif
ierr = MPIU_Allreduce(MPI_IN_PLACE,matrix_mem,rsize,MPI_DOUBLE_PRECISION,MPI_SUM,PetscObjectComm((PetscObject)A));CHKERRMPI(ierr);
ierr = PetscViewerASCIIPrintf(view," Memory consumption GB (CPU): %g (dense) %g (low rank) %g (total)\n", matrix_mem[0], matrix_mem[1], matrix_mem[0] + matrix_mem[1]);CHKERRQ(ierr);
#if defined(PETSC_HAVE_CUDA)
ierr = PetscViewerASCIIPrintf(view," Memory consumption GB (GPU): %g (dense) %g (low rank) %g (total)\n", matrix_mem[2], matrix_mem[3], matrix_mem[2] + matrix_mem[3]);CHKERRQ(ierr);
#endif
}
}
}
#if 0
if (size == 1) {
char filename[256];
const char *name;
ierr = PetscObjectGetName((PetscObject)A,&name);CHKERRQ(ierr);
ierr = PetscSNPrintf(filename,sizeof(filename),"%s_structure.eps",name);CHKERRQ(ierr);
outputEps(*h2opus->hmatrix,filename);
}
#endif
PetscFunctionReturn(0);
}
static PetscErrorCode MatH2OpusSetCoords_H2OPUS(Mat A, PetscInt spacedim, const PetscReal coords[], PetscBool cdist, MatH2OpusKernel kernel, void *kernelctx)
{
Mat_H2OPUS *h2opus = (Mat_H2OPUS*)A->data;
PetscReal *gcoords;
PetscInt N;
MPI_Comm comm;
PetscMPIInt size;
PetscBool cong;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
ierr = MatHasCongruentLayouts(A,&cong);CHKERRQ(ierr);
if (!cong) SETERRQ(comm,PETSC_ERR_SUP,"Only for square matrices with congruent layouts");
N = A->rmap->N;
ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr);
if (size > 1 && cdist) {
PetscSF sf;
MPI_Datatype dtype;
ierr = MPI_Type_contiguous(spacedim,MPIU_REAL,&dtype);CHKERRMPI(ierr);
ierr = MPI_Type_commit(&dtype);CHKERRMPI(ierr);
ierr = PetscSFCreate(comm,&sf);CHKERRQ(ierr);
ierr = PetscSFSetGraphWithPattern(sf,A->rmap,PETSCSF_PATTERN_ALLGATHER);CHKERRQ(ierr);
ierr = PetscMalloc1(spacedim*N,&gcoords);CHKERRQ(ierr);
ierr = PetscSFBcastBegin(sf,dtype,coords,gcoords,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(sf,dtype,coords,gcoords,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
ierr = MPI_Type_free(&dtype);CHKERRMPI(ierr);
} else gcoords = (PetscReal*)coords;
delete h2opus->ptcloud;
delete h2opus->kernel;
h2opus->ptcloud = new PetscPointCloud<PetscReal>(spacedim,N,gcoords);
if (kernel) h2opus->kernel = new PetscFunctionGenerator<PetscScalar>(kernel,spacedim,kernelctx);
if (gcoords != coords) { ierr = PetscFree(gcoords);CHKERRQ(ierr); }
A->preallocated = PETSC_TRUE;
PetscFunctionReturn(0);
}
#if defined(PETSC_H2OPUS_USE_GPU)
static PetscErrorCode MatBindToCPU_H2OPUS(Mat A, PetscBool flg)
{
PetscMPIInt size;
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
if (flg && A->offloadmask == PETSC_OFFLOAD_GPU) {
if (size > 1) {
if (!a->dist_hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
#if defined(H2OPUS_USE_MPI)
if (!a->dist_hmatrix) a->dist_hmatrix = new DistributedHMatrix(*a->dist_hmatrix_gpu);
else *a->dist_hmatrix = *a->dist_hmatrix_gpu;
#endif
} else {
if (!a->hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
if (!a->hmatrix) a->hmatrix = new HMatrix(*a->hmatrix_gpu);
else *a->hmatrix = *a->hmatrix_gpu;
}
delete a->hmatrix_gpu;
delete a->dist_hmatrix_gpu;
a->hmatrix_gpu = NULL;
a->dist_hmatrix_gpu = NULL;
A->offloadmask = PETSC_OFFLOAD_CPU;
} else if (!flg && A->offloadmask == PETSC_OFFLOAD_CPU) {
if (size > 1) {
if (!a->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
#if defined(H2OPUS_USE_MPI)
if (!a->dist_hmatrix_gpu) a->dist_hmatrix_gpu = new DistributedHMatrix_GPU(*a->dist_hmatrix);
else *a->dist_hmatrix_gpu = *a->dist_hmatrix;
#endif
} else {
if (!a->hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
if (!a->hmatrix_gpu) a->hmatrix_gpu = new HMatrix_GPU(*a->hmatrix);
else *a->hmatrix_gpu = *a->hmatrix;
}
delete a->hmatrix;
delete a->dist_hmatrix;
a->hmatrix = NULL;
a->dist_hmatrix = NULL;
A->offloadmask = PETSC_OFFLOAD_GPU;
}
ierr = PetscFree(A->defaultvectype);CHKERRQ(ierr);
if (!flg) {
ierr = PetscStrallocpy(VECCUDA,&A->defaultvectype);CHKERRQ(ierr);
} else {
ierr = PetscStrallocpy(VECSTANDARD,&A->defaultvectype);CHKERRQ(ierr);
}
A->boundtocpu = flg;
PetscFunctionReturn(0);
}
#endif
/*MC
MATH2OPUS = "h2opus" - A matrix type for hierarchical matrices using the H2Opus package.
Options Database Keys:
. -mat_type h2opus - sets the matrix type to "h2opus" during a call to MatSetFromOptions()
Notes:
H2Opus implements hierarchical matrices in the H^2 flavour.
It supports CPU or NVIDIA GPUs.
   For CPU-only builds, configure PETSc with ./configure --download-h2opus --download-thrust.
In order to run on NVIDIA GPUs, use ./configure --download-h2opus --download-magma --download-kblas.
For details and additional references, see
"H2Opus: A distributed-memory multi-GPU software package for non-local operators",
available at https://arxiv.org/abs/2109.05451.
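   As a sketch, the construction can be tuned at runtime from the options database, e.g. (the values
   below are illustrative only):
      -mat_type h2opus -mat_h2opus_leafsize 64 -mat_h2opus_eta 0.9 -mat_h2opus_rtol 1e-6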
Level: beginner
.seealso: MATHTOOL, MATDENSE, MatCreateH2OpusFromKernel(), MatCreateH2OpusFromMat()
M*/
PETSC_EXTERN PetscErrorCode MatCreate_H2OPUS(Mat A)
{
Mat_H2OPUS *a;
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
#if defined(PETSC_H2OPUS_USE_GPU)
ierr = PetscDeviceInitialize(PETSC_DEVICE_CUDA);CHKERRQ(ierr);
#endif
ierr = PetscNewLog(A,&a);CHKERRQ(ierr);
A->data = (void*)a;
a->eta = 0.9;
a->leafsize = 32;
a->basisord = 4;
a->max_rank = 64;
a->bs = 32;
a->rtol = 1.e-4;
a->s = 1.0;
a->norm_max_samples = 10;
#if defined(H2OPUS_USE_MPI)
h2opusCreateDistributedHandleComm(&a->handle,PetscObjectComm((PetscObject)A));
#else
h2opusCreateHandle(&a->handle);
#endif
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)A,MATH2OPUS);CHKERRQ(ierr);
ierr = PetscMemzero(A->ops,sizeof(struct _MatOps));CHKERRQ(ierr);
A->ops->destroy = MatDestroy_H2OPUS;
A->ops->view = MatView_H2OPUS;
A->ops->assemblyend = MatAssemblyEnd_H2OPUS;
A->ops->mult = MatMult_H2OPUS;
A->ops->multtranspose = MatMultTranspose_H2OPUS;
A->ops->multadd = MatMultAdd_H2OPUS;
A->ops->multtransposeadd = MatMultTransposeAdd_H2OPUS;
A->ops->scale = MatScale_H2OPUS;
A->ops->duplicate = MatDuplicate_H2OPUS;
A->ops->setfromoptions = MatSetFromOptions_H2OPUS;
A->ops->norm = MatNorm_H2OPUS;
A->ops->zeroentries = MatZeroEntries_H2OPUS;
#if defined(PETSC_H2OPUS_USE_GPU)
A->ops->bindtocpu = MatBindToCPU_H2OPUS;
#endif
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_seqdense_C",MatProductSetFromOptions_H2OPUS);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_seqdensecuda_C",MatProductSetFromOptions_H2OPUS);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_mpidense_C",MatProductSetFromOptions_H2OPUS);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_mpidensecuda_C",MatProductSetFromOptions_H2OPUS);CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
ierr = PetscFree(A->defaultvectype);CHKERRQ(ierr);
ierr = PetscStrallocpy(VECCUDA,&A->defaultvectype);CHKERRQ(ierr);
#endif
PetscFunctionReturn(0);
}
/*@C
MatH2OpusOrthogonalize - Orthogonalize the basis tree of a hierarchical matrix.
Input Parameter:
. A - the matrix
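   Example usage, a minimal sketch assuming A is an assembled matrix of type MATH2OPUS:
      ierr = MatH2OpusOrthogonalize(A);CHKERRQ(ierr);
   Note that MatH2OpusCompress() calls this function internally, so an explicit call is typically
   only needed when compression is not requested.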
Level: intermediate
.seealso: MatCreate(), MATH2OPUS, MatCreateH2OpusFromMat(), MatCreateH2OpusFromKernel(), MatH2OpusCompress()
*/
PetscErrorCode MatH2OpusOrthogonalize(Mat A)
{
PetscErrorCode ierr;
PetscBool ish2opus;
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscMPIInt size;
PetscBool boundtocpu = PETSC_TRUE;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidType(A,1);
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (!ish2opus) PetscFunctionReturn(0);
if (a->orthogonal) PetscFunctionReturn(0);
HLibProfile::clear();
ierr = PetscLogEventBegin(MAT_H2Opus_Orthog,A,0,0,0);CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
boundtocpu = A->boundtocpu;
#endif
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
if (size > 1) {
if (boundtocpu) {
if (!a->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
#if defined(H2OPUS_USE_MPI)
distributed_horthog(*a->dist_hmatrix, a->handle);
#endif
#if defined(PETSC_H2OPUS_USE_GPU)
A->offloadmask = PETSC_OFFLOAD_CPU;
} else {
if (!a->dist_hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if defined(H2OPUS_USE_MPI)
distributed_horthog(*a->dist_hmatrix_gpu, a->handle);
#endif
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
}
} else {
#if defined(H2OPUS_USE_MPI)
h2opusHandle_t handle = a->handle->handle;
#else
h2opusHandle_t handle = a->handle;
#endif
if (boundtocpu) {
if (!a->hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
horthog(*a->hmatrix, handle);
#if defined(PETSC_H2OPUS_USE_GPU)
A->offloadmask = PETSC_OFFLOAD_CPU;
} else {
if (!a->hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
horthog(*a->hmatrix_gpu, handle);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
}
}
a->orthogonal = PETSC_TRUE;
{ /* log flops */
double gops,time,perf,dev;
HLibProfile::getHorthogPerf(gops,time,perf,dev);
#if defined(PETSC_H2OPUS_USE_GPU)
if (boundtocpu) {
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
} else {
ierr = PetscLogGpuFlops(1e9*gops);CHKERRQ(ierr);
}
#else
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
#endif
}
ierr = PetscLogEventEnd(MAT_H2Opus_Orthog,A,0,0,0);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@C
MatH2OpusCompress - Compress a hierarchical matrix.
Input Parameters:
+ A - the matrix
- tol - the absolute truncation threshold
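   Example usage, a minimal sketch assuming A is an assembled matrix of type MATH2OPUS:
      ierr = MatH2OpusCompress(A,1.e-6);CHKERRQ(ierr);
   The basis is orthogonalized first, if needed, via MatH2OpusOrthogonalize().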
Level: intermediate
.seealso: MatCreate(), MATH2OPUS, MatCreateH2OpusFromMat(), MatCreateH2OpusFromKernel(), MatH2OpusOrthogonalize()
*/
PetscErrorCode MatH2OpusCompress(Mat A, PetscReal tol)
{
PetscErrorCode ierr;
PetscBool ish2opus;
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscMPIInt size;
PetscBool boundtocpu = PETSC_TRUE;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidType(A,1);
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (!ish2opus) PetscFunctionReturn(0);
ierr = MatH2OpusOrthogonalize(A);CHKERRQ(ierr);
HLibProfile::clear();
ierr = PetscLogEventBegin(MAT_H2Opus_Compress,A,0,0,0);CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
boundtocpu = A->boundtocpu;
#endif
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
if (size > 1) {
if (boundtocpu) {
if (!a->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
#if defined(H2OPUS_USE_MPI)
distributed_hcompress(*a->dist_hmatrix, tol, a->handle);
#endif
#if defined(PETSC_H2OPUS_USE_GPU)
A->offloadmask = PETSC_OFFLOAD_CPU;
} else {
if (!a->dist_hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if defined(H2OPUS_USE_MPI)
distributed_hcompress(*a->dist_hmatrix_gpu, tol, a->handle);
#endif
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
}
} else {
#if defined(H2OPUS_USE_MPI)
h2opusHandle_t handle = a->handle->handle;
#else
h2opusHandle_t handle = a->handle;
#endif
if (boundtocpu) {
if (!a->hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
hcompress(*a->hmatrix, tol, handle);
#if defined(PETSC_H2OPUS_USE_GPU)
A->offloadmask = PETSC_OFFLOAD_CPU;
} else {
if (!a->hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
hcompress(*a->hmatrix_gpu, tol, handle);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
}
}
{ /* log flops */
double gops,time,perf,dev;
HLibProfile::getHcompressPerf(gops,time,perf,dev);
#if defined(PETSC_H2OPUS_USE_GPU)
if (boundtocpu) {
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
} else {
ierr = PetscLogGpuFlops(1e9*gops);CHKERRQ(ierr);
}
#else
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
#endif
}
ierr = PetscLogEventEnd(MAT_H2Opus_Compress,A,0,0,0);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@C
MatH2OpusSetSamplingMat - Set the matrix to be sampled via matrix-vector products when constructing the hierarchical matrix.
Input Parameters:
+ A - the hierarchical matrix
. B - the matrix to be sampled
. bs - maximum number of samples to be taken concurrently
- tol - relative tolerance for construction
Notes: Need to call MatAssemblyBegin/End() to update the hierarchical matrix.
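   Example usage, a minimal sketch assuming A has type MATH2OPUS and B is the operator to be sampled:
      ierr = MatH2OpusSetSamplingMat(A,B,32,1.e-4);CHKERRQ(ierr);
      ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);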
Level: intermediate
.seealso: MatCreate(), MATH2OPUS, MatCreateH2OpusFromMat(), MatCreateH2OpusFromKernel(), MatH2OpusCompress(), MatH2OpusOrthogonalize()
*/
PetscErrorCode MatH2OpusSetSamplingMat(Mat A, Mat B, PetscInt bs, PetscReal tol)
{
PetscBool ish2opus;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidType(A,1);
if (B) PetscValidHeaderSpecific(B,MAT_CLASSID,2);
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (ish2opus) {
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
if (!a->sampler) a->sampler = new PetscMatrixSampler();
a->sampler->SetSamplingMat(B);
if (bs > 0) a->bs = bs;
if (tol > 0.) a->rtol = tol;
delete a->kernel;
}
PetscFunctionReturn(0);
}
/*@C
MatCreateH2OpusFromKernel - Creates a MATH2OPUS from a user-supplied kernel.
Input Parameters:
+ comm - MPI communicator
. m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
. n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
. M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
. N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
. spacedim - dimension of the space coordinates
. coords - coordinates of the points
. cdist - whether or not coordinates are distributed
. kernel - computational kernel (or NULL)
. kernelctx - kernel context
. eta - admissibility condition tolerance
. leafsize - leaf size in cluster tree
- basisord - approximation order for Chebyshev interpolation of low-rank blocks
Output Parameter:
. nA - matrix
Options Database Keys:
+ -mat_h2opus_leafsize <PetscInt> - Leaf size in the cluster tree
. -mat_h2opus_eta <PetscReal> - Admissibility condition tolerance
. -mat_h2opus_order <PetscInt> - Chebyshev approximation order
- -mat_h2opus_normsamples <PetscInt> - Maximum number of samples to be taken when estimating norms
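   Example usage, a minimal sketch where mykernel, kernelctx, spacedim, coords and N are placeholders
   supplied by the application (0.9, 32 and 4 match the default eta, leaf size and basis order):
      Mat A;
      ierr = MatCreateH2OpusFromKernel(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,N,N,spacedim,coords,PETSC_FALSE,mykernel,kernelctx,0.9,32,4,&A);CHKERRQ(ierr);
      ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);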
Level: intermediate
.seealso: MatCreate(), MATH2OPUS, MatCreateH2OpusFromMat()
@*/
PetscErrorCode MatCreateH2OpusFromKernel(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt spacedim, const PetscReal coords[], PetscBool cdist, MatH2OpusKernel kernel, void *kernelctx, PetscReal eta, PetscInt leafsize, PetscInt basisord, Mat* nA)
{
Mat A;
Mat_H2OPUS *h2opus;
#if defined(PETSC_H2OPUS_USE_GPU)
PetscBool iscpu = PETSC_FALSE;
#else
PetscBool iscpu = PETSC_TRUE;
#endif
PetscErrorCode ierr;
PetscFunctionBegin;
if (m != n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Different row and column local sizes are not supported");
ierr = MatCreate(comm,&A);CHKERRQ(ierr);
ierr = MatSetSizes(A,m,n,M,N);CHKERRQ(ierr);
if (M != N) SETERRQ(comm,PETSC_ERR_SUP,"Rectangular matrices are not supported");
ierr = MatSetType(A,MATH2OPUS);CHKERRQ(ierr);
ierr = MatBindToCPU(A,iscpu);CHKERRQ(ierr);
ierr = MatH2OpusSetCoords_H2OPUS(A,spacedim,coords,cdist,kernel,kernelctx);CHKERRQ(ierr);
h2opus = (Mat_H2OPUS*)A->data;
if (eta > 0.) h2opus->eta = eta;
if (leafsize > 0) h2opus->leafsize = leafsize;
if (basisord > 0) h2opus->basisord = basisord;
*nA = A;
PetscFunctionReturn(0);
}
/*@C
MatCreateH2OpusFromMat - Creates a MATH2OPUS matrix by sampling a user-supplied operator.
Input Parameters:
+ B - the matrix to be sampled
. spacedim - dimension of the space coordinates
. coords - coordinates of the points
. cdist - whether or not coordinates are distributed
. eta - admissibility condition tolerance
. leafsize - leaf size in cluster tree
. maxrank - maximum rank allowed
. bs - maximum number of samples to be taken concurrently
- rtol - relative tolerance for construction
Output Parameter:
. nA - matrix
Options Database Keys:
+ -mat_h2opus_leafsize <PetscInt> - Leaf size in the cluster tree
. -mat_h2opus_eta <PetscReal> - Admissibility condition tolerance
. -mat_h2opus_maxrank <PetscInt> - Maximum rank allowed when constructing from sampling
. -mat_h2opus_samples <PetscInt> - Maximum number of samples to be taken concurrently
. -mat_h2opus_rtol <PetscReal> - Relative tolerance for construction from sampling
. -mat_h2opus_check <PetscBool> - Check error when constructing from sampling during MatAssemblyEnd()
. -mat_h2opus_hara_verbose <PetscBool> - Verbose output from hara construction
- -mat_h2opus_normsamples <PetscInt> - Maximum number of samples to be taken when estimating norms
Notes: not available in parallel
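   Example usage, a minimal sketch where B is the operator to be sampled and spacedim/coords are
   placeholders for the associated point coordinates (the numerical values match the defaults):
      Mat A;
      ierr = MatCreateH2OpusFromMat(B,spacedim,coords,PETSC_FALSE,0.9,32,64,32,1.e-4,&A);CHKERRQ(ierr);
      ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);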
Level: intermediate
.seealso: MatCreate(), MATH2OPUS, MatCreateH2OpusFromKernel()
@*/
PetscErrorCode MatCreateH2OpusFromMat(Mat B, PetscInt spacedim, const PetscReal coords[], PetscBool cdist, PetscReal eta, PetscInt leafsize, PetscInt maxrank, PetscInt bs, PetscReal rtol, Mat *nA)
{
Mat A;
Mat_H2OPUS *h2opus;
MPI_Comm comm;
PetscBool boundtocpu = PETSC_TRUE;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(B,MAT_CLASSID,1);
PetscValidLogicalCollectiveInt(B,spacedim,2);
PetscValidLogicalCollectiveReal(B,eta,4);
PetscValidLogicalCollectiveInt(B,leafsize,5);
PetscValidLogicalCollectiveInt(B,maxrank,6);
PetscValidLogicalCollectiveInt(B,bs,7);
PetscValidLogicalCollectiveReal(B,rtol,8);
PetscValidPointer(nA,9);
ierr = PetscObjectGetComm((PetscObject)B,&comm);CHKERRQ(ierr);
if (B->rmap->n != B->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Different row and column local sizes are not supported");
if (B->rmap->N != B->cmap->N) SETERRQ(comm,PETSC_ERR_SUP,"Rectangular matrices are not supported");
ierr = MatCreate(comm,&A);CHKERRQ(ierr);
ierr = MatSetSizes(A,B->rmap->n,B->cmap->n,B->rmap->N,B->cmap->N);CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
{
PetscBool iscuda;
VecType vtype;
ierr = MatGetVecType(B,&vtype);CHKERRQ(ierr);
ierr = PetscStrcmp(vtype,VECCUDA,&iscuda);CHKERRQ(ierr);
if (!iscuda) {
ierr = PetscStrcmp(vtype,VECSEQCUDA,&iscuda);CHKERRQ(ierr);
if (!iscuda) {
ierr = PetscStrcmp(vtype,VECMPICUDA,&iscuda);CHKERRQ(ierr);
}
}
if (iscuda && !B->boundtocpu) boundtocpu = PETSC_FALSE;
}
#endif
ierr = MatSetType(A,MATH2OPUS);CHKERRQ(ierr);
ierr = MatBindToCPU(A,boundtocpu);CHKERRQ(ierr);
if (spacedim) {
ierr = MatH2OpusSetCoords_H2OPUS(A,spacedim,coords,cdist,NULL,NULL);CHKERRQ(ierr);
}
ierr = MatPropagateSymmetryOptions(B,A);CHKERRQ(ierr);
/* if (!A->symmetric) SETERRQ(comm,PETSC_ERR_SUP,"Unsymmetric sampling does not work"); */
h2opus = (Mat_H2OPUS*)A->data;
h2opus->sampler = new PetscMatrixSampler(B);
if (eta > 0.) h2opus->eta = eta;
if (leafsize > 0) h2opus->leafsize = leafsize;
if (maxrank > 0) h2opus->max_rank = maxrank;
if (bs > 0) h2opus->bs = bs;
if (rtol > 0.) h2opus->rtol = rtol;
*nA = A;
A->preallocated = PETSC_TRUE;
PetscFunctionReturn(0);
}
/*@C
MatH2OpusGetIndexMap - Access reordering index set.
Input Parameters:
. A - the matrix
Output Parameter:
. indexmap - the index set for the reordering
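   Example usage, a minimal sketch assuming A is an assembled matrix of type MATH2OPUS:
      IS indexmap;
      ierr = MatH2OpusGetIndexMap(A,&indexmap);CHKERRQ(ierr);
      ierr = ISView(indexmap,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);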
Level: intermediate
.seealso: MatCreate(), MATH2OPUS, MatCreateH2OpusFromMat(), MatCreateH2OpusFromKernel()
@*/
PetscErrorCode MatH2OpusGetIndexMap(Mat A, IS *indexmap)
{
PetscBool ish2opus;
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidType(A,1);
PetscValidPointer(indexmap,2);
if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (!ish2opus) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Not for type %s",((PetscObject)A)->type_name);
*indexmap = a->h2opus_indexmap;
PetscFunctionReturn(0);
}
/*@C
MatH2OpusMapVec - Maps a vector between PETSc and H2Opus ordering
Input Parameters:
+ A - the matrix
. nativetopetsc - if true, maps from H2Opus ordering to PETSc ordering. If false, applies the reverse map
- in - the vector to be mapped
Output Parameter:
. out - the newly created mapped vector
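   Example usage, a minimal sketch mapping a vector x from PETSc ordering to H2Opus ordering:
      Vec xnative;
      ierr = MatH2OpusMapVec(A,PETSC_FALSE,x,&xnative);CHKERRQ(ierr);
      ... work with xnative in H2Opus ordering ...
      ierr = VecDestroy(&xnative);CHKERRQ(ierr);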
Level: intermediate
.seealso: MatCreate(), MATH2OPUS, MatCreateH2OpusFromMat(), MatCreateH2OpusFromKernel()
*/
PetscErrorCode MatH2OpusMapVec(Mat A, PetscBool nativetopetsc, Vec in, Vec* out)
{
PetscBool ish2opus;
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscScalar *xin,*xout;
PetscBool nm;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidType(A,1);
PetscValidLogicalCollectiveBool(A,nativetopetsc,2);
PetscValidHeaderSpecific(in,VEC_CLASSID,3);
PetscValidPointer(out,4);
if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (!ish2opus) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Not for type %s",((PetscObject)A)->type_name);
nm = a->nativemult;
ierr = MatH2OpusSetNativeMult(A,(PetscBool)!nativetopetsc);CHKERRQ(ierr);
ierr = MatCreateVecs(A,out,NULL);CHKERRQ(ierr);
ierr = MatH2OpusSetNativeMult(A,nm);CHKERRQ(ierr);
if (!a->sf) { /* same ordering */
ierr = VecCopy(in,*out);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
ierr = VecGetArrayRead(in,(const PetscScalar**)&xin);CHKERRQ(ierr);
ierr = VecGetArrayWrite(*out,&xout);CHKERRQ(ierr);
if (nativetopetsc) {
ierr = PetscSFReduceBegin(a->sf,MPIU_SCALAR,xin,xout,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFReduceEnd(a->sf,MPIU_SCALAR,xin,xout,MPI_REPLACE);CHKERRQ(ierr);
} else {
ierr = PetscSFBcastBegin(a->sf,MPIU_SCALAR,xin,xout,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(a->sf,MPIU_SCALAR,xin,xout,MPI_REPLACE);CHKERRQ(ierr);
}
ierr = VecRestoreArrayRead(in,(const PetscScalar**)&xin);CHKERRQ(ierr);
ierr = VecRestoreArrayWrite(*out,&xout);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#endif
| 8a5c51526d34b2ebdaca3c7261bf2800fec48e8d.cu |
#include <h2opusconf.h>
/* skip compilation of this .cu file if H2OPUS is CPU only while PETSc has GPU support */
#if !defined(__CUDACC__) || defined(H2OPUS_USE_GPU)
#include <h2opus.h>
#if defined(H2OPUS_USE_MPI)
#include <h2opus/distributed/distributed_h2opus_handle.h>
#include <h2opus/distributed/distributed_geometric_construction.h>
#include <h2opus/distributed/distributed_hgemv.h>
#include <h2opus/distributed/distributed_horthog.h>
#include <h2opus/distributed/distributed_hcompress.h>
#endif
#include <h2opus/util/boxentrygen.h>
#include <petsc/private/matimpl.h>
#include <petsc/private/vecimpl.h>
#include <petsc/private/deviceimpl.h>
#include <petscsf.h>
/* math2opusutils */
PETSC_INTERN PetscErrorCode PetscSFGetVectorSF(PetscSF,PetscInt,PetscInt,PetscInt,PetscSF*);
PETSC_INTERN PetscErrorCode VecSign(Vec,Vec);
PETSC_INTERN PetscErrorCode VecSetDelta(Vec,PetscInt);
PETSC_INTERN PetscErrorCode MatApproximateNorm_Private(Mat,NormType,PetscInt,PetscReal*);
#define MatH2OpusGetThrustPointer(v) thrust::raw_pointer_cast((v).data())
/* Use GPU only if H2OPUS is configured for GPU */
#if defined(PETSC_HAVE_CUDA) && defined(H2OPUS_USE_GPU)
#define PETSC_H2OPUS_USE_GPU
#endif
#if defined(PETSC_H2OPUS_USE_GPU)
#define MatH2OpusUpdateIfNeeded(A,B) MatBindToCPU(A,(PetscBool)((A)->boundtocpu || (B)))
#else
#define MatH2OpusUpdateIfNeeded(A,B) 0
#endif
// TODO H2OPUS:
// DistributedHMatrix
// unsymmetric ?
// transpose for distributed_hgemv?
// clearData()
// Unify interface for sequential and parallel?
// Reuse geometric construction (almost possible, only the unsymmetric case is explicitly handled)
//
template <class T> class PetscPointCloud : public H2OpusDataSet<T>
{
private:
int dimension;
size_t num_points;
std::vector<T> pts;
public:
PetscPointCloud(int dim, size_t num_pts, const T coords[])
{
this->dimension = dim;
this->num_points = num_pts;
pts.resize(num_pts*dim);
if (coords) {
for (size_t n = 0; n < num_points; n++)
for (int i = 0; i < dim; i++)
pts[n*dim + i] = coords[n*dim + i];
} else {
PetscReal h = 1./(num_points - 1);
for (size_t n = 0; n < num_points; n++)
for (int i = 0; i < dim; i++)
pts[n*dim + i] = i*h;
}
}
PetscPointCloud(const PetscPointCloud<T>& other)
{
size_t N = other.dimension * other.num_points;
this->dimension = other.dimension;
this->num_points = other.num_points;
this->pts.resize(N);
for (size_t i = 0; i < N; i++)
this->pts[i] = other.pts[i];
}
int getDimension() const
{
return dimension;
}
size_t getDataSetSize() const
{
return num_points;
}
T getDataPoint(size_t idx, int dim) const
{
assert(dim < dimension && idx < num_points);
return pts[idx*dimension + dim];
}
void Print(std::ostream& out = std::cout)
{
out << "Dimension: " << dimension << std::endl;
out << "NumPoints: " << num_points << std::endl;
for (size_t n = 0; n < num_points; n++) {
for (int d = 0; d < dimension; d++)
out << pts[n*dimension + d] << " ";
out << std::endl;
}
}
};
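/* Example (sketch): wrap an existing coordinate array, laid out point by point as
   [x_0,y_0,z_0, x_1,y_1,z_1, ...], into a point cloud used for cluster tree construction:
     PetscPointCloud<PetscReal> cloud(3,npoints,coords);
   where npoints and coords are placeholders supplied by the caller. */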
template<class T> class PetscFunctionGenerator
{
private:
MatH2OpusKernel k;
int dim;
void *ctx;
public:
PetscFunctionGenerator(MatH2OpusKernel k, int dim, void* ctx) { this->k = k; this->dim = dim; this->ctx = ctx; }
PetscFunctionGenerator(PetscFunctionGenerator& other) { this->k = other.k; this->dim = other.dim; this->ctx = other.ctx; }
T operator()(PetscReal *pt1, PetscReal *pt2)
{
return (T)((*this->k)(this->dim,pt1,pt2,this->ctx));
}
};
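/* Example (sketch): adapt a MatH2OpusKernel callback to the entry generator interface used below,
   where mykernel, kernelctx, pt1 and pt2 are placeholders:
     PetscFunctionGenerator<PetscScalar> gen(mykernel,spacedim,kernelctx);
     PetscScalar val = gen(pt1,pt2); // evaluates (*mykernel)(spacedim,pt1,pt2,kernelctx)
*/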
#include <../src/mat/impls/h2opus/math2opussampler.hpp>
/* just to not clutter the code */
#if !defined(H2OPUS_USE_GPU)
typedef HMatrix HMatrix_GPU;
#if defined(H2OPUS_USE_MPI)
typedef DistributedHMatrix DistributedHMatrix_GPU;
#endif
#endif
typedef struct {
#if defined(H2OPUS_USE_MPI)
distributedH2OpusHandle_t handle;
#else
h2opusHandle_t handle;
#endif
/* Sequential and parallel matrices are two different classes at the moment */
HMatrix *hmatrix;
#if defined(H2OPUS_USE_MPI)
DistributedHMatrix *dist_hmatrix;
#else
HMatrix *dist_hmatrix; /* just to not clutter the code */
#endif
/* May use permutations */
PetscSF sf;
PetscLayout h2opus_rmap, h2opus_cmap;
IS h2opus_indexmap;
thrust::host_vector<PetscScalar> *xx,*yy;
PetscInt xxs,yys;
PetscBool multsetup;
/* GPU */
HMatrix_GPU *hmatrix_gpu;
#if defined(H2OPUS_USE_MPI)
DistributedHMatrix_GPU *dist_hmatrix_gpu;
#else
HMatrix_GPU *dist_hmatrix_gpu; /* just to not clutter the code */
#endif
#if defined(PETSC_H2OPUS_USE_GPU)
thrust::device_vector<PetscScalar> *xx_gpu,*yy_gpu;
PetscInt xxs_gpu,yys_gpu;
#endif
/* construction from matvecs */
PetscMatrixSampler* sampler;
PetscBool nativemult;
/* Admissibility */
PetscReal eta;
PetscInt leafsize;
/* for dof reordering */
PetscPointCloud<PetscReal> *ptcloud;
/* kernel for generating matrix entries */
PetscFunctionGenerator<PetscScalar> *kernel;
/* basis orthogonalized? */
PetscBool orthogonal;
/* customization */
PetscInt basisord;
PetscInt max_rank;
PetscInt bs;
PetscReal rtol;
PetscInt norm_max_samples;
PetscBool check_construction;
PetscBool hara_verbose;
/* keeps track of MatScale values */
PetscScalar s;
} Mat_H2OPUS;
static PetscErrorCode MatDestroy_H2OPUS(Mat A)
{
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
#if defined(H2OPUS_USE_MPI)
h2opusDestroyDistributedHandle(a->handle);
#else
h2opusDestroyHandle(a->handle);
#endif
delete a->dist_hmatrix;
delete a->hmatrix;
ierr = PetscSFDestroy(&a->sf);CHKERRQ(ierr);
ierr = PetscLayoutDestroy(&a->h2opus_rmap);CHKERRQ(ierr);
ierr = PetscLayoutDestroy(&a->h2opus_cmap);CHKERRQ(ierr);
ierr = ISDestroy(&a->h2opus_indexmap);CHKERRQ(ierr);
delete a->xx;
delete a->yy;
delete a->hmatrix_gpu;
delete a->dist_hmatrix_gpu;
#if defined(PETSC_H2OPUS_USE_GPU)
delete a->xx_gpu;
delete a->yy_gpu;
#endif
delete a->sampler;
delete a->ptcloud;
delete a->kernel;
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_seqdense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_seqdensecuda_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_mpidense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_mpidensecuda_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)A,NULL);CHKERRQ(ierr);
ierr = PetscFree(A->data);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
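/* When native multiplication is enabled, MatMult() and friends act on vectors already in H2Opus
   (cluster tree) ordering and the matrix exposes the corresponding layouts; otherwise vectors are
   permuted through the PetscSF set up in MatSetUpMultiply_H2OPUS(). */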
PetscErrorCode MatH2OpusSetNativeMult(Mat A, PetscBool nm)
{
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscBool ish2opus;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidLogicalCollectiveBool(A,nm,2);
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (ish2opus) {
if (a->h2opus_rmap) { /* need to swap layouts for vector creation */
if ((!a->nativemult && nm) || (a->nativemult && !nm)) {
PetscLayout t;
t = A->rmap;
A->rmap = a->h2opus_rmap;
a->h2opus_rmap = t;
t = A->cmap;
A->cmap = a->h2opus_cmap;
a->h2opus_cmap = t;
}
}
a->nativemult = nm;
}
PetscFunctionReturn(0);
}
PetscErrorCode MatH2OpusGetNativeMult(Mat A, PetscBool *nm)
{
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscBool ish2opus;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(nm,2);
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (!ish2opus) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Not for type %s",((PetscObject)A)->type_name);
*nm = a->nativemult;
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatNorm_H2OPUS(Mat A, NormType normtype, PetscReal* n)
{
PetscErrorCode ierr;
PetscBool ish2opus;
PetscInt nmax = PETSC_DECIDE;
Mat_H2OPUS *a = NULL;
PetscBool mult = PETSC_FALSE;
PetscFunctionBegin;
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (ish2opus) { /* set userdefine number of samples and fastpath for mult (norms are order independent) */
a = (Mat_H2OPUS*)A->data;
nmax = a->norm_max_samples;
mult = a->nativemult;
ierr = MatH2OpusSetNativeMult(A,PETSC_TRUE);CHKERRQ(ierr);
} else {
ierr = PetscOptionsGetInt(((PetscObject)A)->options,((PetscObject)A)->prefix,"-mat_approximate_norm_samples",&nmax,NULL);CHKERRQ(ierr);
}
ierr = MatApproximateNorm_Private(A,normtype,nmax,n);CHKERRQ(ierr);
if (a) { ierr = MatH2OpusSetNativeMult(A,mult);CHKERRQ(ierr); }
PetscFunctionReturn(0);
}
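/* Computes C = s * op(A) * B for dense B and C, with op the identity or the transpose; handles both
   CPU and GPU execution and, when needed, permutes the columns of B and C to/from H2Opus ordering
   through vector star forests. */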
static PetscErrorCode MatMultNKernel_H2OPUS(Mat A, PetscBool transA, Mat B, Mat C)
{
Mat_H2OPUS *h2opus = (Mat_H2OPUS*)A->data;
#if defined(H2OPUS_USE_MPI)
h2opusHandle_t handle = h2opus->handle->handle;
#else
h2opusHandle_t handle = h2opus->handle;
#endif
PetscBool boundtocpu = PETSC_TRUE;
PetscScalar *xx,*yy,*uxx,*uyy;
PetscInt blda,clda;
PetscMPIInt size;
PetscSF bsf,csf;
PetscBool usesf = (PetscBool)(h2opus->sf && !h2opus->nativemult);
PetscErrorCode ierr;
PetscFunctionBegin;
HLibProfile::clear();
#if defined(PETSC_H2OPUS_USE_GPU)
boundtocpu = A->boundtocpu;
#endif
ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr);
ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr);
if (usesf) {
PetscInt n;
ierr = PetscSFGetGraph(h2opus->sf,NULL,&n,NULL,NULL);CHKERRQ(ierr);
ierr = PetscObjectQuery((PetscObject)B,"_math2opus_vectorsf",(PetscObject*)&bsf);CHKERRQ(ierr);
if (!bsf) {
ierr = PetscSFGetVectorSF(h2opus->sf,B->cmap->N,blda,PETSC_DECIDE,&bsf);CHKERRQ(ierr);
ierr = PetscObjectCompose((PetscObject)B,"_math2opus_vectorsf",(PetscObject)bsf);CHKERRQ(ierr);
ierr = PetscObjectDereference((PetscObject)bsf);CHKERRQ(ierr);
}
ierr = PetscObjectQuery((PetscObject)C,"_math2opus_vectorsf",(PetscObject*)&csf);CHKERRQ(ierr);
if (!csf) {
ierr = PetscSFGetVectorSF(h2opus->sf,B->cmap->N,clda,PETSC_DECIDE,&csf);CHKERRQ(ierr);
ierr = PetscObjectCompose((PetscObject)C,"_math2opus_vectorsf",(PetscObject)csf);CHKERRQ(ierr);
ierr = PetscObjectDereference((PetscObject)csf);CHKERRQ(ierr);
}
blda = n;
clda = n;
}
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
if (boundtocpu) {
if (usesf) {
PetscInt n;
ierr = PetscSFGetGraph(h2opus->sf,NULL,&n,NULL,NULL);CHKERRQ(ierr);
if (h2opus->xxs < B->cmap->n) { h2opus->xx->resize(n*B->cmap->N); h2opus->xxs = B->cmap->N; }
if (h2opus->yys < B->cmap->n) { h2opus->yy->resize(n*B->cmap->N); h2opus->yys = B->cmap->N; }
}
ierr = MatDenseGetArrayRead(B,(const PetscScalar**)&xx);CHKERRQ(ierr);
ierr = MatDenseGetArrayWrite(C,&yy);CHKERRQ(ierr);
if (usesf) {
uxx = MatH2OpusGetThrustPointer(*h2opus->xx);
uyy = MatH2OpusGetThrustPointer(*h2opus->yy);
ierr = PetscSFBcastBegin(bsf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(bsf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
} else {
uxx = xx;
uyy = yy;
}
if (size > 1) {
if (!h2opus->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing distributed CPU matrix");
if (transA && !A->symmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMultTranspose not yet coded in parallel");
#if defined(H2OPUS_USE_MPI)
distributed_hgemv(/*transA ? H2Opus_Trans : H2Opus_NoTrans, */h2opus->s, *h2opus->dist_hmatrix, uxx, blda, 0.0, uyy, clda, B->cmap->N, h2opus->handle);
#endif
} else {
if (!h2opus->hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
hgemv(transA ? H2Opus_Trans : H2Opus_NoTrans, h2opus->s, *h2opus->hmatrix, uxx, blda, 0.0, uyy, clda, B->cmap->N, handle);
}
ierr = MatDenseRestoreArrayRead(B,(const PetscScalar**)&xx);CHKERRQ(ierr);
if (usesf) {
ierr = PetscSFReduceBegin(csf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFReduceEnd(csf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
}
ierr = MatDenseRestoreArrayWrite(C,&yy);CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
} else {
PetscBool ciscuda,biscuda;
if (usesf) {
PetscInt n;
ierr = PetscSFGetGraph(h2opus->sf,NULL,&n,NULL,NULL);CHKERRQ(ierr);
if (h2opus->xxs_gpu < B->cmap->n) { h2opus->xx_gpu->resize(n*B->cmap->N); h2opus->xxs_gpu = B->cmap->N; }
if (h2opus->yys_gpu < B->cmap->n) { h2opus->yy_gpu->resize(n*B->cmap->N); h2opus->yys_gpu = B->cmap->N; }
}
/* If not of type seqdensecuda, convert on the fly (i.e. allocate GPU memory) */
ierr = PetscObjectTypeCompareAny((PetscObject)B,&biscuda,MATSEQDENSECUDA,MATMPIDENSECUDA,"");CHKERRQ(ierr);
if (!biscuda) {
ierr = MatConvert(B,MATDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
}
ierr = PetscObjectTypeCompareAny((PetscObject)C,&ciscuda,MATSEQDENSECUDA,MATMPIDENSECUDA,"");CHKERRQ(ierr);
if (!ciscuda) {
C->assembled = PETSC_TRUE;
ierr = MatConvert(C,MATDENSECUDA,MAT_INPLACE_MATRIX,&C);CHKERRQ(ierr);
}
ierr = MatDenseCUDAGetArrayRead(B,(const PetscScalar**)&xx);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArrayWrite(C,&yy);CHKERRQ(ierr);
if (usesf) {
uxx = MatH2OpusGetThrustPointer(*h2opus->xx_gpu);
uyy = MatH2OpusGetThrustPointer(*h2opus->yy_gpu);
ierr = PetscSFBcastBegin(bsf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(bsf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
} else {
uxx = xx;
uyy = yy;
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (size > 1) {
if (!h2opus->dist_hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing distributed GPU matrix");
if (transA && !A->symmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMultTranspose not yet coded in parallel");
#if defined(H2OPUS_USE_MPI)
distributed_hgemv(/* transA ? H2Opus_Trans : H2Opus_NoTrans, */h2opus->s, *h2opus->dist_hmatrix_gpu, uxx, blda, 0.0, uyy, clda, B->cmap->N, h2opus->handle);
#endif
} else {
if (!h2opus->hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
hgemv(transA ? H2Opus_Trans : H2Opus_NoTrans, h2opus->s, *h2opus->hmatrix_gpu, uxx, blda, 0.0, uyy, clda, B->cmap->N, handle);
}
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(B,(const PetscScalar**)&xx);CHKERRQ(ierr);
if (usesf) {
ierr = PetscSFReduceBegin(csf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFReduceEnd(csf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
}
ierr = MatDenseCUDARestoreArrayWrite(C,&yy);CHKERRQ(ierr);
if (!biscuda) {
ierr = MatConvert(B,MATDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
}
if (!ciscuda) {
ierr = MatConvert(C,MATDENSE,MAT_INPLACE_MATRIX,&C);CHKERRQ(ierr);
}
#endif
}
{ /* log flops */
double gops,time,perf,dev;
HLibProfile::getHgemvPerf(gops,time,perf,dev);
#if defined(PETSC_H2OPUS_USE_GPU)
if (boundtocpu) {
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
} else {
ierr = PetscLogGpuFlops(1e9*gops);CHKERRQ(ierr);
}
#else
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
#endif
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatProductNumeric_H2OPUS(Mat C)
{
Mat_Product *product = C->product;
PetscErrorCode ierr;
PetscFunctionBegin;
MatCheckProduct(C,1);
switch (product->type) {
case MATPRODUCT_AB:
ierr = MatMultNKernel_H2OPUS(product->A,PETSC_FALSE,product->B,C);CHKERRQ(ierr);
break;
case MATPRODUCT_AtB:
ierr = MatMultNKernel_H2OPUS(product->A,PETSC_TRUE,product->B,C);CHKERRQ(ierr);
break;
default:
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatProduct type %s is not supported",MatProductTypes[product->type]);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatProductSymbolic_H2OPUS(Mat C)
{
PetscErrorCode ierr;
Mat_Product *product = C->product;
PetscBool cisdense;
Mat A,B;
PetscFunctionBegin;
MatCheckProduct(C,1);
A = product->A;
B = product->B;
switch (product->type) {
case MATPRODUCT_AB:
ierr = MatSetSizes(C,A->rmap->n,B->cmap->n,A->rmap->N,B->cmap->N);CHKERRQ(ierr);
ierr = MatSetBlockSizesFromMats(C,product->A,product->B);CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)C,&cisdense,MATSEQDENSE,MATMPIDENSE,MATSEQDENSECUDA,MATMPIDENSECUDA,"");CHKERRQ(ierr);
if (!cisdense) { ierr = MatSetType(C,((PetscObject)product->B)->type_name);CHKERRQ(ierr); }
ierr = MatSetUp(C);CHKERRQ(ierr);
break;
case MATPRODUCT_AtB:
ierr = MatSetSizes(C,A->cmap->n,B->cmap->n,A->cmap->N,B->cmap->N);CHKERRQ(ierr);
ierr = MatSetBlockSizesFromMats(C,product->A,product->B);CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)C,&cisdense,MATSEQDENSE,MATMPIDENSE,MATSEQDENSECUDA,MATMPIDENSECUDA,"");CHKERRQ(ierr);
if (!cisdense) { ierr = MatSetType(C,((PetscObject)product->B)->type_name);CHKERRQ(ierr); }
ierr = MatSetUp(C);CHKERRQ(ierr);
break;
default:
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatProduct type %s is not supported",MatProductTypes[product->type]);
}
C->ops->productsymbolic = NULL;
C->ops->productnumeric = MatProductNumeric_H2OPUS;
PetscFunctionReturn(0);
}
static PetscErrorCode MatProductSetFromOptions_H2OPUS(Mat C)
{
PetscFunctionBegin;
MatCheckProduct(C,1);
if (C->product->type == MATPRODUCT_AB || C->product->type == MATPRODUCT_AtB) {
C->ops->productsymbolic = MatProductSymbolic_H2OPUS;
}
PetscFunctionReturn(0);
}
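/* Computes y = s * op(A) * x + sy * y, with op the identity or the transpose; used by MatMult(),
   MatMultTranspose() and the MatMultAdd() variants below. */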
static PetscErrorCode MatMultKernel_H2OPUS(Mat A, Vec x, PetscScalar sy, Vec y, PetscBool trans)
{
Mat_H2OPUS *h2opus = (Mat_H2OPUS*)A->data;
#if defined(H2OPUS_USE_MPI)
h2opusHandle_t handle = h2opus->handle->handle;
#else
h2opusHandle_t handle = h2opus->handle;
#endif
PetscBool boundtocpu = PETSC_TRUE;
PetscInt n;
PetscScalar *xx,*yy,*uxx,*uyy;
PetscMPIInt size;
PetscBool usesf = (PetscBool)(h2opus->sf && !h2opus->nativemult);
PetscErrorCode ierr;
PetscFunctionBegin;
HLibProfile::clear();
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
boundtocpu = A->boundtocpu;
#endif
if (usesf) {
ierr = PetscSFGetGraph(h2opus->sf,NULL,&n,NULL,NULL);CHKERRQ(ierr);
} else n = A->rmap->n;
if (boundtocpu) {
ierr = VecGetArrayRead(x,(const PetscScalar**)&xx);CHKERRQ(ierr);
if (sy == 0.0) {
ierr = VecGetArrayWrite(y,&yy);CHKERRQ(ierr);
} else {
ierr = VecGetArray(y,&yy);CHKERRQ(ierr);
}
if (usesf) {
uxx = MatH2OpusGetThrustPointer(*h2opus->xx);
uyy = MatH2OpusGetThrustPointer(*h2opus->yy);
ierr = PetscSFBcastBegin(h2opus->sf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(h2opus->sf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
if (sy != 0.0) {
ierr = PetscSFBcastBegin(h2opus->sf,MPIU_SCALAR,yy,uyy,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(h2opus->sf,MPIU_SCALAR,yy,uyy,MPI_REPLACE);CHKERRQ(ierr);
}
} else {
uxx = xx;
uyy = yy;
}
if (size > 1) {
if (!h2opus->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing distributed CPU matrix");
if (trans && !A->symmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMultTranspose not yet coded in parallel");
#if defined(H2OPUS_USE_MPI)
distributed_hgemv(/*trans ? H2Opus_Trans : H2Opus_NoTrans, */h2opus->s, *h2opus->dist_hmatrix, uxx, n, sy, uyy, n, 1, h2opus->handle);
#endif
} else {
if (!h2opus->hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
hgemv(trans ? H2Opus_Trans : H2Opus_NoTrans, h2opus->s, *h2opus->hmatrix, uxx, n, sy, uyy, n, 1, handle);
}
ierr = VecRestoreArrayRead(x,(const PetscScalar**)&xx);CHKERRQ(ierr);
if (usesf) {
ierr = PetscSFReduceBegin(h2opus->sf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFReduceEnd(h2opus->sf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
}
if (sy == 0.0) {
ierr = VecRestoreArrayWrite(y,&yy);CHKERRQ(ierr);
} else {
ierr = VecRestoreArray(y,&yy);CHKERRQ(ierr);
}
#if defined(PETSC_H2OPUS_USE_GPU)
} else {
ierr = VecCUDAGetArrayRead(x,(const PetscScalar**)&xx);CHKERRQ(ierr);
if (sy == 0.0) {
ierr = VecCUDAGetArrayWrite(y,&yy);CHKERRQ(ierr);
} else {
ierr = VecCUDAGetArray(y,&yy);CHKERRQ(ierr);
}
if (usesf) {
uxx = MatH2OpusGetThrustPointer(*h2opus->xx_gpu);
uyy = MatH2OpusGetThrustPointer(*h2opus->yy_gpu);
ierr = PetscSFBcastBegin(h2opus->sf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(h2opus->sf,MPIU_SCALAR,xx,uxx,MPI_REPLACE);CHKERRQ(ierr);
if (sy != 0.0) {
ierr = PetscSFBcastBegin(h2opus->sf,MPIU_SCALAR,yy,uyy,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(h2opus->sf,MPIU_SCALAR,yy,uyy,MPI_REPLACE);CHKERRQ(ierr);
}
} else {
uxx = xx;
uyy = yy;
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (size > 1) {
if (!h2opus->dist_hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing distributed GPU matrix");
if (trans && !A->symmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMultTranspose not yet coded in parallel");
#if defined(H2OPUS_USE_MPI)
distributed_hgemv(/*trans ? H2Opus_Trans : H2Opus_NoTrans, */h2opus->s, *h2opus->dist_hmatrix_gpu, uxx, n, sy, uyy, n, 1, h2opus->handle);
#endif
} else {
if (!h2opus->hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
hgemv(trans ? H2Opus_Trans : H2Opus_NoTrans, h2opus->s, *h2opus->hmatrix_gpu, uxx, n, sy, uyy, n, 1, handle);
}
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(x,(const PetscScalar**)&xx);CHKERRQ(ierr);
if (usesf) {
ierr = PetscSFReduceBegin(h2opus->sf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFReduceEnd(h2opus->sf,MPIU_SCALAR,uyy,yy,MPI_REPLACE);CHKERRQ(ierr);
}
if (sy == 0.0) {
ierr = VecCUDARestoreArrayWrite(y,&yy);CHKERRQ(ierr);
} else {
ierr = VecCUDARestoreArray(y,&yy);CHKERRQ(ierr);
}
#endif
}
{ /* log flops */
double gops,time,perf,dev;
HLibProfile::getHgemvPerf(gops,time,perf,dev);
#if defined(PETSC_H2OPUS_USE_GPU)
if (boundtocpu) {
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
} else {
ierr = PetscLogGpuFlops(1e9*gops);CHKERRQ(ierr);
}
#else
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
#endif
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultTranspose_H2OPUS(Mat A, Vec x, Vec y)
{
PetscBool xiscuda,yiscuda;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscObjectTypeCompareAny((PetscObject)x,&xiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)y,&yiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = MatH2OpusUpdateIfNeeded(A,!xiscuda || !yiscuda);CHKERRQ(ierr);
ierr = MatMultKernel_H2OPUS(A,x,0.0,y,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMult_H2OPUS(Mat A, Vec x, Vec y)
{
PetscBool xiscuda,yiscuda;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscObjectTypeCompareAny((PetscObject)x,&xiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)y,&yiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = MatH2OpusUpdateIfNeeded(A,!xiscuda || !yiscuda);CHKERRQ(ierr);
ierr = MatMultKernel_H2OPUS(A,x,0.0,y,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultTransposeAdd_H2OPUS(Mat A, Vec x, Vec y, Vec z)
{
PetscBool xiscuda,ziscuda;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCopy(y,z);CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)x,&xiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)z,&ziscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = MatH2OpusUpdateIfNeeded(A,!xiscuda || !ziscuda);CHKERRQ(ierr);
ierr = MatMultKernel_H2OPUS(A,x,1.0,z,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultAdd_H2OPUS(Mat A, Vec x, Vec y, Vec z)
{
PetscBool xiscuda,ziscuda;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCopy(y,z);CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)x,&xiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = PetscObjectTypeCompareAny((PetscObject)z,&ziscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
ierr = MatH2OpusUpdateIfNeeded(A,!xiscuda || !ziscuda);CHKERRQ(ierr);
ierr = MatMultKernel_H2OPUS(A,x,1.0,z,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatScale_H2OPUS(Mat A, PetscScalar s)
{
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscFunctionBegin;
a->s *= s;
PetscFunctionReturn(0);
}
static PetscErrorCode MatSetFromOptions_H2OPUS(PetscOptionItems *PetscOptionsObject,Mat A)
{
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscOptionsHead(PetscOptionsObject,"H2OPUS options");CHKERRQ(ierr);
ierr = PetscOptionsInt("-mat_h2opus_leafsize","Leaf size of cluster tree",NULL,a->leafsize,&a->leafsize,NULL);CHKERRQ(ierr);
ierr = PetscOptionsReal("-mat_h2opus_eta","Admissibility condition tolerance",NULL,a->eta,&a->eta,NULL);CHKERRQ(ierr);
ierr = PetscOptionsInt("-mat_h2opus_order","Basis order for off-diagonal sampling when constructed from kernel",NULL,a->basisord,&a->basisord,NULL);CHKERRQ(ierr);
ierr = PetscOptionsInt("-mat_h2opus_maxrank","Maximum rank when constructed from matvecs",NULL,a->max_rank,&a->max_rank,NULL);CHKERRQ(ierr);
ierr = PetscOptionsInt("-mat_h2opus_samples","Maximum number of samples to be taken concurrently when constructing from matvecs",NULL,a->bs,&a->bs,NULL);CHKERRQ(ierr);
ierr = PetscOptionsInt("-mat_h2opus_normsamples","Maximum bumber of samples to be when estimating norms",NULL,a->norm_max_samples,&a->norm_max_samples,NULL);CHKERRQ(ierr);
ierr = PetscOptionsReal("-mat_h2opus_rtol","Relative tolerance for construction from sampling",NULL,a->rtol,&a->rtol,NULL);CHKERRQ(ierr);
ierr = PetscOptionsBool("-mat_h2opus_check","Check error when constructing from sampling during MatAssemblyEnd()",NULL,a->check_construction,&a->check_construction,NULL);CHKERRQ(ierr);
ierr = PetscOptionsBool("-mat_h2opus_hara_verbose","Verbose output from hara construction",NULL,a->hara_verbose,&a->hara_verbose,NULL);CHKERRQ(ierr);
ierr = PetscOptionsTail();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatH2OpusSetCoords_H2OPUS(Mat,PetscInt,const PetscReal[],PetscBool,MatH2OpusKernel,void*);
static PetscErrorCode MatH2OpusInferCoordinates_Private(Mat A)
{
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
Vec c;
PetscInt spacedim;
const PetscScalar *coords;
PetscErrorCode ierr;
PetscFunctionBegin;
if (a->ptcloud) PetscFunctionReturn(0);
ierr = PetscObjectQuery((PetscObject)A,"__math2opus_coords",(PetscObject*)&c);CHKERRQ(ierr);
if (!c && a->sampler) {
Mat S = a->sampler->GetSamplingMat();
ierr = PetscObjectQuery((PetscObject)S,"__math2opus_coords",(PetscObject*)&c);CHKERRQ(ierr);
}
if (!c) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Missing coordinates");
ierr = VecGetArrayRead(c,&coords);CHKERRQ(ierr);
ierr = VecGetBlockSize(c,&spacedim);CHKERRQ(ierr);
ierr = MatH2OpusSetCoords_H2OPUS(A,spacedim,coords,PETSC_FALSE,NULL,NULL);CHKERRQ(ierr);
ierr = VecRestoreArrayRead(c,&coords);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
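/* Builds the index map of the H2Opus ordering and, when it differs from the PETSc ordering, the
   PetscSF used to permute vectors in the multiplication kernels. */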
static PetscErrorCode MatSetUpMultiply_H2OPUS(Mat A)
{
MPI_Comm comm;
PetscMPIInt size;
PetscErrorCode ierr;
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscInt n = 0,*idx = NULL;
int *iidx = NULL;
PetscCopyMode own;
PetscBool rid;
PetscFunctionBegin;
if (a->multsetup) PetscFunctionReturn(0);
if (a->sf) { /* MatDuplicate_H2OPUS takes reference to the SF */
ierr = PetscSFGetGraph(a->sf,NULL,&n,NULL,NULL);CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
a->xx_gpu = new thrust::device_vector<PetscScalar>(n);
a->yy_gpu = new thrust::device_vector<PetscScalar>(n);
a->xxs_gpu = 1;
a->yys_gpu = 1;
#endif
a->xx = new thrust::host_vector<PetscScalar>(n);
a->yy = new thrust::host_vector<PetscScalar>(n);
a->xxs = 1;
a->yys = 1;
} else {
IS is;
ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr);
if (!a->h2opus_indexmap) {
if (size > 1) {
if (!a->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing distributed CPU matrix");
#if defined(H2OPUS_USE_MPI)
iidx = MatH2OpusGetThrustPointer(a->dist_hmatrix->basis_tree.basis_branch.index_map);
n = a->dist_hmatrix->basis_tree.basis_branch.index_map.size();
#endif
} else {
iidx = MatH2OpusGetThrustPointer(a->hmatrix->u_basis_tree.index_map);
n = a->hmatrix->u_basis_tree.index_map.size();
}
if (PetscDefined(USE_64BIT_INDICES)) {
PetscInt i;
own = PETSC_OWN_POINTER;
ierr = PetscMalloc1(n,&idx);CHKERRQ(ierr);
for (i=0;i<n;i++) idx[i] = iidx[i];
} else {
own = PETSC_COPY_VALUES;
idx = (PetscInt*)iidx;
}
ierr = ISCreateGeneral(comm,n,idx,own,&is);CHKERRQ(ierr);
ierr = ISSetPermutation(is);CHKERRQ(ierr);
ierr = ISViewFromOptions(is,(PetscObject)A,"-mat_h2opus_indexmap_view");CHKERRQ(ierr);
a->h2opus_indexmap = is;
}
ierr = ISGetLocalSize(a->h2opus_indexmap,&n);CHKERRQ(ierr);
ierr = ISGetIndices(a->h2opus_indexmap,(const PetscInt **)&idx);CHKERRQ(ierr);
rid = (PetscBool)(n == A->rmap->n);
ierr = MPIU_Allreduce(MPI_IN_PLACE,&rid,1,MPIU_BOOL,MPI_LAND,comm);CHKERRMPI(ierr);
if (rid) {
ierr = ISIdentity(a->h2opus_indexmap,&rid);CHKERRQ(ierr);
}
if (!rid) {
if (size > 1) { /* Parallel distribution may be different, save it here for fast path in MatMult (see MatH2OpusSetNativeMult) */
ierr = PetscLayoutCreate(comm,&a->h2opus_rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetLocalSize(a->h2opus_rmap,n);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(a->h2opus_rmap);CHKERRQ(ierr);
ierr = PetscLayoutReference(a->h2opus_rmap,&a->h2opus_cmap);CHKERRQ(ierr);
}
ierr = PetscSFCreate(comm,&a->sf);CHKERRQ(ierr);
ierr = PetscSFSetGraphLayout(a->sf,A->rmap,n,NULL,PETSC_OWN_POINTER,idx);CHKERRQ(ierr);
ierr = PetscSFViewFromOptions(a->sf,(PetscObject)A,"-mat_h2opus_sf_view");CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
a->xx_gpu = new thrust::device_vector<PetscScalar>(n);
a->yy_gpu = new thrust::device_vector<PetscScalar>(n);
a->xxs_gpu = 1;
a->yys_gpu = 1;
#endif
a->xx = new thrust::host_vector<PetscScalar>(n);
a->yy = new thrust::host_vector<PetscScalar>(n);
a->xxs = 1;
a->yys = 1;
}
ierr = ISRestoreIndices(a->h2opus_indexmap,(const PetscInt **)&idx);CHKERRQ(ierr);
}
a->multsetup = PETSC_TRUE;
PetscFunctionReturn(0);
}
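/* Performs the actual construction of the hierarchical matrix: geometric construction when a kernel
   is available, otherwise (sequential runs only) adaptive randomized approximation driven by the
   matrix-vector sampler. */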
static PetscErrorCode MatAssemblyEnd_H2OPUS(Mat A, MatAssemblyType assemblytype)
{
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
#if defined(H2OPUS_USE_MPI)
h2opusHandle_t handle = a->handle->handle;
#else
h2opusHandle_t handle = a->handle;
#endif
PetscBool kernel = PETSC_FALSE;
PetscBool boundtocpu = PETSC_TRUE;
PetscBool samplingdone = PETSC_FALSE;
MPI_Comm comm;
PetscMPIInt size;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Different row and column local sizes are not supported");
if (A->rmap->N != A->cmap->N) SETERRQ(comm,PETSC_ERR_SUP,"Rectangular matrices are not supported");
ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr);
/* TODO REUSABILITY of geometric construction */
delete a->hmatrix;
delete a->dist_hmatrix;
#if defined(PETSC_H2OPUS_USE_GPU)
delete a->hmatrix_gpu;
delete a->dist_hmatrix_gpu;
#endif
a->orthogonal = PETSC_FALSE;
/* TODO: other? */
H2OpusBoxCenterAdmissibility adm(a->eta);
ierr = PetscLogEventBegin(MAT_H2Opus_Build,A,0,0,0);CHKERRQ(ierr);
if (size > 1) {
#if defined(H2OPUS_USE_MPI)
a->dist_hmatrix = new DistributedHMatrix(A->rmap->n/*,A->symmetric*/);
#else
a->dist_hmatrix = NULL;
#endif
} else {
a->hmatrix = new HMatrix(A->rmap->n,A->symmetric);
}
ierr = MatH2OpusInferCoordinates_Private(A);CHKERRQ(ierr);
if (!a->ptcloud) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing pointcloud");
if (a->kernel) {
BoxEntryGen<PetscScalar, H2OPUS_HWTYPE_CPU, PetscFunctionGenerator<PetscScalar>> entry_gen(*a->kernel);
if (size > 1) {
if (!a->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing distributed CPU matrix");
#if defined(H2OPUS_USE_MPI)
buildDistributedHMatrix(*a->dist_hmatrix,a->ptcloud,adm,entry_gen,a->leafsize,a->basisord,a->handle);
#endif
} else {
buildHMatrix(*a->hmatrix,a->ptcloud,adm,entry_gen,a->leafsize,a->basisord);
}
kernel = PETSC_TRUE;
} else {
if (size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Construction from sampling not supported in parallel");
buildHMatrixStructure(*a->hmatrix,a->ptcloud,a->leafsize,adm);
}
ierr = MatSetUpMultiply_H2OPUS(A);CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
boundtocpu = A->boundtocpu;
if (!boundtocpu) {
if (size > 1) {
if (!a->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing distributed CPU matrix");
#if defined(H2OPUS_USE_MPI)
a->dist_hmatrix_gpu = new DistributedHMatrix_GPU(*a->dist_hmatrix);
#endif
} else {
a->hmatrix_gpu = new HMatrix_GPU(*a->hmatrix);
}
}
#endif
if (size == 1) {
if (!kernel && a->sampler && a->sampler->GetSamplingMat()) {
PetscReal Anorm;
bool verbose;
ierr = PetscOptionsGetBool(((PetscObject)A)->options,((PetscObject)A)->prefix,"-mat_h2opus_hara_verbose",&a->hara_verbose,NULL);CHKERRQ(ierr);
verbose = a->hara_verbose;
ierr = MatApproximateNorm_Private(a->sampler->GetSamplingMat(),NORM_2,a->norm_max_samples,&Anorm);CHKERRQ(ierr);
if (a->hara_verbose) { ierr = PetscPrintf(PETSC_COMM_SELF,"Sampling uses max rank %d, tol %g (%g*%g), %s samples %d\n",a->max_rank,a->rtol*Anorm,a->rtol,Anorm,boundtocpu ? "CPU" : "GPU",a->bs);CHKERRQ(ierr); }
if (a->sf && !a->nativemult) {
a->sampler->SetIndexMap(a->hmatrix->u_basis_tree.index_map.size(),a->hmatrix->u_basis_tree.index_map.data());
}
a->sampler->SetStream(handle->getMainStream());
if (boundtocpu) {
a->sampler->SetGPUSampling(false);
hara(a->sampler, *a->hmatrix, a->max_rank, 10 /* TODO */,a->rtol*Anorm,a->bs,handle,verbose);
#if defined(PETSC_H2OPUS_USE_GPU)
} else {
a->sampler->SetGPUSampling(true);
hara(a->sampler, *a->hmatrix_gpu, a->max_rank, 10 /* TODO */,a->rtol*Anorm,a->bs,handle,verbose);
#endif
}
samplingdone = PETSC_TRUE;
}
}
#if defined(PETSC_H2OPUS_USE_GPU)
if (!boundtocpu) {
delete a->hmatrix;
delete a->dist_hmatrix;
a->hmatrix = NULL;
a->dist_hmatrix = NULL;
}
A->offloadmask = boundtocpu ? PETSC_OFFLOAD_CPU : PETSC_OFFLOAD_GPU;
#endif
ierr = PetscLogEventEnd(MAT_H2Opus_Build,A,0,0,0);CHKERRQ(ierr);
if (!a->s) a->s = 1.0;
A->assembled = PETSC_TRUE;
if (samplingdone) {
PetscBool check = a->check_construction;
PetscBool checke = PETSC_FALSE;
ierr = PetscOptionsGetBool(((PetscObject)A)->options,((PetscObject)A)->prefix,"-mat_h2opus_check",&check,NULL);CHKERRQ(ierr);
ierr = PetscOptionsGetBool(((PetscObject)A)->options,((PetscObject)A)->prefix,"-mat_h2opus_check_explicit",&checke,NULL);CHKERRQ(ierr);
if (check) {
Mat E,Ae;
PetscReal n1,ni,n2;
PetscReal n1A,niA,n2A;
void (*normfunc)(void);
Ae = a->sampler->GetSamplingMat();
ierr = MatConvert(A,MATSHELL,MAT_INITIAL_MATRIX,&E);CHKERRQ(ierr);
ierr = MatShellSetOperation(E,MATOP_NORM,(void (*)(void))MatNorm_H2OPUS);CHKERRQ(ierr);
ierr = MatAXPY(E,-1.0,Ae,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
ierr = MatNorm(E,NORM_1,&n1);CHKERRQ(ierr);
ierr = MatNorm(E,NORM_INFINITY,&ni);CHKERRQ(ierr);
ierr = MatNorm(E,NORM_2,&n2);CHKERRQ(ierr);
if (checke) {
Mat eA,eE,eAe;
ierr = MatComputeOperator(A,MATAIJ,&eA);CHKERRQ(ierr);
ierr = MatComputeOperator(E,MATAIJ,&eE);CHKERRQ(ierr);
ierr = MatComputeOperator(Ae,MATAIJ,&eAe);CHKERRQ(ierr);
ierr = MatChop(eA,PETSC_SMALL);CHKERRQ(ierr);
ierr = MatChop(eE,PETSC_SMALL);CHKERRQ(ierr);
ierr = MatChop(eAe,PETSC_SMALL);CHKERRQ(ierr);
ierr = PetscObjectSetName((PetscObject)eA,"H2Mat");CHKERRQ(ierr);
ierr = MatView(eA,NULL);CHKERRQ(ierr);
ierr = PetscObjectSetName((PetscObject)eAe,"S");CHKERRQ(ierr);
ierr = MatView(eAe,NULL);CHKERRQ(ierr);
ierr = PetscObjectSetName((PetscObject)eE,"H2Mat - S");CHKERRQ(ierr);
ierr = MatView(eE,NULL);CHKERRQ(ierr);
ierr = MatDestroy(&eA);CHKERRQ(ierr);
ierr = MatDestroy(&eE);CHKERRQ(ierr);
ierr = MatDestroy(&eAe);CHKERRQ(ierr);
}
ierr = MatGetOperation(Ae,MATOP_NORM,&normfunc);CHKERRQ(ierr);
ierr = MatSetOperation(Ae,MATOP_NORM,(void (*)(void))MatNorm_H2OPUS);CHKERRQ(ierr);
ierr = MatNorm(Ae,NORM_1,&n1A);CHKERRQ(ierr);
ierr = MatNorm(Ae,NORM_INFINITY,&niA);CHKERRQ(ierr);
ierr = MatNorm(Ae,NORM_2,&n2A);CHKERRQ(ierr);
n1A = PetscMax(n1A,PETSC_SMALL);
n2A = PetscMax(n2A,PETSC_SMALL);
niA = PetscMax(niA,PETSC_SMALL);
ierr = MatSetOperation(Ae,MATOP_NORM,normfunc);CHKERRQ(ierr);
ierr = PetscPrintf(PetscObjectComm((PetscObject)A),"MATH2OPUS construction errors: NORM_1 %g, NORM_INFINITY %g, NORM_2 %g (%g %g %g)\n",(double)n1,(double)ni,(double)n2,(double)(n1/n1A),(double)(ni/niA),(double)(n2/n2A));
ierr = MatDestroy(&E);CHKERRQ(ierr);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatZeroEntries_H2OPUS(Mat A)
{
PetscErrorCode ierr;
PetscMPIInt size;
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscFunctionBegin;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
if (size > 1) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Not yet supported");
else {
a->hmatrix->clearData();
#if defined(PETSC_H2OPUS_USE_GPU)
if (a->hmatrix_gpu) a->hmatrix_gpu->clearData();
#endif
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatDuplicate_H2OPUS(Mat B, MatDuplicateOption op, Mat *nA)
{
Mat A;
Mat_H2OPUS *a, *b = (Mat_H2OPUS*)B->data;
#if defined(PETSC_H2OPUS_USE_GPU)
PetscBool iscpu = PETSC_FALSE;
#else
PetscBool iscpu = PETSC_TRUE;
#endif
PetscErrorCode ierr;
MPI_Comm comm;
PetscFunctionBegin;
ierr = PetscObjectGetComm((PetscObject)B,&comm);CHKERRQ(ierr);
ierr = MatCreate(comm,&A);CHKERRQ(ierr);
ierr = MatSetSizes(A,B->rmap->n,B->cmap->n,B->rmap->N,B->cmap->N);CHKERRQ(ierr);
ierr = MatSetType(A,MATH2OPUS);CHKERRQ(ierr);
ierr = MatPropagateSymmetryOptions(B,A);CHKERRQ(ierr);
a = (Mat_H2OPUS*)A->data;
a->eta = b->eta;
a->leafsize = b->leafsize;
a->basisord = b->basisord;
a->max_rank = b->max_rank;
a->bs = b->bs;
a->rtol = b->rtol;
a->norm_max_samples = b->norm_max_samples;
if (op == MAT_COPY_VALUES) a->s = b->s;
a->ptcloud = new PetscPointCloud<PetscReal>(*b->ptcloud);
if (op == MAT_COPY_VALUES && b->kernel) a->kernel = new PetscFunctionGenerator<PetscScalar>(*b->kernel);
#if defined(H2OPUS_USE_MPI)
if (b->dist_hmatrix) { a->dist_hmatrix = new DistributedHMatrix(*b->dist_hmatrix); }
#if defined(PETSC_H2OPUS_USE_GPU)
if (b->dist_hmatrix_gpu) { a->dist_hmatrix_gpu = new DistributedHMatrix_GPU(*b->dist_hmatrix_gpu); }
#endif
#endif
if (b->hmatrix) {
a->hmatrix = new HMatrix(*b->hmatrix);
if (op == MAT_DO_NOT_COPY_VALUES) a->hmatrix->clearData();
}
#if defined(PETSC_H2OPUS_USE_GPU)
if (b->hmatrix_gpu) {
a->hmatrix_gpu = new HMatrix_GPU(*b->hmatrix_gpu);
if (op == MAT_DO_NOT_COPY_VALUES) a->hmatrix_gpu->clearData();
}
#endif
if (b->sf) {
ierr = PetscObjectReference((PetscObject)b->sf);CHKERRQ(ierr);
a->sf = b->sf;
}
if (b->h2opus_indexmap) {
ierr = PetscObjectReference((PetscObject)b->h2opus_indexmap);CHKERRQ(ierr);
a->h2opus_indexmap = b->h2opus_indexmap;
}
ierr = MatSetUp(A);CHKERRQ(ierr);
ierr = MatSetUpMultiply_H2OPUS(A);CHKERRQ(ierr);
if (op == MAT_COPY_VALUES) {
A->assembled = PETSC_TRUE;
a->orthogonal = b->orthogonal;
#if defined(PETSC_H2OPUS_USE_GPU)
A->offloadmask = B->offloadmask;
#endif
}
#if defined(PETSC_H2OPUS_USE_GPU)
iscpu = B->boundtocpu;
#endif
ierr = MatBindToCPU(A,iscpu);CHKERRQ(ierr);
*nA = A;
PetscFunctionReturn(0);
}
static PetscErrorCode MatView_H2OPUS(Mat A, PetscViewer view)
{
Mat_H2OPUS *h2opus = (Mat_H2OPUS*)A->data;
PetscBool isascii;
PetscErrorCode ierr;
PetscMPIInt size;
PetscViewerFormat format;
PetscFunctionBegin;
ierr = PetscObjectTypeCompare((PetscObject)view,PETSCVIEWERASCII,&isascii);CHKERRQ(ierr);
ierr = PetscViewerGetFormat(view,&format);CHKERRQ(ierr);
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
if (isascii) {
if (format == PETSC_VIEWER_ASCII_MATLAB) {
if (size == 1) {
FILE *fp;
ierr = PetscViewerASCIIGetPointer(view,&fp);CHKERRQ(ierr);
dumpHMatrix(*h2opus->hmatrix,6,fp);
}
} else {
ierr = PetscViewerASCIIPrintf(view," H-Matrix constructed from %s\n",h2opus->kernel ? "Kernel" : "Mat");CHKERRQ(ierr);
ierr = PetscViewerASCIIPrintf(view," PointCloud dim %D\n",h2opus->ptcloud ? h2opus->ptcloud->getDimension() : 0);CHKERRQ(ierr);
ierr = PetscViewerASCIIPrintf(view," Admissibility parameters: leaf size %D, eta %g\n",h2opus->leafsize,(double)h2opus->eta);CHKERRQ(ierr);
if (!h2opus->kernel) {
ierr = PetscViewerASCIIPrintf(view," Sampling parameters: max_rank %D, samples %D, tolerance %g\n",h2opus->max_rank,h2opus->bs,(double)h2opus->rtol);CHKERRQ(ierr);
} else {
ierr = PetscViewerASCIIPrintf(view," Offdiagonal blocks approximation order %D\n",h2opus->basisord);CHKERRQ(ierr);
}
ierr = PetscViewerASCIIPrintf(view," Number of samples for norms %D\n",h2opus->norm_max_samples);CHKERRQ(ierr);
if (size == 1) {
double dense_mem_cpu = h2opus->hmatrix ? h2opus->hmatrix->getDenseMemoryUsage() : 0;
double low_rank_cpu = h2opus->hmatrix ? h2opus->hmatrix->getLowRankMemoryUsage() : 0;
#if defined(PETSC_HAVE_CUDA)
double dense_mem_gpu = h2opus->hmatrix_gpu ? h2opus->hmatrix_gpu->getDenseMemoryUsage() : 0;
double low_rank_gpu = h2opus->hmatrix_gpu ? h2opus->hmatrix_gpu->getLowRankMemoryUsage() : 0;
#endif
ierr = PetscViewerASCIIPrintf(view," Memory consumption GB (CPU): %g (dense) %g (low rank) %g (total)\n", dense_mem_cpu, low_rank_cpu, low_rank_cpu + dense_mem_cpu);CHKERRQ(ierr);
#if defined(PETSC_HAVE_CUDA)
ierr = PetscViewerASCIIPrintf(view," Memory consumption GB (GPU): %g (dense) %g (low rank) %g (total)\n", dense_mem_gpu, low_rank_gpu, low_rank_gpu + dense_mem_gpu);CHKERRQ(ierr);
#endif
} else {
#if defined(PETSC_HAVE_CUDA)
double matrix_mem[4] = {0.,0.,0.,0.};
PetscMPIInt rsize = 4;
#else
double matrix_mem[2] = {0.,0.};
PetscMPIInt rsize = 2;
#endif
#if defined(H2OPUS_USE_MPI)
matrix_mem[0] = h2opus->dist_hmatrix ? h2opus->dist_hmatrix->getLocalDenseMemoryUsage() : 0;
matrix_mem[1] = h2opus->dist_hmatrix ? h2opus->dist_hmatrix->getLocalLowRankMemoryUsage() : 0;
#if defined(PETSC_HAVE_CUDA)
matrix_mem[2] = h2opus->dist_hmatrix_gpu ? h2opus->dist_hmatrix_gpu->getLocalDenseMemoryUsage() : 0;
matrix_mem[3] = h2opus->dist_hmatrix_gpu ? h2opus->dist_hmatrix_gpu->getLocalLowRankMemoryUsage() : 0;
#endif
#endif
ierr = MPIU_Allreduce(MPI_IN_PLACE,matrix_mem,rsize,MPI_DOUBLE_PRECISION,MPI_SUM,PetscObjectComm((PetscObject)A));CHKERRMPI(ierr);
ierr = PetscViewerASCIIPrintf(view," Memory consumption GB (CPU): %g (dense) %g (low rank) %g (total)\n", matrix_mem[0], matrix_mem[1], matrix_mem[0] + matrix_mem[1]);CHKERRQ(ierr);
#if defined(PETSC_HAVE_CUDA)
ierr = PetscViewerASCIIPrintf(view," Memory consumption GB (GPU): %g (dense) %g (low rank) %g (total)\n", matrix_mem[2], matrix_mem[3], matrix_mem[2] + matrix_mem[3]);CHKERRQ(ierr);
#endif
}
}
}
#if 0
if (size == 1) {
char filename[256];
const char *name;
ierr = PetscObjectGetName((PetscObject)A,&name);CHKERRQ(ierr);
ierr = PetscSNPrintf(filename,sizeof(filename),"%s_structure.eps",name);CHKERRQ(ierr);
outputEps(*h2opus->hmatrix,filename);
}
#endif
PetscFunctionReturn(0);
}
static PetscErrorCode MatH2OpusSetCoords_H2OPUS(Mat A, PetscInt spacedim, const PetscReal coords[], PetscBool cdist, MatH2OpusKernel kernel, void *kernelctx)
{
Mat_H2OPUS *h2opus = (Mat_H2OPUS*)A->data;
PetscReal *gcoords;
PetscInt N;
MPI_Comm comm;
PetscMPIInt size;
PetscBool cong;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
ierr = MatHasCongruentLayouts(A,&cong);CHKERRQ(ierr);
if (!cong) SETERRQ(comm,PETSC_ERR_SUP,"Only for square matrices with congruent layouts");
N = A->rmap->N;
ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr);
if (size > 1 && cdist) {
PetscSF sf;
MPI_Datatype dtype;
ierr = MPI_Type_contiguous(spacedim,MPIU_REAL,&dtype);CHKERRMPI(ierr);
ierr = MPI_Type_commit(&dtype);CHKERRMPI(ierr);
ierr = PetscSFCreate(comm,&sf);CHKERRQ(ierr);
ierr = PetscSFSetGraphWithPattern(sf,A->rmap,PETSCSF_PATTERN_ALLGATHER);CHKERRQ(ierr);
ierr = PetscMalloc1(spacedim*N,&gcoords);CHKERRQ(ierr);
ierr = PetscSFBcastBegin(sf,dtype,coords,gcoords,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(sf,dtype,coords,gcoords,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
ierr = MPI_Type_free(&dtype);CHKERRMPI(ierr);
} else gcoords = (PetscReal*)coords;
delete h2opus->ptcloud;
delete h2opus->kernel;
h2opus->ptcloud = new PetscPointCloud<PetscReal>(spacedim,N,gcoords);
if (kernel) h2opus->kernel = new PetscFunctionGenerator<PetscScalar>(kernel,spacedim,kernelctx);
if (gcoords != coords) { ierr = PetscFree(gcoords);CHKERRQ(ierr); }
A->preallocated = PETSC_TRUE;
PetscFunctionReturn(0);
}
#if defined(PETSC_H2OPUS_USE_GPU)
static PetscErrorCode MatBindToCPU_H2OPUS(Mat A, PetscBool flg)
{
PetscMPIInt size;
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
if (flg && A->offloadmask == PETSC_OFFLOAD_GPU) {
if (size > 1) {
if (!a->dist_hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
#if defined(H2OPUS_USE_MPI)
if (!a->dist_hmatrix) a->dist_hmatrix = new DistributedHMatrix(*a->dist_hmatrix_gpu);
else *a->dist_hmatrix = *a->dist_hmatrix_gpu;
#endif
} else {
if (!a->hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
if (!a->hmatrix) a->hmatrix = new HMatrix(*a->hmatrix_gpu);
else *a->hmatrix = *a->hmatrix_gpu;
}
delete a->hmatrix_gpu;
delete a->dist_hmatrix_gpu;
a->hmatrix_gpu = NULL;
a->dist_hmatrix_gpu = NULL;
A->offloadmask = PETSC_OFFLOAD_CPU;
} else if (!flg && A->offloadmask == PETSC_OFFLOAD_CPU) {
if (size > 1) {
if (!a->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
#if defined(H2OPUS_USE_MPI)
if (!a->dist_hmatrix_gpu) a->dist_hmatrix_gpu = new DistributedHMatrix_GPU(*a->dist_hmatrix);
else *a->dist_hmatrix_gpu = *a->dist_hmatrix;
#endif
} else {
if (!a->hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
if (!a->hmatrix_gpu) a->hmatrix_gpu = new HMatrix_GPU(*a->hmatrix);
else *a->hmatrix_gpu = *a->hmatrix;
}
delete a->hmatrix;
delete a->dist_hmatrix;
a->hmatrix = NULL;
a->dist_hmatrix = NULL;
A->offloadmask = PETSC_OFFLOAD_GPU;
}
ierr = PetscFree(A->defaultvectype);CHKERRQ(ierr);
if (!flg) {
ierr = PetscStrallocpy(VECCUDA,&A->defaultvectype);CHKERRQ(ierr);
} else {
ierr = PetscStrallocpy(VECSTANDARD,&A->defaultvectype);CHKERRQ(ierr);
}
A->boundtocpu = flg;
PetscFunctionReturn(0);
}
#endif
/*MC
MATH2OPUS = "h2opus" - A matrix type for hierarchical matrices using the H2Opus package.
Options Database Keys:
. -mat_type h2opus - sets the matrix type to "h2opus" during a call to MatSetFromOptions()
Notes:
H2Opus implements hierarchical matrices in the H^2 flavour.
It supports CPU or NVIDIA GPUs.
For CPU only builds, use ./configure --download-h2opus --download-thrust to install PETSc to use H2Opus.
In order to run on NVIDIA GPUs, use ./configure --download-h2opus --download-magma --download-kblas.
For details and additional references, see
"H2Opus: A distributed-memory multi-GPU software package for non-local operators",
available at https://arxiv.org/abs/2109.05451.
Level: beginner
.seealso: MATHTOOL, MATDENSE, MatCreateH2OpusFromKernel(), MatCreateH2OpusFromMat()
M*/
PETSC_EXTERN PetscErrorCode MatCreate_H2OPUS(Mat A)
{
Mat_H2OPUS *a;
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
#if defined(PETSC_H2OPUS_USE_GPU)
ierr = PetscDeviceInitialize(PETSC_DEVICE_CUDA);CHKERRQ(ierr);
#endif
ierr = PetscNewLog(A,&a);CHKERRQ(ierr);
A->data = (void*)a;
a->eta = 0.9;
a->leafsize = 32;
a->basisord = 4;
a->max_rank = 64;
a->bs = 32;
a->rtol = 1.e-4;
a->s = 1.0;
a->norm_max_samples = 10;
#if defined(H2OPUS_USE_MPI)
h2opusCreateDistributedHandleComm(&a->handle,PetscObjectComm((PetscObject)A));
#else
h2opusCreateHandle(&a->handle);
#endif
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)A,MATH2OPUS);CHKERRQ(ierr);
ierr = PetscMemzero(A->ops,sizeof(struct _MatOps));CHKERRQ(ierr);
A->ops->destroy = MatDestroy_H2OPUS;
A->ops->view = MatView_H2OPUS;
A->ops->assemblyend = MatAssemblyEnd_H2OPUS;
A->ops->mult = MatMult_H2OPUS;
A->ops->multtranspose = MatMultTranspose_H2OPUS;
A->ops->multadd = MatMultAdd_H2OPUS;
A->ops->multtransposeadd = MatMultTransposeAdd_H2OPUS;
A->ops->scale = MatScale_H2OPUS;
A->ops->duplicate = MatDuplicate_H2OPUS;
A->ops->setfromoptions = MatSetFromOptions_H2OPUS;
A->ops->norm = MatNorm_H2OPUS;
A->ops->zeroentries = MatZeroEntries_H2OPUS;
#if defined(PETSC_H2OPUS_USE_GPU)
A->ops->bindtocpu = MatBindToCPU_H2OPUS;
#endif
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_seqdense_C",MatProductSetFromOptions_H2OPUS);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_seqdensecuda_C",MatProductSetFromOptions_H2OPUS);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_mpidense_C",MatProductSetFromOptions_H2OPUS);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_h2opus_mpidensecuda_C",MatProductSetFromOptions_H2OPUS);CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
ierr = PetscFree(A->defaultvectype);CHKERRQ(ierr);
ierr = PetscStrallocpy(VECCUDA,&A->defaultvectype);CHKERRQ(ierr);
#endif
PetscFunctionReturn(0);
}
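/*
   Editorial usage sketch (not part of the upstream source): consuming the options listed
   in the MATH2OPUS man page above. A is assumed to have already been created with
   MatCreateH2OpusFromKernel() or MatCreateH2OpusFromMat() (defined later in this file),
   since assembly requires the point cloud or sampler to be set. Guarded by #if 0 so it
   does not affect compilation; the function name is hypothetical.
*/
#if 0
static PetscErrorCode ExampleH2OpusSetFromOptions(Mat A)
{
  PetscErrorCode ierr;
  PetscFunctionBeginUser;
  /* picks up -mat_h2opus_leafsize, -mat_h2opus_eta, -mat_h2opus_order, -mat_h2opus_normsamples, ... */
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif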
/*@C
MatH2OpusOrthogonalize - Orthogonalize the basis tree of a hierarchical matrix.
Input Parameter:
. A - the matrix
Level: intermediate
.seealso: MatCreate(), MATH2OPUS, MatCreateH2OpusFromMat(), MatCreateH2OpusFromKernel(), MatH2OpusCompress()
@*/
PetscErrorCode MatH2OpusOrthogonalize(Mat A)
{
PetscErrorCode ierr;
PetscBool ish2opus;
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscMPIInt size;
PetscBool boundtocpu = PETSC_TRUE;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidType(A,1);
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (!ish2opus) PetscFunctionReturn(0);
if (a->orthogonal) PetscFunctionReturn(0);
HLibProfile::clear();
ierr = PetscLogEventBegin(MAT_H2Opus_Orthog,A,0,0,0);CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
boundtocpu = A->boundtocpu;
#endif
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
if (size > 1) {
if (boundtocpu) {
if (!a->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
#if defined(H2OPUS_USE_MPI)
distributed_horthog(*a->dist_hmatrix, a->handle);
#endif
#if defined(PETSC_H2OPUS_USE_GPU)
A->offloadmask = PETSC_OFFLOAD_CPU;
} else {
if (!a->dist_hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if defined(H2OPUS_USE_MPI)
distributed_horthog(*a->dist_hmatrix_gpu, a->handle);
#endif
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
}
} else {
#if defined(H2OPUS_USE_MPI)
h2opusHandle_t handle = a->handle->handle;
#else
h2opusHandle_t handle = a->handle;
#endif
if (boundtocpu) {
if (!a->hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
horthog(*a->hmatrix, handle);
#if defined(PETSC_H2OPUS_USE_GPU)
A->offloadmask = PETSC_OFFLOAD_CPU;
} else {
if (!a->hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
horthog(*a->hmatrix_gpu, handle);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
}
}
a->orthogonal = PETSC_TRUE;
{ /* log flops */
double gops,time,perf,dev;
HLibProfile::getHorthogPerf(gops,time,perf,dev);
#if defined(PETSC_H2OPUS_USE_GPU)
if (boundtocpu) {
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
} else {
ierr = PetscLogGpuFlops(1e9*gops);CHKERRQ(ierr);
}
#else
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
#endif
}
ierr = PetscLogEventEnd(MAT_H2Opus_Orthog,A,0,0,0);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
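/*
   Editorial usage sketch (not part of the upstream source): explicitly orthogonalizing the
   basis tree of an assembled MATH2OPUS, e.g. before repeated matrix-vector products. The
   call is a no-op if the basis is already orthogonal (see the early return above). Guarded
   by #if 0; the function name is hypothetical.
*/
#if 0
static PetscErrorCode ExampleH2OpusOrthogonalize(Mat A)
{
  PetscErrorCode ierr;
  PetscFunctionBeginUser;
  ierr = MatH2OpusOrthogonalize(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif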
/*@C
MatH2OpusCompress - Compress a hierarchical matrix.
Input Parameters:
+ A - the matrix
- tol - the absolute truncation threshold
Level: intermediate
.seealso: MatCreate(), MATH2OPUS, MatCreateH2OpusFromMat(), MatCreateH2OpusFromKernel(), MatH2OpusOrthogonalize()
@*/
PetscErrorCode MatH2OpusCompress(Mat A, PetscReal tol)
{
PetscErrorCode ierr;
PetscBool ish2opus;
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscMPIInt size;
PetscBool boundtocpu = PETSC_TRUE;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidType(A,1);
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (!ish2opus) PetscFunctionReturn(0);
ierr = MatH2OpusOrthogonalize(A);CHKERRQ(ierr);
HLibProfile::clear();
ierr = PetscLogEventBegin(MAT_H2Opus_Compress,A,0,0,0);CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
boundtocpu = A->boundtocpu;
#endif
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRMPI(ierr);
if (size > 1) {
if (boundtocpu) {
if (!a->dist_hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
#if defined(H2OPUS_USE_MPI)
distributed_hcompress(*a->dist_hmatrix, tol, a->handle);
#endif
#if defined(PETSC_H2OPUS_USE_GPU)
A->offloadmask = PETSC_OFFLOAD_CPU;
} else {
if (!a->dist_hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if defined(H2OPUS_USE_MPI)
distributed_hcompress(*a->dist_hmatrix_gpu, tol, a->handle);
#endif
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
}
} else {
#if defined(H2OPUS_USE_MPI)
h2opusHandle_t handle = a->handle->handle;
#else
h2opusHandle_t handle = a->handle;
#endif
if (boundtocpu) {
if (!a->hmatrix) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing CPU matrix");
hcompress(*a->hmatrix, tol, handle);
#if defined(PETSC_H2OPUS_USE_GPU)
A->offloadmask = PETSC_OFFLOAD_CPU;
} else {
if (!a->hmatrix_gpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Missing GPU matrix");
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
hcompress(*a->hmatrix_gpu, tol, handle);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
}
}
{ /* log flops */
double gops,time,perf,dev;
HLibProfile::getHcompressPerf(gops,time,perf,dev);
#if defined(PETSC_H2OPUS_USE_GPU)
if (boundtocpu) {
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
} else {
ierr = PetscLogGpuFlops(1e9*gops);CHKERRQ(ierr);
}
#else
ierr = PetscLogFlops(1e9*gops);CHKERRQ(ierr);
#endif
}
ierr = PetscLogEventEnd(MAT_H2Opus_Compress,A,0,0,0);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
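/*
   Editorial usage sketch (not part of the upstream source): truncating an assembled MATH2OPUS
   with an absolute threshold. MatH2OpusCompress() orthogonalizes the basis first (see the call
   to MatH2OpusOrthogonalize() above), so no separate orthogonalization is needed. The matrix
   and the tolerance value are hypothetical; guarded by #if 0.
*/
#if 0
static PetscErrorCode ExampleH2OpusCompress(Mat A)
{
  PetscErrorCode ierr;
  PetscFunctionBeginUser;
  ierr = MatH2OpusCompress(A,1.e-6);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif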
/*@C
MatH2OpusSetSamplingMat - Set the matrix to be sampled via matrix-vector products when constructing the hierarchical matrix.
Input Parameters:
+ A - the hierarchical matrix
. B - the matrix to be sampled
. bs - maximum number of samples to be taken concurrently
- tol - relative tolerance for construction
Notes: Need to call MatAssemblyBegin/End() to update the hierarchical matrix.
Level: intermediate
.seealso: MatCreate(), MATH2OPUS, MatCreateH2OpusFromMat(), MatCreateH2OpusFromKernel(), MatH2OpusCompress(), MatH2OpusOrthogonalize()
@*/
PetscErrorCode MatH2OpusSetSamplingMat(Mat A, Mat B, PetscInt bs, PetscReal tol)
{
PetscBool ish2opus;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidType(A,1);
if (B) PetscValidHeaderSpecific(B,MAT_CLASSID,2);
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (ish2opus) {
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
if (!a->sampler) a->sampler = new PetscMatrixSampler();
a->sampler->SetSamplingMat(B);
if (bs > 0) a->bs = bs;
if (tol > 0.) a->rtol = tol;
delete a->kernel;
}
PetscFunctionReturn(0);
}
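/*
   Editorial usage sketch (not part of the upstream source): refreshing a MATH2OPUS built by
   sampling after the sampled operator B has changed. As the man page above notes, the
   reconstruction happens during MatAssemblyBegin()/MatAssemblyEnd(). The block size and
   relative tolerance shown are hypothetical values; guarded by #if 0.
*/
#if 0
static PetscErrorCode ExampleH2OpusResample(Mat A, Mat B)
{
  PetscErrorCode ierr;
  PetscFunctionBeginUser;
  ierr = MatH2OpusSetSamplingMat(A,B,32,1.e-4);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif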
/*@C
MatCreateH2OpusFromKernel - Creates a MATH2OPUS from a user-supplied kernel.
Input Parameters:
+ comm - MPI communicator
. m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
. n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
. M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
. N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
. spacedim - dimension of the space coordinates
. coords - coordinates of the points
. cdist - whether or not coordinates are distributed
. kernel - computational kernel (or NULL)
. kernelctx - kernel context
. eta - admissibility condition tolerance
. leafsize - leaf size in cluster tree
- basisord - approximation order for Chebyshev interpolation of low-rank blocks
Output Parameter:
. nA - matrix
Options Database Keys:
+ -mat_h2opus_leafsize <PetscInt>
. -mat_h2opus_eta <PetscReal>
. -mat_h2opus_order <PetscInt> - Chebyshev approximation order
- -mat_h2opus_normsamples <PetscInt> - Maximum number of samples to be taken when estimating norms
Level: intermediate
.seealso: MatCreate(), MATH2OPUS, MatCreateH2OpusFromMat()
@*/
PetscErrorCode MatCreateH2OpusFromKernel(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt spacedim, const PetscReal coords[], PetscBool cdist, MatH2OpusKernel kernel, void *kernelctx, PetscReal eta, PetscInt leafsize, PetscInt basisord, Mat* nA)
{
Mat A;
Mat_H2OPUS *h2opus;
#if defined(PETSC_H2OPUS_USE_GPU)
PetscBool iscpu = PETSC_FALSE;
#else
PetscBool iscpu = PETSC_TRUE;
#endif
PetscErrorCode ierr;
PetscFunctionBegin;
if (m != n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Different row and column local sizes are not supported");
ierr = MatCreate(comm,&A);CHKERRQ(ierr);
ierr = MatSetSizes(A,m,n,M,N);CHKERRQ(ierr);
if (M != N) SETERRQ(comm,PETSC_ERR_SUP,"Rectangular matrices are not supported");
ierr = MatSetType(A,MATH2OPUS);CHKERRQ(ierr);
ierr = MatBindToCPU(A,iscpu);CHKERRQ(ierr);
ierr = MatH2OpusSetCoords_H2OPUS(A,spacedim,coords,cdist,kernel,kernelctx);CHKERRQ(ierr);
h2opus = (Mat_H2OPUS*)A->data;
if (eta > 0.) h2opus->eta = eta;
if (leafsize > 0) h2opus->leafsize = leafsize;
if (basisord > 0) h2opus->basisord = basisord;
*nA = A;
PetscFunctionReturn(0);
}
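/*
   Editorial usage sketch (not part of the upstream source): building a MATH2OPUS from a kernel
   evaluated at point coordinates. The Gaussian kernel and its callback signature are assumptions
   (they are meant to match the MatH2OpusKernel typedef used by this file); eta 0.9, leafsize 32
   and basisord 4 mirror the defaults set in MatCreate_H2OPUS() above. With cdist = PETSC_FALSE,
   coords holds the full set of point coordinates on each process. Guarded by #if 0.
*/
#if 0
static PetscScalar GaussKernel(PetscInt sdim, PetscReal x[], PetscReal y[], void *ctx)
{
  PetscReal d2 = 0.0;
  PetscInt  k;
  for (k = 0; k < sdim; k++) d2 += (x[k]-y[k])*(x[k]-y[k]);
  return PetscExpReal(-d2);
}
static PetscErrorCode ExampleH2OpusFromKernel(MPI_Comm comm, PetscInt n, PetscInt sdim, const PetscReal coords[], Mat *A)
{
  PetscErrorCode ierr;
  PetscFunctionBeginUser;
  ierr = MatCreateH2OpusFromKernel(comm,n,n,PETSC_DETERMINE,PETSC_DETERMINE,sdim,coords,PETSC_FALSE,GaussKernel,NULL,0.9,32,4,A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif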
/*@C
MatCreateH2OpusFromMat - Creates a MATH2OPUS sampling from a user-supplied operator.
Input Parameters:
+ B - the matrix to be sampled
. spacedim - dimension of the space coordinates
. coords - coordinates of the points
. cdist - whether or not coordinates are distributed
. eta - admissibility condition tolerance
. leafsize - leaf size in cluster tree
. maxrank - maximum rank allowed
. bs - maximum number of samples to be taken concurrently
- rtol - relative tolerance for construction
Output Parameter:
. nA - matrix
Options Database Keys:
+ -mat_h2opus_leafsize <PetscInt>
. -mat_h2opus_eta <PetscReal>
. -mat_h2opus_maxrank <PetscInt>
. -mat_h2opus_samples <PetscInt>
. -mat_h2opus_rtol <PetscReal>
. -mat_h2opus_check <PetscBool> - Check error when constructing from sampling during MatAssemblyEnd()
. -mat_h2opus_hara_verbose <PetscBool> - Verbose output from hara construction
- -mat_h2opus_normsamples <PetscInt> - Maximum number of samples to be taken when estimating norms
Notes: not available in parallel
Level: intermediate
.seealso: MatCreate(), MATH2OPUS, MatCreateH2OpusFromKernel()
@*/
PetscErrorCode MatCreateH2OpusFromMat(Mat B, PetscInt spacedim, const PetscReal coords[], PetscBool cdist, PetscReal eta, PetscInt leafsize, PetscInt maxrank, PetscInt bs, PetscReal rtol, Mat *nA)
{
Mat A;
Mat_H2OPUS *h2opus;
MPI_Comm comm;
PetscBool boundtocpu = PETSC_TRUE;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(B,MAT_CLASSID,1);
PetscValidLogicalCollectiveInt(B,spacedim,2);
PetscValidLogicalCollectiveReal(B,eta,4);
PetscValidLogicalCollectiveInt(B,leafsize,5);
PetscValidLogicalCollectiveInt(B,maxrank,6);
PetscValidLogicalCollectiveInt(B,bs,7);
PetscValidLogicalCollectiveReal(B,rtol,8);
PetscValidPointer(nA,9);
ierr = PetscObjectGetComm((PetscObject)B,&comm);CHKERRQ(ierr);
if (B->rmap->n != B->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Different row and column local sizes are not supported");
if (B->rmap->N != B->cmap->N) SETERRQ(comm,PETSC_ERR_SUP,"Rectangular matrices are not supported");
ierr = MatCreate(comm,&A);CHKERRQ(ierr);
ierr = MatSetSizes(A,B->rmap->n,B->cmap->n,B->rmap->N,B->cmap->N);CHKERRQ(ierr);
#if defined(PETSC_H2OPUS_USE_GPU)
{
PetscBool iscuda;
VecType vtype;
ierr = MatGetVecType(B,&vtype);CHKERRQ(ierr);
ierr = PetscStrcmp(vtype,VECCUDA,&iscuda);CHKERRQ(ierr);
if (!iscuda) {
ierr = PetscStrcmp(vtype,VECSEQCUDA,&iscuda);CHKERRQ(ierr);
if (!iscuda) {
ierr = PetscStrcmp(vtype,VECMPICUDA,&iscuda);CHKERRQ(ierr);
}
}
if (iscuda && !B->boundtocpu) boundtocpu = PETSC_FALSE;
}
#endif
ierr = MatSetType(A,MATH2OPUS);CHKERRQ(ierr);
ierr = MatBindToCPU(A,boundtocpu);CHKERRQ(ierr);
if (spacedim) {
ierr = MatH2OpusSetCoords_H2OPUS(A,spacedim,coords,cdist,NULL,NULL);CHKERRQ(ierr);
}
ierr = MatPropagateSymmetryOptions(B,A);CHKERRQ(ierr);
/* if (!A->symmetric) SETERRQ(comm,PETSC_ERR_SUP,"Unsymmetric sampling does not work"); */
h2opus = (Mat_H2OPUS*)A->data;
h2opus->sampler = new PetscMatrixSampler(B);
if (eta > 0.) h2opus->eta = eta;
if (leafsize > 0) h2opus->leafsize = leafsize;
if (maxrank > 0) h2opus->max_rank = maxrank;
if (bs > 0) h2opus->bs = bs;
if (rtol > 0.) h2opus->rtol = rtol;
*nA = A;
A->preallocated = PETSC_TRUE;
PetscFunctionReturn(0);
}
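/*
   Editorial usage sketch (not part of the upstream source): approximating an existing square,
   symmetric operator B by sampling its matrix-vector products (sequential only, per the man
   page above); coords attaches a geometry to the rows. All names and numeric parameters are
   hypothetical; guarded by #if 0.
*/
#if 0
static PetscErrorCode ExampleH2OpusFromMat(Mat B, PetscInt sdim, const PetscReal coords[], Mat *A)
{
  PetscErrorCode ierr;
  PetscFunctionBeginUser;
  /* eta 0.6, leafsize 32, maxrank 64, 32 concurrent samples, relative tolerance 1e-4 */
  ierr = MatCreateH2OpusFromMat(B,sdim,coords,PETSC_FALSE,0.6,32,64,32,1.e-4,A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); /* sampling is performed here */
  PetscFunctionReturn(0);
}
#endif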
/*@C
MatH2OpusGetIndexMap - Access reordering index set.
Input Parameters:
. A - the matrix
Output Parameter:
. indexmap - the index set for the reordering
Level: intermediate
.seealso: MatCreate(), MATH2OPUS, MatCreateH2OpusFromMat(), MatCreateH2OpusFromKernel()
@*/
PetscErrorCode MatH2OpusGetIndexMap(Mat A, IS *indexmap)
{
PetscBool ish2opus;
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidType(A,1);
PetscValidPointer(indexmap,2);
if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (!ish2opus) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Not for type %s",((PetscObject)A)->type_name);
*indexmap = a->h2opus_indexmap;
PetscFunctionReturn(0);
}
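/*
   Editorial usage sketch (not part of the upstream source): inspecting the clustering reordering
   used internally. The IS returned above is owned by the matrix (no reference is taken), so the
   caller must not destroy it. Guarded by #if 0; the function name is hypothetical.
*/
#if 0
static PetscErrorCode ExampleH2OpusIndexMap(Mat A)
{
  IS             is;
  PetscErrorCode ierr;
  PetscFunctionBeginUser;
  ierr = MatH2OpusGetIndexMap(A,&is);CHKERRQ(ierr);
  ierr = ISView(is,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif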
/*@C
MatH2OpusMapVec - Maps a vector between PETSc and H2Opus ordering
Input Parameters:
+ A - the matrix
. nativetopetsc - if true, maps from H2Opus ordering to PETSc ordering. If false, applies the reverse map
- in - the vector to be mapped
Output Parameter:
. out - the newly created mapped vector
Level: intermediate
.seealso: MatCreate(), MATH2OPUS, MatCreateH2OpusFromMat(), MatCreateH2OpusFromKernel()
@*/
PetscErrorCode MatH2OpusMapVec(Mat A, PetscBool nativetopetsc, Vec in, Vec* out)
{
PetscBool ish2opus;
Mat_H2OPUS *a = (Mat_H2OPUS*)A->data;
PetscScalar *xin,*xout;
PetscBool nm;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidType(A,1);
PetscValidLogicalCollectiveBool(A,nativetopetsc,2);
PetscValidHeaderSpecific(in,VEC_CLASSID,3);
PetscValidPointer(out,4);
if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
ierr = PetscObjectTypeCompare((PetscObject)A,MATH2OPUS,&ish2opus);CHKERRQ(ierr);
if (!ish2opus) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Not for type %s",((PetscObject)A)->type_name);
nm = a->nativemult;
ierr = MatH2OpusSetNativeMult(A,(PetscBool)!nativetopetsc);CHKERRQ(ierr);
ierr = MatCreateVecs(A,out,NULL);CHKERRQ(ierr);
ierr = MatH2OpusSetNativeMult(A,nm);CHKERRQ(ierr);
if (!a->sf) { /* same ordering */
ierr = VecCopy(in,*out);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
ierr = VecGetArrayRead(in,(const PetscScalar**)&xin);CHKERRQ(ierr);
ierr = VecGetArrayWrite(*out,&xout);CHKERRQ(ierr);
if (nativetopetsc) {
ierr = PetscSFReduceBegin(a->sf,MPIU_SCALAR,xin,xout,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFReduceEnd(a->sf,MPIU_SCALAR,xin,xout,MPI_REPLACE);CHKERRQ(ierr);
} else {
ierr = PetscSFBcastBegin(a->sf,MPIU_SCALAR,xin,xout,MPI_REPLACE);CHKERRQ(ierr);
ierr = PetscSFBcastEnd(a->sf,MPIU_SCALAR,xin,xout,MPI_REPLACE);CHKERRQ(ierr);
}
ierr = VecRestoreArrayRead(in,(const PetscScalar**)&xin);CHKERRQ(ierr);
ierr = VecRestoreArrayWrite(*out,&xout);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
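/*
   Editorial usage sketch (not part of the upstream source): permuting a vector from the PETSc
   ordering to the H2Opus ordering and back. MatH2OpusMapVec() creates the output vector, so
   both results are destroyed here. Vector names are hypothetical; guarded by #if 0.
*/
#if 0
static PetscErrorCode ExampleH2OpusMapVec(Mat A, Vec x)
{
  Vec            xh2opus, xpetsc;
  PetscErrorCode ierr;
  PetscFunctionBeginUser;
  ierr = MatH2OpusMapVec(A,PETSC_FALSE,x,&xh2opus);CHKERRQ(ierr); /* PETSc -> H2Opus ordering */
  ierr = MatH2OpusMapVec(A,PETSC_TRUE,xh2opus,&xpetsc);CHKERRQ(ierr); /* H2Opus -> PETSc ordering */
  ierr = VecDestroy(&xh2opus);CHKERRQ(ierr);
  ierr = VecDestroy(&xpetsc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif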
#endif
|
073f7a19169c87bfaadc583d40e6c499d248ad03.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <c10/hip/HIPMathCompat.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <cuda_bf16.h>
#include <hip/hip_runtime_api.h>
#include "THH/THH.h"
#include <ATen/hip/HIPContext.h>
#include <torch/extension.h>
#include <math.h>
#define CELL(a, b) (((a) + (b) - 1) / (b))
#if __cplusplus >= 201703L
#define IF_CONSTEXPR constexpr
#else
#define IF_CONSTEXPR
#endif
template <typename acc_t>
__device__ acc_t torch_gelu(acc_t y) {
return normcdff(y) * y;
}
template <typename acc_t>
__device__ acc_t fast_gelu(acc_t y) {
return y * 0.5 * (1.0 + tanhf(0.79788456 * y * (1 + 0.044715 * y * y)));
}
template <typename acc_t>
__device__ acc_t torch_gelu_back(acc_t y, acc_t g) {
constexpr acc_t kBeta = M_2_SQRTPI * M_SQRT1_2 * 0.5;
acc_t cdf = normcdff(y);
acc_t pdf = expf(-0.5f * y * y) * kBeta;
return g * (cdf + y * pdf);
}
template <typename acc_t>
__device__ acc_t fast_gelu_back(acc_t y, acc_t g) {
acc_t tanh_out = tanhf(0.79788456 * y * (1 + 0.044715 * y * y));
acc_t ff = 0.5 * y * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * y * y)) + 0.5 * (1 + tanh_out);
return ff * g;
}
template <typename index_t, typename input_t, typename output_t, typename acc_t, acc_t (*gelu_func)(acc_t)>
__global__ void bias_gelu_forward(output_t *dst, const input_t *src, const input_t *bias, index_t bsz, int dim) {
for (int j = threadIdx.x; j < dim; j += blockDim.x) {
if (blockIdx.x < bsz) {
index_t idx = blockIdx.x * dim + j;
acc_t y = (acc_t)(src[idx] + bias[j]);
dst[idx] = (output_t)gelu_func(y);
}
}
}
template <typename T>
struct VecTypeImpl;
template <>
struct VecTypeImpl<half> {
using type = half2;
};
template <>
struct VecTypeImpl<nv_bfloat16> {
using type = nv_bfloat162;
};
template <>
struct VecTypeImpl<float> {
using type = float2;
};
template <typename T>
using VecType = typename VecTypeImpl<T>::type;
template <typename index_t, typename input_t, typename output_t, typename acc_t, acc_t (*gelu_func)(acc_t)>
__global__ void bias_gelu_forward_vec(output_t *dst, const input_t *src, const input_t *bias, index_t bsz, int dim) {
using VecInType = VecType<input_t>;
using VecOutType = VecType<output_t>;
for (int j = threadIdx.x * 2; j < dim; j += blockDim.x * 2) {
if (blockIdx.x < bsz) {
index_t idx = blockIdx.x * dim + j;
VecInType s = *(VecInType *)(src + idx);
VecInType b = *(VecInType *)(bias + j);
acc_t y1 = s.x + b.x;
acc_t y2 = s.y + b.y;
VecOutType d;
d.x = gelu_func(y1);
d.y = gelu_func(y2);
*(VecOutType *)(dst + idx) = d;
}
}
}
template <typename index_t, typename input_t, typename output_t, typename acc_t, acc_t (*gelu_back_func)(acc_t, acc_t)>
__global__ void bias_gelu_backward(output_t *dst, const input_t *src, const input_t *bias,
const input_t *grad, index_t bsz, int dim) {
for (int j = threadIdx.x; j < dim; j += blockDim.x) {
if (blockIdx.x < bsz) {
index_t idx = blockIdx.x * dim + j;
acc_t y = (acc_t)(src[idx] + bias[j]);
acc_t g = grad[idx];
dst[idx] = (output_t)gelu_back_func(y, g);
}
}
}
template <typename index_t, typename input_t, typename output_t, typename acc_t, acc_t (*gelu_back_func)(acc_t, acc_t)>
__global__ void bias_gelu_backward_vec(output_t *dst, const input_t *src, const input_t *bias,
const input_t *grad, index_t bsz, int dim) {
using VecInType = VecType<input_t>;
using VecOutType = VecType<output_t>;
for (int j = threadIdx.x * 2; j < dim; j += blockDim.x * 2) {
if (blockIdx.x < bsz) {
index_t idx = blockIdx.x * dim + j;
VecInType s = *(VecInType *)(src + idx);
VecInType b = *(VecInType *)(bias + j);
VecInType g = *(VecInType *)(grad + idx);
acc_t y1 = s.x + b.x;
acc_t y2 = s.y + b.y;
VecOutType d;
d.x = gelu_back_func(y1, g.x);
d.y = gelu_back_func(y2, g.y);
*(VecOutType *)(dst + idx) = d;
}
}
}
template <float (*gelu_func)(float)>
torch::Tensor bias_gelu_forward_cuda(const torch::Tensor &x, const torch::Tensor &bias) {
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
auto sizes = x.sizes();
size_t bsz = 1;
for (size_t i = 0; i + 1 < sizes.size(); ++i) {
bsz *= sizes[i];
}
int dim = sizes[sizes.size() - 1];
auto dst_options = x.options().requires_grad(false);
torch::Tensor results = torch::empty(sizes, dst_options);
auto type = x.scalar_type();
const int ThreadsPerBlock = 256;
int ThreadsPerBlockVec = CELL(dim, 256) * 256 % 512 == 0 ? 256 : 128;
if (type == at::ScalarType::BFloat16) {
if (dim % 2 == 0) {
hipLaunchKernelGGL(( bias_gelu_forward_vec<size_t, nv_bfloat16, nv_bfloat16, float, gelu_func>), dim3(bsz), dim3(ThreadsPerBlockVec), 0, stream,
(nv_bfloat16 *)results.data_ptr(),
(const nv_bfloat16 *)x.data_ptr(),
(const nv_bfloat16 *)bias.data_ptr(),
bsz,
dim);
} else {
hipLaunchKernelGGL(( bias_gelu_forward<size_t, nv_bfloat16, nv_bfloat16, float, gelu_func>), dim3(bsz), dim3(ThreadsPerBlock), 0, stream,
(nv_bfloat16 *)results.data_ptr(),
(const nv_bfloat16 *)x.data_ptr(),
(const nv_bfloat16 *)bias.data_ptr(),
bsz,
dim);
}
} else if (type == at::ScalarType::Half) {
if (dim % 2 == 0) {
hipLaunchKernelGGL(( bias_gelu_forward_vec<size_t, half, half, float, gelu_func>), dim3(bsz), dim3(ThreadsPerBlockVec), 0, stream,
(half *)results.data_ptr(),
(const half *)x.data_ptr(),
(const half *)bias.data_ptr(),
bsz,
dim);
} else {
hipLaunchKernelGGL(( bias_gelu_forward<size_t, half, half, float, gelu_func>), dim3(bsz), dim3(ThreadsPerBlock), 0, stream,
(half *)results.data_ptr(),
(const half *)x.data_ptr(),
(const half *)bias.data_ptr(),
bsz,
dim);
}
} else if (type == at::ScalarType::Float) {
hipLaunchKernelGGL(( bias_gelu_forward<size_t, float, float, float, gelu_func>), dim3(bsz), dim3(ThreadsPerBlock), 0, stream,
(float *)results.data_ptr(),
(const float *)x.data_ptr(),
(const float *)bias.data_ptr(),
bsz,
dim);
}
return results;
}
template <float (*gelu_back_func)(float, float)>
torch::Tensor bias_gelu_backward_cuda(const torch::Tensor &x, const torch::Tensor &bias, const torch::Tensor &grad) {
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
auto sizes = x.sizes();
size_t bsz = 1;
for (size_t i = 0; i + 1 < sizes.size(); ++i) {
bsz *= sizes[i];
}
int dim = sizes[sizes.size() - 1];
auto dst_options = x.options().requires_grad(false);
torch::Tensor results = torch::empty(sizes, dst_options);
auto type = x.scalar_type();
const int ThreadsPerBlock = 256;
int ThreadsPerBlockVec = CELL(dim, 256) * 256 % 512 == 0 ? 256 : 128;
if (type == at::ScalarType::BFloat16) {
if (dim % 2 == 0) {
hipLaunchKernelGGL(( bias_gelu_backward_vec<size_t, nv_bfloat16, nv_bfloat16, float, gelu_back_func>), dim3(bsz), dim3(ThreadsPerBlockVec), 0, stream,
(nv_bfloat16 *)results.data_ptr(),
(const nv_bfloat16 *)x.data_ptr(),
(const nv_bfloat16 *)bias.data_ptr(),
(const nv_bfloat16 *)grad.data_ptr(),
bsz,
dim);
} else {
hipLaunchKernelGGL(( bias_gelu_backward<size_t, nv_bfloat16, nv_bfloat16, float, gelu_back_func>), dim3(bsz), dim3(ThreadsPerBlock), 0, stream,
(nv_bfloat16 *)results.data_ptr(),
(const nv_bfloat16 *)x.data_ptr(),
(const nv_bfloat16 *)bias.data_ptr(),
(const nv_bfloat16 *)grad.data_ptr(),
bsz,
dim);
}
} else if (type == at::ScalarType::Half) {
if (dim % 2 == 0) {
hipLaunchKernelGGL(( bias_gelu_backward_vec<size_t, half, half, float, gelu_back_func>), dim3(bsz), dim3(ThreadsPerBlockVec), 0, stream,
(half *)results.data_ptr(),
(const half *)x.data_ptr(),
(const half *)bias.data_ptr(),
(const half *)grad.data_ptr(),
bsz,
dim);
} else {
hipLaunchKernelGGL(( bias_gelu_backward<size_t, half, half, float, gelu_back_func>), dim3(bsz), dim3(ThreadsPerBlock), 0, stream,
(half *)results.data_ptr(),
(const half *)x.data_ptr(),
(const half *)bias.data_ptr(),
(const half *)grad.data_ptr(),
bsz,
dim);
}
} else if (type == at::ScalarType::Float) {
hipLaunchKernelGGL(( bias_gelu_backward<size_t, float, float, float, gelu_back_func>), dim3(bsz), dim3(ThreadsPerBlock), 0, stream,
(float *)results.data_ptr(),
(const float *)x.data_ptr(),
(const float *)bias.data_ptr(),
(const float *)grad.data_ptr(),
bsz,
dim);
}
return results;
}
using ForwardFunc = torch::Tensor (*)(const torch::Tensor &, const torch::Tensor &);
ForwardFunc bias_gelu_torch_forward_cuda = bias_gelu_forward_cuda<torch_gelu>;
ForwardFunc bias_gelu_fast_forward_cuda = bias_gelu_forward_cuda<fast_gelu>;
using BackwardFunc = torch::Tensor (*)(const torch::Tensor &, const torch::Tensor &, const torch::Tensor &);
BackwardFunc bias_gelu_torch_backward_cuda = bias_gelu_backward_cuda<torch_gelu_back>;
BackwardFunc bias_gelu_fast_backward_cuda = bias_gelu_backward_cuda<fast_gelu_back>; | 073f7a19169c87bfaadc583d40e6c499d248ad03.cu | #include <ATen/ATen.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <c10/cuda/CUDAMathCompat.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cuda_bf16.h>
#include <cuda_profiler_api.h>
#include "THC/THC.h"
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#include <math.h>
#define CELL(a, b) (((a) + (b) - 1) / (b))
#if __cplusplus >= 201703L
#define IF_CONSTEXPR constexpr
#else
#define IF_CONSTEXPR
#endif
template <typename acc_t>
__device__ acc_t torch_gelu(acc_t y) {
return normcdff(y) * y;
}
template <typename acc_t>
__device__ acc_t fast_gelu(acc_t y) {
return y * 0.5 * (1.0 + tanhf(0.79788456 * y * (1 + 0.044715 * y * y)));
}
template <typename acc_t>
__device__ acc_t torch_gelu_back(acc_t y, acc_t g) {
constexpr acc_t kBeta = M_2_SQRTPI * M_SQRT1_2 * 0.5;
acc_t cdf = normcdff(y);
acc_t pdf = expf(-0.5f * y * y) * kBeta;
return g * (cdf + y * pdf);
}
template <typename acc_t>
__device__ acc_t fast_gelu_back(acc_t y, acc_t g) {
acc_t tanh_out = tanhf(0.79788456 * y * (1 + 0.044715 * y * y));
acc_t ff = 0.5 * y * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * y * y)) + 0.5 * (1 + tanh_out);
return ff * g;
}
template <typename index_t, typename input_t, typename output_t, typename acc_t, acc_t (*gelu_func)(acc_t)>
__global__ void bias_gelu_forward(output_t *dst, const input_t *src, const input_t *bias, index_t bsz, int dim) {
for (int j = threadIdx.x; j < dim; j += blockDim.x) {
if (blockIdx.x < bsz) {
index_t idx = blockIdx.x * dim + j;
acc_t y = (acc_t)(src[idx] + bias[j]);
dst[idx] = (output_t)gelu_func(y);
}
}
}
template <typename T>
struct VecTypeImpl;
template <>
struct VecTypeImpl<half> {
using type = half2;
};
template <>
struct VecTypeImpl<nv_bfloat16> {
using type = nv_bfloat162;
};
template <>
struct VecTypeImpl<float> {
using type = float2;
};
template <typename T>
using VecType = typename VecTypeImpl<T>::type;
template <typename index_t, typename input_t, typename output_t, typename acc_t, acc_t (*gelu_func)(acc_t)>
__global__ void bias_gelu_forward_vec(output_t *dst, const input_t *src, const input_t *bias, index_t bsz, int dim) {
using VecInType = VecType<input_t>;
using VecOutType = VecType<output_t>;
for (int j = threadIdx.x * 2; j < dim; j += blockDim.x * 2) {
if (blockIdx.x < bsz) {
index_t idx = blockIdx.x * dim + j;
VecInType s = *(VecInType *)(src + idx);
VecInType b = *(VecInType *)(bias + j);
acc_t y1 = s.x + b.x;
acc_t y2 = s.y + b.y;
VecOutType d;
d.x = gelu_func(y1);
d.y = gelu_func(y2);
*(VecOutType *)(dst + idx) = d;
}
}
}
template <typename index_t, typename input_t, typename output_t, typename acc_t, acc_t (*gelu_back_func)(acc_t, acc_t)>
__global__ void bias_gelu_backward(output_t *dst, const input_t *src, const input_t *bias,
const input_t *grad, index_t bsz, int dim) {
for (int j = threadIdx.x; j < dim; j += blockDim.x) {
if (blockIdx.x < bsz) {
index_t idx = blockIdx.x * dim + j;
acc_t y = (acc_t)(src[idx] + bias[j]);
acc_t g = grad[idx];
dst[idx] = (output_t)gelu_back_func(y, g);
}
}
}
template <typename index_t, typename input_t, typename output_t, typename acc_t, acc_t (*gelu_back_func)(acc_t, acc_t)>
__global__ void bias_gelu_backward_vec(output_t *dst, const input_t *src, const input_t *bias,
const input_t *grad, index_t bsz, int dim) {
using VecInType = VecType<input_t>;
using VecOutType = VecType<output_t>;
for (int j = threadIdx.x * 2; j < dim; j += blockDim.x * 2) {
if (blockIdx.x < bsz) {
index_t idx = blockIdx.x * dim + j;
VecInType s = *(VecInType *)(src + idx);
VecInType b = *(VecInType *)(bias + j);
VecInType g = *(VecInType *)(grad + idx);
acc_t y1 = s.x + b.x;
acc_t y2 = s.y + b.y;
VecOutType d;
d.x = gelu_back_func(y1, g.x);
d.y = gelu_back_func(y2, g.y);
*(VecOutType *)(dst + idx) = d;
}
}
}
template <float (*gelu_func)(float)>
torch::Tensor bias_gelu_forward_cuda(const torch::Tensor &x, const torch::Tensor &bias) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
auto sizes = x.sizes();
size_t bsz = 1;
for (size_t i = 0; i + 1 < sizes.size(); ++i) {
bsz *= sizes[i];
}
int dim = sizes[sizes.size() - 1];
auto dst_options = x.options().requires_grad(false);
torch::Tensor results = torch::empty(sizes, dst_options);
auto type = x.scalar_type();
const int ThreadsPerBlock = 256;
int ThreadsPerBlockVec = CELL(dim, 256) * 256 % 512 == 0 ? 256 : 128;
if (type == at::ScalarType::BFloat16) {
if (dim % 2 == 0) {
bias_gelu_forward_vec<size_t, nv_bfloat16, nv_bfloat16, float, gelu_func><<<bsz, ThreadsPerBlockVec, 0, stream>>>(
(nv_bfloat16 *)results.data_ptr(),
(const nv_bfloat16 *)x.data_ptr(),
(const nv_bfloat16 *)bias.data_ptr(),
bsz,
dim);
} else {
bias_gelu_forward<size_t, nv_bfloat16, nv_bfloat16, float, gelu_func><<<bsz, ThreadsPerBlock, 0, stream>>>(
(nv_bfloat16 *)results.data_ptr(),
(const nv_bfloat16 *)x.data_ptr(),
(const nv_bfloat16 *)bias.data_ptr(),
bsz,
dim);
}
} else if (type == at::ScalarType::Half) {
if (dim % 2 == 0) {
bias_gelu_forward_vec<size_t, half, half, float, gelu_func><<<bsz, ThreadsPerBlockVec, 0, stream>>>(
(half *)results.data_ptr(),
(const half *)x.data_ptr(),
(const half *)bias.data_ptr(),
bsz,
dim);
} else {
bias_gelu_forward<size_t, half, half, float, gelu_func><<<bsz, ThreadsPerBlock, 0, stream>>>(
(half *)results.data_ptr(),
(const half *)x.data_ptr(),
(const half *)bias.data_ptr(),
bsz,
dim);
}
} else if (type == at::ScalarType::Float) {
bias_gelu_forward<size_t, float, float, float, gelu_func><<<bsz, ThreadsPerBlock, 0, stream>>>(
(float *)results.data_ptr(),
(const float *)x.data_ptr(),
(const float *)bias.data_ptr(),
bsz,
dim);
}
return results;
}
template <float (*gelu_back_func)(float, float)>
torch::Tensor bias_gelu_backward_cuda(const torch::Tensor &x, const torch::Tensor &bias, const torch::Tensor &grad) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
auto sizes = x.sizes();
size_t bsz = 1;
for (size_t i = 0; i + 1 < sizes.size(); ++i) {
bsz *= sizes[i];
}
int dim = sizes[sizes.size() - 1];
auto dst_options = x.options().requires_grad(false);
torch::Tensor results = torch::empty(sizes, dst_options);
auto type = x.scalar_type();
const int ThreadsPerBlock = 256;
int ThreadsPerBlockVec = CELL(dim, 256) * 256 % 512 == 0 ? 256 : 128;
if (type == at::ScalarType::BFloat16) {
if (dim % 2 == 0) {
bias_gelu_backward_vec<size_t, nv_bfloat16, nv_bfloat16, float, gelu_back_func><<<bsz, ThreadsPerBlockVec, 0, stream>>>(
(nv_bfloat16 *)results.data_ptr(),
(const nv_bfloat16 *)x.data_ptr(),
(const nv_bfloat16 *)bias.data_ptr(),
(const nv_bfloat16 *)grad.data_ptr(),
bsz,
dim);
} else {
bias_gelu_backward<size_t, nv_bfloat16, nv_bfloat16, float, gelu_back_func><<<bsz, ThreadsPerBlock, 0, stream>>>(
(nv_bfloat16 *)results.data_ptr(),
(const nv_bfloat16 *)x.data_ptr(),
(const nv_bfloat16 *)bias.data_ptr(),
(const nv_bfloat16 *)grad.data_ptr(),
bsz,
dim);
}
} else if (type == at::ScalarType::Half) {
if (dim % 2 == 0) {
bias_gelu_backward_vec<size_t, half, half, float, gelu_back_func><<<bsz, ThreadsPerBlockVec, 0, stream>>>(
(half *)results.data_ptr(),
(const half *)x.data_ptr(),
(const half *)bias.data_ptr(),
(const half *)grad.data_ptr(),
bsz,
dim);
} else {
bias_gelu_backward<size_t, half, half, float, gelu_back_func><<<bsz, ThreadsPerBlock, 0, stream>>>(
(half *)results.data_ptr(),
(const half *)x.data_ptr(),
(const half *)bias.data_ptr(),
(const half *)grad.data_ptr(),
bsz,
dim);
}
} else if (type == at::ScalarType::Float) {
bias_gelu_backward<size_t, float, float, float, gelu_back_func><<<bsz, ThreadsPerBlock, 0, stream>>>(
(float *)results.data_ptr(),
(const float *)x.data_ptr(),
(const float *)bias.data_ptr(),
(const float *)grad.data_ptr(),
bsz,
dim);
}
return results;
}
using ForwardFunc = torch::Tensor (*)(const torch::Tensor &, const torch::Tensor &);
ForwardFunc bias_gelu_torch_forward_cuda = bias_gelu_forward_cuda<torch_gelu>;
ForwardFunc bias_gelu_fast_forward_cuda = bias_gelu_forward_cuda<fast_gelu>;
using BackwardFunc = torch::Tensor (*)(const torch::Tensor &, const torch::Tensor &, const torch::Tensor &);
BackwardFunc bias_gelu_torch_backward_cuda = bias_gelu_backward_cuda<torch_gelu_back>;
BackwardFunc bias_gelu_fast_backward_cuda = bias_gelu_backward_cuda<fast_gelu_back>; |
4e17d3bd0b77f8485f3727bf3adfd37b515bef47.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright (c) by Contributors 2020
*/
#include <gtest/gtest.h>
#include <cmath>
#include "xgboost/metric.h"
#include "../helpers.h"
#include "../../../src/common/survival_util.h"
/** Tests for Survival metrics that should run both on CPU and GPU **/
namespace xgboost {
namespace common {
TEST(Metric, DeclareUnifiedTest(AFTNegLogLik)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
/**
* Test aggregate output from the AFT metric over a small test data set.
* This is unlike AFTLoss.* tests, which verify metric values over individual data points.
**/
MetaInfo info;
info.num_row_ = 4;
info.labels_lower_bound_.HostVector()
= { 100.0f, 0.0f, 60.0f, 16.0f };
info.labels_upper_bound_.HostVector()
= { 100.0f, 20.0f, std::numeric_limits<bst_float>::infinity(), 200.0f };
info.weights_.HostVector() = std::vector<bst_float>();
HostDeviceVector<bst_float> preds(4, ::log(64));
struct TestCase {
std::string dist_type;
bst_float reference_value;
};
for (const auto& test_case : std::vector<TestCase>{ {"normal", 2.1508f}, {"logistic", 2.1804f},
{"extreme", 2.0706f} }) {
std::unique_ptr<Metric> metric(Metric::Create("aft-nloglik", &lparam));
metric->Configure({ {"aft_loss_distribution", test_case.dist_type},
{"aft_loss_distribution_scale", "1.0"} });
EXPECT_NEAR(metric->Eval(preds, info, false), test_case.reference_value, 1e-4);
}
}
TEST(Metric, DeclareUnifiedTest(IntervalRegressionAccuracy)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
MetaInfo info;
info.num_row_ = 4;
info.labels_lower_bound_.HostVector() = { 20.0f, 0.0f, 60.0f, 16.0f };
info.labels_upper_bound_.HostVector() = { 80.0f, 20.0f, 80.0f, 200.0f };
info.weights_.HostVector() = std::vector<bst_float>();
HostDeviceVector<bst_float> preds(4, ::log(60.0f));
std::unique_ptr<Metric> metric(Metric::Create("interval-regression-accuracy", &lparam));
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.75f);
info.labels_lower_bound_.HostVector()[2] = 70.0f;
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
info.labels_upper_bound_.HostVector()[2] = std::numeric_limits<bst_float>::infinity();
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
info.labels_upper_bound_.HostVector()[3] = std::numeric_limits<bst_float>::infinity();
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
info.labels_lower_bound_.HostVector()[0] = 70.0f;
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.25f);
}
// Test configuration of AFT metric
TEST(AFTNegLogLikMetric, DeclareUnifiedTest(Configuration)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
std::unique_ptr<Metric> metric(Metric::Create("aft-nloglik", &lparam));
metric->Configure({{"aft_loss_distribution", "normal"}, {"aft_loss_distribution_scale", "10"}});
// Configuration round-trip test
Json j_obj{ Object() };
metric->SaveConfig(&j_obj);
auto aft_param_json = j_obj["aft_loss_param"];
EXPECT_EQ(get<String>(aft_param_json["aft_loss_distribution"]), "normal");
EXPECT_EQ(get<String>(aft_param_json["aft_loss_distribution_scale"]), "10");
}
} // namespace common
} // namespace xgboost
| 4e17d3bd0b77f8485f3727bf3adfd37b515bef47.cu | /*!
* Copyright (c) by Contributors 2020
*/
#include <gtest/gtest.h>
#include <cmath>
#include "xgboost/metric.h"
#include "../helpers.h"
#include "../../../src/common/survival_util.h"
/** Tests for Survival metrics that should run both on CPU and GPU **/
namespace xgboost {
namespace common {
TEST(Metric, DeclareUnifiedTest(AFTNegLogLik)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
/**
* Test aggregate output from the AFT metric over a small test data set.
* This is unlike AFTLoss.* tests, which verify metric values over individual data points.
**/
MetaInfo info;
info.num_row_ = 4;
info.labels_lower_bound_.HostVector()
= { 100.0f, 0.0f, 60.0f, 16.0f };
info.labels_upper_bound_.HostVector()
= { 100.0f, 20.0f, std::numeric_limits<bst_float>::infinity(), 200.0f };
info.weights_.HostVector() = std::vector<bst_float>();
HostDeviceVector<bst_float> preds(4, std::log(64));
struct TestCase {
std::string dist_type;
bst_float reference_value;
};
for (const auto& test_case : std::vector<TestCase>{ {"normal", 2.1508f}, {"logistic", 2.1804f},
{"extreme", 2.0706f} }) {
std::unique_ptr<Metric> metric(Metric::Create("aft-nloglik", &lparam));
metric->Configure({ {"aft_loss_distribution", test_case.dist_type},
{"aft_loss_distribution_scale", "1.0"} });
EXPECT_NEAR(metric->Eval(preds, info, false), test_case.reference_value, 1e-4);
}
}
TEST(Metric, DeclareUnifiedTest(IntervalRegressionAccuracy)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
MetaInfo info;
info.num_row_ = 4;
info.labels_lower_bound_.HostVector() = { 20.0f, 0.0f, 60.0f, 16.0f };
info.labels_upper_bound_.HostVector() = { 80.0f, 20.0f, 80.0f, 200.0f };
info.weights_.HostVector() = std::vector<bst_float>();
HostDeviceVector<bst_float> preds(4, std::log(60.0f));
std::unique_ptr<Metric> metric(Metric::Create("interval-regression-accuracy", &lparam));
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.75f);
info.labels_lower_bound_.HostVector()[2] = 70.0f;
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
info.labels_upper_bound_.HostVector()[2] = std::numeric_limits<bst_float>::infinity();
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
info.labels_upper_bound_.HostVector()[3] = std::numeric_limits<bst_float>::infinity();
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
info.labels_lower_bound_.HostVector()[0] = 70.0f;
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.25f);
}
// Test configuration of AFT metric
TEST(AFTNegLogLikMetric, DeclareUnifiedTest(Configuration)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
std::unique_ptr<Metric> metric(Metric::Create("aft-nloglik", &lparam));
metric->Configure({{"aft_loss_distribution", "normal"}, {"aft_loss_distribution_scale", "10"}});
// Configuration round-trip test
Json j_obj{ Object() };
metric->SaveConfig(&j_obj);
auto aft_param_json = j_obj["aft_loss_param"];
EXPECT_EQ(get<String>(aft_param_json["aft_loss_distribution"]), "normal");
EXPECT_EQ(get<String>(aft_param_json["aft_loss_distribution_scale"]), "10");
}
} // namespace common
} // namespace xgboost
|
c53781f114462cc44be8987b6aa87d74a5cea25a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <optix.h>
#include <optixu/optixu_math_namespace.h>
#include "helpers.h"
#include "random.h"
#include "ColorXYZ.h"
#include "Packet.h"
using namespace optix;
/*
struct PerRayData_pathtrace
{
float3 result;
float importance;
int depth;
};
rtDeclareVariable(float3, eye, , );
rtDeclareVariable(float3, U, , );
rtDeclareVariable(float3, V, , );
rtDeclareVariable(float3, W, , );
rtDeclareVariable(float3, bad_color, , );
rtDeclareVariable(float, scene_epsilon, , );
rtBuffer<uchar4, 2> output_buffer;
rtBuffer<float4, 2> accum_buffer;
rtDeclareVariable(rtObject, top_object, , );
rtDeclareVariable(unsigned int, radiance_ray_type, , );
rtDeclareVariable(unsigned int, frame, , );
rtDeclareVariable(uint2, launch_index, rtLaunchIndex, );
RT_PROGRAM void pinhole_camera()
{
size_t2 screen = output_buffer.size();
unsigned int seed = tea<16>(screen.x*launch_index.y+launch_index.x, frame);
// Subpixel jitter: send the ray through a different position inside the pixel each time,
// to provide antialiasing.
float2 subpixel_jitter = frame == 0 ? make_float2(0.0f, 0.0f) : make_float2(rnd( seed ) - 0.5f, rnd( seed ) - 0.5f);
float2 d = (make_float2(launch_index) + subpixel_jitter) / make_float2(screen) * 2.f - 1.f;
float3 ray_origin = eye;
float3 ray_direction = normalize(d.x*U + d.y*V + W);
optix::Ray ray(ray_origin, ray_direction, radiance_ray_type, scene_epsilon );
PerRayData_pathtrace prd;
prd.importance = 1.f;
prd.depth = 0;
rtTrace(top_object, ray, prd);
float4 acc_val = accum_buffer[launch_index];
if( frame > 0 ) {
acc_val += make_float4(prd.result, 0.f);////lerp( acc_val, make_float4( prd.result, 0.f), 1.0f / static_cast<float>( frame+1 ) );
} else {
acc_val = make_float4(prd.result, 0.f);
}
output_buffer[launch_index] = make_color( make_float3( acc_val )/(frame+1) );
accum_buffer[launch_index] = acc_val;
}
*/
// Scene wide variables
rtDeclareVariable(float, scene_epsilon, , );
rtDeclareVariable(rtObject, top_object, , );
rtDeclareVariable(uint2, launch_index, rtLaunchIndex, );
rtDeclareVariable(uint2, launch_dim, rtLaunchDim, );
rtDeclareVariable(PerRayData_pathtrace, prd_path, rtPayload, );
rtDeclareVariable(float3, eye, , );
rtDeclareVariable(float3, U, , );
rtDeclareVariable(float3, V, , );
rtDeclareVariable(float3, W, , );
rtDeclareVariable(float3, bad_color, , );
rtDeclareVariable(unsigned int, frame_number, , );
rtDeclareVariable(unsigned int, sqrt_num_samples, , );
rtDeclareVariable(unsigned int, rr_begin_depth, , );
rtDeclareVariable(unsigned int, pathtrace_ray_type, , );
rtDeclareVariable(unsigned int, pathtrace_shadow_ray_type, , );
rtBuffer<float4, 2> output_buffer;
rtBuffer<float4, 2> accum_buffer;
rtBuffer<float4, 2> depth_buffer;
__device__ float computeClipDepth(float eyeDist, float n, float f)
{
float clipDepth = (f + n) / (f - n) - (1 / eyeDist)*2.0f*f*n / (f - n);
clipDepth = clipDepth*0.5 + 0.5f;
return clipDepth;
}
RT_PROGRAM void pathtrace_camera()
{
size_t2 screen = output_buffer.size();
float2 inv_screen = 1.0f / make_float2(screen) * 2.f;
float2 pixel = (make_float2(launch_index)) * inv_screen - 1.f;
float2 jitter_scale = inv_screen / sqrt_num_samples;
unsigned int samples_per_pixel = sqrt_num_samples*sqrt_num_samples;
float3 result = make_float3(0.0f);
hiprandState_t state;
hiprand_init(tea<16>(screen.x*launch_index.y + launch_index.x, frame_number), 0, 0, &state);
float depthHit;
do
{
//
// Sample pixel using jittering
//
unsigned int x = samples_per_pixel%sqrt_num_samples;
unsigned int y = samples_per_pixel / sqrt_num_samples;
float2 jitter = make_float2(x - hiprand_uniform(&state), y - hiprand_uniform(&state));
float2 d = pixel + jitter*jitter_scale;
float3 ray_origin = eye;
float3 ray_direction = normalize(d.x*U + d.y*V + W);
// Initialize per-ray data
PerRayData_pathtrace prd;
prd.result = make_float3(0.f);
prd.attenuation = make_float3(1.f);
prd.countEmitted = true;
prd.done = false;
prd.state = &state;
// Each iteration is a segment of the ray path. The closest hit will
// return new segments to be traced here.
/*if (launch_index.x == 0 && launch_index.y == 0){
printf("%d %d \n", launch_index.x, launch_index.y);
printf("%d \n", sizeof(PerRayData_pathtrace));
PerRayData_pathtrace_shadow shadow_prd;
shadow_prd.inShadow = false;
Ray shadow_ray = make_Ray(ray_origin, ray_direction, pathtrace_shadow_ray_type, scene_epsilon, RT_DEFAULT_MAX);
rtTrace(top_object, shadow_ray, shadow_prd);
}*/
Ray ray = make_Ray(ray_origin, ray_direction, pathtrace_ray_type, 0, RT_DEFAULT_MAX);
rtTrace(top_object, ray, prd);
prd.result += prd.radiance;
depthHit = prd.depth;
/*for (;;)
{
Ray ray = make_Ray(ray_origin, ray_direction, pathtrace_ray_type, scene_epsilon, RT_DEFAULT_MAX);
rtTrace(top_object, ray, prd);
if (prd.done)
{
// We have hit the background or a luminaire
prd.result += prd.radiance * prd.attenuation;
break;
}
// Russian roulette termination
if (prd.depth >= rr_begin_depth)
{
float pcont = fmaxf(prd.attenuation);
if (rnd(prd.seed) >= pcont)
break;
prd.attenuation /= pcont;
}
prd.depth++;
prd.result += prd.radiance * prd.attenuation;
// Update ray data for the next path segment
ray_origin = prd.origin;
ray_direction = prd.direction;
}*/
result += prd.result;
} while (--samples_per_pixel);
//
// Update the output buffer
//
float3 pixel_color = result / (sqrt_num_samples*sqrt_num_samples);
float4 acc_val = accum_buffer[launch_index];
if (frame_number > 0) {
acc_val += make_float4(pixel_color, 0.f);
}
else
{
acc_val = make_float4(pixel_color, 0.f);
}
output_buffer[launch_index] = acc_val / (frame_number + 1);
accum_buffer[launch_index] = acc_val;
depth_buffer[launch_index] = make_float4(computeClipDepth(depthHit, .1f, 750.0f));
/*if (frame_number > 1)
{
float a = 1.0f / (float)frame_number;
float3 old_color = make_float3(output_buffer[launch_index]);
output_buffer[launch_index] = make_float4(lerp(old_color, pixel_color, 1.0f), 1.0f);
}
else
{
output_buffer[launch_index] = make_float4(pixel_color, 1.0f);
accum_buffer[launch_index] = acc_val;
}*/
}
RT_PROGRAM void exception()
{
const unsigned int code = rtGetExceptionCode();
rtPrintf("Caught exception 0x%X at launch index (%d,%d)\n", code, launch_index.x, launch_index.y);
output_buffer[launch_index] = make_float4(bad_color);
}
| c53781f114462cc44be8987b6aa87d74a5cea25a.cu | /*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <optix.h>
#include <optixu/optixu_math_namespace.h>
#include "helpers.h"
#include "random.h"
#include "ColorXYZ.h"
#include "Packet.h"
using namespace optix;
/*
struct PerRayData_pathtrace
{
float3 result;
float importance;
int depth;
};
rtDeclareVariable(float3, eye, , );
rtDeclareVariable(float3, U, , );
rtDeclareVariable(float3, V, , );
rtDeclareVariable(float3, W, , );
rtDeclareVariable(float3, bad_color, , );
rtDeclareVariable(float, scene_epsilon, , );
rtBuffer<uchar4, 2> output_buffer;
rtBuffer<float4, 2> accum_buffer;
rtDeclareVariable(rtObject, top_object, , );
rtDeclareVariable(unsigned int, radiance_ray_type, , );
rtDeclareVariable(unsigned int, frame, , );
rtDeclareVariable(uint2, launch_index, rtLaunchIndex, );
RT_PROGRAM void pinhole_camera()
{
size_t2 screen = output_buffer.size();
unsigned int seed = tea<16>(screen.x*launch_index.y+launch_index.x, frame);
// Subpixel jitter: send the ray through a different position inside the pixel each time,
// to provide antialiasing.
float2 subpixel_jitter = frame == 0 ? make_float2(0.0f, 0.0f) : make_float2(rnd( seed ) - 0.5f, rnd( seed ) - 0.5f);
float2 d = (make_float2(launch_index) + subpixel_jitter) / make_float2(screen) * 2.f - 1.f;
float3 ray_origin = eye;
float3 ray_direction = normalize(d.x*U + d.y*V + W);
optix::Ray ray(ray_origin, ray_direction, radiance_ray_type, scene_epsilon );
PerRayData_pathtrace prd;
prd.importance = 1.f;
prd.depth = 0;
rtTrace(top_object, ray, prd);
float4 acc_val = accum_buffer[launch_index];
if( frame > 0 ) {
acc_val += make_float4(prd.result, 0.f);////lerp( acc_val, make_float4( prd.result, 0.f), 1.0f / static_cast<float>( frame+1 ) );
} else {
acc_val = make_float4(prd.result, 0.f);
}
output_buffer[launch_index] = make_color( make_float3( acc_val )/(frame+1) );
accum_buffer[launch_index] = acc_val;
}
*/
// Scene wide variables
rtDeclareVariable(float, scene_epsilon, , );
rtDeclareVariable(rtObject, top_object, , );
rtDeclareVariable(uint2, launch_index, rtLaunchIndex, );
rtDeclareVariable(uint2, launch_dim, rtLaunchDim, );
rtDeclareVariable(PerRayData_pathtrace, prd_path, rtPayload, );
rtDeclareVariable(float3, eye, , );
rtDeclareVariable(float3, U, , );
rtDeclareVariable(float3, V, , );
rtDeclareVariable(float3, W, , );
rtDeclareVariable(float3, bad_color, , );
rtDeclareVariable(unsigned int, frame_number, , );
rtDeclareVariable(unsigned int, sqrt_num_samples, , );
rtDeclareVariable(unsigned int, rr_begin_depth, , );
rtDeclareVariable(unsigned int, pathtrace_ray_type, , );
rtDeclareVariable(unsigned int, pathtrace_shadow_ray_type, , );
rtBuffer<float4, 2> output_buffer;
rtBuffer<float4, 2> accum_buffer;
rtBuffer<float4, 2> depth_buffer;
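// Maps an eye-space hit distance to a normalized [0,1] clip-space depth, given near plane n and far plane f.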
__device__ float computeClipDepth(float eyeDist, float n, float f)
{
float clipDepth = (f + n) / (f - n) - (1 / eyeDist)*2.0f*f*n / (f - n);
clipDepth = clipDepth*0.5 + 0.5f;
return clipDepth;
}
RT_PROGRAM void pathtrace_camera()
{
size_t2 screen = output_buffer.size();
float2 inv_screen = 1.0f / make_float2(screen) * 2.f;
float2 pixel = (make_float2(launch_index)) * inv_screen - 1.f;
float2 jitter_scale = inv_screen / sqrt_num_samples;
unsigned int samples_per_pixel = sqrt_num_samples*sqrt_num_samples;
float3 result = make_float3(0.0f);
curandState_t state;
curand_init(tea<16>(screen.x*launch_index.y + launch_index.x, frame_number), 0, 0, &state);
float depthHit;
do
{
//
// Sample pixel using jittering
//
unsigned int x = samples_per_pixel%sqrt_num_samples;
unsigned int y = samples_per_pixel / sqrt_num_samples;
float2 jitter = make_float2(x - curand_uniform(&state), y - curand_uniform(&state));
float2 d = pixel + jitter*jitter_scale;
float3 ray_origin = eye;
float3 ray_direction = normalize(d.x*U + d.y*V + W);
// Initialize per-ray data
PerRayData_pathtrace prd;
prd.result = make_float3(0.f);
prd.attenuation = make_float3(1.f);
prd.countEmitted = true;
prd.done = false;
prd.state = &state;
// Each iteration is a segment of the ray path. The closest hit will
// return new segments to be traced here.
/*if (launch_index.x == 0 && launch_index.y == 0){
printf("%d %d \n", launch_index.x, launch_index.y);
printf("%d \n", sizeof(PerRayData_pathtrace));
PerRayData_pathtrace_shadow shadow_prd;
shadow_prd.inShadow = false;
Ray shadow_ray = make_Ray(ray_origin, ray_direction, pathtrace_shadow_ray_type, scene_epsilon, RT_DEFAULT_MAX);
rtTrace(top_object, shadow_ray, shadow_prd);
}*/
Ray ray = make_Ray(ray_origin, ray_direction, pathtrace_ray_type, 0, RT_DEFAULT_MAX);
rtTrace(top_object, ray, prd);
prd.result += prd.radiance;
depthHit = prd.depth;
/*for (;;)
{
Ray ray = make_Ray(ray_origin, ray_direction, pathtrace_ray_type, scene_epsilon, RT_DEFAULT_MAX);
rtTrace(top_object, ray, prd);
if (prd.done)
{
// We have hit the background or a luminaire
prd.result += prd.radiance * prd.attenuation;
break;
}
// Russian roulette termination
if (prd.depth >= rr_begin_depth)
{
float pcont = fmaxf(prd.attenuation);
if (rnd(prd.seed) >= pcont)
break;
prd.attenuation /= pcont;
}
prd.depth++;
prd.result += prd.radiance * prd.attenuation;
// Update ray data for the next path segment
ray_origin = prd.origin;
ray_direction = prd.direction;
}*/
result += prd.result;
} while (--samples_per_pixel);
//
// Update the output buffer
//
float3 pixel_color = result / (sqrt_num_samples*sqrt_num_samples);
float4 acc_val = accum_buffer[launch_index];
if (frame_number > 0) {
acc_val += make_float4(pixel_color, 0.f);
}
else
{
acc_val = make_float4(pixel_color, 0.f);
}
output_buffer[launch_index] = acc_val / (frame_number + 1);
accum_buffer[launch_index] = acc_val;
depth_buffer[launch_index] = make_float4(computeClipDepth(depthHit, .1f, 750.0f));
/*if (frame_number > 1)
{
float a = 1.0f / (float)frame_number;
float3 old_color = make_float3(output_buffer[launch_index]);
output_buffer[launch_index] = make_float4(lerp(old_color, pixel_color, 1.0f), 1.0f);
}
else
{
output_buffer[launch_index] = make_float4(pixel_color, 1.0f);
accum_buffer[launch_index] = acc_val;
}*/
}
RT_PROGRAM void exception()
{
const unsigned int code = rtGetExceptionCode();
rtPrintf("Caught exception 0x%X at launch index (%d,%d)\n", code, launch_index.x, launch_index.y);
output_buffer[launch_index] = make_float4(bad_color);
}
|
3518277fad5ca5831910fdb3c9f1ad6f3e74a5b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void CalcMass(double *Mass_d, double *GlobalMass_d, double *Rho_d, double A, double *Altitudeh_d, double *lonlat_d, double *areasT, int num, bool DeepModel) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int nv = gridDim.y;
int lev = blockIdx.y;
if (id < num) {
//calculate control volume
double zup, zlow, Vol;
zup = Altitudeh_d[lev + 1] + A;
zlow = Altitudeh_d[lev] + A;
if (DeepModel) {
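// Spherical-shell sector volume: V = (areasT[id]/A^2) * (zup^3 - zlow^3) / 3, where areasT[id]/A^2 is the solid angle of the cell.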
Vol = areasT[id] / pow(A, 2) * (pow(zup, 3) - pow(zlow, 3)) / 3;
}
else {
Vol = areasT[id] * (zup - zlow);
}
//mass in control volume = density*volume
Mass_d[id * nv + lev] = Rho_d[id * nv + lev] * Vol;
}
} | 3518277fad5ca5831910fdb3c9f1ad6f3e74a5b3.cu | #include "includes.h"
__global__ void CalcMass(double *Mass_d, double *GlobalMass_d, double *Rho_d, double A, double *Altitudeh_d, double *lonlat_d, double *areasT, int num, bool DeepModel) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int nv = gridDim.y;
int lev = blockIdx.y;
if (id < num) {
//calculate control volume
double zup, zlow, Vol;
zup = Altitudeh_d[lev + 1] + A;
zlow = Altitudeh_d[lev] + A;
if (DeepModel) {
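// Spherical-shell sector volume: V = (areasT[id]/A^2) * (zup^3 - zlow^3) / 3, where areasT[id]/A^2 is the solid angle of the cell.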
Vol = areasT[id] / pow(A, 2) * (pow(zup, 3) - pow(zlow, 3)) / 3;
}
else {
Vol = areasT[id] * (zup - zlow);
}
//mass in control volume = density*volume
Mass_d[id * nv + lev] = Rho_d[id * nv + lev] * Vol;
}
} |
c82bf139507b7a0d982211a8b6aa2b3417f107b9.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
#include <bits/stdc++.h>
using namespace std;
int main(int argc,char* argv[]){
// class1M1 class1M2 class2M1 class2M2
if(argc!=5){
cout<<"\nThere should be 4 file class1TestWM1 - class1TestWM2 - class2TestWM1 - class2TestWM2\n";
exit(23);
}
// Retrieving file names
char* class1M1 = argv[1];
char* class1M2 = argv[2];
char* class2M1 = argv[3];
char* class2M2 = argv[4];
// Declaring object for the file for input
FILE* c1m1 = fopen(class1M1, "r");
FILE* c1m2 = fopen(class1M2, "r");
FILE* c2m1 = fopen(class2M1, "r");
FILE* c2m2 = fopen(class2M2, "r");
// Number of lines in each file
int n1,n2,n3,n4;
// Scanning the number of lines from each stream
fscanf(c1m1,"%d",&n1);
fscanf(c1m2,"%d",&n2);
fscanf(c2m1,"%d",&n3);
fscanf(c2m2,"%d",&n4);
int i=0,j=0;
// Declaring memory
double *sc1m1 = (double*)malloc(n1*sizeof(double));
double *sc1m2 = (double*)malloc(n2*sizeof(double));
double *sc2m1 = (double*)malloc(n3*sizeof(double));
double *sc2m2 = (double*)malloc(n4*sizeof(double));
for(i=0;i<n1;i++){
fscanf(c1m1,"%lf",&sc1m1[i]);
}
for(i=0;i<n2;i++){
fscanf(c1m2,"%lf",&sc1m2[i]);
}
for(i=0;i<n3;i++){
fscanf(c2m1,"%lf",&sc2m1[i]);
}
for(i=0;i<n4;i++){
fscanf(c2m2,"%lf",&sc2m2[i]);
}
int* confusion = (int*) calloc(4,sizeof(int));
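// 2x2 confusion matrix, row-major: [0] class1->class1, [1] class1->class2, [2] class2->class1, [3] class2->class2.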
for(i=0;i<n1;i++){
if(sc1m1[i]>sc1m2[i]){
confusion[0] = confusion[0] + 1;
}
else{
confusion[1] = confusion[1] + 1;
}
}
for(i=0;i<n3;i++){
if(sc2m1[i]>sc2m2[i]){
confusion[2] = confusion[2] + 1;
}
else{
confusion[3] = confusion[3] + 1;
}
}
for(i=0;i<2;i++){
for(j=0;j<2;j++){
printf("%d ",confusion[i*2+j]);
}
printf("\n");
}
fclose(c1m1);
fclose(c1m2);
fclose(c2m1);
fclose(c2m2);
return 0;
}
| c82bf139507b7a0d982211a8b6aa2b3417f107b9.cu | #include <cuda.h>
#include <stdio.h>
#include <iostream>
#include <bits/stdc++.h>
using namespace std;
int main(int argc,char* argv[]){
// class1M1 class1M2 class2M1 class2M2
if(argc!=5){
cout<<"\nThere should be 4 file class1TestWM1 - class1TestWM2 - class2TestWM1 - class2TestWM2\n";
exit(23);
}
// Retrieving file names
char* class1M1 = argv[1];
char* class1M2 = argv[2];
char* class2M1 = argv[3];
char* class2M2 = argv[4];
// Declaring object for the file for input
FILE* c1m1 = fopen(class1M1, "r");
FILE* c1m2 = fopen(class1M2, "r");
FILE* c2m1 = fopen(class2M1, "r");
FILE* c2m2 = fopen(class2M2, "r");
// Number of lines in each file
int n1,n2,n3,n4;
// Scanning the number of lines from each stream
fscanf(c1m1,"%d",&n1);
fscanf(c1m2,"%d",&n2);
fscanf(c2m1,"%d",&n3);
fscanf(c2m2,"%d",&n4);
int i=0,j=0;
// Declaring memory
double *sc1m1 = (double*)malloc(n1*sizeof(double));
double *sc1m2 = (double*)malloc(n2*sizeof(double));
double *sc2m1 = (double*)malloc(n3*sizeof(double));
double *sc2m2 = (double*)malloc(n4*sizeof(double));
for(i=0;i<n1;i++){
fscanf(c1m1,"%lf",&sc1m1[i]);
}
for(i=0;i<n2;i++){
fscanf(c1m2,"%lf",&sc1m2[i]);
}
for(i=0;i<n3;i++){
fscanf(c2m1,"%lf",&sc2m1[i]);
}
for(i=0;i<n4;i++){
fscanf(c2m2,"%lf",&sc2m2[i]);
}
int* confusion = (int*) calloc(4,sizeof(int));
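// 2x2 confusion matrix, row-major: [0] class1->class1, [1] class1->class2, [2] class2->class1, [3] class2->class2.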
for(i=0;i<n1;i++){
if(sc1m1[i]>sc1m2[i]){
confusion[0] = confusion[0] + 1;
}
else{
confusion[1] = confusion[1] + 1;
}
}
for(i=0;i<n3;i++){
if(sc2m1[i]>sc2m2[i]){
confusion[2] = confusion[2] + 1;
}
else{
confusion[3] = confusion[3] + 1;
}
}
for(i=0;i<2;i++){
for(j=0;j<2;j++){
printf("%d ",confusion[i*2+j]);
}
printf("\n");
}
fclose(c1m1);
fclose(c1m2);
fclose(c2m1);
fclose(c2m2);
return 0;
}
|
8f31b399f7ba8dc249db063046e198bff4beae46.hip | // !!! This is a file automatically generated by hipify!!!
#include "psrdada_cpp/meerkat/fbfuse/test/CoherentBeamformerTester.cuh"
#include "psrdada_cpp/meerkat/fbfuse/fbfuse_constants.hpp"
#include "psrdada_cpp/common.hpp"
#include "psrdada_cpp/cuda_utils.hpp"
#include <random>
#include <cmath>
#include <complex>
namespace psrdada_cpp {
namespace meerkat {
namespace fbfuse {
namespace test {
CoherentBeamformerTester::CoherentBeamformerTester()
: ::testing::Test()
, _stream(0)
{
}
CoherentBeamformerTester::~CoherentBeamformerTester()
{
}
void CoherentBeamformerTester::SetUp()
{
CUDA_ERROR_CHECK(hipStreamCreate(&_stream));
}
void CoherentBeamformerTester::TearDown()
{
CUDA_ERROR_CHECK(hipStreamDestroy(_stream));
}
void CoherentBeamformerTester::beamformer_c_reference(
HostVoltageVectorType const& ftpa_voltages,
HostWeightsVectorType const& fbpa_weights,
HostPowerVectorType& tbtf_powers,
int nchannels,
int tscrunch,
int fscrunch,
int nsamples,
int nbeams,
int nantennas,
int npol,
float scale,
float offset)
{
float xx,yy,xy,yx;
double power_sum = 0.0;
double power_sq_sum = 0.0;
std::size_t count = 0;
for (int channel_idx = 0; channel_idx < nchannels; channel_idx += fscrunch)
{
BOOST_LOG_TRIVIAL(debug) << "Beamformer C reference: "
<< static_cast<int>(100.0f * (channel_idx + 1.0f) / nchannels)
<< "% complete";
for (int sample_idx = 0; sample_idx < nsamples; sample_idx+=tscrunch)
{
for (int beam_idx = 0; beam_idx < nbeams; ++beam_idx)
{
float power = 0.0f;
for (int sub_channel_idx = channel_idx;
sub_channel_idx < channel_idx + fscrunch;
++sub_channel_idx)
{
for (int sample_offset = 0; sample_offset < tscrunch; ++sample_offset)
{
for (int pol_idx = 0; pol_idx < npol; ++pol_idx)
{
float2 accumulator = {0,0};
for (int antenna_idx = 0; antenna_idx < nantennas; ++antenna_idx)
{
int ftpa_voltages_idx = nantennas * npol * nsamples * sub_channel_idx
+ nantennas * npol * (sample_idx + sample_offset)
+ nantennas * pol_idx
+ antenna_idx;
char2 datum = ftpa_voltages[ftpa_voltages_idx];
int fbpa_weights_idx = nantennas * nbeams * sub_channel_idx
+ nantennas * beam_idx
+ antenna_idx;
char2 weight = fbpa_weights[fbpa_weights_idx];
xx = datum.x * weight.x;
yy = datum.y * weight.y;
xy = datum.x * weight.y;
yx = datum.y * weight.x;
accumulator.x += xx - yy;
accumulator.y += xy + yx;
}
float r = accumulator.x;
float i = accumulator.y;
power += r*r + i*i;
}
}
}
int tf_size = FBFUSE_CB_NSAMPLES_PER_HEAP * nchannels/fscrunch;
int btf_size = nbeams * tf_size;
int output_sample_idx = sample_idx / tscrunch;
int tbtf_powers_idx = (output_sample_idx / FBFUSE_CB_NSAMPLES_PER_HEAP * btf_size
+ beam_idx * tf_size
+ (output_sample_idx % FBFUSE_CB_NSAMPLES_PER_HEAP) * nchannels/fscrunch
+ channel_idx/fscrunch);
power_sum += power;
power_sq_sum += power * power;
++count;
tbtf_powers[tbtf_powers_idx] = (int8_t) ((power - offset)/scale);
}
}
}
double power_mean = power_sum / count;
BOOST_LOG_TRIVIAL(debug) << "Average power level: " << power_mean;
BOOST_LOG_TRIVIAL(debug) << "Power variance: " << power_sq_sum / count - power_mean * power_mean;
}
void CoherentBeamformerTester::compare_against_host(
DeviceVoltageVectorType const& ftpa_voltages_gpu,
DeviceWeightsVectorType const& fbpa_weights_gpu,
DevicePowerVectorType& btf_powers_gpu,
int nsamples)
{
HostVoltageVectorType ftpa_voltages_host = ftpa_voltages_gpu;
HostWeightsVectorType fbpa_weights_host = fbpa_weights_gpu;
HostPowerVectorType btf_powers_cuda = btf_powers_gpu;
HostPowerVectorType btf_powers_host(btf_powers_gpu.size());
beamformer_c_reference(ftpa_voltages_host,
fbpa_weights_host,
btf_powers_host,
_config.nchans(),
_config.cb_tscrunch(),
_config.cb_fscrunch(),
nsamples,
_config.cb_nbeams(),
_config.cb_nantennas(),
_config.npol(),
_config.cb_power_scaling(),
_config.cb_power_offset());
for (size_t ii = 0; ii < btf_powers_host.size(); ++ii)
{
ASSERT_TRUE(std::abs(static_cast<int>(btf_powers_host[ii]) - btf_powers_cuda[ii]) <= 1);
}
}
TEST_F(CoherentBeamformerTester, representative_noise_test)
{
const float input_level = 32.0f;
const double pi = std::acos(-1);
_config.input_level(input_level);
_config.output_level(32.0f);
std::default_random_engine generator;
std::normal_distribution<float> normal_dist(0.0, input_level);
std::uniform_real_distribution<float> uniform_dist(0.0, 2*pi);
CoherentBeamformer coherent_beamformer(_config);
std::size_t ntimestamps = max(1L, FBFUSE_CB_PACKET_SIZE/(_config.nchans()/_config.cb_fscrunch())/(_config.nsamples_per_heap()/_config.cb_tscrunch()));
ntimestamps = max(ntimestamps, FBFUSE_CB_NSAMPLES_PER_BLOCK / _config.nsamples_per_heap());
printf("Using %ld timestamps\n",ntimestamps);
std::size_t input_size = (ntimestamps * _config.cb_nantennas()
* _config.nchans() * _config.nsamples_per_heap() * _config.npol());
int nsamples = _config.nsamples_per_heap() * ntimestamps;
std::size_t weights_size = _config.cb_nantennas() * _config.nchans() * _config.cb_nbeams();
HostVoltageVectorType ftpa_voltages_host(input_size);
for (size_t ii = 0; ii < ftpa_voltages_host.size(); ++ii)
{
ftpa_voltages_host[ii].x = static_cast<int8_t>(std::lround(normal_dist(generator)));
ftpa_voltages_host[ii].y = static_cast<int8_t>(std::lround(normal_dist(generator)));
}
HostWeightsVectorType fbpa_weights_host(weights_size);
for (size_t ii = 0; ii < fbpa_weights_host.size(); ++ii)
{
// Build complex weight as C * exp(i * theta).
std::complex<double> val = 127.0f * ::exp(std::complex<float>(0.0f, uniform_dist(generator)));
fbpa_weights_host[ii].x = static_cast<int8_t>(std::lround(val.real()));
fbpa_weights_host[ii].y = static_cast<int8_t>(std::lround(val.imag()));
}
DeviceVoltageVectorType ftpa_voltages_gpu = ftpa_voltages_host;
DeviceWeightsVectorType fbpa_weights_gpu = fbpa_weights_host;
DevicePowerVectorType btf_powers_gpu;
coherent_beamformer.beamform(ftpa_voltages_gpu, fbpa_weights_gpu, btf_powers_gpu, _stream);
compare_against_host(ftpa_voltages_gpu, fbpa_weights_gpu, btf_powers_gpu, nsamples);
}
} //namespace test
} //namespace fbfuse
} //namespace meerkat
} //namespace psrdada_cpp
| 8f31b399f7ba8dc249db063046e198bff4beae46.cu | #include "psrdada_cpp/meerkat/fbfuse/test/CoherentBeamformerTester.cuh"
#include "psrdada_cpp/meerkat/fbfuse/fbfuse_constants.hpp"
#include "psrdada_cpp/common.hpp"
#include "psrdada_cpp/cuda_utils.hpp"
#include <random>
#include <cmath>
#include <complex>
namespace psrdada_cpp {
namespace meerkat {
namespace fbfuse {
namespace test {
CoherentBeamformerTester::CoherentBeamformerTester()
: ::testing::Test()
, _stream(0)
{
}
CoherentBeamformerTester::~CoherentBeamformerTester()
{
}
void CoherentBeamformerTester::SetUp()
{
CUDA_ERROR_CHECK(cudaStreamCreate(&_stream));
}
void CoherentBeamformerTester::TearDown()
{
CUDA_ERROR_CHECK(cudaStreamDestroy(_stream));
}
void CoherentBeamformerTester::beamformer_c_reference(
HostVoltageVectorType const& ftpa_voltages,
HostWeightsVectorType const& fbpa_weights,
HostPowerVectorType& tbtf_powers,
int nchannels,
int tscrunch,
int fscrunch,
int nsamples,
int nbeams,
int nantennas,
int npol,
float scale,
float offset)
{
float xx,yy,xy,yx;
double power_sum = 0.0;
double power_sq_sum = 0.0;
std::size_t count = 0;
for (int channel_idx = 0; channel_idx < nchannels; channel_idx += fscrunch)
{
BOOST_LOG_TRIVIAL(debug) << "Beamformer C reference: "
<< static_cast<int>(100.0f * (channel_idx + 1.0f) / nchannels)
<< "% complete";
for (int sample_idx = 0; sample_idx < nsamples; sample_idx+=tscrunch)
{
for (int beam_idx = 0; beam_idx < nbeams; ++beam_idx)
{
float power = 0.0f;
for (int sub_channel_idx = channel_idx;
sub_channel_idx < channel_idx + fscrunch;
++sub_channel_idx)
{
for (int sample_offset = 0; sample_offset < tscrunch; ++sample_offset)
{
for (int pol_idx = 0; pol_idx < npol; ++pol_idx)
{
float2 accumulator = {0,0};
for (int antenna_idx = 0; antenna_idx < nantennas; ++antenna_idx)
{
int ftpa_voltages_idx = nantennas * npol * nsamples * sub_channel_idx
+ nantennas * npol * (sample_idx + sample_offset)
+ nantennas * pol_idx
+ antenna_idx;
char2 datum = ftpa_voltages[ftpa_voltages_idx];
int fbpa_weights_idx = nantennas * nbeams * sub_channel_idx
+ nantennas * beam_idx
+ antenna_idx;
char2 weight = fbpa_weights[fbpa_weights_idx];
xx = datum.x * weight.x;
yy = datum.y * weight.y;
xy = datum.x * weight.y;
yx = datum.y * weight.x;
accumulator.x += xx - yy;
accumulator.y += xy + yx;
}
float r = accumulator.x;
float i = accumulator.y;
power += r*r + i*i;
}
}
}
int tf_size = FBFUSE_CB_NSAMPLES_PER_HEAP * nchannels/fscrunch;
int btf_size = nbeams * tf_size;
int output_sample_idx = sample_idx / tscrunch;
int tbtf_powers_idx = (output_sample_idx / FBFUSE_CB_NSAMPLES_PER_HEAP * btf_size
+ beam_idx * tf_size
+ (output_sample_idx % FBFUSE_CB_NSAMPLES_PER_HEAP) * nchannels/fscrunch
+ channel_idx/fscrunch);
power_sum += power;
power_sq_sum += power * power;
++count;
tbtf_powers[tbtf_powers_idx] = (int8_t) ((power - offset)/scale);
}
}
}
double power_mean = power_sum / count;
BOOST_LOG_TRIVIAL(debug) << "Average power level: " << power_mean;
BOOST_LOG_TRIVIAL(debug) << "Power variance: " << power_sq_sum / count - power_mean * power_mean;
}
void CoherentBeamformerTester::compare_against_host(
DeviceVoltageVectorType const& ftpa_voltages_gpu,
DeviceWeightsVectorType const& fbpa_weights_gpu,
DevicePowerVectorType& btf_powers_gpu,
int nsamples)
{
HostVoltageVectorType ftpa_voltages_host = ftpa_voltages_gpu;
HostWeightsVectorType fbpa_weights_host = fbpa_weights_gpu;
HostPowerVectorType btf_powers_cuda = btf_powers_gpu;
HostPowerVectorType btf_powers_host(btf_powers_gpu.size());
beamformer_c_reference(ftpa_voltages_host,
fbpa_weights_host,
btf_powers_host,
_config.nchans(),
_config.cb_tscrunch(),
_config.cb_fscrunch(),
nsamples,
_config.cb_nbeams(),
_config.cb_nantennas(),
_config.npol(),
_config.cb_power_scaling(),
_config.cb_power_offset());
for (size_t ii = 0; ii < btf_powers_host.size(); ++ii)
{
ASSERT_TRUE(std::abs(static_cast<int>(btf_powers_host[ii]) - btf_powers_cuda[ii]) <= 1);
}
}
TEST_F(CoherentBeamformerTester, representative_noise_test)
{
const float input_level = 32.0f;
const double pi = std::acos(-1);
_config.input_level(input_level);
_config.output_level(32.0f);
std::default_random_engine generator;
std::normal_distribution<float> normal_dist(0.0, input_level);
std::uniform_real_distribution<float> uniform_dist(0.0, 2*pi);
CoherentBeamformer coherent_beamformer(_config);
std::size_t ntimestamps = max(1L, FBFUSE_CB_PACKET_SIZE/(_config.nchans()/_config.cb_fscrunch())/(_config.nsamples_per_heap()/_config.cb_tscrunch()));
ntimestamps = max(ntimestamps, FBFUSE_CB_NSAMPLES_PER_BLOCK / _config.nsamples_per_heap());
printf("Using %ld timestamps\n",ntimestamps);
std::size_t input_size = (ntimestamps * _config.cb_nantennas()
* _config.nchans() * _config.nsamples_per_heap() * _config.npol());
int nsamples = _config.nsamples_per_heap() * ntimestamps;
std::size_t weights_size = _config.cb_nantennas() * _config.nchans() * _config.cb_nbeams();
HostVoltageVectorType ftpa_voltages_host(input_size);
for (size_t ii = 0; ii < ftpa_voltages_host.size(); ++ii)
{
ftpa_voltages_host[ii].x = static_cast<int8_t>(std::lround(normal_dist(generator)));
ftpa_voltages_host[ii].y = static_cast<int8_t>(std::lround(normal_dist(generator)));
}
HostWeightsVectorType fbpa_weights_host(weights_size);
for (size_t ii = 0; ii < fbpa_weights_host.size(); ++ii)
{
// Build complex weight as C * exp(i * theta).
std::complex<double> val = 127.0f * std::exp(std::complex<float>(0.0f, uniform_dist(generator)));
fbpa_weights_host[ii].x = static_cast<int8_t>(std::lround(val.real()));
fbpa_weights_host[ii].y = static_cast<int8_t>(std::lround(val.imag()));
}
DeviceVoltageVectorType ftpa_voltages_gpu = ftpa_voltages_host;
DeviceWeightsVectorType fbpa_weights_gpu = fbpa_weights_host;
DevicePowerVectorType btf_powers_gpu;
coherent_beamformer.beamform(ftpa_voltages_gpu, fbpa_weights_gpu, btf_powers_gpu, _stream);
compare_against_host(ftpa_voltages_gpu, fbpa_weights_gpu, btf_powers_gpu, nsamples);
}
} //namespace test
} //namespace fbfuse
} //namespace meerkat
} //namespace psrdada_cpp
|
75e5cf617d19bf283938c0bf7249542343f009fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the smallest power of 2 that is greater than or equal to x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* special type of reduction to account for floating point error */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
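// Kahan (compensated) summation: c carries the low-order bits lost in each addition.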
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
__global__ void
kernel5(dtype *g_idata, dtype *g_odata, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int blocksize = blockDim.x;
unsigned int i = blockIdx.x * (blocksize * 2) + threadIdx.x;
unsigned int gridsize = gridDim.x *(blocksize * 2) ;
unsigned int tid = threadIdx.x;
scratch[tid] = 0 ;
while(i < n) {
scratch[tid] += g_idata[i] + g_idata[i + blocksize];
i += gridsize;
}
__syncthreads ();
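// Tree-reduce the per-thread partial sums in shared memory (blocksize is a power of two <= MAX_THREADS).
// The final 32 threads rely on implicit warp-synchronous execution, so no __syncthreads() is issued between steps.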
if(blocksize >= 256){
if(tid < 128){
scratch[tid] += scratch[tid + 128];
}
__syncthreads();
}
if(blocksize >= 128){
if(tid < 64){
scratch[tid ] += scratch[tid + 64];
}
__syncthreads();
}
if(tid < 32){
if(blocksize >= 64){
scratch[tid ] += scratch[tid + 32];
}
if(blocksize >= 32){
scratch[tid ] += scratch[tid + 16];
}
if(blocksize >= 16){
scratch[tid ] += scratch[tid + 8];
}
if(blocksize >= 8){
scratch[tid ] += scratch[tid + 4];
}
if(blocksize >= 4){
scratch[tid ] += scratch[tid + 2];
}
if(blocksize >= 2){
scratch[tid ] += scratch[tid + 1];
}
}
if(threadIdx.x == 0) {
g_odata[blockIdx.x] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_5, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 5;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype),
hipMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
/* warm up */
hipLaunchKernelGGL(( kernel5) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
hipDeviceSynchronize ();
stopwatch_start (timer);
/* execute kernel */
hipLaunchKernelGGL(( kernel5) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
hipLaunchKernelGGL(( kernel5) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s);
s = (s + threads * 2 - 1) / (threads * 2);
}
hipDeviceSynchronize ();
t_kernel_5 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute multiple add GPU reduction kernel: %Lg secs\n", t_kernel_5);
double bw = (N * sizeof(dtype)) / (t_kernel_5 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype),
hipMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(abs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
| 75e5cf617d19bf283938c0bf7249542343f009fe.cu | #include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the smallest power of 2 that is greater than or equal to x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* special type of reduction to account for floating point error */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
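// Kahan (compensated) summation: c carries the low-order bits lost in each addition.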
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
__global__ void
kernel5(dtype *g_idata, dtype *g_odata, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int blocksize = blockDim.x;
unsigned int i = blockIdx.x * (blocksize * 2) + threadIdx.x;
unsigned int gridsize = gridDim.x *(blocksize * 2) ;
unsigned int tid = threadIdx.x;
scratch[tid] = 0 ;
while(i < n) {
scratch[tid] += g_idata[i] + g_idata[i + blocksize];
i += gridsize;
}
__syncthreads ();
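// Tree-reduce the per-thread partial sums in shared memory (blocksize is a power of two <= MAX_THREADS).
// The final 32 threads rely on implicit warp-synchronous execution, so no __syncthreads() is issued between steps.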
if(blocksize >= 256){
if(tid < 128){
scratch[tid] += scratch[tid + 128];
}
__syncthreads();
}
if(blocksize >= 128){
if(tid < 64){
scratch[tid ] += scratch[tid + 64];
}
__syncthreads();
}
if(tid < 32){
if(blocksize >= 64){
scratch[tid ] += scratch[tid + 32];
}
if(blocksize >= 32){
scratch[tid ] += scratch[tid + 16];
}
if(blocksize >= 16){
scratch[tid ] += scratch[tid + 8];
}
if(blocksize >= 8){
scratch[tid ] += scratch[tid + 4];
}
if(blocksize >= 4){
scratch[tid ] += scratch[tid + 2];
}
if(blocksize >= 2){
scratch[tid ] += scratch[tid + 1];
}
}
if(threadIdx.x == 0) {
g_odata[blockIdx.x] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_5, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 5;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype),
cudaMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
/* warm up */
kernel5 <<<gb, tb>>> (d_idata, d_odata, N);
cudaThreadSynchronize ();
stopwatch_start (timer);
/* execute kernel */
kernel5 <<<gb, tb>>> (d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
kernel5 <<<gb, tb>>> (d_odata, d_odata, s);
s = (s + threads * 2 - 1) / (threads * 2);
}
cudaThreadSynchronize ();
t_kernel_5 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute multiple add GPU reduction kernel: %Lg secs\n", t_kernel_5);
double bw = (N * sizeof(dtype)) / (t_kernel_5 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype),
cudaMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(abs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
|
d7e771f5b0afd2f0a0f6fc8ed3590ff2c2178495.hip | // !!! This is a file automatically generated by hipify!!!
#include "io_network.h"
void save(const std::string &path, const Mat* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid matrix..."<<std::endl;
exit(0);
}
ofstream outfile(path.c_str(), ios::out);
outfile.precision(16);
if (outfile.is_open()){
outfile<<"path = "<<path<<"\n";
outfile<<"rows = "<<m -> rows<<"\n";
outfile<<"cols = "<<m -> cols<<"\n";
outfile<<"channels = "<<m -> channels<<"\n";
outfile<<"data =\n";
float *data = (float*)malloc(m -> getLength() * sizeof(float));
checkCudaErrors(hipMemcpy(data, m -> Data, m -> getLength() * sizeof(float), hipMemcpyDeviceToHost));
for(int i = 0; i < m -> getLength(); ++i){
outfile<<data[i]<<" ";
}
outfile<<"\n";
outfile.close();
free(data);
}else {
std::cout<<"unable to open file..."<<std::endl;
exit(0);
}
}
void save(const std::string &path, const cpuMat* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid matrix..."<<std::endl;
exit(0);
}
ofstream outfile(path.c_str(), ios::out);
outfile.precision(16);
if (outfile.is_open()){
outfile<<"path = "<<path<<"\n";
outfile<<"rows = "<<m -> rows<<"\n";
outfile<<"cols = "<<m -> cols<<"\n";
outfile<<"channels = "<<m -> channels<<"\n";
outfile<<"data =\n";
float *data = (float*)malloc(m -> getLength() * sizeof(float));
memcpy(data, m -> Data, m -> getLength() * sizeof(float));
for(int i = 0; i < m -> getLength(); ++i){
outfile<<data[i]<<" ";
}
outfile<<"\n";
outfile.close();
free(data);
}else {
std::cout<<"unable to open file..."<<std::endl;
exit(0);
}
}
void save(const std::string &path, const vector3f* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid vector3f..."<<std::endl;
exit(0);
}
ofstream outfile(path.c_str(), ios::out);
outfile.precision(16);
if (outfile.is_open()){
outfile<<"path = "<<path<<"\n";
outfile<<"data =\n";
float *data = (float*)malloc(3 * sizeof(float));
memcpy(data, m -> Data, 3 * sizeof(float));
for(int i = 0; i < 3; ++i){
outfile<<data[i]<<" ";
}
outfile<<"\n";
outfile.close();
free(data);
}else {
std::cout<<"unable to open file..."<<std::endl;
exit(0);
}
}
void save(const std::string &path, const vector2i* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid vector3f..."<<std::endl;
exit(0);
}
ofstream outfile(path.c_str(), ios::out);
outfile.precision(16);
if (outfile.is_open()){
outfile<<"path = "<<path<<"\n";
outfile<<"data =\n";
int *data = (int*)malloc(2 * sizeof(int));
memcpy(data, m -> Data, 2 * sizeof(int));
for(int i = 0; i < 2; ++i){
outfile<<data[i]<<" ";
}
outfile<<"\n";
outfile.close();
free(data);
}else {
std::cout<<"unable to open file..."<<std::endl;
exit(0);
}
}
void read(const std::string &path, Mat* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid matrix..."<<std::endl;
exit(0);
}
std::ifstream infile(path.c_str());
std::string line, tmpstr;
getline(infile, line); // path = XX
getline(infile, line); // rows = XX
std::istringstream iss_row(line);
if(!(iss_row >> tmpstr >> tmpstr >> tmpstr)){
std::cout<<"invalid file...1"<<std::endl;
exit(0);
}
if(std::stoi(tmpstr) != m -> rows){
std::cout<<"invalid file...2"<<std::endl;
exit(0);
}
getline(infile, line); // cols = XX
std::istringstream iss_col(line);
if(!(iss_col >> tmpstr >> tmpstr >> tmpstr)){
cout<<tmpstr<<"---#########"<<endl;
std::cout<<"invalid file...3"<<std::endl;
exit(0);
}
if(std::stoi(tmpstr) != m -> cols){
std::cout<<"invalid file...4"<<std::endl;
exit(0);
}
getline(infile, line); // channels = XX
std::istringstream iss_channels(line);
if(!(iss_channels >> tmpstr >> tmpstr >> tmpstr)){
std::cout<<"invalid file...5"<<std::endl;
exit(0);
}
if(std::stoi(tmpstr) != m -> channels){
std::cout<<"invalid file...6"<<std::endl;
exit(0);
}
getline(infile, line); // data =
std::istringstream iss_data_title(line);
if(!(iss_data_title >> tmpstr >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
getline(infile, line); // [data]
std::istringstream iss_data(line);
float *hostData = (float*)malloc(m -> getLength() * sizeof(float));
for(int i = 0; i < m -> getLength(); ++i){
if(!(iss_data >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
hostData[i] = std::stof(tmpstr);
}
checkCudaErrors(hipMemcpy(m -> Data, hostData, m -> getLength() * sizeof(float), hipMemcpyHostToDevice));
free(hostData);
}
void read(const std::string &path, cpuMat* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid matrix..."<<std::endl;
exit(0);
}
std::ifstream infile(path.c_str());
std::string line, tmpstr;
getline(infile, line); // path = XX
getline(infile, line); // rows = XX
std::istringstream iss_row(line);
if(!(iss_row >> tmpstr >> tmpstr >> tmpstr)){
std::cout<<"invalid file...1"<<std::endl;
exit(0);
}
if(std::stoi(tmpstr) != m -> rows){
std::cout<<"invalid file...2"<<std::endl;
exit(0);
}
getline(infile, line); // cols = XX
std::istringstream iss_col(line);
if(!(iss_col >> tmpstr >> tmpstr >> tmpstr)){
cout<<tmpstr<<"---#########"<<endl;
std::cout<<"invalid file...3"<<std::endl;
exit(0);
}
if(std::stoi(tmpstr) != m -> cols){
std::cout<<"invalid file...4"<<std::endl;
exit(0);
}
getline(infile, line); // channels = XX
std::istringstream iss_channels(line);
if(!(iss_channels >> tmpstr >> tmpstr >> tmpstr)){
std::cout<<"invalid file...5"<<std::endl;
exit(0);
}
if(std::stoi(tmpstr) != m -> channels){
std::cout<<"invalid file...6"<<std::endl;
exit(0);
}
getline(infile, line); // data =
std::istringstream iss_data_title(line);
if(!(iss_data_title >> tmpstr >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
getline(infile, line); // [data]
std::istringstream iss_data(line);
float *hostData = (float*)malloc(m -> getLength() * sizeof(float));
for(int i = 0; i < m -> getLength(); ++i){
if(!(iss_data >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
hostData[i] = std::stof(tmpstr);
}
memcpy(m -> Data, hostData, m -> getLength() * sizeof(float));
free(hostData);
}
void read(const std::string &path, vector3f* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid vector3f..."<<std::endl;
exit(0);
}
std::ifstream infile(path.c_str());
std::string line, tmpstr;
getline(infile, line); // path = XX
getline(infile, line); // data =
std::istringstream iss_data_title(line);
if(!(iss_data_title >> tmpstr >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
getline(infile, line); // [data]
std::istringstream iss_data(line);
float *hostData = (float*)malloc(3 * sizeof(float));
for(int i = 0; i < 3; ++i){
if(!(iss_data >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
hostData[i] = std::stof(tmpstr);
}
memcpy(m -> Data, hostData, 3 * sizeof(float));
free(hostData);
}
void read(const std::string &path, vector2i* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid vector3f..."<<std::endl;
exit(0);
}
std::ifstream infile(path.c_str());
std::string line, tmpstr;
getline(infile, line); // path = XX
getline(infile, line); // data =
std::istringstream iss_data_title(line);
if(!(iss_data_title >> tmpstr >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
getline(infile, line); // [data]
std::istringstream iss_data(line);
int *hostData = (int*)malloc(2 * sizeof(int));
for(int i = 0; i < 2; ++i){
if(!(iss_data >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
hostData[i] = std::stoi(tmpstr);
}
memcpy(m -> Data, hostData, 2 * sizeof(int));
free(hostData);
}
void saveNetwork(const std::string &path, const std::vector<network_layer*> &flow){
mkdir(path.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
string pathw = "";
string pathb = "";
for(int i = 0; i < flow.size(); ++i){
string layerpath = path + "/layer_" + std::to_string(i);
if(flow[i] -> layer_type == "convolutional"){
for(int k = 0; k < ((convolutional_layer*)flow[i]) -> kernels.size(); ++k){
pathw = layerpath + "_kernel_" + std::to_string(k) + "_w.txt";
pathb = layerpath + "_kernel_" + std::to_string(k) + "_b.txt";
save(pathw, ((convolutional_layer*)flow[i]) -> kernels[k] -> w);
save(pathb, ((convolutional_layer*)flow[i]) -> kernels[k] -> b);
}
}elif(flow[i] -> layer_type == "fully_connected"){
pathw = layerpath + "_w.txt";
pathb = layerpath + "_b.txt";
save(pathw, ((fully_connected_layer*)flow[i]) -> w);
save(pathb, ((fully_connected_layer*)flow[i]) -> b);
}elif(flow[i] -> layer_type == "softmax"){
pathw = layerpath + "_w.txt";
pathb = layerpath + "_b.txt";
save(pathw, ((softmax_layer*)flow[i]) -> w);
save(pathb, ((softmax_layer*)flow[i]) -> b);
}
}
}
// READ NETWORK
void readNetwork(const string &path, std::vector<network_layer*> &flow){
string pathw = "";
string pathb = "";
for(int i = 0; i < flow.size(); ++i){
string layerpath = path + "/layer_" + std::to_string(i);
if(flow[i] -> layer_type == "convolutional"){
for(int k = 0; k < ((convolutional_layer*)flow[i]) -> kernels.size(); ++k){
pathw = layerpath + "_kernel_" + std::to_string(k) + "_w.txt";
pathb = layerpath + "_kernel_" + std::to_string(k) + "_b.txt";
read(pathw, ((convolutional_layer*)flow[i]) -> kernels[k] -> w);
read(pathb, ((convolutional_layer*)flow[i]) -> kernels[k] -> b);
}
}elif(flow[i] -> layer_type == "fully_connected"){
pathw = layerpath + "_w.txt";
pathb = layerpath + "_b.txt";
read(pathw, ((fully_connected_layer*)flow[i]) -> w);
read(pathb, ((fully_connected_layer*)flow[i]) -> b);
}elif(flow[i] -> layer_type == "softmax"){
pathw = layerpath + "_w.txt";
pathb = layerpath + "_b.txt";
read(pathw, ((softmax_layer*)flow[i]) -> w);
read(pathb, ((softmax_layer*)flow[i]) -> b);
}
}
}
| d7e771f5b0afd2f0a0f6fc8ed3590ff2c2178495.cu | #include "io_network.h"
void save(const std::string &path, const Mat* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid matrix..."<<std::endl;
exit(0);
}
ofstream outfile(path.c_str(), ios::out);
outfile.precision(16);
if (outfile.is_open()){
outfile<<"path = "<<path<<"\n";
outfile<<"rows = "<<m -> rows<<"\n";
outfile<<"cols = "<<m -> cols<<"\n";
outfile<<"channels = "<<m -> channels<<"\n";
outfile<<"data =\n";
float *data = (float*)malloc(m -> getLength() * sizeof(float));
checkCudaErrors(cudaMemcpy(data, m -> Data, m -> getLength() * sizeof(float), cudaMemcpyDeviceToHost));
for(int i = 0; i < m -> getLength(); ++i){
outfile<<data[i]<<" ";
}
outfile<<"\n";
outfile.close();
free(data);
}else {
std::cout<<"unable to open file..."<<std::endl;
exit(0);
}
}
void save(const std::string &path, const cpuMat* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid matrix..."<<std::endl;
exit(0);
}
ofstream outfile(path.c_str(), ios::out);
outfile.precision(16);
if (outfile.is_open()){
outfile<<"path = "<<path<<"\n";
outfile<<"rows = "<<m -> rows<<"\n";
outfile<<"cols = "<<m -> cols<<"\n";
outfile<<"channels = "<<m -> channels<<"\n";
outfile<<"data =\n";
float *data = (float*)malloc(m -> getLength() * sizeof(float));
memcpy(data, m -> Data, m -> getLength() * sizeof(float));
for(int i = 0; i < m -> getLength(); ++i){
outfile<<data[i]<<" ";
}
outfile<<"\n";
outfile.close();
free(data);
}else {
std::cout<<"unable to open file..."<<std::endl;
exit(0);
}
}
void save(const std::string &path, const vector3f* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid vector3f..."<<std::endl;
exit(0);
}
ofstream outfile(path.c_str(), ios::out);
outfile.precision(16);
if (outfile.is_open()){
outfile<<"path = "<<path<<"\n";
outfile<<"data =\n";
float *data = (float*)malloc(3 * sizeof(float));
memcpy(data, m -> Data, 3 * sizeof(float));
for(int i = 0; i < 3; ++i){
outfile<<data[i]<<" ";
}
outfile<<"\n";
outfile.close();
free(data);
}else {
std::cout<<"unable to open file..."<<std::endl;
exit(0);
}
}
void save(const std::string &path, const vector2i* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid vector3f..."<<std::endl;
exit(0);
}
ofstream outfile(path.c_str(), ios::out);
outfile.precision(16);
if (outfile.is_open()){
outfile<<"path = "<<path<<"\n";
outfile<<"data =\n";
int *data = (int*)malloc(2 * sizeof(int));
memcpy(data, m -> Data, 2 * sizeof(int));
for(int i = 0; i < 2; ++i){
outfile<<data[i]<<" ";
}
outfile<<"\n";
outfile.close();
free(data);
}else {
std::cout<<"unable to open file..."<<std::endl;
exit(0);
}
}
void read(const std::string &path, Mat* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid matrix..."<<std::endl;
exit(0);
}
std::ifstream infile(path.c_str());
std::string line, tmpstr;
getline(infile, line); // path = XX
getline(infile, line); // rows = XX
std::istringstream iss_row(line);
if(!(iss_row >> tmpstr >> tmpstr >> tmpstr)){
std::cout<<"invalid file...1"<<std::endl;
exit(0);
}
if(std::stoi(tmpstr) != m -> rows){
std::cout<<"invalid file...2"<<std::endl;
exit(0);
}
getline(infile, line); // cols = XX
std::istringstream iss_col(line);
if(!(iss_col >> tmpstr >> tmpstr >> tmpstr)){
cout<<tmpstr<<"---#########"<<endl;
std::cout<<"invalid file...3"<<std::endl;
exit(0);
}
if(std::stoi(tmpstr) != m -> cols){
std::cout<<"invalid file...4"<<std::endl;
exit(0);
}
getline(infile, line); // channels = XX
std::istringstream iss_channels(line);
if(!(iss_channels >> tmpstr >> tmpstr >> tmpstr)){
std::cout<<"invalid file...5"<<std::endl;
exit(0);
}
if(std::stoi(tmpstr) != m -> channels){
std::cout<<"invalid file...6"<<std::endl;
exit(0);
}
getline(infile, line); // data =
std::istringstream iss_data_title(line);
if(!(iss_data_title >> tmpstr >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
getline(infile, line); // [data]
std::istringstream iss_data(line);
float *hostData = (float*)malloc(m -> getLength() * sizeof(float));
for(int i = 0; i < m -> getLength(); ++i){
if(!(iss_data >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
hostData[i] = std::stof(tmpstr);
}
checkCudaErrors(cudaMemcpy(m -> Data, hostData, m -> getLength() * sizeof(float), cudaMemcpyHostToDevice));
free(hostData);
}
void read(const std::string &path, cpuMat* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid matrix..."<<std::endl;
exit(0);
}
std::ifstream infile(path.c_str());
std::string line, tmpstr;
getline(infile, line); // path = XX
getline(infile, line); // rows = XX
std::istringstream iss_row(line);
if(!(iss_row >> tmpstr >> tmpstr >> tmpstr)){
std::cout<<"invalid file...1"<<std::endl;
exit(0);
}
if(std::stoi(tmpstr) != m -> rows){
std::cout<<"invalid file...2"<<std::endl;
exit(0);
}
getline(infile, line); // cols = XX
std::istringstream iss_col(line);
if(!(iss_col >> tmpstr >> tmpstr >> tmpstr)){
cout<<tmpstr<<"---#########"<<endl;
std::cout<<"invalid file...3"<<std::endl;
exit(0);
}
if(std::stoi(tmpstr) != m -> cols){
std::cout<<"invalid file...4"<<std::endl;
exit(0);
}
getline(infile, line); // channels = XX
std::istringstream iss_channels(line);
if(!(iss_channels >> tmpstr >> tmpstr >> tmpstr)){
std::cout<<"invalid file...5"<<std::endl;
exit(0);
}
if(std::stoi(tmpstr) != m -> channels){
std::cout<<"invalid file...6"<<std::endl;
exit(0);
}
getline(infile, line); // data =
std::istringstream iss_data_title(line);
if(!(iss_data_title >> tmpstr >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
getline(infile, line); // [data]
std::istringstream iss_data(line);
float *hostData = (float*)malloc(m -> getLength() * sizeof(float));
for(int i = 0; i < m -> getLength(); ++i){
if(!(iss_data >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
hostData[i] = std::stof(tmpstr);
}
memcpy(m -> Data, hostData, m -> getLength() * sizeof(float));
free(hostData);
}
void read(const std::string &path, vector3f* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid vector3f..."<<std::endl;
exit(0);
}
std::ifstream infile(path.c_str());
std::string line, tmpstr;
getline(infile, line); // path = XX
getline(infile, line); // data =
std::istringstream iss_data_title(line);
if(!(iss_data_title >> tmpstr >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
getline(infile, line); // [data]
std::istringstream iss_data(line);
float *hostData = (float*)malloc(3 * sizeof(float));
for(int i = 0; i < 3; ++i){
if(!(iss_data >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
hostData[i] = std::stof(tmpstr);
}
memcpy(m -> Data, hostData, 3 * sizeof(float));
free(hostData);
}
void read(const std::string &path, vector2i* m){
if(NULL == m || NULL == m -> Data){
std::cout<<"invalid vector3f..."<<std::endl;
exit(0);
}
std::ifstream infile(path.c_str());
std::string line, tmpstr;
getline(infile, line); // path = XX
getline(infile, line); // data =
std::istringstream iss_data_title(line);
if(!(iss_data_title >> tmpstr >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
getline(infile, line); // [data]
std::istringstream iss_data(line);
int *hostData = (int*)malloc(2 * sizeof(int));
for(int i = 0; i < 2; ++i){
if(!(iss_data >> tmpstr)){
std::cout<<"invalid file..."<<std::endl;
exit(0);
}
hostData[i] = std::stoi(tmpstr);
}
memcpy(m -> Data, hostData, 2 * sizeof(int));
free(hostData);
}
void saveNetwork(const std::string &path, const std::vector<network_layer*> &flow){
mkdir(path.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
string pathw = "";
string pathb = "";
for(int i = 0; i < flow.size(); ++i){
string layerpath = path + "/layer_" + std::to_string(i);
if(flow[i] -> layer_type == "convolutional"){
for(int k = 0; k < ((convolutional_layer*)flow[i]) -> kernels.size(); ++k){
pathw = layerpath + "_kernel_" + std::to_string(k) + "_w.txt";
pathb = layerpath + "_kernel_" + std::to_string(k) + "_b.txt";
save(pathw, ((convolutional_layer*)flow[i]) -> kernels[k] -> w);
save(pathb, ((convolutional_layer*)flow[i]) -> kernels[k] -> b);
}
}elif(flow[i] -> layer_type == "fully_connected"){
pathw = layerpath + "_w.txt";
pathb = layerpath + "_b.txt";
save(pathw, ((fully_connected_layer*)flow[i]) -> w);
save(pathb, ((fully_connected_layer*)flow[i]) -> b);
}elif(flow[i] -> layer_type == "softmax"){
pathw = layerpath + "_w.txt";
pathb = layerpath + "_b.txt";
save(pathw, ((softmax_layer*)flow[i]) -> w);
save(pathb, ((softmax_layer*)flow[i]) -> b);
}
}
}
// READ NETWORK
void readNetwork(const string &path, std::vector<network_layer*> &flow){
string pathw = "";
string pathb = "";
for(int i = 0; i < flow.size(); ++i){
string layerpath = path + "/layer_" + std::to_string(i);
if(flow[i] -> layer_type == "convolutional"){
for(int k = 0; k < ((convolutional_layer*)flow[i]) -> kernels.size(); ++k){
pathw = layerpath + "_kernel_" + std::to_string(k) + "_w.txt";
pathb = layerpath + "_kernel_" + std::to_string(k) + "_b.txt";
read(pathw, ((convolutional_layer*)flow[i]) -> kernels[k] -> w);
read(pathb, ((convolutional_layer*)flow[i]) -> kernels[k] -> b);
}
}elif(flow[i] -> layer_type == "fully_connected"){
pathw = layerpath + "_w.txt";
pathb = layerpath + "_b.txt";
read(pathw, ((fully_connected_layer*)flow[i]) -> w);
read(pathb, ((fully_connected_layer*)flow[i]) -> b);
}elif(flow[i] -> layer_type == "softmax"){
pathw = layerpath + "_w.txt";
pathb = layerpath + "_b.txt";
read(pathw, ((softmax_layer*)flow[i]) -> w);
read(pathb, ((softmax_layer*)flow[i]) -> b);
}
}
}
|
864036b5832a349a91b684e1d4065e9150c5028e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdint.h>
#include <vector>
#include <string.h>
#include <hip/hip_runtime.h>
#include <sys/mman.h>
#include <unistd.h>
/* Comanche Common */
#include <common/dump_utils.h>
#include "caffe/layers/gpu_direct_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void GPUDirectDataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
#ifndef CPU_ONLY
//#ifdef USE_KVSTORE
std::string data_key = lines_[lines_id_].first;
std::string label_key = lines_[lines_id_].second;
lines_id_++;
lines_id_ %= lines_.size();
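// Fetch the batch for this key directly into the host-mapped GPUDirect buffer (gdr_ptr_data_);
// its device pointer is handed to the top blob below, so no explicit HtoD copy is issued here.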
//LOG(INFO) << "kvstore_get_direct data";
top[0]->ReshapeLike(data_);
kvstore_->get_direct(data_key, gdr_ptr_data_._h_ptr, top[0]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyHtoD(gdr_ptr_data_._d_ptr, gdr_ptr_data_._h_ptr, top[0]->count()*sizeof(Dtype)));
//_mm_sfence();
//CU_CHECK(hipCtxSynchronize());
/*
void * tmp_d_ptr;
void * tmp_buffer;
size_t tmp_buffer_len;
kvstore_->get(data_key, tmp_buffer, tmp_buffer_len);
LOG(INFO) << "get length" << tmp_buffer_len;
//CU_CHECK(cuMemcpyHtoD(gdr_ptr_data_._d_ptr, tmp_buffer, tmp_buffer_len));
//hipDeviceSynchronize();
void * h_tmp = (void *) malloc(top[0]->count()*sizeof(Dtype));
CUDA_CHECK(hipMalloc(&tmp_d_ptr, MB(2)));
copy_kernel<<<1,1024>>>((uint8_t *)gdr_ptr_data_._d_ptr, (uint8_t *) tmp_d_ptr, top[0]->count()*sizeof(Dtype));
hipDeviceSynchronize();
CU_CHECK(cuMemcpyDtoH(h_tmp, (hipDeviceptr_t) tmp_d_ptr, top[0]->count()*sizeof(Dtype)));
hipDeviceSynchronize();
compare_buf((uint32_t*) h_tmp, (uint32_t*) tmp_buffer, top[0]->count()*sizeof(Dtype));
printf("Pass GPU copy test\n");
compare_buf((uint32_t*) gdr_ptr_data_._h_ptr, (uint32_t*) tmp_buffer, top[0]->count()*sizeof(Dtype));
free(h_tmp);
free(tmp_buffer);
*/
//_mm_sfence();
//msync(gdr_ptr_data_._h_ptr, top[0]->count()*sizeof(Dtype) + MB(2), MS_SYNC);
//sleep(1);
//LOG(INFO) << "Dump the host pinned GPU memory for Data";
//hexdump(gdr_ptr_data_._h_ptr, 64);
//assert(tmp_buffer_len == top[0]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyDtoH(h_tmp, gdr_ptr_data_._d_ptr, top[0]->count()*sizeof(Dtype)));
//hexdump(h_tmp, 64);
//compare_buf((uint32_t*) h_tmp, (uint32_t*) tmp_buffer, top[0]->count()*sizeof(Dtype));
//compare_buf((uint32_t*) h_tmp, (uint32_t*) gdr_ptr_data_._h_ptr, top[0]->count()*sizeof(Dtype));
//free(h_tmp);
//kvstore_->get_direct(data_key, (void*) data_.mutable_cpu_data(), top[0]->count()*sizeof(Dtype));
//hexdump((void *)((char *)tmp_buffer+top[0]->count()*sizeof(Dtype)-256), 256);
//memcpy(gdr_ptr_data_._h_ptr, tmp_buffer, tmp_buffer_len);
//CU_CHECK(cuMemcpyHtoD(gdr_ptr_data_._d_ptr, tmp_buffer, tmp_buffer_len));
//hexdump_kernel<<<1,256>>>((uint8_t*)gdr_ptr_data_._d_ptr, top[0]->count()*sizeof(Dtype)-256);
//CU_CHECK(cuMemcpyHtoD(gdr_ptr_data_._d_ptr, tmp_buffer, tmp_buffer_len));
//void* h_tmp1 = (void *) malloc(top[0]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyDtoH(h_tmp1, (hipDeviceptr_t) data_.mutable_gpu_data(), top[0]->count()*sizeof(Dtype)));
//compare_buf((uint32_t*) tmp_buffer, (uint32_t*) h_tmp1, top[0]->count()*sizeof(Dtype));
//compare_buf((uint32_t*) tmp_buffer, (uint32_t*) gdr_ptr_data_._h_ptr, top[0]->count()*sizeof(Dtype));
top[0]->set_gpu_data((Dtype*)gdr_ptr_data_._d_ptr);
//free(h_tmp);
//free(h_tmp1);
if(this->output_labels_) {
vector<int> label_shape(1, batch_size_);
//LOG(INFO) << "kvstore_get_direct_label";
top[1]->ReshapeLike(label_);
kvstore_->get_direct(label_key, gdr_ptr_label_._h_ptr, top[1]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyHtoD(gdr_ptr_label_._d_ptr, gdr_ptr_label_._h_ptr, top[1]->count()*sizeof(Dtype)));
//_mm_sfence();
//mmiowcwb();
//CU_CHECK(hipCtxSynchronize());
//msync(gdr_ptr_label_._h_ptr, top[1]->count()*sizeof(Dtype) + MB(2), MS_SYNC);
//sleep(1);
//kvstore_->get(label_key, tmp_buffer, tmp_buffer_len);
//assert(tmp_buffer_len == top[1]->count()*sizeof(Dtype));
//h_tmp = (void *) malloc(top[1]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyDtoH(h_tmp, gdr_ptr_label_._d_ptr, top[1]->count()*sizeof(Dtype)));
//compare_buf((uint32_t*) h_tmp, (uint32_t*) tmp_buffer, top[1]->count()*sizeof(Dtype));
//compare_buf((uint32_t*) h_tmp, (uint32_t*) gdr_ptr_label_._h_ptr, top[1]->count()*sizeof(Dtype));
//kvstore_->get_direct(label_key, label_.mutable_cpu_data(), top[1]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyHtoD(gdr_ptr_label_._d_ptr, tmp_buffer, tmp_buffer_len));
//h_tmp1 = (void *) malloc(top[1]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyDtoH(h_tmp1, (hipDeviceptr_t) label_.mutable_gpu_data(), top[1]->count()*sizeof(Dtype)));
//compare_buf((uint32_t*) tmp_buffer, (uint32_t*) h_tmp1, top[1]->count()*sizeof(Dtype));
//LOG(INFO) << "Dump the host pinned GPU memory for Label";
//hexdump(gdr_ptr_label_._h_ptr, 64);
//h_tmp = (void *) malloc(top[1]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyDtoH(h_tmp, gdr_ptr_label_._d_ptr, top[1]->count()*sizeof(Dtype)));
//hexdump(h_tmp, 64);
//compare_buf((uint32_t*) tmp_buffer, (uint32_t*) gdr_ptr_label_._h_ptr, top[1]->count()*sizeof(Dtype));
//free(h_tmp);
//free(h_tmp1);
top[1]->set_gpu_data((Dtype*)gdr_ptr_label_._d_ptr);
}
//hipDeviceSynchronize();
//#endif
#endif
}
INSTANTIATE_LAYER_GPU_FUNCS(GPUDirectDataLayer);
} // namespace caffe
| 864036b5832a349a91b684e1d4065e9150c5028e.cu | #include <stdint.h>
#include <vector>
#include <string.h>
#include <cuda.h>
#include <sys/mman.h>
#include <unistd.h>
/* Comanche Common */
#include <common/dump_utils.h>
#include "caffe/layers/gpu_direct_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void GPUDirectDataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
#ifndef CPU_ONLY
//#ifdef USE_KVSTORE
std::string data_key = lines_[lines_id_].first;
std::string label_key = lines_[lines_id_].second;
lines_id_++;
lines_id_ %= lines_.size();
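// Fetch the batch for this key directly into the host-mapped GPUDirect buffer (gdr_ptr_data_);
// its device pointer is handed to the top blob below, so no explicit HtoD copy is issued here.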
//LOG(INFO) << "kvstore_get_direct data";
top[0]->ReshapeLike(data_);
kvstore_->get_direct(data_key, gdr_ptr_data_._h_ptr, top[0]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyHtoD(gdr_ptr_data_._d_ptr, gdr_ptr_data_._h_ptr, top[0]->count()*sizeof(Dtype)));
//_mm_sfence();
//CU_CHECK(cuCtxSynchronize());
/*
void * tmp_d_ptr;
void * tmp_buffer;
size_t tmp_buffer_len;
kvstore_->get(data_key, tmp_buffer, tmp_buffer_len);
LOG(INFO) << "get length" << tmp_buffer_len;
//CU_CHECK(cuMemcpyHtoD(gdr_ptr_data_._d_ptr, tmp_buffer, tmp_buffer_len));
//cudaDeviceSynchronize();
void * h_tmp = (void *) malloc(top[0]->count()*sizeof(Dtype));
CUDA_CHECK(cudaMalloc(&tmp_d_ptr, MB(2)));
copy_kernel<<<1,1024>>>((uint8_t *)gdr_ptr_data_._d_ptr, (uint8_t *) tmp_d_ptr, top[0]->count()*sizeof(Dtype));
cudaDeviceSynchronize();
CU_CHECK(cuMemcpyDtoH(h_tmp, (CUdeviceptr) tmp_d_ptr, top[0]->count()*sizeof(Dtype)));
cudaDeviceSynchronize();
compare_buf((uint32_t*) h_tmp, (uint32_t*) tmp_buffer, top[0]->count()*sizeof(Dtype));
printf("Pass GPU copy test\n");
compare_buf((uint32_t*) gdr_ptr_data_._h_ptr, (uint32_t*) tmp_buffer, top[0]->count()*sizeof(Dtype));
free(h_tmp);
free(tmp_buffer);
*/
//_mm_sfence();
//msync(gdr_ptr_data_._h_ptr, top[0]->count()*sizeof(Dtype) + MB(2), MS_SYNC);
//sleep(1);
//LOG(INFO) << "Dump the host pinned GPU memory for Data";
//hexdump(gdr_ptr_data_._h_ptr, 64);
//assert(tmp_buffer_len == top[0]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyDtoH(h_tmp, gdr_ptr_data_._d_ptr, top[0]->count()*sizeof(Dtype)));
//hexdump(h_tmp, 64);
//compare_buf((uint32_t*) h_tmp, (uint32_t*) tmp_buffer, top[0]->count()*sizeof(Dtype));
//compare_buf((uint32_t*) h_tmp, (uint32_t*) gdr_ptr_data_._h_ptr, top[0]->count()*sizeof(Dtype));
//free(h_tmp);
//kvstore_->get_direct(data_key, (void*) data_.mutable_cpu_data(), top[0]->count()*sizeof(Dtype));
//hexdump((void *)((char *)tmp_buffer+top[0]->count()*sizeof(Dtype)-256), 256);
//memcpy(gdr_ptr_data_._h_ptr, tmp_buffer, tmp_buffer_len);
//CU_CHECK(cuMemcpyHtoD(gdr_ptr_data_._d_ptr, tmp_buffer, tmp_buffer_len));
//hexdump_kernel<<<1,256>>>((uint8_t*)gdr_ptr_data_._d_ptr, top[0]->count()*sizeof(Dtype)-256);
//CU_CHECK(cuMemcpyHtoD(gdr_ptr_data_._d_ptr, tmp_buffer, tmp_buffer_len));
//void* h_tmp1 = (void *) malloc(top[0]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyDtoH(h_tmp1, (CUdeviceptr) data_.mutable_gpu_data(), top[0]->count()*sizeof(Dtype)));
//compare_buf((uint32_t*) tmp_buffer, (uint32_t*) h_tmp1, top[0]->count()*sizeof(Dtype));
//compare_buf((uint32_t*) tmp_buffer, (uint32_t*) gdr_ptr_data_._h_ptr, top[0]->count()*sizeof(Dtype));
top[0]->set_gpu_data((Dtype*)gdr_ptr_data_._d_ptr);
//free(h_tmp);
//free(h_tmp1);
if(this->output_labels_) {
vector<int> label_shape(1, batch_size_);
//LOG(INFO) << "kvstore_get_direct_label";
top[1]->ReshapeLike(label_);
kvstore_->get_direct(label_key, gdr_ptr_label_._h_ptr, top[1]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyHtoD(gdr_ptr_label_._d_ptr, gdr_ptr_label_._h_ptr, top[1]->count()*sizeof(Dtype)));
//_mm_sfence();
//mmiowcwb();
//CU_CHECK(cuCtxSynchronize());
//msync(gdr_ptr_label_._h_ptr, top[1]->count()*sizeof(Dtype) + MB(2), MS_SYNC);
//sleep(1);
//kvstore_->get(label_key, tmp_buffer, tmp_buffer_len);
//assert(tmp_buffer_len == top[1]->count()*sizeof(Dtype));
//h_tmp = (void *) malloc(top[1]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyDtoH(h_tmp, gdr_ptr_label_._d_ptr, top[1]->count()*sizeof(Dtype)));
//compare_buf((uint32_t*) h_tmp, (uint32_t*) tmp_buffer, top[1]->count()*sizeof(Dtype));
//compare_buf((uint32_t*) h_tmp, (uint32_t*) gdr_ptr_label_._h_ptr, top[1]->count()*sizeof(Dtype));
//kvstore_->get_direct(label_key, label_.mutable_cpu_data(), top[1]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyHtoD(gdr_ptr_label_._d_ptr, tmp_buffer, tmp_buffer_len));
//h_tmp1 = (void *) malloc(top[1]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyDtoH(h_tmp1, (CUdeviceptr) label_.mutable_gpu_data(), top[1]->count()*sizeof(Dtype)));
//compare_buf((uint32_t*) tmp_buffer, (uint32_t*) h_tmp1, top[1]->count()*sizeof(Dtype));
//LOG(INFO) << "Dump the host pinned GPU memory for Label";
//hexdump(gdr_ptr_label_._h_ptr, 64);
//h_tmp = (void *) malloc(top[1]->count()*sizeof(Dtype));
//CU_CHECK(cuMemcpyDtoH(h_tmp, gdr_ptr_label_._d_ptr, top[1]->count()*sizeof(Dtype)));
//hexdump(h_tmp, 64);
//compare_buf((uint32_t*) tmp_buffer, (uint32_t*) gdr_ptr_label_._h_ptr, top[1]->count()*sizeof(Dtype));
//free(h_tmp);
//free(h_tmp1);
top[1]->set_gpu_data((Dtype*)gdr_ptr_label_._d_ptr);
}
//cudaDeviceSynchronize();
//#endif
#endif
}
INSTANTIATE_LAYER_GPU_FUNCS(GPUDirectDataLayer);
} // namespace caffe
|
37c352066952ad0ad3e06bc97851f008477b71d5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This is the jacobi relaxation method in gpu
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define SIZE 2048
#define BLOCK_SIZE 32
#define NITER 1000
float ratio(float*u, float ant, int iter){
float tmp=0.0;
int i,j;
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
if(u[j*SIZE+i]>tmp)
tmp=u[j*SIZE+i];
}
}
printf(" iter=%d ratio=%f ant=%f max=%f\n",iter,tmp/ant,ant,tmp);
return tmp;
}
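// One Jacobi sweep: each thread updates one point of u_new from f and the four neighbours
// of u (5-point stencil). Note: there is no bounds check, so threads on the outermost
// rows/columns index outside the interior of the grid.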
__global__ void jacobi(float *d_u_new, float *d_u, float *d_f, float h2){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
d_u_new[i*SIZE+j]=0.25*(
h2*d_f[i *SIZE+j ]+
d_u[(i-1)*SIZE+j ]+
d_u[(i+1)*SIZE+j ]+
d_u[i *SIZE+j-1 ]+
d_u[i *SIZE+j+1 ]);
}
int main(){
float * h_u, *h_f;
float * d_u, *d_u_new, *d_f;
float * tmp;
float ant = 1.0;
int i,j;
size_t size;
float h = 1.0/SIZE;
/* Host memory malloc */
size = SIZE*SIZE*sizeof(float);
printf("We need %zu MB of memory\n", 3*size/1024/1024);
h_u = (float*)malloc(size);
h_f = (float*)malloc(size);
/* memory for the gpu */
hipMalloc(&d_u, size);
hipMalloc(&d_u_new, size);
hipMalloc(&d_f, size);
/* Initialization */
for(i=0;i<SIZE; i++){
for(j=0; j<SIZE; j++){
h_f[i*SIZE+j]=0.0;
h_u[i*SIZE+j]=rand();
}
}
/* Bounds */
for(i=0;i<SIZE;i++){
h_u[i]=0.0;
h_u[i*SIZE]=0.0;
h_u[i*SIZE+SIZE-1]=0.0;
h_u[SIZE*(SIZE-1)+i]=0.0;
}
/* Copy from host to device */
hipMemcpy(d_f,h_f,size,hipMemcpyHostToDevice);
hipMemcpy(d_u,h_u,size,hipMemcpyHostToDevice);
hipMemcpy(d_u_new,h_u,size,hipMemcpyHostToDevice);
/* Grid dimension */
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 dimGrid(SIZE/BLOCK_SIZE,SIZE/BLOCK_SIZE);
float h2=h*h;
/* Call NITER times to the jacobi method */
for(i=0;i<NITER;i++)
{
hipLaunchKernelGGL(( jacobi), dim3(dimGrid),dim3(dimBlock), 0, 0, d_u_new,d_u,d_f,h2);
hipDeviceSynchronize();
if(i%100==0){
hipMemcpy(h_u, d_u_new, size, hipMemcpyDeviceToHost);
ant=ratio(h_u,ant,i);
}
tmp=d_u_new;
d_u_new=d_u;
d_u=tmp;
}
/* free memory */
free(h_u);
free(h_f);
hipFree(d_u_new);
hipFree(d_u);
hipFree(d_f);
}
| 37c352066952ad0ad3e06bc97851f008477b71d5.cu |
/*
* This is the jacobi relaxation method in gpu
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#define SIZE 2048
#define BLOCK_SIZE 32
#define NITER 1000
float ratio(float*u, float ant, int iter){
float tmp=0.0;
int i,j;
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
if(u[j*SIZE+i]>tmp)
tmp=u[j*SIZE+i];
}
}
printf(" iter=%d ratio=%f ant=%f max=%f\n",iter,tmp/ant,ant,tmp);
return tmp;
}
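// One Jacobi sweep: each thread updates one point of u_new from f and the four neighbours
// of u (5-point stencil). Note: there is no bounds check, so threads on the outermost
// rows/columns index outside the interior of the grid.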
__global__ void jacobi(float *d_u_new, float *d_u, float *d_f, float h2){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
d_u_new[i*SIZE+j]=0.25*(
h2*d_f[i *SIZE+j ]+
d_u[(i-1)*SIZE+j ]+
d_u[(i+1)*SIZE+j ]+
d_u[i *SIZE+j-1 ]+
d_u[i *SIZE+j+1 ]);
}
int main(){
float * h_u, *h_f;
float * d_u, *d_u_new, *d_f;
float * tmp;
float ant = 1.0;
int i,j;
size_t size;
float h = 1.0/SIZE;
/* Host memory malloc */
size = SIZE*SIZE*sizeof(float);
printf("We need %zu MB of memory\n", 3*size/1024/1024);
h_u = (float*)malloc(size);
h_f = (float*)malloc(size);
/* memory for the gpu */
cudaMalloc(&d_u, size);
cudaMalloc(&d_u_new, size);
cudaMalloc(&d_f, size);
/* Initialization */
for(i=0;i<SIZE; i++){
for(j=0; j<SIZE; j++){
h_f[i*SIZE+j]=0.0;
h_u[i*SIZE+j]=rand();
}
}
/* Bounds */
for(i=0;i<SIZE;i++){
h_u[i]=0.0;
h_u[i*SIZE]=0.0;
h_u[i*SIZE+SIZE-1]=0.0;
h_u[SIZE*(SIZE-1)+i]=0.0;
}
/* Copy from host to device */
cudaMemcpy(d_f,h_f,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_u,h_u,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_u_new,h_u,size,cudaMemcpyHostToDevice);
/* Grid dimension */
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 dimGrid(SIZE/BLOCK_SIZE,SIZE/BLOCK_SIZE);
float h2=h*h;
/* Call NITER times to the jacobi method */
for(i=0;i<NITER;i++)
{
jacobi<<<dimGrid,dimBlock>>>(d_u_new,d_u,d_f,h2);
cudaDeviceSynchronize();
if(i%100==0){
cudaMemcpy(h_u, d_u_new, size, cudaMemcpyDeviceToHost);
ant=ratio(h_u,ant,i);
}
tmp=d_u_new;
d_u_new=d_u;
d_u=tmp;
}
/* free memory */
free(h_u);
free(h_f);
cudaFree(d_u_new);
cudaFree(d_u);
cudaFree(d_f);
}
|
356c8767daad39a92f156f171c8b3dd356a2bba4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include "timer.h"
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(hipGetLastError());
*/
static void checkCudaCall(hipError_t result) {
if (result != hipSuccess) {
cerr << "cuda error: " << hipGetErrorString(result) << endl;
exit(1);
}
}
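// Note: encryptKernel and decryptKernel below currently just copy input to output;
// the one-character shift described in EncryptSeq's comments is not applied yet.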
__global__ void encryptKernel(char* deviceDataIn, char* deviceDataOut) {
unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
deviceDataOut[index] = deviceDataIn[index];
}
__global__ void decryptKernel(char* deviceDataIn, char* deviceDataOut) {
unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
deviceDataOut[index] = deviceDataIn[index];
}
int fileSize() {
int size;
ifstream file ("original.data", ios::in|ios::binary|ios::ate);
if (file.is_open())
{
size = file.tellg();
file.close();
}
else {
cout << "Unable to open file";
size = -1;
}
return size;
}
int readData(char *fileName, char *data) {
streampos size;
ifstream file (fileName, ios::in|ios::binary|ios::ate);
if (file.is_open())
{
size = file.tellg();
file.seekg (0, ios::beg);
file.read (data, size);
file.close();
cout << "The entire file content is in memory." << endl;
}
else cout << "Unable to open file" << endl;
return 0;
}
int writeData(int size, char *fileName, char *data) {
ofstream file (fileName, ios::out|ios::binary|ios::trunc);
if (file.is_open())
{
file.write (data, size);
file.close();
cout << "The entire file content was written to file." << endl;
return 0;
}
else cout << "Unable to open file";
return -1;
}
int EncryptSeq (int n, char* data_in, char* data_out)
{
int i;
char b;
timer sequentialTime = timer("Sequential encryption");
sequentialTime.start();
// change the functionality of the loop
// by changing the char value of data_in[i]
// encryption is done by shifting one ascii value to the right
for (i=0; i<n; i++) {
b = (data_in[i] - 1);
cout << b << endl;
data_out[i]=data_in[i];
}
sequentialTime.stop();
cout << fixed << setprecision(6);
cout << "Encryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl;
return 0;
}
int DecryptSeq (int n, char* data_in, char* data_out)
{
int i;
timer sequentialTime = timer("Sequential decryption");
sequentialTime.start();
// change the functionality of the loop
// by changing the char value of data_in[i]
// decryption is done by shifting one ascii value to the left
for (i=0; i<n; i++) { data_out[i]=data_in[i]; }
sequentialTime.stop();
cout << fixed << setprecision(6);
cout << "Decryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl;
return 0;
}
int EncryptCuda (int n, char* data_in, char* data_out) {
int threadBlockSize = 512;
// allocate the vectors on the GPU
char* deviceDataIn = NULL;
checkCudaCall(hipMalloc((void **) &deviceDataIn, n * sizeof(char)));
if (deviceDataIn == NULL) {
cout << "could not allocate memory!" << endl;
return -1;
}
char* deviceDataOut = NULL;
checkCudaCall(hipMalloc((void **) &deviceDataOut, n * sizeof(char)));
if (deviceDataOut == NULL) {
checkCudaCall(hipFree(deviceDataIn));
cout << "could not allocate memory!" << endl;
return -1;
}
timer kernelTime1 = timer("kernelTime");
timer memoryTime = timer("memoryTime");
// copy the original vectors to the GPU
memoryTime.start();
checkCudaCall(hipMemcpy(deviceDataIn, data_in, n*sizeof(char), hipMemcpyHostToDevice));
memoryTime.stop();
// execute kernel
kernelTime1.start();
hipLaunchKernelGGL(( encryptKernel), dim3(n/threadBlockSize), dim3(threadBlockSize), 0, 0, deviceDataIn, deviceDataOut);
hipDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(hipGetLastError());
// copy result back
memoryTime.start();
checkCudaCall(hipMemcpy(data_out, deviceDataOut, n * sizeof(char), hipMemcpyDeviceToHost));
memoryTime.stop();
checkCudaCall(hipFree(deviceDataIn));
checkCudaCall(hipFree(deviceDataOut));
cout << fixed << setprecision(6);
cout << "Encrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl;
cout << "Encrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl;
return 0;
}
int DecryptCuda (int n, char* data_in, char* data_out) {
int threadBlockSize = 512;
// allocate the vectors on the GPU
char* deviceDataIn = NULL;
checkCudaCall(hipMalloc((void **) &deviceDataIn, n * sizeof(char)));
if (deviceDataIn == NULL) {
cout << "could not allocate memory!" << endl;
return -1;
}
char* deviceDataOut = NULL;
checkCudaCall(hipMalloc((void **) &deviceDataOut, n * sizeof(char)));
if (deviceDataOut == NULL) {
checkCudaCall(hipFree(deviceDataIn));
cout << "could not allocate memory!" << endl;
return -1;
}
timer kernelTime1 = timer("kernelTime");
timer memoryTime = timer("memoryTime");
// copy the original vectors to the GPU
memoryTime.start();
checkCudaCall(hipMemcpy(deviceDataIn, data_in, n*sizeof(char), hipMemcpyHostToDevice));
memoryTime.stop();
// execute kernel
kernelTime1.start();
hipLaunchKernelGGL(( decryptKernel), dim3(n/threadBlockSize), dim3(threadBlockSize), 0, 0, deviceDataIn, deviceDataOut);
hipDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(hipGetLastError());
// copy result back
memoryTime.start();
checkCudaCall(hipMemcpy(data_out, deviceDataOut, n * sizeof(char), hipMemcpyDeviceToHost));
memoryTime.stop();
checkCudaCall(hipFree(deviceDataIn));
checkCudaCall(hipFree(deviceDataOut));
cout << fixed << setprecision(6);
cout << "Decrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl;
cout << "Decrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl;
return 0;
}
int main(int argc, char* argv[]) {
int n;
n = fileSize();
if (n == -1) {
cout << "File not found! Exiting ... " << endl;
exit(0);
}
char* data_in = new char[n];
char* data_out = new char[n];
readData("test.data", data_in);
cout << "Encrypting a file of " << n << " characters." << endl;
EncryptSeq(n, data_in, data_out);
writeData(n, "sequential.data", data_out);
EncryptCuda(n, data_in, data_out);
writeData(n, "cuda.data", data_out);
readData("cuda.data", data_in);
cout << "Decrypting a file of " << n << "characters" << endl;
DecryptSeq(n, data_in, data_out);
writeData(n, "sequential_decrypted.data", data_out);
DecryptCuda(n, data_in, data_out);
writeData(n, "recovered.data", data_out);
delete[] data_in;
delete[] data_out;
return 0;
}
| 356c8767daad39a92f156f171c8b3dd356a2bba4.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include "timer.h"
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(cudaGetLastError());
*/
static void checkCudaCall(cudaError_t result) {
if (result != cudaSuccess) {
cerr << "cuda error: " << cudaGetErrorString(result) << endl;
exit(1);
}
}
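// Note: encryptKernel and decryptKernel below currently just copy input to output;
// the one-character shift described in EncryptSeq's comments is not applied yet.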
__global__ void encryptKernel(char* deviceDataIn, char* deviceDataOut) {
unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
deviceDataOut[index] = deviceDataIn[index];
}
__global__ void decryptKernel(char* deviceDataIn, char* deviceDataOut) {
unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
deviceDataOut[index] = deviceDataIn[index];
}
int fileSize() {
int size;
ifstream file ("original.data", ios::in|ios::binary|ios::ate);
if (file.is_open())
{
size = file.tellg();
file.close();
}
else {
cout << "Unable to open file";
size = -1;
}
return size;
}
int readData(char *fileName, char *data) {
streampos size;
ifstream file (fileName, ios::in|ios::binary|ios::ate);
if (file.is_open())
{
size = file.tellg();
file.seekg (0, ios::beg);
file.read (data, size);
file.close();
cout << "The entire file content is in memory." << endl;
}
else cout << "Unable to open file" << endl;
return 0;
}
int writeData(int size, char *fileName, char *data) {
ofstream file (fileName, ios::out|ios::binary|ios::trunc);
if (file.is_open())
{
file.write (data, size);
file.close();
cout << "The entire file content was written to file." << endl;
return 0;
}
else cout << "Unable to open file";
return -1;
}
int EncryptSeq (int n, char* data_in, char* data_out)
{
int i;
char b;
timer sequentialTime = timer("Sequential encryption");
sequentialTime.start();
// change the functionality of the loop
// by changing the char value of data_in[i]
// encryption is done by shifting one ascii value to the right
for (i=0; i<n; i++) {
b = (data_in[i] - 1);
cout << b << endl;
data_out[i]=data_in[i];
}
sequentialTime.stop();
cout << fixed << setprecision(6);
cout << "Encryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl;
return 0;
}
int DecryptSeq (int n, char* data_in, char* data_out)
{
int i;
timer sequentialTime = timer("Sequential decryption");
sequentialTime.start();
// change the functionality of the loop
// by changing the char value of data_in[i]
// decryption is done by shifting one ascii value to the left
for (i=0; i<n; i++) { data_out[i]=data_in[i]; }
sequentialTime.stop();
cout << fixed << setprecision(6);
cout << "Decryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl;
return 0;
}
int EncryptCuda (int n, char* data_in, char* data_out) {
int threadBlockSize = 512;
// allocate the vectors on the GPU
char* deviceDataIn = NULL;
checkCudaCall(cudaMalloc((void **) &deviceDataIn, n * sizeof(char)));
if (deviceDataIn == NULL) {
cout << "could not allocate memory!" << endl;
return -1;
}
char* deviceDataOut = NULL;
checkCudaCall(cudaMalloc((void **) &deviceDataOut, n * sizeof(char)));
if (deviceDataOut == NULL) {
checkCudaCall(cudaFree(deviceDataIn));
cout << "could not allocate memory!" << endl;
return -1;
}
timer kernelTime1 = timer("kernelTime");
timer memoryTime = timer("memoryTime");
// copy the original vectors to the GPU
memoryTime.start();
checkCudaCall(cudaMemcpy(deviceDataIn, data_in, n*sizeof(char), cudaMemcpyHostToDevice));
memoryTime.stop();
// execute kernel
kernelTime1.start();
encryptKernel<<<n/threadBlockSize, threadBlockSize>>>(deviceDataIn, deviceDataOut);
cudaDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(cudaGetLastError());
// copy result back
memoryTime.start();
checkCudaCall(cudaMemcpy(data_out, deviceDataOut, n * sizeof(char), cudaMemcpyDeviceToHost));
memoryTime.stop();
checkCudaCall(cudaFree(deviceDataIn));
checkCudaCall(cudaFree(deviceDataOut));
cout << fixed << setprecision(6);
cout << "Encrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl;
cout << "Encrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl;
return 0;
}
int DecryptCuda (int n, char* data_in, char* data_out) {
int threadBlockSize = 512;
// allocate the vectors on the GPU
char* deviceDataIn = NULL;
checkCudaCall(cudaMalloc((void **) &deviceDataIn, n * sizeof(char)));
if (deviceDataIn == NULL) {
cout << "could not allocate memory!" << endl;
return -1;
}
char* deviceDataOut = NULL;
checkCudaCall(cudaMalloc((void **) &deviceDataOut, n * sizeof(char)));
if (deviceDataOut == NULL) {
checkCudaCall(cudaFree(deviceDataIn));
cout << "could not allocate memory!" << endl;
return -1;
}
timer kernelTime1 = timer("kernelTime");
timer memoryTime = timer("memoryTime");
// copy the original vectors to the GPU
memoryTime.start();
checkCudaCall(cudaMemcpy(deviceDataIn, data_in, n*sizeof(char), cudaMemcpyHostToDevice));
memoryTime.stop();
// execute kernel
kernelTime1.start();
decryptKernel<<<n/threadBlockSize, threadBlockSize>>>(deviceDataIn, deviceDataOut);
cudaDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(cudaGetLastError());
// copy result back
memoryTime.start();
checkCudaCall(cudaMemcpy(data_out, deviceDataOut, n * sizeof(char), cudaMemcpyDeviceToHost));
memoryTime.stop();
checkCudaCall(cudaFree(deviceDataIn));
checkCudaCall(cudaFree(deviceDataOut));
cout << fixed << setprecision(6);
cout << "Decrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl;
cout << "Decrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl;
return 0;
}
int main(int argc, char* argv[]) {
int n;
n = fileSize();
if (n == -1) {
cout << "File not found! Exiting ... " << endl;
exit(0);
}
char* data_in = new char[n];
char* data_out = new char[n];
readData("test.data", data_in);
cout << "Encrypting a file of " << n << " characters." << endl;
EncryptSeq(n, data_in, data_out);
writeData(n, "sequential.data", data_out);
EncryptCuda(n, data_in, data_out);
writeData(n, "cuda.data", data_out);
readData("cuda.data", data_in);
cout << "Decrypting a file of " << n << "characters" << endl;
DecryptSeq(n, data_in, data_out);
writeData(n, "sequential_decrypted.data", data_out);
DecryptCuda(n, data_in, data_out);
writeData(n, "recovered.data", data_out);
delete[] data_in;
delete[] data_out;
return 0;
}
|
b5115fa10264e674de894a7a18d36766b51a4ca4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 Paddle
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "SwitchOp.h"
#include "hl_base.h"
namespace paddle {
__global__ void KeNCHW2NHWC(real* outputs,
const real* inputs,
int inC,
int inH,
int inW,
int nthreads,
int argType) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int w = idx % inW;
const int h = (idx / inW) % inH;
const int c = (idx / inW / inH) % inC;
const int n = idx / inW / inH / inC;
const int off = ((n * inH + h) * inW + w) * inC + c;
if (argType == ADD_TO) {
outputs[off] += inputs[idx];
} else {
outputs[off] = inputs[idx];
}
}
}
template <>
void NCHW2NHWC<DEVICE_TYPE_GPU>(real* outputs,
const real* inputs,
const int num,
const int inC,
const int inH,
const int inW,
const int argType) {
size_t nth = num * inC * inH * inW;
int blockSize = 1024;
int gridSize = (nth + 1024 - 1) / 1024;
hipLaunchKernelGGL(( KeNCHW2NHWC), dim3(gridSize), dim3(blockSize), 0, STREAM_DEFAULT,
outputs, inputs, inC, inH, inW, nth, argType);
CHECK_SYNC("NCHW2NHWC");
}
__global__ void KeNHWC2NCHW(real* outputs,
const real* inputs,
int inH,
int inW,
int inC,
int nthreads,
int argType) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int c = idx % inC;
const int w = (idx / inC) % inW;
const int h = (idx / inC / inW) % inH;
const int n = idx / inW / inH / inC;
const int off = ((n * inC + c) * inH + h) * inW + w;
if (argType == ADD_TO) {
outputs[off] += inputs[idx];
} else {
outputs[off] = inputs[idx];
}
}
}
template <>
void NHWC2NCHW<DEVICE_TYPE_GPU>(real* outputs,
const real* inputs,
const int num,
const int inH,
const int inW,
const int inC,
const int argType) {
int nth = num * inC * inH * inW;
int blockSize = 1024;
int gridSize = (nth + 1024 - 1) / 1024;
hipLaunchKernelGGL(( KeNHWC2NCHW), dim3(gridSize), dim3(blockSize), 0, STREAM_DEFAULT,
outputs, inputs, inH, inW, inC, nth, argType);
CHECK_SYNC("NHWC2NCHW");
}
} // namespace paddle
| b5115fa10264e674de894a7a18d36766b51a4ca4.cu | /* Copyright (c) 2016 Paddle
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "SwitchOp.h"
#include "hl_base.h"
namespace paddle {
__global__ void KeNCHW2NHWC(real* outputs,
const real* inputs,
int inC,
int inH,
int inW,
int nthreads,
int argType) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int w = idx % inW;
const int h = (idx / inW) % inH;
const int c = (idx / inW / inH) % inC;
const int n = idx / inW / inH / inC;
const int off = ((n * inH + h) * inW + w) * inC + c;
if (argType == ADD_TO) {
outputs[off] += inputs[idx];
} else {
outputs[off] = inputs[idx];
}
}
}
template <>
void NCHW2NHWC<DEVICE_TYPE_GPU>(real* outputs,
const real* inputs,
const int num,
const int inC,
const int inH,
const int inW,
const int argType) {
size_t nth = num * inC * inH * inW;
int blockSize = 1024;
int gridSize = (nth + 1024 - 1) / 1024;
KeNCHW2NHWC<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(
outputs, inputs, inC, inH, inW, nth, argType);
CHECK_SYNC("NCHW2NHWC");
}
__global__ void KeNHWC2NCHW(real* outputs,
const real* inputs,
int inH,
int inW,
int inC,
int nthreads,
int argType) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int c = idx % inC;
const int w = (idx / inC) % inW;
const int h = (idx / inC / inW) % inH;
const int n = idx / inW / inH / inC;
const int off = ((n * inC + c) * inH + h) * inW + w;
if (argType == ADD_TO) {
outputs[off] += inputs[idx];
} else {
outputs[off] = inputs[idx];
}
}
}
template <>
void NHWC2NCHW<DEVICE_TYPE_GPU>(real* outputs,
const real* inputs,
const int num,
const int inH,
const int inW,
const int inC,
const int argType) {
int nth = num * inC * inH * inW;
int blockSize = 1024;
int gridSize = (nth + 1024 - 1) / 1024;
KeNHWC2NCHW<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(
outputs, inputs, inH, inW, inC, nth, argType);
CHECK_SYNC("NHWC2NCHW");
}
} // namespace paddle
|
303b8505c4a40f3ff62e69000bd5ffa018f515f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void CalcInput(float* screen, float* weight, float* d_Votes, int stride){
//Current implementation, idk if it works. Probably doesn't, but it is worth a try, I think.
int id = threadIdx.x + blockDim.x * blockIdx.x;
d_Votes[id] = 0;
d_Votes[id] += screen[id] * weight[id];
d_Votes[id] += screen[id + 1] * weight[id + 1];
d_Votes[id] += screen[stride] * weight[stride];
d_Votes[id] += screen[stride + 1] * weight[stride + 1];
d_Votes[id] /= 4;
} | 303b8505c4a40f3ff62e69000bd5ffa018f515f2.cu | #include "includes.h"
__global__ void CalcInput(float* screen, float* weight, float* d_Votes, int stride){
//Current implementation, idk if it works. Probably doesn't, but it is worth a try, I think.
int id = threadIdx.x + blockDim.x * blockIdx.x;
d_Votes[id] = 0;
d_Votes[id] += screen[id] * weight[id];
d_Votes[id] += screen[id + 1] * weight[id + 1];
d_Votes[id] += screen[stride] * weight[stride];
d_Votes[id] += screen[stride + 1] * weight[stride + 1];
d_Votes[id] /= 4;
} |
635134fc3305205dedb13ff38290e58a9c114da7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <cmath>
namespace at {
namespace native {
namespace {
__global__ void ChooseQuantizationParamsKernelImpl(
const int64_t* fake_quant_on,
const float* x_min,
const float* x_max,
int32_t qmin,
int32_t qmax,
int size,
bool preserve_sparsity,
float* scale,
int32_t* zero_point) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size && *fake_quant_on == 1) {
float min_val = x_min[i];
float max_val = x_max[i];
if (min_val < 0 && max_val > 0 && preserve_sparsity) {
int symmetric_qmin = -((qmax - qmin) / 2 + 1);
int symmetric_qmax = (qmax - qmin) / 2;
double max_scale = ::max(
fabs(min_val / symmetric_qmin), fabs(max_val / symmetric_qmax));
min_val = max_scale * symmetric_qmin;
max_val = max_scale * symmetric_qmax;
}
// We extend the [min, max] interval to ensure that it contains 0.
// Otherwise, we would not meet the requirement that 0 be an exactly
// representable value.
min_val = ::min(min_val, 0.f);
max_val = ::max(max_val, 0.f);
scale[i] = (static_cast<double>(max_val) - min_val) / (qmax - qmin);
// Moving this check outside this function would result in extra Device to
// Host copy of the min and max val which would result in a perf hit.
if (scale[i] == 0.0f || ::isinf(1.0f / scale[i])) {
scale[i] = 0.1;
}
double zero_point_from_min = qmin - min_val / static_cast<double>(scale[i]);
double zero_point_from_max = qmax - max_val / static_cast<double>(scale[i]);
double zero_point_from_min_error =
std::abs(qmin) + std::abs(min_val / static_cast<double>(scale[i]));
double zero_point_from_max_error =
std::abs(qmax) + std::abs(max_val / static_cast<double>(scale[i]));
double initial_zero_point =
zero_point_from_min_error < zero_point_from_max_error
? zero_point_from_min
: zero_point_from_max;
// Note: preserve_sparsity here means symmetric quantization.
// for symmetric quantization, we force zero_point
// to be a middle value between qmin and qmax.
// If either min or max is 0, then we just use 0 as zero_point.
if (min_val < 0 && max_val > 0 && preserve_sparsity) {
initial_zero_point = static_cast<double>(qmin + qmax) / 2;
}
// Now we need to nudge the zero point to be an integer
// (our zero points are integer, and this is motivated by the
// requirement to be able to represent the real value "0" exactly as a
// quantized value, which is required in multiple places, for example in
// Im2col with zero padding).
int32_t nudged_zero_point = 0;
if (initial_zero_point < qmin) {
nudged_zero_point = qmin;
} else if (initial_zero_point > qmax) {
nudged_zero_point = qmax;
} else {
nudged_zero_point = nearbyint(initial_zero_point);
}
zero_point[i] = nudged_zero_point;
}
}
// CUDA kernel to compute Moving Average Min/Max of the tensor.
// It uses the running_min and running_max along with averaging const, c.
// The formula used to compute the new min/max is as follows
//
// running_min = (1 - c) * running_min + c * x_min, if running_min != inf
// running_min = x_min, if running_min == inf
__global__ void MovingAverageMinMax(
const int64_t* observer_on,
const float* x_min,
const float* x_max,
float* running_min,
float* running_max,
const float averaging_const,
const int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (*observer_on == 1) {
if (i < size) {
float curr_min = x_min[i];
float curr_max = x_max[i];
float adjusted_min = ::isinf(running_min[i])
? curr_min
: (running_min[i]) + averaging_const * (curr_min - (running_min[i]));
float adjusted_max = ::isinf(running_max[i])
? curr_max
: (running_max[i]) + averaging_const * (curr_max - (running_max[i]));
running_min[i] = adjusted_min;
running_max[i] = adjusted_max;
}
}
}
void _calculate_moving_average(
const at::Tensor& x,
const at::Tensor& observer_on,
at::Tensor& running_min,
at::Tensor& running_max,
const float averaging_const,
const int64_t size,
bool per_row_fq) {
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(x.get_device());
at::Tensor x_min, x_max;
int64_t* observer_on_data = observer_on.data_ptr<int64_t>();
float* running_min_data = running_min.data_ptr<float>();
float* running_max_data = running_max.data_ptr<float>();
hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (per_row_fq) {
std::tie(x_min, x_max) = at::_aminmax(x, 1);
float* x_min_data = x_min.data_ptr<float>();
float* x_max_data = x_max.data_ptr<float>();
int num_threads = ::min(size, (int64_t)512);
const uint64_t num_blocks = cuda::ATenCeilDiv<uint64_t>(size, num_threads);
// Moving Average Min/Max observer for activations
hipLaunchKernelGGL(( MovingAverageMinMax), dim3(num_blocks), dim3(num_threads), 0, cuda_stream,
observer_on_data,
x_min_data,
x_max_data,
running_min_data,
running_max_data,
averaging_const,
size);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
std::tie(x_min, x_max) = at::_aminmax(x);
float* x_min_data = x_min.data_ptr<float>();
float* x_max_data = x_max.data_ptr<float>();
// Moving Average Min/Max observer for activations
hipLaunchKernelGGL(( MovingAverageMinMax), dim3(1), dim3(1), 0, cuda_stream,
observer_on_data,
x_min_data,
x_max_data,
running_min_data,
running_max_data,
averaging_const,
1 /*size*/);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
void _calc_moving_avg_qparams_helper(
const at::Tensor& x,
const at::Tensor fake_quant_on,
at::Tensor& running_min,
at::Tensor& running_max,
float* scale_ptr,
int32_t* zp_ptr,
int32_t qmin,
int32_t qmax,
bool symmetric_quant,
const int64_t size,
bool per_row_fq = false) {
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(x.get_device());
hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
int64_t* fake_quant_on_data = fake_quant_on.data_ptr<int64_t>();
if (per_row_fq) {
float* running_min_data = running_min.data_ptr<float>();
float* running_max_data = running_max.data_ptr<float>();
int num_threads = ::min(size, (int64_t)512);
const uint64_t num_blocks = cuda::ATenCeilDiv<uint64_t>(size, num_threads);
hipLaunchKernelGGL(( ChooseQuantizationParamsKernelImpl), dim3(num_blocks), dim3(num_threads), 0, cuda_stream,
fake_quant_on_data,
running_min_data,
running_max_data,
qmin,
qmax,
size,
symmetric_quant,
scale_ptr,
zp_ptr);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
float* running_min_data = running_min.data_ptr<float>();
float* running_max_data = running_max.data_ptr<float>();
hipLaunchKernelGGL(( ChooseQuantizationParamsKernelImpl), dim3(1), dim3(1), 0, cuda_stream,
fake_quant_on_data,
running_min_data,
running_max_data,
qmin,
qmax,
1, // size
symmetric_quant, // preserve_sparsity
scale_ptr,
zp_ptr);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
} // namespace
std::tuple<at::Tensor, at::Tensor> fused_moving_avg_obs_fake_quant_cuda(
const at::Tensor& x,
const at::Tensor& observer_on,
const at::Tensor& fake_quant_on,
at::Tensor& running_min,
at::Tensor& running_max,
at::Tensor& scale,
at::Tensor& zero_point,
const double averaging_const,
const int64_t qmin,
const int64_t qmax,
const int64_t ch_axis,
bool per_row_fq,
bool symmetric_quant) {
const auto x_contig = x.contiguous();
int64_t size = per_row_fq ? x.size(0) : 1;
_calculate_moving_average(
x_contig,
observer_on,
running_min,
running_max,
averaging_const,
size,
per_row_fq);
float* scale_ptr = scale.data_ptr<float>();
int32_t* zp_ptr = zero_point.data_ptr<int32_t>();
_calc_moving_avg_qparams_helper(
x_contig,
fake_quant_on,
running_min,
running_max,
scale_ptr,
zp_ptr,
qmin,
qmax,
symmetric_quant,
size,
per_row_fq);
if (per_row_fq) {
if (fake_quant_on.item().toInt()) {
return at::fake_quantize_per_channel_affine_cachemask(
x, scale, zero_point, 0, qmin, qmax);
} else {
auto mask = at::ones_like(x, at::kBool, MemoryFormat::Preserve);
return std::make_tuple(x.clone(), mask);
}
} else {
return at::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(
x, scale, zero_point, fake_quant_on, qmin, qmax);
}
}
} // namespace native
} // namespace at
| 635134fc3305205dedb13ff38290e58a9c114da7.cu | #include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <c10/cuda/CUDAGuard.h>
#include <cmath>
namespace at {
namespace native {
namespace {
__global__ void ChooseQuantizationParamsKernelImpl(
const int64_t* fake_quant_on,
const float* x_min,
const float* x_max,
int32_t qmin,
int32_t qmax,
int size,
bool preserve_sparsity,
float* scale,
int32_t* zero_point) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size && *fake_quant_on == 1) {
float min_val = x_min[i];
float max_val = x_max[i];
if (min_val < 0 && max_val > 0 && preserve_sparsity) {
int symmetric_qmin = -((qmax - qmin) / 2 + 1);
int symmetric_qmax = (qmax - qmin) / 2;
double max_scale = std::max(
fabs(min_val / symmetric_qmin), fabs(max_val / symmetric_qmax));
min_val = max_scale * symmetric_qmin;
max_val = max_scale * symmetric_qmax;
}
// We extend the [min, max] interval to ensure that it contains 0.
// Otherwise, we would not meet the requirement that 0 be an exactly
// representable value.
min_val = std::min(min_val, 0.f);
max_val = std::max(max_val, 0.f);
scale[i] = (static_cast<double>(max_val) - min_val) / (qmax - qmin);
// Moving this check outside this function would result in extra Device to
// Host copy of the min and max val which would result in a perf hit.
if (scale[i] == 0.0f || ::isinf(1.0f / scale[i])) {
scale[i] = 0.1;
}
double zero_point_from_min = qmin - min_val / static_cast<double>(scale[i]);
double zero_point_from_max = qmax - max_val / static_cast<double>(scale[i]);
double zero_point_from_min_error =
std::abs(qmin) + std::abs(min_val / static_cast<double>(scale[i]));
double zero_point_from_max_error =
std::abs(qmax) + std::abs(max_val / static_cast<double>(scale[i]));
double initial_zero_point =
zero_point_from_min_error < zero_point_from_max_error
? zero_point_from_min
: zero_point_from_max;
// Note: preserve_sparsity here means symmetric quantization.
// for symmetric quantization, we force zero_point
// to be a middle value between qmin and qmax.
// If either min or max is 0, then we just use 0 as zero_point.
if (min_val < 0 && max_val > 0 && preserve_sparsity) {
initial_zero_point = static_cast<double>(qmin + qmax) / 2;
}
// Now we need to nudge the zero point to be an integer
// (our zero points are integer, and this is motivated by the
// requirement to be able to represent the real value "0" exactly as a
// quantized value, which is required in multiple places, for example in
// Im2col with zero padding).
int32_t nudged_zero_point = 0;
if (initial_zero_point < qmin) {
nudged_zero_point = qmin;
} else if (initial_zero_point > qmax) {
nudged_zero_point = qmax;
} else {
nudged_zero_point = nearbyint(initial_zero_point);
}
zero_point[i] = nudged_zero_point;
}
}
// CUDA kernel to compute Moving Average Min/Max of the tensor.
// It uses the running_min and running_max along with averaging const, c.
// The formula used to compute the new min/max is as follows
//
// running_min = (1 - c) * running_min + c * x_min, if running_min != inf
// running_min = x_min, if running_min == inf
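// running_max is updated analogously from x_max.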
__global__ void MovingAverageMinMax(
const int64_t* observer_on,
const float* x_min,
const float* x_max,
float* running_min,
float* running_max,
const float averaging_const,
const int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (*observer_on == 1) {
if (i < size) {
float curr_min = x_min[i];
float curr_max = x_max[i];
float adjusted_min = ::isinf(running_min[i])
? curr_min
: (running_min[i]) + averaging_const * (curr_min - (running_min[i]));
float adjusted_max = ::isinf(running_max[i])
? curr_max
: (running_max[i]) + averaging_const * (curr_max - (running_max[i]));
running_min[i] = adjusted_min;
running_max[i] = adjusted_max;
}
}
}
void _calculate_moving_average(
const at::Tensor& x,
const at::Tensor& observer_on,
at::Tensor& running_min,
at::Tensor& running_max,
const float averaging_const,
const int64_t size,
bool per_row_fq) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(x.get_device());
at::Tensor x_min, x_max;
int64_t* observer_on_data = observer_on.data_ptr<int64_t>();
float* running_min_data = running_min.data_ptr<float>();
float* running_max_data = running_max.data_ptr<float>();
cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
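// per_row_fq keeps one running min/max per row (channel) of x via _aminmax over dim 1;
// otherwise a single scalar min/max is tracked for the whole tensor.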
if (per_row_fq) {
std::tie(x_min, x_max) = at::_aminmax(x, 1);
float* x_min_data = x_min.data_ptr<float>();
float* x_max_data = x_max.data_ptr<float>();
int num_threads = std::min(size, (int64_t)512);
const uint64_t num_blocks = cuda::ATenCeilDiv<uint64_t>(size, num_threads);
// Moving Average Min/Max observer for activations
MovingAverageMinMax<<<num_blocks, num_threads, 0, cuda_stream>>>(
observer_on_data,
x_min_data,
x_max_data,
running_min_data,
running_max_data,
averaging_const,
size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
std::tie(x_min, x_max) = at::_aminmax(x);
float* x_min_data = x_min.data_ptr<float>();
float* x_max_data = x_max.data_ptr<float>();
// Moving Average Min/Max observer for activations
MovingAverageMinMax<<<1, 1, 0, cuda_stream>>>(
observer_on_data,
x_min_data,
x_max_data,
running_min_data,
running_max_data,
averaging_const,
1 /*size*/);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
void _calc_moving_avg_qparams_helper(
const at::Tensor& x,
const at::Tensor fake_quant_on,
at::Tensor& running_min,
at::Tensor& running_max,
float* scale_ptr,
int32_t* zp_ptr,
int32_t qmin,
int32_t qmax,
bool symmetric_quant,
const int64_t size,
bool per_row_fq = false) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(x.get_device());
cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
int64_t* fake_quant_on_data = fake_quant_on.data_ptr<int64_t>();
if (per_row_fq) {
float* running_min_data = running_min.data_ptr<float>();
float* running_max_data = running_max.data_ptr<float>();
int num_threads = std::min(size, (int64_t)512);
const uint64_t num_blocks = cuda::ATenCeilDiv<uint64_t>(size, num_threads);
ChooseQuantizationParamsKernelImpl<<<num_blocks, num_threads, 0, cuda_stream>>>(
fake_quant_on_data,
running_min_data,
running_max_data,
qmin,
qmax,
size,
symmetric_quant,
scale_ptr,
zp_ptr);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
float* running_min_data = running_min.data_ptr<float>();
float* running_max_data = running_max.data_ptr<float>();
ChooseQuantizationParamsKernelImpl<<<1, 1, 0, cuda_stream>>>(
fake_quant_on_data,
running_min_data,
running_max_data,
qmin,
qmax,
1, // size
symmetric_quant, // preserve_sparsity
scale_ptr,
zp_ptr);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
} // namespace
std::tuple<at::Tensor, at::Tensor> fused_moving_avg_obs_fake_quant_cuda(
const at::Tensor& x,
const at::Tensor& observer_on,
const at::Tensor& fake_quant_on,
at::Tensor& running_min,
at::Tensor& running_max,
at::Tensor& scale,
at::Tensor& zero_point,
const double averaging_const,
const int64_t qmin,
const int64_t qmax,
const int64_t ch_axis,
bool per_row_fq,
bool symmetric_quant) {
const auto x_contig = x.contiguous();
int64_t size = per_row_fq ? x.size(0) : 1;
_calculate_moving_average(
x_contig,
observer_on,
running_min,
running_max,
averaging_const,
size,
per_row_fq);
float* scale_ptr = scale.data_ptr<float>();
int32_t* zp_ptr = zero_point.data_ptr<int32_t>();
_calc_moving_avg_qparams_helper(
x_contig,
fake_quant_on,
running_min,
running_max,
scale_ptr,
zp_ptr,
qmin,
qmax,
symmetric_quant,
size,
per_row_fq);
if (per_row_fq) {
if (fake_quant_on.item().toInt()) {
return at::fake_quantize_per_channel_affine_cachemask(
x, scale, zero_point, 0, qmin, qmax);
} else {
auto mask = at::ones_like(x, at::kBool, MemoryFormat::Preserve);
return std::make_tuple(x.clone(), mask);
}
} else {
return at::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(
x, scale, zero_point, fake_quant_on, qmin, qmax);
}
}
} // namespace native
} // namespace at
|
07bbdbd9bb20c632eee8a82df0151082414759e6.hip | // !!! This is a file automatically generated by hipify!!!
/*-----------
*
* atomics.cu
*
* This is the source file of antomic operations.
*
* This kernel is based on CUDA samples. simpleAtomicIntrinsics.cuh
*
* streamsOptBenchmark/atomics.cu
*
* By Hao Li
*
*------------
*/
#include <time.h>
#include <hip/hip_runtime.h>
// #include "functions_hip.cuh"
__global__ void atomicFunc(float *g_idata, float *g_odata)
{
for(int l = 0; l < 100000; l++)
{
// access thread id
const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Test various atomic instructions
// Arithmetic atomic instructions
int i = 0;
while(g_odata[i] != NULL)
{
g_odata[i] = g_idata[i];
// Atomic addition
atomicAdd(&g_odata[i], 10.0);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic subtraction (final should be 0)
atomicSub((int *)&g_odata[i], 10);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic exchange
atomicExch(&g_odata[i], (float)tid);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic maximum
atomicMax((int *)&g_odata[i], tid);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic minimum
atomicMin((int *)&g_odata[i], tid);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic increment (modulo 17+1)
atomicInc((unsigned int *)&g_odata[i], 17);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic decrement
atomicDec((unsigned int *)&g_odata[i], 137);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic compare-and-swap
atomicCAS((int *)&g_odata[i], tid-1, tid);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Bitwise atomic instructions
// Atomic AND
atomicAnd((int *)&g_odata[i], 2*tid+7);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic OR
atomicOr((int *)&g_odata[i], 1 << tid);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic XOR
atomicXor((int *)&g_odata[i], tid);
i++;
}
}
}
// int main(int argc, char **argv)
// {
// unsigned int numThreads = 256;
// unsigned int numBlocks = 64;
// unsigned int numData = 1000000;
// unsigned int memSize = sizeof(int) * numData;
// //allocate mem for the result on host side
// int *hOData = (int *) malloc(memSize);
// //initalize the memory
// for (unsigned int i = 0; i < numData; i++)
// hOData[i] = 0;
// //To make the AND and XOR tests generate something other than 0...
// hOData[8] = hOData[10] = 0xff;
// // allocate device memory for result
// float *dOData;
// hipMalloc((void **) &dOData, sizeof(float) * memSize);
// // copy host memory to device to initialize to zers
// hipMemcpy(dOData, hOData, sizeof(float) * memSize, hipMemcpyHostToDevice);
// hipEvent_t start;
// error = hipEventCreate(&start);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// hipEvent_t stop;
// error = hipEventCreate(&stop);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// // Record the start event
// error = hipEventRecord(start, NULL);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// // execute the kernel
// atomicFunc<<<numBlocks, numThreads>>>(dOData);
// // Record the stop event
// error = hipEventRecord(stop, NULL);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// // Wait for the stop event to complete
// error = hipEventSynchronize(stop);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// float msecTotal = 0.0f;
// error = hipEventElapsedTime(&msecTotal, start, stop);
// printf("Running Time: %f ms\n", msecTotal);
// hipMemcpy(hOData, dOData, memSize, hipMemcpyDeviceToHost);
// free(hOData);
// hipFree(dOData);
// return 0;
// }
| 07bbdbd9bb20c632eee8a82df0151082414759e6.cu | /*-----------
*
* atomics.cu
*
* This is the source file of antomic operations.
*
* This kernel is based on CUDA samples. simpleAtomicIntrinsics.cuh
*
* streamsOptBenchmark/atomics.cu
*
* By Hao Li
*
*------------
*/
#include <time.h>
#include <cuda_runtime.h>
// #include "functions.cuh"
__global__ void atomicFunc(float *g_idata, float *g_odata)
{
for(int l = 0; l < 100000; l++)
{
// access thread id
const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Test various atomic instructions
// Arithmetic atomic instructions
int i = 0;
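// NULL here is just 0, so each comparison below checks the float element against 0.0f.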
while(g_odata[i] != NULL)
{
g_odata[i] = g_idata[i];
// Atomic addition
atomicAdd(&g_odata[i], 10.0);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic subtraction (final should be 0)
atomicSub((int *)&g_odata[i], 10);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic exchange
atomicExch(&g_odata[i], (float)tid);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic maximum
atomicMax((int *)&g_odata[i], tid);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic minimum
atomicMin((int *)&g_odata[i], tid);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic increment (modulo 17+1)
atomicInc((unsigned int *)&g_odata[i], 17);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic decrement
atomicDec((unsigned int *)&g_odata[i], 137);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic compare-and-swap
atomicCAS((int *)&g_odata[i], tid-1, tid);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Bitwise atomic instructions
// Atomic AND
atomicAnd((int *)&g_odata[i], 2*tid+7);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic OR
atomicOr((int *)&g_odata[i], 1 << tid);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic XOR
atomicXor((int *)&g_odata[i], tid);
i++;
}
}
}
// int main(int argc, char **argv)
// {
// unsigned int numThreads = 256;
// unsigned int numBlocks = 64;
// unsigned int numData = 1000000;
// unsigned int memSize = sizeof(int) * numData;
// //allocate mem for the result on host side
// int *hOData = (int *) malloc(memSize);
// //initalize the memory
// for (unsigned int i = 0; i < numData; i++)
// hOData[i] = 0;
// //To make the AND and XOR tests generate something other than 0...
// hOData[8] = hOData[10] = 0xff;
// // allocate device memory for result
// float *dOData;
// cudaMalloc((void **) &dOData, sizeof(float) * memSize);
// // copy host memory to device to initialize to zers
// cudaMemcpy(dOData, hOData, sizeof(float) * memSize, cudaMemcpyHostToDevice);
// cudaEvent_t start;
// error = cudaEventCreate(&start);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// cudaEvent_t stop;
// error = cudaEventCreate(&stop);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// // Record the start event
// error = cudaEventRecord(start, NULL);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// // execute the kernel
// atomicFunc<<<numBlocks, numThreads>>>(dOData);
// // Record the stop event
// error = cudaEventRecord(stop, NULL);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// // Wait for the stop event to complete
// error = cudaEventSynchronize(stop);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// float msecTotal = 0.0f;
// error = cudaEventElapsedTime(&msecTotal, start, stop);
// printf("Running Time: %f ms\n", msecTotal);
// cudaMemcpy(hOData, dOData, memSize, cudaMemcpyDeviceToHost);
// free(hOData);
// cudaFree(dOData);
// return 0;
// }
|
88fa5f677ee0adf2fe5b56817f09747533c2c69e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <string>
#include <numeric>
#include "../../comm/csr.cuh"
#include "pr_cta_discrete.cuh"
#include "validation.cuh"
#include "../../util/time.cuh"
#define FETCH_SIZE (FETCHSIZE)
#define BLOCK_SIZE (512)
using namespace std;
__global__ void print(float *array, int size)
{
if(TID ==0)
{
for(int i=0; i<size; i++)
printf("%d: %f, ", i, array[i]);
printf("\n");
}
}
struct prof_entry {
uint32_t start;
uint32_t end;
int stream_id;
uint32_t size;
prof_entry() {}
prof_entry(uint32_t start_, uint32_t end_, int id, uint32_t size_){
start =start_;
end = end_;
stream_id = id;
size = size_;
}
};
int main(int argc, char *argv[])
{
char *input_file = NULL;
float lambda=0.85;
float epsilon=0.01;
bool start_from_0 = false;
bool write_profile = false;
uint32_t min_iter = 300;
bool check =false;
int rounds=1;
if(argc == 1)
{
cout<< "./test -l <lambda=0.85> -f <file> -p <epsilon=0.005> -s <file vertex ID start from 0?=false> -w <write profile?=false> -i <min iteration for queue=2500>\n";
exit(0);
}
if(argc > 1)
for(int i=1; i<argc; i++) {
if(string(argv[i]) == "-l")
lambda = stof(argv[i+1]);
else if(string(argv[i]) == "-f")
input_file = argv[i+1];
else if(string(argv[i]) == "-p")
epsilon = stof(argv[i+1]);
else if(string(argv[i]) == "-s")
start_from_0 = stoi(argv[i+1]);
else if(string(argv[i]) == "-w")
write_profile = stoi(argv[i+1]);
else if(string(argv[i]) == "-i")
min_iter = stoi(argv[i+1]);
else if(string(argv[i]) == "-check")
check = stoi(argv[i+1]);
else if(string(argv[i]) == "-rounds")
rounds = stoi(argv[i+1]);
}
if(input_file == NULL)
{
cout << "input file is needed\n";
cout<< "./test -l <lambda=0.85> -f <file> -p <epsilon=0.005> -s <file vertex ID start from 0?=false> -w <write profile?=false> -i <min iteration for queue=2500> \n";
exit(0);
}
int numBlock = 80;
int numThread = 1024;
std::cout << "file: "<< input_file << " lambda: " << lambda << " epsilon: "<< epsilon << " start from 0: " << start_from_0 << " write profile file: "<< write_profile << " Fetch size: " << FETCH_SIZE << " Block size: "<< BLOCK_SIZE << " min iter "<< min_iter<<std::endl;
std::string str_file(input_file);
Csr<int, int> csr;
if(str_file.substr(str_file.length()-4) == ".csr")
{
csr.ReadFromBinary(input_file);
}
else { std::cout << "file type not supported\n"; exit(1);}
csr.PrintCsr();
GpuTimer timer;
prof_entry *profile_output;
uint32_t profile_size = 3000000;
hipEvent_t *event_start;
hipEvent_t *event_stop;
hipEvent_t event_begin[32];
int event_counter[32];
if(write_profile)
{
profile_output = (prof_entry *)malloc(sizeof(prof_entry)*profile_size);
event_start = (hipEvent_t *)malloc(sizeof(hipEvent_t)*32*1000);
event_stop = (hipEvent_t *)malloc(sizeof(hipEvent_t)*32*1000);
for(int i=0; i<32*1000; i++) {
hipEventCreate(event_start+i);
hipEventCreate(event_stop+i);
if(i<32) {
event_counter[i]=0;
hipEventCreate(event_begin+i);
}
}
}
PageRank<int, int, float> pr(csr, lambda, epsilon, min_iter);
hipStream_t streams[33];
for(int i=0; i<33; i++)
CUDA_CHECK(hipStreamCreateWithFlags(streams+i, hipStreamNonBlocking));
if(write_profile)
for(int i=0; i<32; i++)
CUDA_CHECK(hipEventRecord(event_begin[i], streams[i]));
int write_file_log_iter=0;
std::vector<float> times;
std::vector<uint64_t> workloads;
std::vector<uint64_t> launched_kernels;
#ifdef COMPUTE_EDGES
std::vector<uint64_t> workload_edges;
#endif
for(int iteration=0; iteration < rounds; iteration++) {
pr.reset();
pr.PrInit(numBlock, numThread);
//host::PrInitValid<int, int, float>(csr, pr);
//pr.worklists.print();
uint32_t end_host, start_host;
int end_iter = min_iter;
int iter = 0;
int log_iter = 0;
start_host = 0;
timer.Start();
while(iter < end_iter)
{
CUDA_CHECK(hipMemcpyAsync(&end_host, (uint32_t *)pr.worklists.end, sizeof(uint32_t), hipMemcpyDeviceToHost, streams[32]));
CUDA_CHECK(hipStreamSynchronize(streams[32]));
int size = end_host-start_host;
int stream_id = log_iter%32;
if(size > 0)
//if(size > 256|| (iter > 5 && size > 0))
{
if(write_profile)
hipEventRecord(event_start[stream_id*1000+event_counter[stream_id]], streams[stream_id]);
pr.PrStart<FETCH_SIZE, BLOCK_SIZE>(start_host, size, 0, streams[stream_id]);
if(write_profile) {
profile_output[log_iter] = prof_entry(start_host, end_host, stream_id, size);
hipEventRecord(event_stop[stream_id*1000+event_counter[stream_id]], streams[stream_id]);
event_counter[stream_id]++;
}
log_iter++;
start_host = end_host;
iter = 0;
}
else {
if(iter > end_iter/2)
hipLaunchKernelGGL(( checkEnd), dim3(1),dim3(32),0, streams[32], pr.worklists);
iter++;
}
}
CUDA_CHECK(hipDeviceSynchronize());
timer.Stop();
float elapsed = timer.ElapsedMillis();
CUDA_CHECK(hipMemcpy((uint32_t *)pr.worklists.start, &start_host, sizeof(uint32_t), hipMemcpyHostToDevice));
pr.worklists.print();
std::cout << "Time: " << elapsed << std::endl;
std::cout << "workload vertices: " << start_host << std::endl;
std::cout << "kernels launched: "<< log_iter << std::endl;
times.push_back(elapsed);
workloads.push_back(start_host);
launched_kernels.push_back(log_iter);
#ifdef COMPUTE_EDGES
int *host_queue, *host_workload;
host_queue = (int *)malloc(sizeof(int)*start_host);
host_workload = (int *)malloc(sizeof(int)*start_host);
CUDA_CHECK(hipMemcpy(host_queue, pr.worklists.queue, sizeof(int)*start_host, hipMemcpyDeviceToHost));
uint32_t totalworkload = 0;
for(int i=0; i<start_host; i++) {
int node_id = host_queue[i];
host_workload[i] = csr.row_offset[node_id+1]-csr.row_offset[node_id];
totalworkload = totalworkload + host_workload[i];
}
workload_edges.push_back(totalworkload);
std::cout << "workload edges: "<< totalworkload << std::endl;
#endif
write_file_log_iter = log_iter;
}
std::cout << "Ave. Time: "<< std::accumulate(times.begin(), times.end(), float(0.0))/times.size() << std::endl;
std::cout << "Ave. Workload(vertices): "<< std::accumulate(workloads.begin(), workloads.end(), (uint64_t)0)/workloads.size() << std::endl;
std::cout << "Ave. kernels launched: "<< std::accumulate(launched_kernels.begin(), launched_kernels.end(), 0)/launched_kernels.size()<< std::endl;
#ifdef COMPUTE_EDGES
std::cout << "Ave. Workload(edges): "<< std::accumulate(workload_edges.begin(), workload_edges.end(), (uint64_t)0)/workload_edges.size()<< std::endl;
#endif
if(check)
host::PrValid<int, int, float>(csr, pr);
if(write_profile) {
float * time_interval[32];
float * time_accu[32];
int max_len=0;
for(int i=0; i<32; i++) {
time_interval[i] = (float *)malloc(sizeof(float)*event_counter[i]);
time_accu[i] = (float *)malloc(sizeof(float)*event_counter[i]);
max_len = max(event_counter[i], max_len);
}
for(int stream_id=0; stream_id < 32; stream_id++)
for(int i=0; i<event_counter[stream_id]; i++) {
CUDA_CHECK(hipEventElapsedTime(time_interval[stream_id]+i, event_start[stream_id*1000+i], event_stop[stream_id*1000+i]))
CUDA_CHECK(hipEventElapsedTime(time_accu[stream_id]+i, event_begin[stream_id], event_stop[stream_id*1000+i]))
}
string file_name_interval = str_file.substr(25, str_file.length()-4-25)+"_interval_time.txt";
string file_name_accu = str_file.substr(25, str_file.length()-4-25)+"_accu_time.txt";
string file_name_size = str_file.substr(25, str_file.length()-4-25)+"_kernel_info.txt";
string file_name_stream_kernel_size = str_file.substr(25, str_file.length()-4-25)+"_stream_size.txt";
std::cout << "Writing to files: "<< file_name_interval << "\t" << file_name_accu << "\t" << file_name_size << "\t"<< file_name_stream_kernel_size << std::endl;
int * kernel_size = (int *)malloc(sizeof(int)*max_len);
int local_size = 0;
ofstream myfile4 (file_name_stream_kernel_size);
if(myfile4.is_open()) {
for(int stream_id =0; stream_id < 32; stream_id++)
{
for(int i=0; i<write_file_log_iter; i++)
if(profile_output[i].stream_id==stream_id) {
kernel_size[local_size] = profile_output[i].size;
local_size++;
}
std::cout << "stream "<< stream_id << "\t" << local_size << std::endl;
for(int i=0; i<local_size; i++)
myfile4 << kernel_size[i] << "\t";
myfile4 << "\n\n";
local_size = 0;
}
myfile4.close();
}
ofstream myfile (file_name_interval);
if (myfile.is_open())
{
for(int stream_id=0; stream_id<32; stream_id++) {
for(int count = 0; count < event_counter[stream_id]; count++){
myfile << time_interval[stream_id][count] << "\t";
}
myfile << "\n\n";
}
myfile.close();
}
ofstream myfile2 (file_name_accu);
if (myfile2.is_open())
{
for(int stream_id=0; stream_id<32; stream_id++) {
for(int count = 0; count < event_counter[stream_id]; count++){
myfile2 << time_accu[stream_id][count] << "\t";
}
myfile2 << "\n\n";
}
myfile2.close();
}
ofstream myfile3 (file_name_size);
if(myfile3.is_open())
{
for(int count = 0; count < write_file_log_iter; count++)
myfile3 << profile_output[count].start << "\t" << profile_output[count].end << "\t"<< profile_output[count].size << "\t" << profile_output[count].stream_id << "\n";
myfile3.close();
}
}
csr.release();
pr.release();
return 0;
}
| 88fa5f677ee0adf2fe5b56817f09747533c2c69e.cu | #include <iostream>
#include <string>
#include <numeric>
#include "../../comm/csr.cuh"
#include "pr_cta_discrete.cuh"
#include "validation.cuh"
#include "../../util/time.cuh"
#define FETCH_SIZE (FETCHSIZE)
#define BLOCK_SIZE (512)
using namespace std;
__global__ void print(float *array, int size)
{
if(TID ==0)
{
for(int i=0; i<size; i++)
printf("%d: %f, ", i, array[i]);
printf("\n");
}
}
struct prof_entry {
uint32_t start;
uint32_t end;
int stream_id;
uint32_t size;
prof_entry() {}
prof_entry(uint32_t start_, uint32_t end_, int id, uint32_t size_){
start =start_;
end = end_;
stream_id = id;
size = size_;
}
};
int main(int argc, char *argv[])
{
char *input_file = NULL;
float lambda=0.85;
float epsilon=0.01;
bool start_from_0 = false;
bool write_profile = false;
uint32_t min_iter = 300;
bool check =false;
int rounds=1;
if(argc == 1)
{
cout<< "./test -l <lambda=0.85> -f <file> -p <epsilon=0.005> -s <file vertex ID start from 0?=false> -w <write profile?=false> -i <min iteration for queue=2500>\n";
exit(0);
}
if(argc > 1)
for(int i=1; i<argc; i++) {
if(string(argv[i]) == "-l")
lambda = stof(argv[i+1]);
else if(string(argv[i]) == "-f")
input_file = argv[i+1];
else if(string(argv[i]) == "-p")
epsilon = stof(argv[i+1]);
else if(string(argv[i]) == "-s")
start_from_0 = stoi(argv[i+1]);
else if(string(argv[i]) == "-w")
write_profile = stoi(argv[i+1]);
else if(string(argv[i]) == "-i")
min_iter = stoi(argv[i+1]);
else if(string(argv[i]) == "-check")
check = stoi(argv[i+1]);
else if(string(argv[i]) == "-rounds")
rounds = stoi(argv[i+1]);
}
if(input_file == NULL)
{
cout << "input file is needed\n";
cout<< "./test -l <lambda=0.85> -f <file> -p <epsilon=0.005> -s <file vertex ID start from 0?=false> -w <write profile?=false> -i <min iteration for queue=2500> \n";
exit(0);
}
int numBlock = 80;
int numThread = 1024;
std::cout << "file: "<< input_file << " lambda: " << lambda << " epsilon: "<< epsilon << " start from 0: " << start_from_0 << " write profile file: "<< write_profile << " Fetch size: " << FETCH_SIZE << " Block size: "<< BLOCK_SIZE << " min iter "<< min_iter<<std::endl;
std::string str_file(input_file);
Csr<int, int> csr;
if(str_file.substr(str_file.length()-4) == ".csr")
{
csr.ReadFromBinary(input_file);
}
else { std::cout << "file type not supported\n"; exit(1);}
csr.PrintCsr();
GpuTimer timer;
prof_entry *profile_output;
uint32_t profile_size = 3000000;
cudaEvent_t *event_start;
cudaEvent_t *event_stop;
cudaEvent_t event_begin[32];
int event_counter[32];
if(write_profile)
{
profile_output = (prof_entry *)malloc(sizeof(prof_entry)*profile_size);
event_start = (cudaEvent_t *)malloc(sizeof(cudaEvent_t)*32*1000);
event_stop = (cudaEvent_t *)malloc(sizeof(cudaEvent_t)*32*1000);
for(int i=0; i<32*1000; i++) {
cudaEventCreate(event_start+i);
cudaEventCreate(event_stop+i);
if(i<32) {
event_counter[i]=0;
cudaEventCreate(event_begin+i);
}
}
}
PageRank<int, int, float> pr(csr, lambda, epsilon, min_iter);
cudaStream_t streams[33];
for(int i=0; i<33; i++)
CUDA_CHECK(cudaStreamCreateWithFlags(streams+i, cudaStreamNonBlocking));
if(write_profile)
for(int i=0; i<32; i++)
CUDA_CHECK(cudaEventRecord(event_begin[i], streams[i]));
int write_file_log_iter=0;
std::vector<float> times;
std::vector<uint64_t> workloads;
std::vector<uint64_t> launched_kernels;
#ifdef COMPUTE_EDGES
std::vector<uint64_t> workload_edges;
#endif
for(int iteration=0; iteration < rounds; iteration++) {
pr.reset();
pr.PrInit(numBlock, numThread);
//host::PrInitValid<int, int, float>(csr, pr);
//pr.worklists.print();
uint32_t end_host, start_host;
int end_iter = min_iter;
int iter = 0;
int log_iter = 0;
start_host = 0;
timer.Start();
while(iter < end_iter)
{
CUDA_CHECK(cudaMemcpyAsync(&end_host, (uint32_t *)pr.worklists.end, sizeof(uint32_t), cudaMemcpyDeviceToHost, streams[32]));
CUDA_CHECK(cudaStreamSynchronize(streams[32]));
int size = end_host-start_host;
int stream_id = log_iter%32;
if(size > 0)
//if(size > 256|| (iter > 5 && size > 0))
{
if(write_profile)
cudaEventRecord(event_start[stream_id*1000+event_counter[stream_id]], streams[stream_id]);
pr.PrStart<FETCH_SIZE, BLOCK_SIZE>(start_host, size, 0, streams[stream_id]);
if(write_profile) {
profile_output[log_iter] = prof_entry(start_host, end_host, stream_id, size);
cudaEventRecord(event_stop[stream_id*1000+event_counter[stream_id]], streams[stream_id]);
event_counter[stream_id]++;
}
log_iter++;
start_host = end_host;
iter = 0;
}
else {
if(iter > end_iter/2)
checkEnd<<<1,32,0, streams[32]>>>(pr.worklists);
iter++;
}
}
CUDA_CHECK(cudaDeviceSynchronize());
timer.Stop();
float elapsed = timer.ElapsedMillis();
CUDA_CHECK(cudaMemcpy((uint32_t *)pr.worklists.start, &start_host, sizeof(uint32_t), cudaMemcpyHostToDevice));
pr.worklists.print();
std::cout << "Time: " << elapsed << std::endl;
std::cout << "workload vertices: " << start_host << std::endl;
std::cout << "kernels launched: "<< log_iter << std::endl;
times.push_back(elapsed);
workloads.push_back(start_host);
launched_kernels.push_back(log_iter);
#ifdef COMPUTE_EDGES
int *host_queue, *host_workload;
host_queue = (int *)malloc(sizeof(int)*start_host);
host_workload = (int *)malloc(sizeof(int)*start_host);
CUDA_CHECK(cudaMemcpy(host_queue, pr.worklists.queue, sizeof(int)*start_host, cudaMemcpyDeviceToHost));
uint32_t totalworkload = 0;
for(int i=0; i<start_host; i++) {
int node_id = host_queue[i];
host_workload[i] = csr.row_offset[node_id+1]-csr.row_offset[node_id];
totalworkload = totalworkload + host_workload[i];
}
workload_edges.push_back(totalworkload);
std::cout << "workload edges: "<< totalworkload << std::endl;
#endif
write_file_log_iter = log_iter;
}
std::cout << "Ave. Time: "<< std::accumulate(times.begin(), times.end(), float(0.0))/times.size() << std::endl;
std::cout << "Ave. Workload(vertices): "<< std::accumulate(workloads.begin(), workloads.end(), (uint64_t)0)/workloads.size() << std::endl;
std::cout << "Ave. kernels launched: "<< std::accumulate(launched_kernels.begin(), launched_kernels.end(), 0)/launched_kernels.size()<< std::endl;
#ifdef COMPUTE_EDGES
std::cout << "Ave. Workload(edges): "<< std::accumulate(workload_edges.begin(), workload_edges.end(), (uint64_t)0)/workload_edges.size()<< std::endl;
#endif
if(check)
host::PrValid<int, int, float>(csr, pr);
if(write_profile) {
float * time_interval[32];
float * time_accu[32];
int max_len=0;
for(int i=0; i<32; i++) {
time_interval[i] = (float *)malloc(sizeof(float)*event_counter[i]);
time_accu[i] = (float *)malloc(sizeof(float)*event_counter[i]);
max_len = max(event_counter[i], max_len);
}
for(int stream_id=0; stream_id < 32; stream_id++)
for(int i=0; i<event_counter[stream_id]; i++) {
CUDA_CHECK(cudaEventElapsedTime(time_interval[stream_id]+i, event_start[stream_id*1000+i], event_stop[stream_id*1000+i]))
CUDA_CHECK(cudaEventElapsedTime(time_accu[stream_id]+i, event_begin[stream_id], event_stop[stream_id*1000+i]))
}
string file_name_interval = str_file.substr(25, str_file.length()-4-25)+"_interval_time.txt";
string file_name_accu = str_file.substr(25, str_file.length()-4-25)+"_accu_time.txt";
string file_name_size = str_file.substr(25, str_file.length()-4-25)+"_kernel_info.txt";
string file_name_stream_kernel_size = str_file.substr(25, str_file.length()-4-25)+"_stream_size.txt";
std::cout << "Writing to files: "<< file_name_interval << "\t" << file_name_accu << "\t" << file_name_size << "\t"<< file_name_stream_kernel_size << std::endl;
int * kernel_size = (int *)malloc(sizeof(int)*max_len);
int local_size = 0;
ofstream myfile4 (file_name_stream_kernel_size);
if(myfile4.is_open()) {
for(int stream_id =0; stream_id < 32; stream_id++)
{
for(int i=0; i<write_file_log_iter; i++)
if(profile_output[i].stream_id==stream_id) {
kernel_size[local_size] = profile_output[i].size;
local_size++;
}
std::cout << "stream "<< stream_id << "\t" << local_size << std::endl;
for(int i=0; i<local_size; i++)
myfile4 << kernel_size[i] << "\t";
myfile4 << "\n\n";
local_size = 0;
}
myfile4.close();
}
ofstream myfile (file_name_interval);
if (myfile.is_open())
{
for(int stream_id=0; stream_id<32; stream_id++) {
for(int count = 0; count < event_counter[stream_id]; count++){
myfile << time_interval[stream_id][count] << "\t";
}
myfile << "\n\n";
}
myfile.close();
}
ofstream myfile2 (file_name_accu);
if (myfile2.is_open())
{
for(int stream_id=0; stream_id<32; stream_id++) {
for(int count = 0; count < event_counter[stream_id]; count++){
myfile2 << time_accu[stream_id][count] << "\t";
}
myfile2 << "\n\n";
}
myfile2.close();
}
ofstream myfile3 (file_name_size);
if(myfile3.is_open())
{
for(int count = 0; count < write_file_log_iter; count++)
myfile3 << profile_output[count].start << "\t" << profile_output[count].end << "\t"<< profile_output[count].size << "\t" << profile_output[count].stream_id << "\n";
myfile3.close();
}
}
csr.release();
pr.release();
return 0;
}
|
441141556d4db0435070d8042250fb6c572cef8c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
__global__ void print_threadIds() {
printf("blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d , blockDim.x : %d, blockDim.y : %d, gridDim.x : %d, gridDim.y : %d \n",
blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y,
gridDim.x, gridDim.y);
}
int main(void)
{
int nx, ny;
nx = 16;
ny = 16;
dim3 block(8, 8);
dim3 grid(nx / block.x, ny / block.y);
hipLaunchKernelGGL(( print_threadIds) , dim3(grid), dim3(block), 0, 0, );
hipDeviceSynchronize();
hipDeviceReset();
return 0;
}
| 441141556d4db0435070d8042250fb6c572cef8c.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
__global__ void print_threadIds() {
printf("blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d , blockDim.x : %d, blockDim.y : %d, gridDim.x : %d, gridDim.y : %d \n",
blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y,
gridDim.x, gridDim.y);
}
int main(void)
{
int nx, ny;
nx = 16;
ny = 16;
dim3 block(8, 8);
dim3 grid(nx / block.x, ny / block.y);
print_threadIds <<<grid, block>>> ();
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
|
b9474ae8af41be7c11d79e21022be17e86f1e84f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//double* x, * devx, * val, * gra, * r, * graMax;
//double* hes_value;
////int size;
//int* pos_x, * pos_y;
//int* csr;
double* x;
//thrust::pair<int, int> *device_pos;
//typedef double (*fp)(double);
//typedef void (*val_fp)(double*, double*, int);
//typedef void (*valsum_fp)(double*, double*,int);
//typedef void (*gra_fp)(double*, double*, int);
//typedef void (*gramin_fp)(double*, double*,int);
//typedef void (*hes_fp)( double*, thrust::pair<int, int>*, double*, int);
//typedef void (*print_fp)(double*, int);
int numSMs;
__device__ void wait() {
for (int i = 1; i <= 10000000; i++);
}
__device__ double sqr(double x) {
return x * x;
}
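// Grid-stride kernel: for each element i (with periodic wrap-around of the
// neighbor indices) computes val[i] = sin^2(x[i-1]*x[i]) * sin^2(x[i]*x[i+1]).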
__global__ void calculate_val(double* devx, double* val, int size) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < size;
index += blockDim.x * gridDim.x)
{
int pre = index - 1;
if (pre < 0) pre += size;
int next = index + 1;
if (next >= size) next -= size;
val[index] = sqr(sin(devx[pre] * devx[index])) * sqr(sin(devx[next] * devx[index]));
}
// wait();
} | b9474ae8af41be7c11d79e21022be17e86f1e84f.cu | #include "includes.h"
//double* x, * devx, * val, * gra, * r, * graMax;
//double* hes_value;
////int size;
//int* pos_x, * pos_y;
//int* csr;
double* x;
//thrust::pair<int, int> *device_pos;
//typedef double (*fp)(double);
//typedef void (*val_fp)(double*, double*, int);
//typedef void (*valsum_fp)(double*, double*,int);
//typedef void (*gra_fp)(double*, double*, int);
//typedef void (*gramin_fp)(double*, double*,int);
//typedef void (*hes_fp)( double*, thrust::pair<int, int>*, double*, int);
//typedef void (*print_fp)(double*, int);
int numSMs;
__device__ void wait() {
for (int i = 1; i <= 10000000; i++);
}
__device__ double sqr(double x) {
return x * x;
}
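// Grid-stride kernel: for each element i (with periodic wrap-around of the
// neighbor indices) computes val[i] = sin^2(x[i-1]*x[i]) * sin^2(x[i]*x[i+1]).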
__global__ void calculate_val(double* devx, double* val, int size) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < size;
index += blockDim.x * gridDim.x)
{
int pre = index - 1;
if (pre < 0) pre += size;
int next = index + 1;
if (next >= size) next -= size;
val[index] = sqr(sin(devx[pre] * devx[index])) * sqr(sin(devx[next] * devx[index]));
}
// wait();
} |
b21f24be680539c220cb2247a5780498d0de4ffb.hip | // !!! This is a file automatically generated by hipify!!!
/// LSU EE 7722 GPU Microarchitecture
//
/// Simple, Self-Contained, One-File CUDA Example
/// How to Compile from the Command Line
//
// nvcc -o addr-space addr-space.cu -O3 -Xcompiler -Wall
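//
// (Note for this HIP-translated copy: the analogous build would likely be
// something like `hipcc -o addr-space addr-space.cu -O3 -Wall`; the exact
// command name and flags are an assumption, not part of the original notes.)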
/// Documentation
//
// CUDA: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html
// C++: http://en.cppreference.com/w/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <vector>
using namespace std;
#if 0
/// Address Spaces
// - Global
// - Constant
// - Shared
// - Local
/// Global Address Space
/// Size: 32- or 64-bit address space. Matches size used by host OS, usually 64 bits.
/// Scope:
//
// Single space shared by all threads.
// Readable and writeable by all threads and host.
// Difficult to use for sharing of data.
/// Latency, Bandwidth
//
// Cases
// - Off-Chip Access. (Most common.)
// - L2 Cache Hit. (Fairly common. Possible through Volta [CC 7.x])
// - L1 Cache Hit. (Depends on CC. Possible in CC 2.x, 7.0)
// - Texture Cache Hit. (Depends on CC.)
// Off-Chip Access
// - Latency hundreds of cycles. Course default: 400 cycles.
// - Limited by off-chip bandwidth. 400 GB/s for high-end devices. (2018)
//
/// Declaration
//
// In global scope:
//
// :Syntax: __device__ TYPE VARNAME;
// :Sample: __device__ double mydata[1000];
//
// Note: Sample above shows *static* allocation of global space.
// In most cases dynamic allocation is used.
/// Dynamic Allocation on Host
//
// :Syntax: hipMalloc( PTR, AMT_CHARS );
// Allocates AMT_CHARS bytes of storage in global space,
// and puts address of that storage in PTR.
//
/// Transfer Between CPU and GPU (Either Direction)
//
// :Syntax: hipMemcpy( DST_PTR, SRC_PTR, SZ_CHARS, hipMemcpyHostToDevice );
// Copies SZ_CHARS bytes
// from the CPU (host) starting at address SRC_PTR
// to the GPU (device) starting at address DST_PTR, where
// SRC_PTR is an address in the CPU address space and
// DST_PTR is an address in the GPU global address space.
//
// :Syntax: hipMemcpy( DST_PTR, SRC_PTR, SZ_CHARS, hipMemcpyDeviceToHost );
// Copies SZ_CHARS bytes
// from the GPU (device) starting at address SRC_PTR
// to the CPU (host) starting at address DST_PTR, where
// SRC_PTR is an address in the GPU global address space and
// DST_PTR is an address in the CPU address space.
/// Short Example
const int size = 1024 * 32;
__device__ float a[size];
__device__ float b[size];
__global__ void thread_main(float *x)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
a[idx] = idx + blockIdx.x;
b[idx] = float(blockIdx.x) / (idx+1);
x[idx] = a[idx] + b[idx];
}
__host__ int main(int argc, char** argv)
{
void *x_dev;
hipMalloc( &x_dev, size * sizeof(x_dev[0]) );
hipLaunchKernelGGL(( thread_main), dim3(32),dim3(1024), 0, 0, x_dev);
// Copy data from GPU to CPU.
//
float x[size];
hipMemcpy( x, x_dev, size * sizeof(x[0]), hipMemcpyDeviceToHost );
}
/// Constant Address Space
/// Size 64 kiB ( 16-bit address space )
/// Scope
//
// Single space shared by all threads.
// Readable by all threads.
// Cannot be written by threads.
// Writeable from CPU.
/// Declaration
//
// In global scope:
//
// :Syntax: __constant__ TYPE VARNAME;
// :Sample: __constant__ int my_int_var;
/// Transfer Between CPU and GPU
//
// :Syntax: hipMemcpyToSymbol(DST_SYM, SRC_PTR, SZ_CHARS, OFF, hipMemcpyHostToDevice );
// Copy SZ_CHARS from CPU starting at address SRC_PTR + OFF
// to GPU symbol DST_SYM, which can be in constant address space.
/// Short Example
__constant__ int d_size;
__constant__ float *d_x;
__device__ float a[size];
__device__ float b[size];
__global__ void thread_main()
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( idx >= d_size ) return;
a[idx] = idx + blockIdx.x;
b[idx] = float(blockIdx.x) / (idx+1);
d_x[idx] = a[idx] + b[idx];
}
__host__ int main(int argc, char** argv)
{
int size = 1024 * 32;
void *x_dev;
hipMalloc( &x_dev, size * sizeof(x_dev[0]) );
hipMemcpyToSymbol
( d_size, &size, sizeof(size), 0, hipMemcpyHostToDevice )
hipMemcpyToSymbol
( d_x, &x_dev, sizeof(x_dev), 0, hipMemcpyHostToDevice )
hipLaunchKernelGGL(( thread_main), dim3(32),dim3(1024), 0, 0, );
// Copy data from GPU to CPU.
//
float x[size];
hipMemcpy( x, x_dev, size * sizeof(x[0]), hipMemcpyDeviceToHost );
}
/// Shared Address Space
/// Size 48 kiB (Before CC 7.0)
/// Scope
//
// Each block has its own shared address space.
// Shared address space shared by all threads within a block.
// Readable and writeable by threads.
/// Declaration
//
// In global or procedure scope
//
// :Syntax: __shared__ TYPE VARNAME;
// :Sample: __shared__ int my_int_var;
/// Transfer Between CPU and GPU
//
// Not easily accomplished.
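/// Short Example
//
// (A minimal sketch added for illustration — not from the original notes.
// The kernel name and tile size are made up; it assumes blocks of at most
// 1024 threads and a launch that exactly covers the array.) Each thread
// stages one element in block-shared storage, the block synchronizes, then
// each thread combines its element with a neighbor's.
__global__ void thread_main_shared(float *x)
{
  __shared__ float tile[1024];     // One slot per thread in the block.
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  tile[threadIdx.x] = x[idx];
  __syncthreads();                 // Wait until every thread has written.
  int nbr = ( threadIdx.x + 1 ) % blockDim.x;
  x[idx] = tile[threadIdx.x] + tile[nbr];
}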
/// Local Address Space
/// Size 512 kiB per thread.
/// Scope
//
// Each thread has its own local address space.
/// Declaration
//
// In procedure scope declared without a qualifier:
//
// :Syntax: TYPE VARNAME;
// :Sample: float my_array[20];
/// Implementation <- IMPORTANT
//
// Registers, If Possible
// Some L1 cache, depending on CC.
// L2, device memory.
//
#endif
__device__ int size_d;
__constant__ int size_c;
__shared__ int size_s;
/// Declaration of Kernel (Entry point for code running on GPU.)
//
// Note: the attribute __global__ indicates that the procedure is
// started by a kernel launch. A GPU-only procedure would use the
// attribute __device__ and a CPU-only procedure would use the
// attribute __host__.
//
__global__ void
thread_main(float *x, float *a, float *b)
{
// Variables threadIdx, blockIdx, and blockDim are pre-set.
//
int size = size_c;
// Compute a unique index (number) for this thread.
// This will be used as an array index.
//
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// Array size might not be a multiple of block size.
//
if ( idx >= size ) return;
a[idx] = idx + blockIdx.x;
b[idx] = float(blockIdx.x) / (idx+1);
// Perform Computation
//
x[idx] = a[idx] + b[idx];
}
__host__ int
main(int argc, char** argv)
{
const int SIZE = 100000000;
// Declare host arrays for inputs and output.
//
vector<float> a(SIZE);
vector<float> b(SIZE);
vector<float> x(SIZE);
// Compute size of each array.
//
const int array_size_chars = a.size() * sizeof(a[0]);
// Allocate storage for GPU copy of data.
//
// The address of the allocated storage is returned in the first
// argument, a_dev, etc. The addresses are in GPU global space and
// so they are not necessarily valid on the CPU.
//
void *a_dev, *b_dev, *x_dev;
hipMalloc( &a_dev, array_size_chars );
hipMalloc( &b_dev, array_size_chars );
hipMalloc( &x_dev, array_size_chars );
// Specify Launch Configuration
//
const int db = 64; // Number of threads per block.
// Choose grid size so that there is at least one thread per array
// element.
//
const int dg = (SIZE + db - 1 ) / db;
// Launch Kernel
//
// The kernel reads its element count from constant space (size_c) and
// takes device pointers, so set the symbol and pass the device arrays.
hipMemcpyToSymbol( size_c, &SIZE, sizeof(SIZE), 0, hipMemcpyHostToDevice );
hipLaunchKernelGGL( thread_main, dim3(dg), dim3(db), 0, 0,
(float*)x_dev, (float*)a_dev, (float*)b_dev );
// Copy data from GPU to CPU.
//
hipMemcpy( x.data(), x_dev, array_size_chars, hipMemcpyDeviceToHost );
printf("Finished with %d elements, element %d is %.5f\n",
SIZE, argc, x[argc]);
hipFree( a_dev );
hipFree( b_dev );
hipFree( x_dev );
}
| b21f24be680539c220cb2247a5780498d0de4ffb.cu | /// LSU EE 7722 GPU Microarchitecture
//
/// Simple, Self-Contained, One-File CUDA Example
/// How to Compile from the Command Line
//
// nvcc -o addr-space addr-space.cu -O3 -Xcompiler -Wall
/// Documentation
//
// CUDA: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html
// C++: http://en.cppreference.com/w/
#include <stdio.h>
#include <cuda_runtime.h>
#include <vector>
using namespace std;
#if 0
/// Address Spaces
// - Global
// - Constant
// - Shared
// - Local
/// Global Address Space
/// Size: 32- or 64-bit address space. Matches size used by host OS, usually 64 bits.
/// Scope:
//
// Single space shared by all threads.
// Readable and writeable by all threads and host.
// Difficult to use for sharing of data.
/// Latency, Bandwidth
//
// Cases
// - Off-Chip Access. (Most common.)
// - L2 Cache Hit. (Fairly common. Possible through Volta [CC 7.x])
// - L1 Cache Hit. (Depends on CC. Possible in CC 2.x, 7.0)
// - Texture Cache Hit. (Depends on CC.)
// Off-Chip Access
// - Latency hundreds of cycles. Course default: 400 cycles.
// - Limited by off-chip bandwidth. 400 GB/s for high-end devices. (2018)
//
/// Declaration
//
// In global scope:
//
// :Syntax: __device__ TYPE VARNAME;
// :Sample: __device__ double mydata[1000];
//
// Note: Sample above shows *static* allocation of global space.
// In most cases dynamic allocation is used.
/// Dynamic Allocation on Host
//
// :Syntax: cudaMalloc( PTR, AMT_CHARS );
// Allocates AMT_CHARS bytes of storage in global space,
// and puts address of that storage in PTR.
//
/// Transfer Between CPU and GPU (Either Direction)
//
// :Syntax: cudaMemcpy( DST_PTR, SRC_PTR, SZ_CHARS, cudaMemcpyHostToDevice );
// Copies SZ_CHARS bytes
// from the CPU (host) starting at address SRC_PTR
// to the GPU (device) starting at address DST_PTR, where
// SRC_PTR is an address in the CPU address space and
// DST_PTR is an address in the GPU global address space.
//
// :Syntax: cudaMemcpy( DST_PTR, SRC_PTR, SZ_CHARS, cudaMemcpyDeviceToHost );
// Copies SZ_CHARS bytes
// from the GPU (device) starting at address SRC_PTR
// to the CPU (host) starting at address DST_PTR, where
// SRC_PTR is an address in the GPU global address space and
// DST_PTR is an address in the CPU address space.
/// Short Example
const int size = 1024 * 32;
__device__ float a[size];
__device__ float b[size];
__global__ void thread_main(float *x)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
a[idx] = idx + blockIdx.x;
b[idx] = float(blockIdx.x) / (idx+1);
x[idx] = a[idx] + b[idx];
}
__host__ int main(int argc, char** argv)
{
void *x_dev;
cudaMalloc( &x_dev, size * sizeof(x_dev[0]) );
thread_main<<<32,1024>>>(x_dev);
// Copy data from GPU to CPU.
//
float x[size];
cudaMemcpy( x, x_dev, size * sizeof(x[0]), cudaMemcpyDeviceToHost );
}
/// Constant Address Space
/// Size 64 kiB ( 16-bit address space )
/// Scope
//
// Single space shared by all threads.
// Readable by all threads.
// Cannot be written by threads.
// Writeable from CPU.
/// Declaration
//
// In global scope:
//
// :Syntax: __constant__ TYPE VARNAME;
// :Sample: __constant__ int my_int_var;
/// Transfer Between CPU and GPU
//
// :Syntax: cudaMemcpyToSymbol(DST_SYM, SRC_PTR, SZ_CHARS, OFF, cudaMemcpyHostToDevice );
// Copy SZ_CHARS from CPU starting at address SRC_PTR + OFF
// to GPU symbol DST_SYM, which can be in constant address space.
/// Short Example
__constant__ int d_size;
__constant__ float *d_x;
__device__ float a[size];
__device__ float b[size];
__global__ void thread_main()
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( idx >= d_size ) return;
a[idx] = idx + blockIdx.x;
b[idx] = float(blockIdx.x) / (idx+1);
d_x[idx] = a[idx] + b[idx];
}
__host__ int main(int argc, char** argv)
{
int size = 1024 * 32;
void *x_dev;
cudaMalloc( &x_dev, size * sizeof(x_dev[0]) );
cudaMemcpyToSymbol
( d_size, &size, sizeof(size), 0, cudaMemcpyHostToDevice )
cudaMemcpyToSymbol
( d_x, &x_dev, sizeof(x_dev), 0, cudaMemcpyHostToDevice )
thread_main<<<32,1024>>>();
// Copy data from GPU to CPU.
//
float x[size];
cudaMemcpy( x, x_dev, size * sizeof(x[0]), cudaMemcpyDeviceToHost );
}
/// Shared Address Space
/// Size 48 kiB (Before CC 7.0)
/// Scope
//
// Each block has its own shared address space.
// Shared address space shared by all threads within a block.
// Readable and writeable by threads.
/// Declaration
//
// In global or procedure scope
//
// :Syntax: __shared__ TYPE VARNAME;
// :Sample: __shared__ int my_int_var;
/// Transfer Between CPU and GPU
//
// Not easily accomplished.
/// Local Address Space
/// Size 512 kiB per thread.
/// Scope
//
// Each thread has its own local address space.
/// Declaration
//
// In procedure scope declared without a qualifier:
//
// :Syntax: TYPE VARNAME;
// :Sample: float my_array[20];
/// Implementation <- IMPORTANT
//
// Registers, If Possible
// Some L1 cache, depending on CC.
// L2, device memory.
//
#endif
__device__ int size_d;
__constant__ int size_c;
__shared__ int size_s;
/// Declaration of Kernel (Entry point for code running on GPU.)
//
// Note: the attribute __global__ indicates that the procedure is
// started by a kernel launch. A GPU-only procedure would use the
// attribute __device__ and a CPU-only procedure would use the
// attribute __host__.
//
__global__ void
thread_main(float *x, float *a, float *b)
{
// Variables threadIdx, blockIdx, and blockDim are pre-set.
//
int size = size_c;
// Compute a unique index (number) for this thread.
// This will be used as an array index.
//
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// Array size might not be a multiple of block size.
//
if ( idx >= size ) return;
a[idx] = idx + blockIdx.x;
b[idx] = float(blockIdx.x) / (idx+1);
// Perform Computation
//
x[idx] = a[idx] + b[idx];
}
__host__ int
main(int argc, char** argv)
{
const int SIZE = 100000000;
// Declare host arrays for inputs and output.
//
vector<float> a(SIZE);
vector<float> b(SIZE);
vector<float> x(SIZE);
// Compute size of each array.
//
const int array_size_chars = a.size() * sizeof(a[0]);
// Allocate storage for GPU copy of data.
//
// The address of the allocated storage is returned in the first
// argument, a_dev, etc. The addresses are in GPU global space and
// so they are not necessarily valid on the CPU.
//
void *a_dev, *b_dev, *x_dev;
cudaMalloc( &a_dev, array_size_chars );
cudaMalloc( &b_dev, array_size_chars );
cudaMalloc( &x_dev, array_size_chars );
// Specify Launch Configuration
//
const int db = 64; // Number of threads per block.
// Choose grid size so that there is at least one thread per array
// element.
//
const int dg = (SIZE + db - 1 ) / db;
// Launch Kernel
//
// The kernel reads its element count from constant space (size_c) and
// takes device pointers, so set the symbol and pass the device arrays.
cudaMemcpyToSymbol( size_c, &SIZE, sizeof(SIZE), 0, cudaMemcpyHostToDevice );
thread_main<<<dg,db>>>( (float*)x_dev, (float*)a_dev, (float*)b_dev );
// Copy data from GPU to CPU.
//
cudaMemcpy( x.data(), x_dev, array_size_chars, cudaMemcpyDeviceToHost );
printf("Finished with %d elements, element %d is %.5f\n",
SIZE, argc, x[argc]);
cudaFree( a_dev );
cudaFree( b_dev );
cudaFree( x_dev );
}
|
581c85c1555edb7e7fb509b86194be4607120238.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string>
#include <hip/hip_fp16.h>
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
#pragma clang diagnostic push
#pragma ide diagnostic ignored "CannotResolve"
template<typename Gtype, typename Wtype>
__global__ void NesterovRegUpdateAllAndClear(int N,
Gtype* g, Wtype* w, Wtype* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
CUDA_KERNEL_LOOP(i, N) {
Wtype reg = reg_L2 ? w[i] : Wtype((Wtype(0) < w[i]) - (w[i] < Wtype(0)));
Wtype gr = Wtype(g[i]) + reg * local_decay;
Wtype hi = h[i];
Wtype hi_new = h[i] = momentum * hi + local_rate * gr;
gr = (Wtype(1) + momentum) * hi_new - momentum * hi;
w[i] -= gr;
g[i] = clear_grads ? Gtype(0) : Gtype(gr);
}
}
#pragma clang diagnostic pop
template<>
__global__ void NesterovRegUpdateAllAndClear<__half, __half>(int N,
__half* g, __half* w, __half* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
__half hz;
hz.x = 0;
CUDA_KERNEL_LOOP(i, N) {
float gf = __half2float(g[i]);
float wf = __half2float(w[i]);
float hf = __half2float(h[i]);
float reg = reg_L2 ? wf : float((0.F < wf) - (wf < 0.F));
gf += reg * local_decay;
float hf_new = momentum * hf + local_rate * gf; // TODO fix see SGD with momentum
gf = (1. + momentum) * hf_new - momentum * hf;
wf -= gf;
h[i] = float2half_clip(hf_new);
w[i] = float2half_clip(wf);
g[i] = clear_grads ? hz : float2half_clip(gf);
}
}
template<typename Gtype, typename Wtype>
void nesterov_reg_update_and_clear_gpu(int N,
Gtype* g, Wtype* w, Wtype* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
hipblasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<hipblasHandle_t>(handle);
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(NesterovRegUpdateAllAndClear, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N,
g, w, h,
momentum, local_rate, local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void nesterov_reg_update_and_clear_gpu<float16, float16>(int N,
float16* g, float16* w, float16* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
hipblasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<hipblasHandle_t>(handle);
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(NesterovRegUpdateAllAndClear, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N,
reinterpret_cast<__half*>(g), reinterpret_cast<__half*>(w), reinterpret_cast<__half*>(h),
momentum, local_rate, local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template void nesterov_reg_update_and_clear_gpu<float16, float>(int, float16*, float*, float*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<float16, double>(int, float16*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<float16, float16>(int, float16*, float16*, float16*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<float, float>(int, float*, float*, float*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<float, double>(int, float*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<float, float16>(int, float*, float16*, float16*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<double, float>(int, double*, float*, float*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<double, double>(int, double*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<double, float16>(int, double*, float16*, float16*,
float, float, const std::string&, float, void*, bool);
} // namespace caffe
| 581c85c1555edb7e7fb509b86194be4607120238.cu | #include <string>
#include <cuda_fp16.h>
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
#pragma clang diagnostic push
#pragma ide diagnostic ignored "CannotResolve"
template<typename Gtype, typename Wtype>
__global__ void NesterovRegUpdateAllAndClear(int N,
Gtype* g, Wtype* w, Wtype* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
CUDA_KERNEL_LOOP(i, N) {
Wtype reg = reg_L2 ? w[i] : Wtype((Wtype(0) < w[i]) - (w[i] < Wtype(0)));
Wtype gr = Wtype(g[i]) + reg * local_decay;
Wtype hi = h[i];
Wtype hi_new = h[i] = momentum * hi + local_rate * gr;
gr = (Wtype(1) + momentum) * hi_new - momentum * hi;
w[i] -= gr;
g[i] = clear_grads ? Gtype(0) : Gtype(gr);
}
}
#pragma clang diagnostic pop
template<>
__global__ void NesterovRegUpdateAllAndClear<__half, __half>(int N,
__half* g, __half* w, __half* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
__half hz;
hz.x = 0;
CUDA_KERNEL_LOOP(i, N) {
float gf = __half2float(g[i]);
float wf = __half2float(w[i]);
float hf = __half2float(h[i]);
float reg = reg_L2 ? wf : float((0.F < wf) - (wf < 0.F));
gf += reg * local_decay;
float hf_new = momentum * hf + local_rate * gf; // TODO fix see SGD with momentum
gf = (1. + momentum) * hf_new - momentum * hf;
wf -= gf;
h[i] = float2half_clip(hf_new);
w[i] = float2half_clip(wf);
g[i] = clear_grads ? hz : float2half_clip(gf);
}
}
template<typename Gtype, typename Wtype>
void nesterov_reg_update_and_clear_gpu(int N,
Gtype* g, Wtype* w, Wtype* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
cublasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<cublasHandle_t>(handle);
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
NesterovRegUpdateAllAndClear // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N,
g, w, h,
momentum, local_rate, local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void nesterov_reg_update_and_clear_gpu<float16, float16>(int N,
float16* g, float16* w, float16* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
cublasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<cublasHandle_t>(handle);
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
NesterovRegUpdateAllAndClear // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N,
reinterpret_cast<__half*>(g), reinterpret_cast<__half*>(w), reinterpret_cast<__half*>(h),
momentum, local_rate, local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template void nesterov_reg_update_and_clear_gpu<float16, float>(int, float16*, float*, float*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<float16, double>(int, float16*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<float16, float16>(int, float16*, float16*, float16*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<float, float>(int, float*, float*, float*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<float, double>(int, float*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<float, float16>(int, float*, float16*, float16*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<double, float>(int, double*, float*, float*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<double, double>(int, double*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void nesterov_reg_update_and_clear_gpu<double, float16>(int, double*, float16*, float16*,
float, float, const std::string&, float, void*, bool);
} // namespace caffe
|
8c27e98dc53dfd361cb230c6756052ae2b52a5ce.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_math.h>
#include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include <helper_functions.h>
#include <cstdlib>
#include "new_kern.h"
#include "nlist.h"
#include "thrust/reduce.h"
#include "thrust/device_ptr.h"
#include "thrust/functional.h"
extern __constant__ NewParams nparams;
template<class O>
__global__ void funcNListK(uint* nlist, // o:neighbor list
uint* num_neigh,// o:num neighbors
const float4* dpos, // i: position
const uint* phash,
const uint* cellStart,
const uint* cellEnd,
const uint* cellAdj,
const uint max_neigh,
O op)
{
uint idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= nparams.N)
return;
float4 pos1 = dpos[idx];
float3 p1 = make_float3(pos1);
float rad1 = pos1.w;
uint hash = phash[idx];
uint n_neigh = 0;
for(uint i = 0; i < nparams.numAdjCells; i++)
{
//uint nhash = cellAdj[i*nparams.numCells + hash];
uint nhash = cellAdj[i + hash*nparams.numAdjCells];
uint cstart = cellStart[nhash];
if(cstart == 0xffffffff)//if cell empty, skip cell
continue;
uint cend = cellEnd[nhash];
for(uint idx2 = cstart; idx2 < cend; idx2++){
if(idx == idx2)//if self interacting, skip
continue;
float4 pos2 = dpos[idx2];
float3 p2 = make_float3(pos2);
float rad2 = pos2.w;
float3 dr = p1 - p2;
dr.x = dr.x - nparams.L.x*rintf(dr.x*nparams.Linv.x);
dr.z = dr.z - nparams.L.z*rintf(dr.z*nparams.Linv.z);
float lsq = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
dr = dr*rsqrtf(lsq);
if (op(rad1,rad2,dr,lsq)){
if(n_neigh < max_neigh){
nlist[nparams.N*n_neigh + idx] = idx2;
}
n_neigh++;
}
}
}
num_neigh[idx] = n_neigh;
}
//uses an adjacency definition based on max_dist_m*(rad1 + rad2)
//Note: this func modifies nlist and max_neigh
//pass in a functor of type NListDistCond
//doesn't use moment data
template<class O>
uint funcNList(uint*& nlist, //reference to the nlist pointer
uint* num_neigh,
const float* dpos,
const uint* phash,
const uint* cellStart,
const uint* cellEnd,
const uint* cellAdj,
const uint numParticles,
uint& max_neigh,
O op)
{
uint numThreads = 192;
uint numBlocks = iDivUp(numParticles, numThreads);
hipFuncSetCacheConfig(funcNListK<O>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( funcNListK), dim3(numBlocks), dim3(numThreads), 0, 0, nlist, num_neigh, (float4*) dpos, phash,
cellStart, cellEnd, cellAdj, max_neigh, op);
thrust::maximum<uint> mx;
thrust::device_ptr<uint> numneigh_ptr(num_neigh);
uint maxn = thrust::reduce(numneigh_ptr, numneigh_ptr+numParticles, 0, mx);
if(maxn > max_neigh) {
printf("Extending NList from %u to %u\n", max_neigh, maxn);
hipFree(nlist);
assert(hipMalloc((void**)&nlist, numParticles*maxn*sizeof(uint)) == hipSuccess);
hipMemset(nlist, 0, numParticles*maxn*sizeof(uint));
max_neigh = maxn;
hipLaunchKernelGGL(( funcNListK), dim3(numBlocks), dim3(numThreads), 0, 0, nlist, num_neigh, (float4*) dpos, phash,
cellStart, cellEnd, cellAdj, max_neigh, op);
}
// getLastCudaError("funcNList");
return maxn;
}
//instantiate various implementations for cross compiling
template uint funcNList<VertCond>(uint*& nlist, uint* num_neigh,
const float* dpos, const uint* phash, const uint* cellStart,
const uint* cellEnd, const uint* cellAdj, const uint numParticles,
uint& max_neigh, VertCond op);
template uint funcNList<OutOfPlane>(uint*& nlist, uint* num_neigh,
const float* dpos, const uint* phash, const uint* cellStart,
const uint* cellEnd, const uint* cellAdj, const uint numParticles,
uint& max_neigh, OutOfPlane op);
template uint funcNList<VarCond>(uint*& nlist, uint* num_neigh,
const float* dpos, const uint* phash, const uint* cellStart,
const uint* cellEnd, const uint* cellAdj, const uint numParticles,
uint& max_neigh, VarCond op);
template<class O>
__global__ void momNListK(uint* nlist, // o:neighbor list
uint* num_neigh,// o:num neighbors
const float4* dpos, // i: position
const float4* dmom,
const uint* phash,
const uint* cellStart,
const uint* cellEnd,
const uint* cellAdj,
const uint max_neigh,
O op)
{
uint idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= nparams.N)
return;
float4 pos1 = dpos[idx];
float3 p1 = make_float3(pos1);
float rad1 = pos1.w;
float Cp1 = dmom[idx].w;
uint hash = phash[idx];
uint n_neigh = 0;
for(uint i = 0; i < nparams.numAdjCells; i++)
{
//uint nhash = cellAdj[i*nparams.numCells + hash];
uint nhash = cellAdj[i + hash*nparams.numAdjCells];
uint cstart = cellStart[nhash];
if(cstart == 0xffffffff)//if cell empty, skip cell
continue;
uint cend = cellEnd[nhash];
for(uint idx2 = cstart; idx2 < cend; idx2++){
if(idx == idx2)//if self interacting, skip
continue;
float4 pos2 = dpos[idx2];
float3 p2 = make_float3(pos2);
float rad2 = pos2.w;
float Cp2 = dmom[idx2].w;
float3 dr = p1 - p2;
dr.x = dr.x - nparams.L.x*rintf(dr.x*nparams.Linv.x);
dr.z = dr.z - nparams.L.z*rintf(dr.z*nparams.Linv.z);
float lsq = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
dr = dr*rsqrtf(lsq);
if (op(rad1,rad2,dr,lsq,Cp1,Cp2)){
if(n_neigh < max_neigh){
nlist[nparams.N*n_neigh + idx] = idx2;
}
n_neigh++;
}
}
}
num_neigh[idx] = n_neigh;
}
template<class O>
uint momNList(uint*& nlist, //reference to the nlist pointer
uint* num_neigh,
const float* dpos,
const float* dmom,
const uint* phash,
const uint* cellStart,
const uint* cellEnd,
const uint* cellAdj,
const uint numParticles,
uint& max_neigh,
O op)
{
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
hipFuncSetCacheConfig(momNListK<O>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( momNListK), dim3(numBlocks), dim3(numThreads), 0, 0, nlist, num_neigh, (float4*) dpos, (float4*) dmom,
phash, cellStart, cellEnd, cellAdj, max_neigh, op);
thrust::maximum<uint> mx;
thrust::device_ptr<uint> numneigh_ptr(num_neigh);
uint maxn = thrust::reduce(numneigh_ptr, numneigh_ptr+numParticles, 0, mx);
if(maxn > max_neigh) {
printf("Extending NList from %u to %u\n", max_neigh, maxn);
hipFree(nlist);
assert(hipMalloc((void**)&nlist, numParticles*maxn*sizeof(uint)) == hipSuccess);
hipMemset(nlist, 0, numParticles*maxn*sizeof(uint));
max_neigh = maxn;
hipLaunchKernelGGL(( momNListK), dim3(numBlocks), dim3(numThreads), 0, 0, nlist, num_neigh, (float4*) dpos,
(float4*) dmom, phash, cellStart, cellEnd, cellAdj, max_neigh, op);
}
getLastCudaError("momNList");
return maxn;
}
template uint momNList<MomVar>(uint*& nlist, uint* num_neigh, const float* dpos,
const float* dmom, const uint* phash, const uint* cellStart,
const uint* cellEnd, const uint* cellAdj, const uint numParticles,
uint& max_neigh, MomVar op);
template uint momNList<MomCut>(uint*& nlist, uint* num_neigh, const float* dpos,
const float* dmom, const uint* phash, const uint* cellStart,
const uint* cellEnd, const uint* cellAdj, const uint numParticles,
uint& max_neigh, MomCut op);
| 8c27e98dc53dfd361cb230c6756052ae2b52a5ce.cu | #include <cuda_runtime.h>
#include <helper_math.h>
#include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include <helper_functions.h>
#include <cstdlib>
#include "new_kern.h"
#include "nlist.h"
#include "thrust/reduce.h"
#include "thrust/device_ptr.h"
#include "thrust/functional.h"
extern __constant__ NewParams nparams;
template<class O>
__global__ void funcNListK(uint* nlist, // o:neighbor list
uint* num_neigh,// o:num neighbors
const float4* dpos, // i: position
const uint* phash,
const uint* cellStart,
const uint* cellEnd,
const uint* cellAdj,
const uint max_neigh,
O op)
{
uint idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= nparams.N)
return;
float4 pos1 = dpos[idx];
float3 p1 = make_float3(pos1);
float rad1 = pos1.w;
uint hash = phash[idx];
uint n_neigh = 0;
for(uint i = 0; i < nparams.numAdjCells; i++)
{
//uint nhash = cellAdj[i*nparams.numCells + hash];
uint nhash = cellAdj[i + hash*nparams.numAdjCells];
uint cstart = cellStart[nhash];
if(cstart == 0xffffffff)//if cell empty, skip cell
continue;
uint cend = cellEnd[nhash];
for(uint idx2 = cstart; idx2 < cend; idx2++){
if(idx == idx2)//if self interacting, skip
continue;
float4 pos2 = dpos[idx2];
float3 p2 = make_float3(pos2);
float rad2 = pos2.w;
float3 dr = p1 - p2;
dr.x = dr.x - nparams.L.x*rintf(dr.x*nparams.Linv.x);
dr.z = dr.z - nparams.L.z*rintf(dr.z*nparams.Linv.z);
float lsq = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
dr = dr*rsqrtf(lsq);
if (op(rad1,rad2,dr,lsq)){
if(n_neigh < max_neigh){
nlist[nparams.N*n_neigh + idx] = idx2;
}
n_neigh++;
}
}
}
num_neigh[idx] = n_neigh;
}
//uses an adjacency definition based on max_dist_m*(rad1 + rad2)
//Note: this func modifies nlist and max_neigh
//pass in a functor of type NListDistCond
//doesn't use moment data
template<class O>
uint funcNList(uint*& nlist, //reference to the nlist pointer
uint* num_neigh,
const float* dpos,
const uint* phash,
const uint* cellStart,
const uint* cellEnd,
const uint* cellAdj,
const uint numParticles,
uint& max_neigh,
O op)
{
uint numThreads = 192;
uint numBlocks = iDivUp(numParticles, numThreads);
cudaFuncSetCacheConfig(funcNListK<O>, cudaFuncCachePreferL1);
funcNListK<<<numBlocks, numThreads>>>(nlist, num_neigh, (float4*) dpos, phash,
cellStart, cellEnd, cellAdj, max_neigh, op);
thrust::maximum<uint> mx;
thrust::device_ptr<uint> numneigh_ptr(num_neigh);
uint maxn = thrust::reduce(numneigh_ptr, numneigh_ptr+numParticles, 0, mx);
if(maxn > max_neigh) {
printf("Extending NList from %u to %u\n", max_neigh, maxn);
cudaFree(nlist);
assert(cudaMalloc((void**)&nlist, numParticles*maxn*sizeof(uint)) == cudaSuccess);
cudaMemset(nlist, 0, numParticles*maxn*sizeof(uint));
max_neigh = maxn;
funcNListK<<<numBlocks, numThreads>>>(nlist, num_neigh, (float4*) dpos, phash,
cellStart, cellEnd, cellAdj, max_neigh, op);
}
// getLastCudaError("funcNList");
return maxn;
}
//instantiate various implementations for cross compiling
template uint funcNList<VertCond>(uint*& nlist, uint* num_neigh,
const float* dpos, const uint* phash, const uint* cellStart,
const uint* cellEnd, const uint* cellAdj, const uint numParticles,
uint& max_neigh, VertCond op);
template uint funcNList<OutOfPlane>(uint*& nlist, uint* num_neigh,
const float* dpos, const uint* phash, const uint* cellStart,
const uint* cellEnd, const uint* cellAdj, const uint numParticles,
uint& max_neigh, OutOfPlane op);
template uint funcNList<VarCond>(uint*& nlist, uint* num_neigh,
const float* dpos, const uint* phash, const uint* cellStart,
const uint* cellEnd, const uint* cellAdj, const uint numParticles,
uint& max_neigh, VarCond op);
template<class O>
__global__ void momNListK(uint* nlist, // o:neighbor list
uint* num_neigh,// o:num neighbors
const float4* dpos, // i: position
const float4* dmom,
const uint* phash,
const uint* cellStart,
const uint* cellEnd,
const uint* cellAdj,
const uint max_neigh,
O op)
{
uint idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= nparams.N)
return;
float4 pos1 = dpos[idx];
float3 p1 = make_float3(pos1);
float rad1 = pos1.w;
float Cp1 = dmom[idx].w;
uint hash = phash[idx];
uint n_neigh = 0;
for(uint i = 0; i < nparams.numAdjCells; i++)
{
//uint nhash = cellAdj[i*nparams.numCells + hash];
uint nhash = cellAdj[i + hash*nparams.numAdjCells];
uint cstart = cellStart[nhash];
if(cstart == 0xffffffff)//if cell empty, skip cell
continue;
uint cend = cellEnd[nhash];
for(uint idx2 = cstart; idx2 < cend; idx2++){
if(idx == idx2)//if self interacting, skip
continue;
float4 pos2 = dpos[idx2];
float3 p2 = make_float3(pos2);
float rad2 = pos2.w;
float Cp2 = dmom[idx2].w;
float3 dr = p1 - p2;
dr.x = dr.x - nparams.L.x*rintf(dr.x*nparams.Linv.x);
dr.z = dr.z - nparams.L.z*rintf(dr.z*nparams.Linv.z);
float lsq = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
dr = dr*rsqrtf(lsq);
if (op(rad1,rad2,dr,lsq,Cp1,Cp2)){
if(n_neigh < max_neigh){
nlist[nparams.N*n_neigh + idx] = idx2;
}
n_neigh++;
}
}
}
num_neigh[idx] = n_neigh;
}
template<class O>
uint momNList(uint*& nlist, //reference to the nlist pointer
uint* num_neigh,
const float* dpos,
const float* dmom,
const uint* phash,
const uint* cellStart,
const uint* cellEnd,
const uint* cellAdj,
const uint numParticles,
uint& max_neigh,
O op)
{
uint numThreads = 128;
uint numBlocks = iDivUp(numParticles, numThreads);
cudaFuncSetCacheConfig(momNListK<O>, cudaFuncCachePreferL1);
momNListK<<<numBlocks, numThreads>>>(nlist, num_neigh, (float4*) dpos, (float4*) dmom,
phash, cellStart, cellEnd, cellAdj, max_neigh, op);
thrust::maximum<uint> mx;
thrust::device_ptr<uint> numneigh_ptr(num_neigh);
uint maxn = thrust::reduce(numneigh_ptr, numneigh_ptr+numParticles, 0, mx);
if(maxn > max_neigh) {
printf("Extending NList from %u to %u\n", max_neigh, maxn);
cudaFree(nlist);
assert(cudaMalloc((void**)&nlist, numParticles*maxn*sizeof(uint)) == cudaSuccess);
cudaMemset(nlist, 0, numParticles*maxn*sizeof(uint));
max_neigh = maxn;
momNListK<<<numBlocks, numThreads>>>(nlist, num_neigh, (float4*) dpos,
(float4*) dmom, phash, cellStart, cellEnd, cellAdj, max_neigh, op);
}
getLastCudaError("momNList");
return maxn;
}
template uint momNList<MomVar>(uint*& nlist, uint* num_neigh, const float* dpos,
const float* dmom, const uint* phash, const uint* cellStart,
const uint* cellEnd, const uint* cellAdj, const uint numParticles,
uint& max_neigh, MomVar op);
template uint momNList<MomCut>(uint*& nlist, uint* num_neigh, const float* dpos,
const float* dmom, const uint* phash, const uint* cellStart,
const uint* cellEnd, const uint* cellAdj, const uint numParticles,
uint& max_neigh, MomCut op);
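// Illustrative only (not part of the original source): a sketch of how a consumer
// kernel might walk the list built above, relying on the column-major layout
// nlist[nparams.N*k + idx] used by the kernels in this file; the kernel name and
// body are assumptions.
__global__ void exampleIterateNListK(const uint* nlist, const uint* num_neigh)
{
	uint idx = blockIdx.x*blockDim.x + threadIdx.x;
	if(idx >= nparams.N)
		return;
	//assumes the host has already grown nlist so num_neigh[idx] <= max_neigh
	uint n_neigh = num_neigh[idx];
	for(uint k = 0; k < n_neigh; k++){
		uint idx2 = nlist[nparams.N*k + idx]; //k-th neighbor of particle idx
		//... interact particle idx with particle idx2 ...
	}
}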
|
a89a1066f6753c64b9c8aabc672f5c513f7681f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void update_old( float4 *__restrict__ newPos, float4 *__restrict__ oldPos )
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
oldPos[index] = newPos[index];
} | a89a1066f6753c64b9c8aabc672f5c513f7681f0.cu | #include "includes.h"
__global__ void update_old( float4 *__restrict__ newPos, float4 *__restrict__ oldPos )
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
oldPos[index] = newPos[index];
} |
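// Illustrative host-side launch (not part of the original file). update_old has no
// bounds check, so this assumes numParticles is a multiple of the block size; the
// wrapper name and the block size of 256 are assumptions.
void launch_update_old( float4 *d_newPos, float4 *d_oldPos, int numParticles )
{
	const int blockSize = 256;
	update_old<<<numParticles / blockSize, blockSize>>>( d_newPos, d_oldPos );
}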
b725894836ea174b49fb6086c638f20eed784a8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/book.h"
#include "../common/gpu_anim.h"
#define DIM 1024
__global__ void kernel( uchar4 *ptr, int ticks ) {
// map from threadIdx/blockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x - DIM / 2;
float fy = y - DIM / 2;
float d = sqrtf( fx * fx + fy * fy );
unsigned char grey = (unsigned char)(128.0f + 127.0f * cos(d / 10.0f - ticks / 7.0f) / (d / 10.0f + 1.0f));
ptr[offset].x = grey;
ptr[offset].y = grey;
ptr[offset].z = grey;
ptr[offset].w = 255;
}
void generate_frame( uchar4 *pixels, void*, int ticks ) {
dim3 grids( DIM / 16, DIM / 16 );
dim3 threads( 16, 16 );
hipLaunchKernelGGL(( kernel), dim3(grids), dim3(threads), 0, 0, pixels, ticks );
}
struct GPUAnimBitmap {
GLuint bufferObj;
cudaGraphicsResource *resource;
int width, height;
void *dataBlock;
void (*fAnim)(uchar4*, void*, int);
void (*animExit)(void*);
void (*clickDrag)(void*, int, int, int, int);
int dragStartX, dragStartY;
GPUAnimBitmap( int w, int h, void* d ) {
width = w;
height = h;
dataBlock = d;
clickDrag = NULL;
// first, find a CUDA device and set it to graphic interop
hipDeviceProp_t prop;
int dev;
memset( &prop, 0, sizeof( hipDeviceProp_t ) );
prop.major = 1;
prop.minor = 0;
HANDLE_ERROR( hipChooseDevice( &dev, &prop ) );
hipGLSetGLDevice( dev );
int c = 1;
char *foo = "name";
glutInit( &c, &foo );
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA );
glutInitWindowSize( width, height );
glutCreateWindow( "bitmap" );
glGenBuffers( 1, &bufferObj );
glBindBuffer( GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj );
glBufferData( GL_PIXEL_UNPACK_BUFFER_ARB, width * height * 4, NULL, GL_DYNAMIC_DRAW_ARB );
HANDLE_ERROR( cudaGraphicsRegisterBuffer( &resource, bufferObj, hipGraphicsMapFlagsNone ) );
}
// static method used for GLUT callbacks
static void idle_func( void ) {
static int ticks = 1;
GPUAnimBitmap* bitmap = *(get_bitmap_ptr());
uchar4* devPtr;
size_t size;
HANDLE_ERROR( hipGraphicsMapResources( 1, &(bitmap -> resource), NULL ) );
HANDLE_ERROR( hipGraphicsResourceGetMappedPointer( (void**)&devPtr, &size, bitmap -> resource ) );
bitmap -> fAnim( devPtr, bitmap -> dataBlock, ticks++ );
HANDLE_ERROR( hipGraphicsUnmapResources( 1, &(bitmap -> resource), NULL ) );
glutPostRedisplay();
}
};
int main( void ) {
GPUAnimBitmap bitmap( DIM, DIM, NULL );
bitmap.anim_and_exit( (void (*)(uchar4*, void*, int))generate_frame, NULL );
} | b725894836ea174b49fb6086c638f20eed784a8e.cu | #include "../common/book.h"
#include "../common/gpu_anim.h"
#define DIM 1024
__global__ void kernel( uchar4 *ptr, int ticks ) {
// map from threadIdx/blockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x - DIM / 2;
float fy = y - DIM / 2;
float d = sqrtf( fx * fx + fy * fy );
unsigned char grey = (unsigned char)(128.0f + 127.0f * cos(d / 10.0f - ticks / 7.0f) / (d / 10.0f + 1.0f));
ptr[offset].x = grey;
ptr[offset].y = grey;
ptr[offset].z = grey;
ptr[offset].w = 255;
}
void generate_frame( uchar4 *pixels, void*, int ticks ) {
dim3 grids( DIM / 16, DIM / 16 );
dim3 threads( 16, 16 );
kernel<<<grids, threads>>>( pixels, ticks );
}
struct GPUAnimBitmap {
GLuint bufferObj;
cudaGraphicsResource *resource;
int width, height;
void *dataBlock;
void (*fAnim)(uchar4*, void*, int);
void (*animExit)(void*);
void (*clickDrag)(void*, int, int, int, int);
int dragStartX, dragStartY;
GPUAnimBitmap( int w, int h, void* d ) {
width = w;
height = h;
dataBlock = d;
clickDrag = NULL;
// first, find a CUDA device and set it to graphic interop
cudaDeviceProp prop;
int dev;
memset( &prop, 0, sizeof( cudaDeviceProp ) );
prop.major = 1;
prop.minor = 0;
HANDLE_ERROR( cudaChooseDevice( &dev, &prop ) );
cudaGLSetGLDevice( dev );
int c = 1;
char *foo = "name";
glutInit( &c, &foo );
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA );
glutInitWindowSize( width, height );
glutCreateWindow( "bitmap" );
glGenBuffers( 1, &bufferObj );
glBindBuffer( GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj );
glBufferData( GL_PIXEL_UNPACK_BUFFER_ARB, width * height * 4, NULL, GL_DYNAMIC_DRAW_ARB );
HANDLE_ERROR( cudaGraphicsGLRegisterBuffer( &resource, bufferObj, cudaGraphicsMapFlagsNone ) );
}
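// Sketch of the helper that idle_func() below relies on; it is not defined in this
// excerpt. Following the usual GPU-anim header pattern it returns the address of a
// function-static pointer so the GLUT callback can reach the active bitmap; the
// exact form here is an assumption.
static GPUAnimBitmap** get_bitmap_ptr( void ) {
	static GPUAnimBitmap* gBitmap;
	return &gBitmap;
}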
// static method used for GLUT callbacks
static void idle_func( void ) {
static int ticks = 1;
GPUAnimBitmap* bitmap = *(get_bitmap_ptr());
uchar4* devPtr;
size_t size;
HANDLE_ERROR( cudaGraphicsMapResources( 1, &(bitmap -> resource), NULL ) );
HANDLE_ERROR( cudaGraphicsResourceGetMappedPointer( (void**)&devPtr, &size, bitmap -> resource ) );
bitmap -> fAnim( devPtr, bitmap -> dataBlock, ticks++ );
HANDLE_ERROR( cudaGraphicsUnmapResources( 1, &(bitmap -> resource), NULL ) );
glutPostRedisplay();
}
};
int main( void ) {
GPUAnimBitmap bitmap( DIM, DIM, NULL );
bitmap.anim_and_exit( (void (*)(uchar4*, void*, int))generate_frame, NULL );
} |
02983aa5096d92e751c299330aec61c3c2c35e88.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include "device_funcs.cuh"
#include <helper_math.h>
#define TX_2D 32
#define TY_2D 32
#define TX 8
#define TY 8
#define TZ 8
#define NUMSTEPS 20
__global__ void renderKernel(uchar4 *d_out, float *d_vol, int w, int h,
int3 volSize, int method, float zs, float theta, float threshold, float dist) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r * w;
if ((c >= w) || (r >= h)) return;
const uchar4 background = { 64, 0, 128, 0 };
float3 source = { 0.f,0.f,-zs };
float3 pix = scrIdxToPos(c, r, w, h, 2 * volSize.z - zs);
//apply viewing transformation: here rotate about y_axis
source = yRotate(source, theta);
pix = yRotate(pix, theta);
//prepare inputs for ray-box intersection
float t0, t1;
const Ray pixRay = { source, pix - source };
float3 center = { volSize.x / 2.f, volSize.y / 2.f, volSize.z / 2.f };
const float3 boxmin = -center;
const float3 boxmax = { volSize.x - center.x, volSize.y - center.y, volSize.z - center.z };
//perform ray_box intersection test
const bool hitBox = intersectBox(pixRay, boxmin, boxmax, &t0, &t1);
uchar4 shade;
if (!hitBox)
shade = background; //miss box => background color
else {
if (t0 < 0.0f)
t0 = 0.f; //clamp to 0 to avoid looking backward
// bounded by points where the ray enters and leaves the box
const Ray boxRay = { paramRay(pixRay,t0), paramRay(pixRay,t1) - paramRay(pixRay,t0) };
if (method == 1)
shade = sliceShader(d_vol, volSize, boxRay, threshold, dist, source);
else if (method == 2)
shade = rayCastShader(d_vol, volSize, boxRay, threshold);
else
shade = volumeRenderShader(d_vol, volSize, boxRay, threshold, NUMSTEPS);
}
d_out[i] = shade;
}
__global__ void volumeKernel(float *d_vol, int3 volSize, int id, float4 params) {
const int w = volSize.x, h = volSize.y, d = volSize.z;
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
const int s = blockIdx.z*blockDim.z + threadIdx.z;
const int i = c + r * w + s * w * h;
if ((c >= w) || (r >= h) || (s >= d)) return;
d_vol[i] = func(c, r, s, id, volSize, params);
}
void kernelLauncher(uchar4 * d_out, float *d_vol, int w, int h,
int3 volSize, int method, int zs, float theta, float threshold, float dist) {
dim3 blockSize(TX_2D, TY_2D);
dim3 gridSize(divUp(w, TX_2D), divUp(h, TY_2D));
renderKernel << <gridSize, blockSize >> > (d_out, d_vol, w, h, volSize, method, zs, theta, threshold, dist);
}
void volumeKernelLauncher(float *d_vol, int3 volSize, int id, float4 params) {
dim3 blockSize(TX, TY, TZ);
dim3 gridSize(divUp(volSize.x, TX), divUp(volSize.y, TY), divUp(volSize.z, TZ));
volumeKernel << <gridSize, blockSize >> > (d_vol, volSize, id, params);
} | 02983aa5096d92e751c299330aec61c3c2c35e88.cu | #include "kernel.h"
#include "device_funcs.cuh"
#include <helper_math.h>
#define TX_2D 32
#define TY_2D 32
#define TX 8
#define TY 8
#define TZ 8
#define NUMSTEPS 20
__global__ void renderKernel(uchar4 *d_out, float *d_vol, int w, int h,
int3 volSize, int method, float zs, float theta, float threshold, float dist) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r * w;
if ((c >= w) || (r >= h)) return;
const uchar4 background = { 64, 0, 128, 0 };
float3 source = { 0.f,0.f,-zs };
float3 pix = scrIdxToPos(c, r, w, h, 2 * volSize.z - zs);
//apply viewing transformation: here rotate about y_axis
source = yRotate(source, theta);
pix = yRotate(pix, theta);
//prepare inputs for ray-box intersection
float t0, t1;
const Ray pixRay = { source, pix - source };
float3 center = { volSize.x / 2.f, volSize.y / 2.f, volSize.z / 2.f };
const float3 boxmin = -center;
const float3 boxmax = { volSize.x - center.x, volSize.y - center.y, volSize.z - center.z };
//perform ray_box intersection test
const bool hitBox = intersectBox(pixRay, boxmin, boxmax, &t0, &t1);
uchar4 shade;
if (!hitBox)
shade = background; //miss box => background color
else {
if (t0 < 0.0f)
t0 = 0.f; //clamp to 0 to avoid looking backward
// bounded by points where the ray enters and leaves the box
const Ray boxRay = { paramRay(pixRay,t0), paramRay(pixRay,t1) - paramRay(pixRay,t0) };
if (method == 1)
shade = sliceShader(d_vol, volSize, boxRay, threshold, dist, source);
else if (method == 2)
shade = rayCastShader(d_vol, volSize, boxRay, threshold);
else
shade = volumeRenderShader(d_vol, volSize, boxRay, threshold, NUMSTEPS);
}
d_out[i] = shade;
}
__global__ void volumeKernel(float *d_vol, int3 volSize, int id, float4 params) {
const int w = volSize.x, h = volSize.y, d = volSize.z;
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
const int s = blockIdx.z*blockDim.z + threadIdx.z;
const int i = c + r * w + s * w * h;
if ((c >= w) || (r >= h) || (s >= d)) return;
d_vol[i] = func(c, r, s, id, volSize, params);
}
void kernelLauncher(uchar4 * d_out, float *d_vol, int w, int h,
int3 volSize, int method, int zs, float theta, float threshold, float dist) {
dim3 blockSize(TX_2D, TY_2D);
dim3 gridSize(divUp(w, TX_2D), divUp(h, TY_2D));
renderKernel << <gridSize, blockSize >> > (d_out, d_vol, w, h, volSize, method, zs, theta, threshold, dist);
}
void volumeKernelLauncher(float *d_vol, int3 volSize, int id, float4 params) {
dim3 blockSize(TX, TY, TZ);
dim3 gridSize(divUp(volSize.x, TX), divUp(volSize.y, TY), divUp(volSize.z, TZ));
volumeKernel << <gridSize, blockSize >> > (d_vol, volSize, id, params);
} |
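// Illustrative host-side usage of the two launchers above (not part of the original
// file); the volume size, the shape id, and the view parameters below are
// assumptions chosen only for the example.
void exampleRender(uchar4 *d_out, int w, int h) {
	const int3 volSize = { 128, 128, 128 };
	const float4 params = make_float4(0.f, 0.f, 0.f, 0.f);
	float *d_vol = 0;
	cudaMalloc((void**)&d_vol, volSize.x*volSize.y*volSize.z*sizeof(float));
	volumeKernelLauncher(d_vol, volSize, 0, params); // fill the volume with the id-0 function
	kernelLauncher(d_out, d_vol, w, h, volSize, 2, 2*volSize.z, 0.f, 0.f, 0.f); // method 2 = ray cast
	cudaFree(d_vol);
}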
434dfccd306bfbdd1d115d5198b9c23538f28f1d.hip | // !!! This is a file automatically generated by hipify!!!
/*------------------------------------------------------------------------
CUDA C extension for Python
Provides functionality for forward and back projection in PET image
reconstruction.
author: Pawel Markiewicz
Copyrights: 2018
------------------------------------------------------------------------*/
#include <Python.h>
#include <stdlib.h>
#include <numpy/arrayobject.h>
#include "def.h"
#include "prjf.h"
#include "prjb.h"
#include "recon.h"
#include "scanner_0.h"
//--- Docstrings
static char module_docstring[] =
"This module provides an interface for GPU routines of forward and back projection.";
static char fprj_docstring[] =
"Forward projector for PET system.";
static char bprj_docstring[] =
"Back projector for PET system.";
static char osem_docstring[] =
"OSEM reconstruction of PET data.";
//---
//--- Available functions
static PyObject *frwd_prj(PyObject *self, PyObject *args);
static PyObject *back_prj(PyObject *self, PyObject *args);
static PyObject *osem_rec(PyObject *self, PyObject *args);
/* Module specification */
static PyMethodDef module_methods[] = {
{ "fprj", frwd_prj, METH_VARARGS, fprj_docstring },
{ "bprj", back_prj, METH_VARARGS, bprj_docstring },
{ "osem", osem_rec, METH_VARARGS, osem_docstring },
{ NULL, NULL, 0, NULL }
};
//---
//--- Initialize the module
PyMODINIT_FUNC initpetprj(void) //it HAS to be init______ and then the name of the shared lib.
{
PyObject *m = Py_InitModule3("petprj", module_methods, module_docstring);
if (m == NULL)
return;
/* Load NumPy functionality. */
import_array();
}
//---
//=======================
#define CUDA_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
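// Illustrative only (not part of the original module): a hypothetical helper showing
// how CUDA_CHECK wraps a HIP allocation; the function and variable names are
// assumptions.
static float* alloc_device_scratch(size_t nbytes)
{
	float *d_scratch = NULL;
	CUDA_CHECK( hipMalloc((void**)&d_scratch, nbytes) );
	return d_scratch;
}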
//==============================================================================
// F O R W A R D P R O J E C T O R
//------------------------------------------------------------------------------
static PyObject *frwd_prj(PyObject *self, PyObject *args)
{
//Structure of constants
Cnst Cnt;
//Dictionary of scanner constants
PyObject * o_mmrcnst;
// axial LUT dictionary. contains such LUTs: li2rno, li2sn, li2nos.
PyObject * o_axLUT;
// transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
PyObject * o_txLUT;
// input image to be forward projected (reshaped for GPU execution)
PyObject * o_im;
// subsets for OSEM, first the default
PyObject * o_subs;
//output projection sino
PyObject * o_prjout;
//flag for attenuation factors to be found based on mu-map; if 0 normal emission projection is used
int att;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Parse the input tuple */
if (!PyArg_ParseTuple(args, "OOOOOOi", &o_prjout, &o_im, &o_txLUT, &o_axLUT, &o_subs, &o_mmrcnst, &att))
return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
PyObject* pd_span = PyDict_GetItemString(o_mmrcnst, "SPN");
Cnt.SPN = (char)PyInt_AS_LONG(pd_span);
PyObject* pd_rngstrt = PyDict_GetItemString(o_mmrcnst, "RNG_STRT");
Cnt.RNG_STRT = (char)PyInt_AS_LONG(pd_rngstrt);
PyObject* pd_rngend = PyDict_GetItemString(o_mmrcnst, "RNG_END");
Cnt.RNG_END = (char)PyInt_AS_LONG(pd_rngend);
PyObject* pd_verbose = PyDict_GetItemString(o_mmrcnst, "VERBOSE");
Cnt.VERBOSE = (bool)PyInt_AS_LONG(pd_verbose);
PyObject* pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
Cnt.DEVID = (char)PyInt_AS_LONG(pd_devid);
/* Interpret the input objects as numpy arrays. */
//axial LUTs:
PyObject* pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno");
PyObject* pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn");
PyObject* pd_li2sn1 = PyDict_GetItemString(o_axLUT, "li2sn1");
PyObject* pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos");
PyObject* pd_li2rng = PyDict_GetItemString(o_axLUT, "li2rng");
//transaxial sino LUTs:
PyObject* pd_crs = PyDict_GetItemString(o_txLUT, "crs");
PyObject* pd_s2c = PyDict_GetItemString(o_txLUT, "s2c");
PyObject* pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali");
//-- get the arrays from the dictionaries
//axLUTs
PyObject *p_li2rno = PyArray_FROM_OTF(pd_li2rno, NPY_INT8, NPY_IN_ARRAY);
PyObject *p_li2sn = PyArray_FROM_OTF(pd_li2sn, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_li2sn1 = PyArray_FROM_OTF(pd_li2sn1, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_li2nos = PyArray_FROM_OTF(pd_li2nos, NPY_INT8, NPY_IN_ARRAY);
PyObject *p_li2rng = PyArray_FROM_OTF(pd_li2rng, NPY_FLOAT32, NPY_IN_ARRAY);
//2D sino index LUT:
PyObject *p_aw2ali = PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_IN_ARRAY);
//sino to crystal, crystals
PyObject *p_s2c = PyArray_FROM_OTF(pd_s2c, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_crs = PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_IN_ARRAY);
//image object
PyObject *p_im = PyArray_FROM_OTF(o_im, NPY_FLOAT32, NPY_IN_ARRAY);
//subsets if using e.g. OSEM
PyObject *p_subs = PyArray_FROM_OTF(o_subs, NPY_INT32, NPY_IN_ARRAY);
//output sino object
PyObject *p_prjout = PyArray_FROM_OTF(o_prjout, NPY_FLOAT32, NPY_IN_ARRAY);
//--
/* If that didn't work, throw an exception. */
if (p_li2rno == NULL || p_li2sn == NULL || p_li2sn1 == NULL || p_li2nos == NULL ||
p_aw2ali == NULL || p_s2c == NULL || p_im == NULL || p_crs == NULL ||
p_subs == NULL || p_prjout == NULL || p_li2rng == NULL)
{
//axLUTs
Py_XDECREF(p_li2rno);
Py_XDECREF(p_li2sn);
Py_XDECREF(p_li2sn1);
Py_XDECREF(p_li2nos);
Py_XDECREF(p_li2rng);
//2D sino LUT
Py_XDECREF(p_aw2ali);
//sino 2 crystals
Py_XDECREF(p_s2c);
Py_XDECREF(p_crs);
//image object
Py_XDECREF(p_im);
//subset definition object
Py_XDECREF(p_subs);
//output sino object
Py_XDECREF(p_prjout);
return NULL;
}
int *subs_ = (int*)PyArray_DATA(p_subs);
short *s2c = (short*)PyArray_DATA(p_s2c);
int *aw2ali = (int*)PyArray_DATA(p_aw2ali);
short *li2sn;
if (Cnt.SPN == 11) {
li2sn = (short*)PyArray_DATA(p_li2sn);
}
else if (Cnt.SPN == 1) {
li2sn = (short*)PyArray_DATA(p_li2sn1);
}
char *li2nos = (char*)PyArray_DATA(p_li2nos);
float *li2rng = (float*)PyArray_DATA(p_li2rng);
float *crs = (float*)PyArray_DATA(p_crs);
float *im = (float*)PyArray_DATA(p_im);
if (Cnt.VERBOSE == 1)
printf("ic> fwd-prj image dimensions: %d, %d, %d\n", PyArray_DIM(p_im, 0), PyArray_DIM(p_im, 1), PyArray_DIM(p_im, 2));
int Nprj = PyArray_DIM(p_subs, 0);
int N0crs = PyArray_DIM(p_crs, 0);
int N1crs = PyArray_DIM(p_crs, 1);
int Naw = PyArray_DIM(p_aw2ali, 0);
if (Cnt.VERBOSE == 1)
printf("\nic> N0crs=%d, N1crs=%d, Naw=%d, Nprj=%d\n", N0crs, N1crs, Naw, Nprj);
int *subs;
if (subs_[0] == -1) {
Nprj = AW;
if (Cnt.VERBOSE == 1)
printf("ic> no subsets defined. number of projection bins in 2D: %d\n", Nprj);
// all projections in
subs = (int*)malloc(Nprj * sizeof(int));
for (int i = 0; i<Nprj; i++) {
subs[i] = i;
}
}
else {
if (Cnt.VERBOSE == 1)
printf("ic> subsets defined. number of subset projection bins in 2D: %d\n", Nprj);
subs = subs_;
}
// output projection sinogram
float *prjout = (float*)PyArray_DATA(p_prjout);
// sets the device on which to calculate
hipSetDevice(Cnt.DEVID);
//<><><><><><><<><><><><><><><><><><><><><><><><><<><><><><><><><><><><><><><><><><><><<><><><><><><><><><><>
gpu_fprj(prjout, im,
li2rng, li2sn, li2nos,
s2c, aw2ali, crs, subs,
Nprj, Naw, N0crs, N1crs, Cnt, att);
//<><><><><><><><<><><><><><><><><><><><><><><><><<><><><><><><><><><><><><><><><><><><<><><><><><><><><><><>
//Clean up
Py_DECREF(p_li2rno);
Py_DECREF(p_li2rng);
Py_DECREF(p_li2sn);
Py_DECREF(p_li2sn1);
Py_DECREF(p_li2nos);
Py_DECREF(p_aw2ali);
Py_DECREF(p_s2c);
Py_DECREF(p_crs);
Py_DECREF(p_im);
Py_DECREF(p_subs);
Py_DECREF(p_prjout);
if (subs_[0] == -1) free(subs);
Py_INCREF(Py_None);
return Py_None;
}
//==============================================================================
// B A C K P R O J E C T O R
//------------------------------------------------------------------------------
static PyObject *back_prj(PyObject *self, PyObject *args)
{
//Structure of constants
Cnst Cnt;
//Dictionary of scanner constants
PyObject * o_mmrcnst;
// axial LUT dictionary. contains such LUTs: li2rno, li2sn, li2nos.
PyObject * o_axLUT;
// transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
PyObject * o_txLUT;
// sino to be back projected to image (both reshaped for GPU execution)
PyObject * o_sino;
// subsets for OSEM, first the default
PyObject * o_subs;
//output backprojected image
PyObject * o_bimg;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Parse the input tuple */
if (!PyArg_ParseTuple(args, "OOOOOO", &o_bimg, &o_sino, &o_txLUT, &o_axLUT, &o_subs, &o_mmrcnst))
return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
PyObject* pd_span = PyDict_GetItemString(o_mmrcnst, "SPN");
Cnt.SPN = (char)PyInt_AS_LONG(pd_span);
PyObject* pd_rngstrt = PyDict_GetItemString(o_mmrcnst, "RNG_STRT");
Cnt.RNG_STRT = (char)PyInt_AS_LONG(pd_rngstrt);
PyObject* pd_rngend = PyDict_GetItemString(o_mmrcnst, "RNG_END");
Cnt.RNG_END = (char)PyInt_AS_LONG(pd_rngend);
PyObject* pd_verbose = PyDict_GetItemString(o_mmrcnst, "VERBOSE");
Cnt.VERBOSE = (bool)PyInt_AS_LONG(pd_verbose);
PyObject* pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
Cnt.DEVID = (char)PyInt_AS_LONG(pd_devid);
/* Interpret the input objects as numpy arrays. */
//axial LUTs:
PyObject* pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno");
PyObject* pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn");
PyObject* pd_li2sn1 = PyDict_GetItemString(o_axLUT, "li2sn1");
PyObject* pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos");
PyObject* pd_li2rng = PyDict_GetItemString(o_axLUT, "li2rng");
//transaxial sino LUTs:
PyObject* pd_crs = PyDict_GetItemString(o_txLUT, "crs");
PyObject* pd_s2c = PyDict_GetItemString(o_txLUT, "s2c");
PyObject* pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali");
//-- get the arrays from the dictionaries
//axLUTs
PyObject *p_li2rno = PyArray_FROM_OTF(pd_li2rno, NPY_INT8, NPY_IN_ARRAY);
PyObject *p_li2sn = PyArray_FROM_OTF(pd_li2sn, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_li2sn1 = PyArray_FROM_OTF(pd_li2sn1, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_li2nos = PyArray_FROM_OTF(pd_li2nos, NPY_INT8, NPY_IN_ARRAY);
PyObject *p_li2rng = PyArray_FROM_OTF(pd_li2rng, NPY_FLOAT32, NPY_IN_ARRAY);
//2D sino index LUT:
PyObject *p_aw2ali = PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_IN_ARRAY);
//sino to crystal, crystals
PyObject *p_s2c = PyArray_FROM_OTF(pd_s2c, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_crs = PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_IN_ARRAY);
//sino object
PyObject *p_sino = PyArray_FROM_OTF(o_sino, NPY_FLOAT32, NPY_IN_ARRAY);
//subset definition
PyObject *p_subs = PyArray_FROM_OTF(o_subs, NPY_INT32, NPY_IN_ARRAY);
//output backprojection image
PyObject *p_bim = PyArray_FROM_OTF(o_bimg, NPY_FLOAT32, NPY_IN_ARRAY);
//--
/* If that didn't work, throw an exception. */
if (p_li2rno == NULL || p_li2sn == NULL || p_li2sn1 == NULL || p_li2nos == NULL ||
p_aw2ali == NULL || p_s2c == NULL || p_sino == NULL || p_crs == NULL ||
p_subs == NULL || p_bim == NULL || p_li2rng == NULL)
{
//axLUTs
Py_XDECREF(p_li2rno);
Py_XDECREF(p_li2sn);
Py_XDECREF(p_li2sn1);
Py_XDECREF(p_li2nos);
Py_XDECREF(p_li2rng);
//2D sino LUT
Py_XDECREF(p_aw2ali);
//sino 2 crystals
Py_XDECREF(p_s2c);
Py_XDECREF(p_crs);
//sino object
Py_XDECREF(p_sino);
//subsets
Py_XDECREF(p_subs);
//backprojection image
Py_XDECREF(p_bim);
return NULL;
}
int *subs_ = (int*)PyArray_DATA(p_subs);
short *s2c = (short*)PyArray_DATA(p_s2c);
int *aw2ali = (int*)PyArray_DATA(p_aw2ali);
short *li2sn;
if (Cnt.SPN == 11) {
li2sn = (short*)PyArray_DATA(p_li2sn);
}
else if (Cnt.SPN == 1) {
li2sn = (short*)PyArray_DATA(p_li2sn1);
}
char *li2nos = (char*)PyArray_DATA(p_li2nos);
float *li2rng = (float*)PyArray_DATA(p_li2rng);
float *crs = (float*)PyArray_DATA(p_crs);
float *sino = (float*)PyArray_DATA(p_sino);
int Nprj = PyArray_DIM(p_subs, 0);
int N0crs = PyArray_DIM(p_crs, 0);
int N1crs = PyArray_DIM(p_crs, 1);
int Naw = PyArray_DIM(p_aw2ali, 0);
int *subs;
if (subs_[0] == -1) {
Nprj = AW;
if (Cnt.VERBOSE == 1)
printf("\nic> no subsets defined. number of projection bins in 2D: %d\n", Nprj);
// all projections in
subs = (int*)malloc(Nprj * sizeof(int));
for (int i = 0; i<Nprj; i++) {
subs[i] = i;
}
}
else {
if (Cnt.VERBOSE == 1)
printf("\nic> subsets defined. number of subset projection bins in 2D: %d\n", Nprj);
subs = subs_;
}
float *bimg = (float*)PyArray_DATA(p_bim);
if (Cnt.VERBOSE == 1)
printf("ic> bck-prj image dimensions: %d, %d, %d\n", PyArray_DIM(p_bim, 0), PyArray_DIM(p_bim, 1), PyArray_DIM(p_bim, 2));
// sets the device on which to calculate
hipSetDevice(Cnt.DEVID);
//<><><<><><><><><><><><><><><><><><><><><<><><><><<><><><><><><><><><><><><><><><><><<><><><><><><>
gpu_bprj(bimg, sino, li2rng, li2sn, li2nos, s2c, aw2ali, crs, subs, Nprj, Naw, N0crs, N1crs, Cnt);
//<><><><><><><><><><><>><><><><><><><><><<><><><><<><><><><><><><><><><><><><><><><><<><><><><><><>
//Clean up
Py_DECREF(p_li2rno);
Py_DECREF(p_li2rng);
Py_DECREF(p_li2sn);
Py_DECREF(p_li2sn1);
Py_DECREF(p_li2nos);
Py_DECREF(p_aw2ali);
Py_DECREF(p_s2c);
Py_DECREF(p_crs);
Py_DECREF(p_sino);
Py_DECREF(p_subs);
Py_DECREF(p_bim);
if (subs_[0] == -1) free(subs);
Py_INCREF(Py_None);
return Py_None;
}
//==============================================================================
// O S E M R E C O N S T R U C T I O N
//------------------------------------------------------------------------------
static PyObject *osem_rec(PyObject *self, PyObject *args)
{
//Structure of constants
Cnst Cnt;
//output image
PyObject * o_imgout;
//output image mask
PyObject * o_rcnmsk;
//Dictionary of scanner constants
PyObject * o_mmrcnst;
// axial LUT dictionary. contains such LUTs: li2rno, li2sn, li2nos.
PyObject * o_axLUT;
// transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
PyObject * o_txLUT;
// subsets for OSEM, first the default
PyObject * o_subs;
// sinos using in reconstruction (reshaped for GPU execution)
PyObject * o_psng; //prompts (measured)
PyObject * o_rsng; //randoms
PyObject * o_ssng; //scatter
PyObject * o_nsng; //norm
PyObject * o_asng; //attenuation
//sensitivity image
PyObject * o_imgsens;
/* ^^^^^^^^^^^^^^^^^^^^^^^ Parse the input tuple ^^^^^^^^^^^^^^^^^^^^^^^^^^^ */
if (!PyArg_ParseTuple(args, "OOOOOOOOOOOO", &o_imgout, &o_rcnmsk, &o_psng, &o_rsng, &o_ssng, &o_nsng, &o_asng,
&o_imgsens, &o_txLUT, &o_axLUT, &o_subs, &o_mmrcnst))
return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
PyObject* pd_verbose = PyDict_GetItemString(o_mmrcnst, "VERBOSE");
Cnt.VERBOSE = (bool)PyInt_AS_LONG(pd_verbose);
PyObject* pd_span = PyDict_GetItemString(o_mmrcnst, "SPN");
Cnt.SPN = (char)PyInt_AS_LONG(pd_span);
PyObject* pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
Cnt.DEVID = (char)PyInt_AS_LONG(pd_devid);
/* Interpret the input objects as numpy arrays. */
//axial LUTs:
PyObject* pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno");
PyObject* pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn");
PyObject* pd_li2sn1 = PyDict_GetItemString(o_axLUT, "li2sn1");
PyObject* pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos");
PyObject* pd_li2rng = PyDict_GetItemString(o_axLUT, "li2rng");
//transaxial sino LUTs:
PyObject* pd_crs = PyDict_GetItemString(o_txLUT, "crs");
PyObject* pd_s2c = PyDict_GetItemString(o_txLUT, "s2c");
PyObject* pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali");
//-- get the arrays from the dictionaries
//output backprojection image
PyObject *p_imgout = PyArray_FROM_OTF(o_imgout, NPY_FLOAT32, NPY_IN_ARRAY);
PyObject *p_rcnmsk = PyArray_FROM_OTF(o_rcnmsk, NPY_BOOL, NPY_IN_ARRAY);
//sino objects
PyObject *p_psng = PyArray_FROM_OTF(o_psng, NPY_UINT16, NPY_IN_ARRAY);
PyObject *p_rsng = PyArray_FROM_OTF(o_rsng, NPY_FLOAT32, NPY_IN_ARRAY);
PyObject *p_ssng = PyArray_FROM_OTF(o_ssng, NPY_FLOAT32, NPY_IN_ARRAY);
PyObject *p_nsng = PyArray_FROM_OTF(o_nsng, NPY_FLOAT32, NPY_IN_ARRAY);
PyObject *p_asng = PyArray_FROM_OTF(o_asng, NPY_FLOAT32, NPY_IN_ARRAY);
//subset definition
PyObject *p_subs = PyArray_FROM_OTF(o_subs, NPY_INT32, NPY_IN_ARRAY);
//sensitivity image
PyObject *p_imgsens = PyArray_FROM_OTF(o_imgsens, NPY_FLOAT32, NPY_IN_ARRAY);
//axLUTs
PyObject *p_li2rno = PyArray_FROM_OTF(pd_li2rno, NPY_INT8, NPY_IN_ARRAY);
PyObject *p_li2sn = PyArray_FROM_OTF(pd_li2sn, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_li2sn1 = PyArray_FROM_OTF(pd_li2sn1, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_li2nos = PyArray_FROM_OTF(pd_li2nos, NPY_INT8, NPY_IN_ARRAY);
PyObject *p_li2rng = PyArray_FROM_OTF(pd_li2rng, NPY_FLOAT32, NPY_IN_ARRAY);
//2D sino index LUT:
PyObject *p_aw2ali = PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_IN_ARRAY);
//sino to crystal, crystals
PyObject *p_s2c = PyArray_FROM_OTF(pd_s2c, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_crs = PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_IN_ARRAY);
//--
/* If that didn't work, throw an exception. */
if (p_imgout == NULL || p_rcnmsk == NULL || p_subs == NULL || p_psng == NULL || p_rsng == NULL || p_ssng == NULL || p_nsng == NULL || p_asng == NULL ||
p_imgsens == NULL || p_li2rno == NULL || p_li2sn == NULL || p_li2sn1 == NULL || p_li2nos == NULL || p_aw2ali == NULL || p_s2c == NULL || p_crs == NULL)
{
//output image
Py_XDECREF(p_imgout);
Py_XDECREF(p_rcnmsk);
//sino objects
Py_XDECREF(p_psng);
Py_XDECREF(p_rsng);
Py_XDECREF(p_ssng);
Py_XDECREF(p_nsng);
Py_XDECREF(p_asng);
//subsets
Py_XDECREF(p_subs);
Py_XDECREF(p_imgsens);
//axLUTs
Py_XDECREF(p_li2rno);
Py_XDECREF(p_li2sn);
Py_XDECREF(p_li2sn1);
Py_XDECREF(p_li2nos);
//2D sino LUT
Py_XDECREF(p_aw2ali);
//sino 2 crystals
Py_XDECREF(p_s2c);
Py_XDECREF(p_crs);
return NULL;
}
float *imgout = (float*)PyArray_DATA(p_imgout);
bool *rcnmsk = (bool*)PyArray_DATA(p_rcnmsk);
unsigned short *psng = (unsigned short*)PyArray_DATA(p_psng);
float *rsng = (float*)PyArray_DATA(p_rsng);
float *ssng = (float*)PyArray_DATA(p_ssng);
float *nsng = (float*)PyArray_DATA(p_nsng);
float *asng = (float*)PyArray_DATA(p_asng);
float *imgsens = (float*)PyArray_DATA(p_imgsens);
short *li2sn;
if (Cnt.SPN == 11) {
li2sn = (short*)PyArray_DATA(p_li2sn);
}
else if (Cnt.SPN == 1) {
li2sn = (short*)PyArray_DATA(p_li2sn1);
}
char *li2nos = (char*)PyArray_DATA(p_li2nos);
float *li2rng = (float*)PyArray_DATA(p_li2rng);
float *crs = (float*)PyArray_DATA(p_crs);
short *s2c = (short*)PyArray_DATA(p_s2c);
int *aw2ali = (int*)PyArray_DATA(p_aw2ali);
int N0crs = PyArray_DIM(p_crs, 0);
int N1crs = PyArray_DIM(p_crs, 1);
// number of subsets
int Nsub = PyArray_DIM(p_subs, 0);
// number of elements used to store max. number of subsets projection - 1
int Nprj = PyArray_DIM(p_subs, 1);
if (Cnt.VERBOSE == 1) printf("ic> number of subsets = %d, and max. number of projections/subset = %d\n", Nsub, Nprj - 1);
int *subs = (int*)PyArray_DATA(p_subs);
// sets the device on which to calculate
CUDA_CHECK( hipSetDevice(Cnt.DEVID) );
//<><><<><><><><<><><><><><><><><><><>
osem(imgout, rcnmsk, psng, rsng, ssng, nsng, asng, subs, imgsens,
li2rng, li2sn, li2nos, s2c, crs, Nsub, Nprj, N0crs, N1crs, Cnt);
//<><><><><><><><<><><><>><><><><><><>
//Clean up
Py_DECREF(p_imgout);
Py_DECREF(p_rcnmsk);
Py_DECREF(p_psng);
Py_DECREF(p_rsng);
Py_DECREF(p_ssng);
Py_DECREF(p_nsng);
Py_DECREF(p_asng);
Py_DECREF(p_subs);
Py_DECREF(p_imgsens);
Py_DECREF(p_li2rno);
Py_DECREF(p_li2rng);
Py_DECREF(p_li2sn);
Py_DECREF(p_li2sn1);
Py_DECREF(p_li2nos);
Py_DECREF(p_aw2ali);
Py_DECREF(p_s2c);
Py_DECREF(p_crs);
Py_INCREF(Py_None);
return Py_None;
}
| 434dfccd306bfbdd1d115d5198b9c23538f28f1d.cu | /*------------------------------------------------------------------------
CUDA C extension for Python
Provides functionality for forward and back projection in PET image
reconstruction.
author: Pawel Markiewicz
Copyrights: 2018
------------------------------------------------------------------------*/
#include <Python.h>
#include <stdlib.h>
#include <numpy/arrayobject.h>
#include "def.h"
#include "prjf.h"
#include "prjb.h"
#include "recon.h"
#include "scanner_0.h"
//--- Docstrings
static char module_docstring[] =
"This module provides an interface for GPU routines of forward and back projection.";
static char fprj_docstring[] =
"Forward projector for PET system.";
static char bprj_docstring[] =
"Back projector for PET system.";
static char osem_docstring[] =
"OSEM reconstruction of PET data.";
//---
//--- Available functions
static PyObject *frwd_prj(PyObject *self, PyObject *args);
static PyObject *back_prj(PyObject *self, PyObject *args);
static PyObject *osem_rec(PyObject *self, PyObject *args);
/* Module specification */
static PyMethodDef module_methods[] = {
{ "fprj", frwd_prj, METH_VARARGS, fprj_docstring },
{ "bprj", back_prj, METH_VARARGS, bprj_docstring },
{ "osem", osem_rec, METH_VARARGS, osem_docstring },
{ NULL, NULL, 0, NULL }
};
//---
//--- Initialize the module
PyMODINIT_FUNC initpetprj(void) //it HAS to be init______ and then the name of the shared lib.
{
PyObject *m = Py_InitModule3("petprj", module_methods, module_docstring);
if (m == NULL)
return;
/* Load NumPy functionality. */
import_array();
}
//---
//=======================
#define CUDA_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//==============================================================================
// F O R W A R D P R O J E C T O R
//------------------------------------------------------------------------------
static PyObject *frwd_prj(PyObject *self, PyObject *args)
{
//Structure of constants
Cnst Cnt;
//Dictionary of scanner constants
PyObject * o_mmrcnst;
// axial LUT dictionary. contains such LUTs: li2rno, li2sn, li2nos.
PyObject * o_axLUT;
// transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
PyObject * o_txLUT;
// input image to be forward projected (reshaped for GPU execution)
PyObject * o_im;
// subsets for OSEM, first the default
PyObject * o_subs;
//output projection sino
PyObject * o_prjout;
//flag for attenuation factors to be found based on mu-map; if 0 normal emission projection is used
int att;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Parse the input tuple */
if (!PyArg_ParseTuple(args, "OOOOOOi", &o_prjout, &o_im, &o_txLUT, &o_axLUT, &o_subs, &o_mmrcnst, &att))
return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
PyObject* pd_span = PyDict_GetItemString(o_mmrcnst, "SPN");
Cnt.SPN = (char)PyInt_AS_LONG(pd_span);
PyObject* pd_rngstrt = PyDict_GetItemString(o_mmrcnst, "RNG_STRT");
Cnt.RNG_STRT = (char)PyInt_AS_LONG(pd_rngstrt);
PyObject* pd_rngend = PyDict_GetItemString(o_mmrcnst, "RNG_END");
Cnt.RNG_END = (char)PyInt_AS_LONG(pd_rngend);
PyObject* pd_verbose = PyDict_GetItemString(o_mmrcnst, "VERBOSE");
Cnt.VERBOSE = (bool)PyInt_AS_LONG(pd_verbose);
PyObject* pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
Cnt.DEVID = (char)PyInt_AS_LONG(pd_devid);
/* Interpret the input objects as numpy arrays. */
//axial LUTs:
PyObject* pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno");
PyObject* pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn");
PyObject* pd_li2sn1 = PyDict_GetItemString(o_axLUT, "li2sn1");
PyObject* pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos");
PyObject* pd_li2rng = PyDict_GetItemString(o_axLUT, "li2rng");
//transaxial sino LUTs:
PyObject* pd_crs = PyDict_GetItemString(o_txLUT, "crs");
PyObject* pd_s2c = PyDict_GetItemString(o_txLUT, "s2c");
PyObject* pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali");
//-- get the arrays from the dictionaries
//axLUTs
PyObject *p_li2rno = PyArray_FROM_OTF(pd_li2rno, NPY_INT8, NPY_IN_ARRAY);
PyObject *p_li2sn = PyArray_FROM_OTF(pd_li2sn, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_li2sn1 = PyArray_FROM_OTF(pd_li2sn1, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_li2nos = PyArray_FROM_OTF(pd_li2nos, NPY_INT8, NPY_IN_ARRAY);
PyObject *p_li2rng = PyArray_FROM_OTF(pd_li2rng, NPY_FLOAT32, NPY_IN_ARRAY);
//2D sino index LUT:
PyObject *p_aw2ali = PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_IN_ARRAY);
//sino to crystal, crystals
PyObject *p_s2c = PyArray_FROM_OTF(pd_s2c, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_crs = PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_IN_ARRAY);
//image object
PyObject *p_im = PyArray_FROM_OTF(o_im, NPY_FLOAT32, NPY_IN_ARRAY);
//subsets if using e.g. OSEM
PyObject *p_subs = PyArray_FROM_OTF(o_subs, NPY_INT32, NPY_IN_ARRAY);
//output sino object
PyObject *p_prjout = PyArray_FROM_OTF(o_prjout, NPY_FLOAT32, NPY_IN_ARRAY);
//--
/* If that didn't work, throw an exception. */
if (p_li2rno == NULL || p_li2sn == NULL || p_li2sn1 == NULL || p_li2nos == NULL ||
p_aw2ali == NULL || p_s2c == NULL || p_im == NULL || p_crs == NULL ||
p_subs == NULL || p_prjout == NULL || p_li2rng == NULL)
{
//axLUTs
Py_XDECREF(p_li2rno);
Py_XDECREF(p_li2sn);
Py_XDECREF(p_li2sn1);
Py_XDECREF(p_li2nos);
Py_XDECREF(p_li2rng);
//2D sino LUT
Py_XDECREF(p_aw2ali);
//sino 2 crystals
Py_XDECREF(p_s2c);
Py_XDECREF(p_crs);
//image object
Py_XDECREF(p_im);
//subset definition object
Py_XDECREF(p_subs);
//output sino object
Py_XDECREF(p_prjout);
return NULL;
}
int *subs_ = (int*)PyArray_DATA(p_subs);
short *s2c = (short*)PyArray_DATA(p_s2c);
int *aw2ali = (int*)PyArray_DATA(p_aw2ali);
short *li2sn;
if (Cnt.SPN == 11) {
li2sn = (short*)PyArray_DATA(p_li2sn);
}
else if (Cnt.SPN == 1) {
li2sn = (short*)PyArray_DATA(p_li2sn1);
}
char *li2nos = (char*)PyArray_DATA(p_li2nos);
float *li2rng = (float*)PyArray_DATA(p_li2rng);
float *crs = (float*)PyArray_DATA(p_crs);
float *im = (float*)PyArray_DATA(p_im);
if (Cnt.VERBOSE == 1)
printf("ic> fwd-prj image dimensions: %d, %d, %d\n", PyArray_DIM(p_im, 0), PyArray_DIM(p_im, 1), PyArray_DIM(p_im, 2));
int Nprj = PyArray_DIM(p_subs, 0);
int N0crs = PyArray_DIM(p_crs, 0);
int N1crs = PyArray_DIM(p_crs, 1);
int Naw = PyArray_DIM(p_aw2ali, 0);
if (Cnt.VERBOSE == 1)
printf("\nic> N0crs=%d, N1crs=%d, Naw=%d, Nprj=%d\n", N0crs, N1crs, Naw, Nprj);
int *subs;
if (subs_[0] == -1) {
Nprj = AW;
if (Cnt.VERBOSE == 1)
printf("ic> no subsets defined. number of projection bins in 2D: %d\n", Nprj);
// all projections in
subs = (int*)malloc(Nprj * sizeof(int));
for (int i = 0; i<Nprj; i++) {
subs[i] = i;
}
}
else {
if (Cnt.VERBOSE == 1)
printf("ic> subsets defined. number of subset projection bins in 2D: %d\n", Nprj);
subs = subs_;
}
// output projection sinogram
float *prjout = (float*)PyArray_DATA(p_prjout);
// sets the device on which to calculate
cudaSetDevice(Cnt.DEVID);
//<><><><><><><<><><><><><><><><><><><><><><><><><<><><><><><><><><><><><><><><><><><><<><><><><><><><><><><>
gpu_fprj(prjout, im,
li2rng, li2sn, li2nos,
s2c, aw2ali, crs, subs,
Nprj, Naw, N0crs, N1crs, Cnt, att);
//<><><><><><><><<><><><><><><><><><><><><><><><><<><><><><><><><><><><><><><><><><><><<><><><><><><><><><><>
//Clean up
Py_DECREF(p_li2rno);
Py_DECREF(p_li2rng);
Py_DECREF(p_li2sn);
Py_DECREF(p_li2sn1);
Py_DECREF(p_li2nos);
Py_DECREF(p_aw2ali);
Py_DECREF(p_s2c);
Py_DECREF(p_crs);
Py_DECREF(p_im);
Py_DECREF(p_subs);
Py_DECREF(p_prjout);
if (subs_[0] == -1) free(subs);
Py_INCREF(Py_None);
return Py_None;
}
//==============================================================================
// B A C K P R O J E C T O R
//------------------------------------------------------------------------------
static PyObject *back_prj(PyObject *self, PyObject *args)
{
//Structure of constants
Cnst Cnt;
//Dictionary of scanner constants
PyObject * o_mmrcnst;
// axial LUT dictionary. contains such LUTs: li2rno, li2sn, li2nos.
PyObject * o_axLUT;
// transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
PyObject * o_txLUT;
// sino to be back projected to image (both reshaped for GPU execution)
PyObject * o_sino;
// subsets for OSEM, first the default
PyObject * o_subs;
//output backprojected image
PyObject * o_bimg;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Parse the input tuple */
if (!PyArg_ParseTuple(args, "OOOOOO", &o_bimg, &o_sino, &o_txLUT, &o_axLUT, &o_subs, &o_mmrcnst))
return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
PyObject* pd_span = PyDict_GetItemString(o_mmrcnst, "SPN");
Cnt.SPN = (char)PyInt_AS_LONG(pd_span);
PyObject* pd_rngstrt = PyDict_GetItemString(o_mmrcnst, "RNG_STRT");
Cnt.RNG_STRT = (char)PyInt_AS_LONG(pd_rngstrt);
PyObject* pd_rngend = PyDict_GetItemString(o_mmrcnst, "RNG_END");
Cnt.RNG_END = (char)PyInt_AS_LONG(pd_rngend);
PyObject* pd_verbose = PyDict_GetItemString(o_mmrcnst, "VERBOSE");
Cnt.VERBOSE = (bool)PyInt_AS_LONG(pd_verbose);
PyObject* pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
Cnt.DEVID = (char)PyInt_AS_LONG(pd_devid);
/* Interpret the input objects as numpy arrays. */
//axial LUTs:
PyObject* pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno");
PyObject* pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn");
PyObject* pd_li2sn1 = PyDict_GetItemString(o_axLUT, "li2sn1");
PyObject* pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos");
PyObject* pd_li2rng = PyDict_GetItemString(o_axLUT, "li2rng");
//transaxial sino LUTs:
PyObject* pd_crs = PyDict_GetItemString(o_txLUT, "crs");
PyObject* pd_s2c = PyDict_GetItemString(o_txLUT, "s2c");
PyObject* pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali");
//-- get the arrays from the dictionaries
//axLUTs
PyObject *p_li2rno = PyArray_FROM_OTF(pd_li2rno, NPY_INT8, NPY_IN_ARRAY);
PyObject *p_li2sn = PyArray_FROM_OTF(pd_li2sn, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_li2sn1 = PyArray_FROM_OTF(pd_li2sn1, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_li2nos = PyArray_FROM_OTF(pd_li2nos, NPY_INT8, NPY_IN_ARRAY);
PyObject *p_li2rng = PyArray_FROM_OTF(pd_li2rng, NPY_FLOAT32, NPY_IN_ARRAY);
//2D sino index LUT:
PyObject *p_aw2ali = PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_IN_ARRAY);
//sino to crystal, crystals
PyObject *p_s2c = PyArray_FROM_OTF(pd_s2c, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_crs = PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_IN_ARRAY);
//sino object
PyObject *p_sino = PyArray_FROM_OTF(o_sino, NPY_FLOAT32, NPY_IN_ARRAY);
//subset definition
PyObject *p_subs = PyArray_FROM_OTF(o_subs, NPY_INT32, NPY_IN_ARRAY);
//output backprojection image
PyObject *p_bim = PyArray_FROM_OTF(o_bimg, NPY_FLOAT32, NPY_IN_ARRAY);
//--
/* If that didn't work, throw an exception. */
if (p_li2rno == NULL || p_li2sn == NULL || p_li2sn1 == NULL || p_li2nos == NULL ||
p_aw2ali == NULL || p_s2c == NULL || p_sino == NULL || p_crs == NULL ||
p_subs == NULL || p_bim == NULL || p_li2rng == NULL)
{
//axLUTs
Py_XDECREF(p_li2rno);
Py_XDECREF(p_li2sn);
Py_XDECREF(p_li2sn1);
Py_XDECREF(p_li2nos);
Py_XDECREF(p_li2rng);
//2D sino LUT
Py_XDECREF(p_aw2ali);
//sino 2 crystals
Py_XDECREF(p_s2c);
Py_XDECREF(p_crs);
//sino object
Py_XDECREF(p_sino);
//subsets
Py_XDECREF(p_subs);
//backprojection image
Py_XDECREF(p_bim);
return NULL;
}
int *subs_ = (int*)PyArray_DATA(p_subs);
short *s2c = (short*)PyArray_DATA(p_s2c);
int *aw2ali = (int*)PyArray_DATA(p_aw2ali);
short *li2sn;
if (Cnt.SPN == 11) {
li2sn = (short*)PyArray_DATA(p_li2sn);
}
else if (Cnt.SPN == 1) {
li2sn = (short*)PyArray_DATA(p_li2sn1);
}
char *li2nos = (char*)PyArray_DATA(p_li2nos);
float *li2rng = (float*)PyArray_DATA(p_li2rng);
float *crs = (float*)PyArray_DATA(p_crs);
float *sino = (float*)PyArray_DATA(p_sino);
int Nprj = PyArray_DIM(p_subs, 0);
int N0crs = PyArray_DIM(p_crs, 0);
int N1crs = PyArray_DIM(p_crs, 1);
int Naw = PyArray_DIM(p_aw2ali, 0);
int *subs;
if (subs_[0] == -1) {
Nprj = AW;
if (Cnt.VERBOSE == 1)
printf("\nic> no subsets defined. number of projection bins in 2D: %d\n", Nprj);
// all projections in
subs = (int*)malloc(Nprj * sizeof(int));
for (int i = 0; i<Nprj; i++) {
subs[i] = i;
}
}
else {
if (Cnt.VERBOSE == 1)
printf("\nic> subsets defined. number of subset projection bins in 2D: %d\n", Nprj);
subs = subs_;
}
float *bimg = (float*)PyArray_DATA(p_bim);
if (Cnt.VERBOSE == 1)
printf("ic> bck-prj image dimensions: %d, %d, %d\n", PyArray_DIM(p_bim, 0), PyArray_DIM(p_bim, 1), PyArray_DIM(p_bim, 2));
// sets the device on which to calculate
cudaSetDevice(Cnt.DEVID);
//<><><<><><><><><><><><><><><><><><><><><<><><><><<><><><><><><><><><><><><><><><><><<><><><><><><>
gpu_bprj(bimg, sino, li2rng, li2sn, li2nos, s2c, aw2ali, crs, subs, Nprj, Naw, N0crs, N1crs, Cnt);
//<><><><><><><><><><><>><><><><><><><><><<><><><><<><><><><><><><><><><><><><><><><><<><><><><><><>
//Clean up
Py_DECREF(p_li2rno);
Py_DECREF(p_li2rng);
Py_DECREF(p_li2sn);
Py_DECREF(p_li2sn1);
Py_DECREF(p_li2nos);
Py_DECREF(p_aw2ali);
Py_DECREF(p_s2c);
Py_DECREF(p_crs);
Py_DECREF(p_sino);
Py_DECREF(p_subs);
Py_DECREF(p_bim);
if (subs_[0] == -1) free(subs);
Py_INCREF(Py_None);
return Py_None;
}
//==============================================================================
// O S E M R E C O N S T R U C T I O N
//------------------------------------------------------------------------------
static PyObject *osem_rec(PyObject *self, PyObject *args)
{
//Structure of constants
Cnst Cnt;
//output image
PyObject * o_imgout;
//output image mask
PyObject * o_rcnmsk;
//Dictionary of scanner constants
PyObject * o_mmrcnst;
// axial LUT dictionary. contains such LUTs: li2rno, li2sn, li2nos.
PyObject * o_axLUT;
// transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
PyObject * o_txLUT;
// subsets for OSEM, first the default
PyObject * o_subs;
// sinos using in reconstruction (reshaped for GPU execution)
PyObject * o_psng; //prompts (measured)
PyObject * o_rsng; //randoms
PyObject * o_ssng; //scatter
PyObject * o_nsng; //norm
PyObject * o_asng; //attenuation
//sensitivity image
PyObject * o_imgsens;
/* ^^^^^^^^^^^^^^^^^^^^^^^ Parse the input tuple ^^^^^^^^^^^^^^^^^^^^^^^^^^^ */
if (!PyArg_ParseTuple(args, "OOOOOOOOOOOO", &o_imgout, &o_rcnmsk, &o_psng, &o_rsng, &o_ssng, &o_nsng, &o_asng,
&o_imgsens, &o_txLUT, &o_axLUT, &o_subs, &o_mmrcnst))
return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
PyObject* pd_verbose = PyDict_GetItemString(o_mmrcnst, "VERBOSE");
Cnt.VERBOSE = (bool)PyInt_AS_LONG(pd_verbose);
PyObject* pd_span = PyDict_GetItemString(o_mmrcnst, "SPN");
Cnt.SPN = (char)PyInt_AS_LONG(pd_span);
PyObject* pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
Cnt.DEVID = (char)PyInt_AS_LONG(pd_devid);
/* Interpret the input objects as numpy arrays. */
//axial LUTs:
PyObject* pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno");
PyObject* pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn");
PyObject* pd_li2sn1 = PyDict_GetItemString(o_axLUT, "li2sn1");
PyObject* pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos");
PyObject* pd_li2rng = PyDict_GetItemString(o_axLUT, "li2rng");
//transaxial sino LUTs:
PyObject* pd_crs = PyDict_GetItemString(o_txLUT, "crs");
PyObject* pd_s2c = PyDict_GetItemString(o_txLUT, "s2c");
PyObject* pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali");
//-- get the arrays from the dictionaries
//output backprojection image
PyObject *p_imgout = PyArray_FROM_OTF(o_imgout, NPY_FLOAT32, NPY_IN_ARRAY);
PyObject *p_rcnmsk = PyArray_FROM_OTF(o_rcnmsk, NPY_BOOL, NPY_IN_ARRAY);
//sino objects
PyObject *p_psng = PyArray_FROM_OTF(o_psng, NPY_UINT16, NPY_IN_ARRAY);
PyObject *p_rsng = PyArray_FROM_OTF(o_rsng, NPY_FLOAT32, NPY_IN_ARRAY);
PyObject *p_ssng = PyArray_FROM_OTF(o_ssng, NPY_FLOAT32, NPY_IN_ARRAY);
PyObject *p_nsng = PyArray_FROM_OTF(o_nsng, NPY_FLOAT32, NPY_IN_ARRAY);
PyObject *p_asng = PyArray_FROM_OTF(o_asng, NPY_FLOAT32, NPY_IN_ARRAY);
//subset definition
PyObject *p_subs = PyArray_FROM_OTF(o_subs, NPY_INT32, NPY_IN_ARRAY);
//sensitivity image
PyObject *p_imgsens = PyArray_FROM_OTF(o_imgsens, NPY_FLOAT32, NPY_IN_ARRAY);
//axLUTs
PyObject *p_li2rno = PyArray_FROM_OTF(pd_li2rno, NPY_INT8, NPY_IN_ARRAY);
PyObject *p_li2sn = PyArray_FROM_OTF(pd_li2sn, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_li2sn1 = PyArray_FROM_OTF(pd_li2sn1, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_li2nos = PyArray_FROM_OTF(pd_li2nos, NPY_INT8, NPY_IN_ARRAY);
PyObject *p_li2rng = PyArray_FROM_OTF(pd_li2rng, NPY_FLOAT32, NPY_IN_ARRAY);
//2D sino index LUT:
PyObject *p_aw2ali = PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_IN_ARRAY);
//sino to crystal, crystals
PyObject *p_s2c = PyArray_FROM_OTF(pd_s2c, NPY_INT16, NPY_IN_ARRAY);
PyObject *p_crs = PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_IN_ARRAY);
//--
/* If that didn't work, throw an exception. */
if (p_imgout == NULL || p_rcnmsk == NULL || p_subs == NULL || p_psng == NULL || p_rsng == NULL || p_ssng == NULL || p_nsng == NULL || p_asng == NULL ||
p_imgsens == NULL || p_li2rno == NULL || p_li2sn == NULL || p_li2sn1 == NULL || p_li2nos == NULL || p_aw2ali == NULL || p_s2c == NULL || p_crs == NULL)
{
//output image
Py_XDECREF(p_imgout);
Py_XDECREF(p_rcnmsk);
//sino objects
Py_XDECREF(p_psng);
Py_XDECREF(p_rsng);
Py_XDECREF(p_ssng);
Py_XDECREF(p_nsng);
Py_XDECREF(p_asng);
//subsets
Py_XDECREF(p_subs);
Py_XDECREF(p_imgsens);
//axLUTs
Py_XDECREF(p_li2rno);
Py_XDECREF(p_li2sn);
Py_XDECREF(p_li2sn1);
Py_XDECREF(p_li2nos);
//2D sino LUT
Py_XDECREF(p_aw2ali);
//sino 2 crystals
Py_XDECREF(p_s2c);
Py_XDECREF(p_crs);
return NULL;
}
float *imgout = (float*)PyArray_DATA(p_imgout);
bool *rcnmsk = (bool*)PyArray_DATA(p_rcnmsk);
unsigned short *psng = (unsigned short*)PyArray_DATA(p_psng);
float *rsng = (float*)PyArray_DATA(p_rsng);
float *ssng = (float*)PyArray_DATA(p_ssng);
float *nsng = (float*)PyArray_DATA(p_nsng);
float *asng = (float*)PyArray_DATA(p_asng);
float *imgsens = (float*)PyArray_DATA(p_imgsens);
short *li2sn;
if (Cnt.SPN == 11) {
li2sn = (short*)PyArray_DATA(p_li2sn);
}
else if (Cnt.SPN == 1) {
li2sn = (short*)PyArray_DATA(p_li2sn1);
}
char *li2nos = (char*)PyArray_DATA(p_li2nos);
float *li2rng = (float*)PyArray_DATA(p_li2rng);
float *crs = (float*)PyArray_DATA(p_crs);
short *s2c = (short*)PyArray_DATA(p_s2c);
int *aw2ali = (int*)PyArray_DATA(p_aw2ali);
int N0crs = PyArray_DIM(p_crs, 0);
int N1crs = PyArray_DIM(p_crs, 1);
// number of subsets
int Nsub = PyArray_DIM(p_subs, 0);
// number of elements used to store max. number of subsets projection - 1
int Nprj = PyArray_DIM(p_subs, 1);
if (Cnt.VERBOSE == 1) printf("ic> number of subsets = %d, and max. number of projections/subset = %d\n", Nsub, Nprj - 1);
int *subs = (int*)PyArray_DATA(p_subs);
// sets the device on which to calculate
CUDA_CHECK( cudaSetDevice(Cnt.DEVID) );
//<><><<><><><><<><><><><><><><><><><>
osem(imgout, rcnmsk, psng, rsng, ssng, nsng, asng, subs, imgsens,
li2rng, li2sn, li2nos, s2c, crs, Nsub, Nprj, N0crs, N1crs, Cnt);
//<><><><><><><><<><><><>><><><><><><>
//Clean up
Py_DECREF(p_imgout);
Py_DECREF(p_rcnmsk);
Py_DECREF(p_psng);
Py_DECREF(p_rsng);
Py_DECREF(p_ssng);
Py_DECREF(p_nsng);
Py_DECREF(p_asng);
Py_DECREF(p_subs);
Py_DECREF(p_imgsens);
Py_DECREF(p_li2rno);
Py_DECREF(p_li2rng);
Py_DECREF(p_li2sn);
Py_DECREF(p_li2sn1);
Py_DECREF(p_li2nos);
Py_DECREF(p_aw2ali);
Py_DECREF(p_s2c);
Py_DECREF(p_crs);
Py_INCREF(Py_None);
return Py_None;
}
|
945cb5eccc26a1c15cca01872bdefbf26106e554.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "traffic.h"
static const int kNumBlockSize = 256;
static const char kCellTypeNormal = 1;
static const char kCellTypeProducer = 2;
using IndexT = int;
using CellPointerT = IndexT;
#include "../dataset.h"
__managed__ CellBase **dev_cells;
// Need 2 arrays of both, so we can swap.
__device__ int *d_Car_active;
__device__ int *d_Car_active_2;
__managed__ CarBase **dev_cars;
__managed__ CarBase **dev_cars_2;
// For prefix sum array compaction.
__device__ int *d_prefix_sum_temp;
__device__ int *d_prefix_sum_output;
int *h_prefix_sum_temp;
int *h_prefix_sum_output;
int *h_Car_active;
int *h_Car_active_2;
__device__ int d_num_cells;
__device__ int d_num_cars;
__device__ int d_num_cars_2;
int host_num_cells;
int host_num_cars;
// TODO: Consider migrating to SoaAlloc.
TrafficLight *h_traffic_lights;
__managed__ TrafficLightBase **d_traffic_lights;
// Only for rendering.
__device__ int dev_num_cells;
__device__ float *dev_Cell_pos_x;
__device__ float *dev_Cell_pos_y;
__device__ bool *dev_Cell_occupied;
float *host_Cell_pos_x;
float *host_Cell_pos_y;
bool *host_Cell_occupied;
float *host_data_Cell_pos_x;
float *host_data_Cell_pos_y;
bool *host_data_Cell_occupied;
__device__ void Car_step_extend_path(IndexT self) {
CellBase *cell = dev_cars[self]->get_position();
CellBase *next_cell;
for (int i = 0; i < dev_cars[self]->get_velocity(); ++i) {
bool cond = cell->get_is_target();
if (cell->is_sink() || cond) {
break;
}
next_cell = dev_cars[self]->next_step(cell);
assert(next_cell != cell);
if (!next_cell->is_free())
break;
cell = next_cell;
dev_cars[self]->set_path(cell, i);
int path_len = dev_cars[self]->get_path_length();
dev_cars[self]->set_path_length(path_len + 1);
}
int path_len = dev_cars[self]->get_path_length();
dev_cars[self]->set_velocity(path_len);
}
__device__ void Car_step_constraint_velocity(IndexT self) {
// This is actually only needed for the very first iteration, because a car
// may be positioned on a traffic light cell.
int vel = dev_cars[self]->get_velocity();
CellBase *cell = dev_cars[self]->get_position();
if (vel > cell->get_current_max_velocity()) {
int max_velocity = cell->get_current_max_velocity();
dev_cars[self]->set_velocity(max_velocity);
}
int path_index = 0;
int distance = 1;
while (distance <= dev_cars[self]->get_velocity()) {
// Invariant: Movement of up to `distance - 1` many cells at `velocity_`
// is allowed.
// Now check if next cell can be entered.
CellBase *next_cell = dev_cars[self]->get_path(path_index);
// Avoid collision.
if (!next_cell->is_free()) {
// Cannot enter cell.
--distance;
dev_cars[self]->set_velocity(distance);
break;
} // else: Can enter next cell.
int curr_vel = dev_cars[self]->get_velocity();
if (curr_vel > next_cell->get_current_max_velocity()) {
// Car is too fast for this cell.
if (next_cell->get_current_max_velocity() > distance - 1) {
// Even if we slow down, we would still make progress.
int max = next_cell->get_current_max_velocity();
dev_cars[self]->set_velocity(max);
} else {
// Do not enter the next cell.
--distance;
assert(distance >= 0);
dev_cars[self]->set_velocity(distance);
break;
}
}
++distance;
++path_index;
}
--distance;
#ifndef NDEBUG
for (int i = 0; i < dev_cars[self]->get_velocity(); ++i) {
assert(dev_cars[self]->get_path(i)->is_free());
assert(i == 0 ||
dev_cars[self]->get_path(i - 1) != dev_cars[self]->get_path(i));
}
// TODO: Check why the cast is necessary.
assert(distance <= dev_cars[self]->get_velocity());
#endif // NDEBUG
}
__device__ void Car_step_move(IndexT self) {
CellBase *cell = dev_cars[self]->get_position();
for (int i = 0; i < dev_cars[self]->get_velocity(); ++i) {
assert(dev_cars[self]->get_path(i) != cell);
cell = dev_cars[self]->get_path(i);
assert(cell->is_free());
CellBase *ptr = dev_cars[self]->get_position();
ptr->release();
cell->occupy(dev_cars[self]);
dev_cars[self]->set_position(cell);
}
CellBase *ptr = dev_cars[self]->get_position();
bool cond = ptr->is_sink();
if (cond || ptr->get_is_target()) {
// Remove car from the simulation. Will be added again in the next
// iteration.
ptr->release();
dev_cars[self]->set_position(nullptr);
d_Car_active[self] = 0;
}
}
__device__ void Car_step_slow_down(IndexT self) {
// 20% chance of slowdown.
int vel = dev_cars[self]->get_velocity();
if (dev_cars[self]->random_uni() < 0.2 && vel > 0) {
dev_cars[self]->set_velocity(vel - 1);
}
}
__device__ IndexT new_Car(int seed, IndexT cell, int max_velocity) {
IndexT idx = atomicAdd(&d_num_cars, 1);
assert(idx >= 0 && idx < kMaxNumCars);
assert(!d_Car_active[idx]);
dev_cars[idx]->set_position(dev_cells[cell]);
dev_cars[idx]->set_path_length(0);
dev_cars[idx]->set_velocity(0);
dev_cars[idx]->set_max_velocity(max_velocity);
d_Car_active[idx] = 1;
assert(dev_cells[cell]->is_free());
dev_cells[cell]->occupy(dev_cars[idx]);
hiprand_init(seed, 0, 0, &dev_cars[idx]->random_state);
return idx;
}
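// new_Car above (and new_Cell below) hand out slots with a simple atomicAdd
// bump allocator on d_num_cars / d_num_cells; the pre-allocated object at
// that index is then re-initialized in place, so no device-side allocation
// happens on the hot path.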
__device__ void ProducerCell_create_car(IndexT self) {
assert(dev_cells[self]->type == kCellTypeProducer);
if (dev_cells[self]->is_free()) {
float r = hiprand_uniform(&dev_cells[self]->random_state);
if (r < kCarAllocationRatio) {
IndexT new_car = new_Car(
/*seed=*/hiprand(&dev_cells[self]->random_state), /*cell=*/self,
/*max_velocity=*/hiprand(&dev_cells[self]->random_state) %
(kMaxVelocity / 2) +
kMaxVelocity / 2);
}
}
}
__device__ IndexT new_Cell(int max_velocity, float x, float y) {
IndexT idx = atomicAdd(&d_num_cells, 1);
dev_cells[idx]->car = nullptr;
dev_cells[idx]->max_velocity = max_velocity;
dev_cells[idx]->current_max_velocity = max_velocity;
dev_cells[idx]->num_incoming = 0;
dev_cells[idx]->num_outgoing = 0;
dev_cells[idx]->x = x;
dev_cells[idx]->y = y;
dev_cells[idx]->is_target = false;
dev_cells[idx]->type = kCellTypeNormal;
return idx;
}
__device__ IndexT new_ProducerCell(int max_velocity, float x, float y,
int seed) {
IndexT idx = new_Cell(max_velocity, x, y);
dev_cells[idx]->type = kCellTypeProducer;
hiprand_init(seed, 0, 0, &dev_cells[idx]->random_state);
return idx;
}
__global__ void kernel_traffic_light_step() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
if (d_traffic_lights[i]->get_num_cells() > 0) {
int timer = d_traffic_lights[i]->get_timer();
int phase_time = d_traffic_lights[i]->get_phase_time();
d_traffic_lights[i]->set_timer((timer + 1) % phase_time);
if (d_traffic_lights[i]->get_timer() == 0) {
int phase = d_traffic_lights[i]->get_phase();
assert(d_traffic_lights[i]->get_cell(phase) != nullptr);
phase = d_traffic_lights[i]->get_phase();
CellBase *ptr = d_traffic_lights[i]->get_cell(phase);
ptr->set_current_max_velocity(0);
int phase_2 = d_traffic_lights[i]->get_phase();
int num_cells = d_traffic_lights[i]->get_num_cells();
d_traffic_lights[i]->set_phase((phase_2 + 1) % num_cells);
phase_2 = d_traffic_lights[i]->get_phase();
ptr = d_traffic_lights[i]->get_cell(phase_2);
ptr->remove_speed_limit();
}
}
// d_traffic_lights[i]->step();
}
}
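// All kernels in this file use the same grid-stride loop, so any launch
// configuration covers the full index range. kernel_traffic_light_step
// advances each light's timer modulo its phase time; when the timer wraps, it
// sets the cell of the expiring phase back to "red" (max velocity 0) and
// lifts the limit on the next phase in round-robin order.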
__global__ void kernel_create_nodes() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
hiprandState_t state;
hiprand_init(i, 0, 0, &state);
assert(d_nodes[i].x >= 0 && d_nodes[i].x <= 1);
assert(d_nodes[i].y >= 0 && d_nodes[i].y <= 1);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
d_nodes[i].cell_out[j] = new_Cell(
/*max_velocity=*/hiprand(&state) % (kMaxVelocity / 2) +
kMaxVelocity / 2,
d_nodes[i].x, d_nodes[i].y);
}
}
}
__device__ IndexT connect_intersections(IndexT from, Node *target,
int incoming_idx,
hiprandState_t &state) {
// Create edge.
float dx = target->x - dev_cells[from]->x;
float dy = target->y - dev_cells[from]->y;
float dist = sqrt(dx * dx + dy * dy);
int steps = dist / kCellLength;
float step_x = dx / steps;
float step_y = dy / steps;
IndexT prev = from;
for (int j = 0; j < steps; ++j) {
float new_x = dev_cells[from]->x + j * step_x;
float new_y = dev_cells[from]->y + j * step_y;
assert(new_x >= 0 && new_x <= 1);
assert(new_y >= 0 && new_y <= 1);
IndexT next;
if (hiprand_uniform(&state) < kProducerRatio) {
next = new_ProducerCell(dev_cells[prev]->max_velocity, new_x, new_y,
hiprand(&state));
} else {
next = new_Cell(dev_cells[prev]->max_velocity, new_x, new_y);
}
if (hiprand_uniform(&state) < kTargetRatio) {
dev_cells[next]->set_target();
}
dev_cells[prev]->set_num_outgoing(1);
dev_cells[prev]->set_outgoing(0, dev_cells[next]);
dev_cells[next]->set_num_incoming(1);
dev_cells[next]->set_incoming(0, dev_cells[prev]);
prev = next;
}
// Connect to all outgoing nodes of target.
dev_cells[prev]->set_num_outgoing(target->num_outgoing);
for (int i = 0; i < target->num_outgoing; ++i) {
IndexT next = target->cell_out[i];
// num_incoming set later.
dev_cells[prev]->set_outgoing(i, dev_cells[next]);
dev_cells[next]->set_incoming(incoming_idx, dev_cells[prev]);
}
return prev;
}
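// connect_intersections lays a straight chain of cells between two
// intersections: the edge is split into dist / kCellLength steps, each new
// cell is randomly made a producer and/or a target, and consecutive cells are
// linked through their incoming/outgoing slots. The last cell of the chain is
// then wired to every outgoing cell of the destination node.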
__global__ void kernel_create_edges() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
hiprandState_t state;
hiprand_init(i, 0, 0, &state);
for (int k = 0; k < d_nodes[i].num_outgoing; ++k) {
int target = d_nodes[i].node_out[k];
int target_pos = d_nodes[i].node_out_pos[k];
IndexT last = connect_intersections(d_nodes[i].cell_out[k],
&d_nodes[target], target_pos, state);
dev_cells[last]->set_current_max_velocity(0);
d_nodes[target].cell_in[target_pos] = last;
}
}
}
__global__ void kernel_create_traffic_lights() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
new (d_traffic_lights[i]) TrafficLight(
/*num_cells=*/d_nodes[i].num_incoming,
/*phase_time=*/5);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
dev_cells[d_nodes[i].cell_out[j]]->set_num_incoming(
d_nodes[i].num_incoming);
}
for (int j = 0; j < d_nodes[i].num_incoming; ++j) {
d_traffic_lights[i]->set_cell(j, dev_cells[d_nodes[i].cell_in[j]]);
dev_cells[d_nodes[i].cell_in[j]]->set_current_max_velocity(
0); // Set to "red".
}
}
}
template <class Type, class TypeBase>
__global__ void device_alloc(TypeBase **ptr, int size) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
i += blockDim.x * gridDim.x) {
ptr[i] = new Type();
assert(ptr[i] != nullptr);
}
}
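// device_alloc constructs the concrete objects with device-side `new`, which
// draws from the device malloc heap -- hence the
// hipDeviceSetLimit(hipLimitMallocHeapSize, ...) call at the start of main().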
void create_street_network() {
int zero = 0;
hipMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
hipMemcpyHostToDevice);
hipMalloc(&h_nodes, sizeof(Node) * kNumIntersections);
hipMemcpyToSymbol(d_nodes, &h_nodes, sizeof(Node *), 0,
hipMemcpyHostToDevice);
hipMalloc(&d_traffic_lights, sizeof(TrafficLight *) * kNumIntersections);
hipLaunchKernelGGL(( device_alloc<TrafficLight, TrafficLightBase>)
, dim3((kNumIntersections + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, d_traffic_lights, kNumIntersections);
gpuErrchk(hipDeviceSynchronize());
// Create basic structure on host.
create_network_structure();
hipLaunchKernelGGL(( kernel_create_nodes), dim3((kNumIntersections + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_create_edges), dim3((kNumIntersections + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_create_traffic_lights), (kNumIntersections + kNumBlockSize - 1) /
kNumBlockSize,
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
// Allocate helper data structures for rendering.
hipMemcpyFromSymbol(&host_num_cells, d_num_cells, sizeof(int), 0,
hipMemcpyDeviceToHost);
hipMalloc(&host_Cell_pos_x, sizeof(float) * host_num_cells);
hipMemcpyToSymbol(dev_Cell_pos_x, &host_Cell_pos_x, sizeof(float *), 0,
hipMemcpyHostToDevice);
hipMalloc(&host_Cell_pos_y, sizeof(float) * host_num_cells);
hipMemcpyToSymbol(dev_Cell_pos_y, &host_Cell_pos_y, sizeof(float *), 0,
hipMemcpyHostToDevice);
hipMalloc(&host_Cell_occupied, sizeof(bool) * host_num_cells);
hipMemcpyToSymbol(dev_Cell_occupied, &host_Cell_occupied, sizeof(bool *), 0,
hipMemcpyHostToDevice);
host_data_Cell_pos_x = (float *)malloc(sizeof(float) * host_num_cells);
host_data_Cell_pos_y = (float *)malloc(sizeof(float) * host_num_cells);
host_data_Cell_occupied = (bool *)malloc(sizeof(bool) * host_num_cells);
#ifndef NDEBUG
printf("Number of cells: %i\n", host_num_cells);
#endif // NDEBUG
}
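// create_street_network first publishes the host-allocated device buffers
// through hipMemcpyToSymbol (the __device__ globals only hold pointers), then
// builds nodes, edges and traffic lights on the GPU, and finally sizes the
// rendering buffers once the total cell count is known.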
void step_traffic_lights() {
// TODO: Consider migrating this to SoaAlloc.
hipLaunchKernelGGL(( kernel_traffic_light_step), (kNumIntersections + kNumBlockSize - 1) /
kNumBlockSize,
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
__device__ void Cell_add_to_rendering_array(IndexT self) {
int idx = atomicAdd(&dev_num_cells, 1);
dev_Cell_pos_x[idx] = dev_cells[self]->x;
dev_Cell_pos_y[idx] = dev_cells[self]->y;
dev_Cell_occupied[idx] = !dev_cells[self]->is_free();
}
__global__ void kernel_Cell_add_to_rendering_array() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cells;
i += blockDim.x * gridDim.x) {
Cell_add_to_rendering_array(i);
}
}
void transfer_data() {
int zero = 0;
hipMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_Cell_add_to_rendering_array),
dim3((host_num_cells + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipMemcpy(host_data_Cell_pos_x, host_Cell_pos_x,
sizeof(float) * host_num_cells, hipMemcpyDeviceToHost);
hipMemcpy(host_data_Cell_pos_y, host_Cell_pos_y,
sizeof(float) * host_num_cells, hipMemcpyDeviceToHost);
hipMemcpy(host_data_Cell_occupied, host_Cell_occupied,
sizeof(bool) * host_num_cells, hipMemcpyDeviceToHost);
gpuErrchk(hipDeviceSynchronize());
}
__global__ void kernel_ProducerCell_create_car() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cells;
i += blockDim.x * gridDim.x) {
if (dev_cells[i]->type == kCellTypeProducer) {
ProducerCell_create_car(i);
}
}
}
__device__ void Car_step_prepare_path(IndexT self) {
dev_cars[self]->step_initialize_iteration();
dev_cars[self]->step_accelerate();
Car_step_extend_path(self);
Car_step_constraint_velocity(self);
Car_step_slow_down(self);
}
__global__ void kernel_Car_step_prepare_path() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
Car_step_prepare_path(i);
}
}
}
__global__ void kernel_fill_car_indices() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
d_Car_active[i] = 0;
d_Car_active_2[i] = 0;
}
}
__global__ void kernel_Car_step_move() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
Car_step_move(i);
}
}
}
__device__ int d_checksum;
__global__ void kernel_compute_checksum() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
atomicAdd(&d_checksum, 1);
}
}
}
int checksum() {
int zero = 0;
hipMemcpyToSymbol(d_checksum, &zero, sizeof(int), 0, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_compute_checksum), dim3(128), dim3(128), 0, 0, );
int result;
hipMemcpyFromSymbol(&result, d_checksum, sizeof(int), 0,
hipMemcpyDeviceToHost);
return result;
}
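// The checksum is simply the number of currently active cars, accumulated
// with atomicAdd into d_checksum and copied back to the host.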
void step() {
hipLaunchKernelGGL(( kernel_ProducerCell_create_car),
dim3((host_num_cells + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipMemcpyFromSymbol(&host_num_cars, d_num_cars, sizeof(int), 0,
hipMemcpyDeviceToHost);
step_traffic_lights();
hipLaunchKernelGGL(( kernel_Car_step_prepare_path),
dim3((host_num_cars + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Car_step_move), dim3((host_num_cars + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
void allocate_memory() {
hipMalloc(&dev_cells, sizeof(Cell *) * kMaxNumCells);
hipLaunchKernelGGL(( device_alloc<Cell, CellBase>)
, dim3((kMaxNumCells + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0,
dev_cells, kMaxNumCells);
gpuErrchk(hipDeviceSynchronize());
hipMalloc(&dev_cars, sizeof(Car *) * kMaxNumCars);
hipMalloc(&dev_cars_2, sizeof(Car *) * kMaxNumCars);
hipLaunchKernelGGL(( device_alloc<Car, CarBase>)
, dim3((kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0,
dev_cars, kMaxNumCars);
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( device_alloc<Car>)
, dim3((kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0,
dev_cars_2, kMaxNumCars);
gpuErrchk(hipDeviceSynchronize());
hipMalloc(&h_Car_active, sizeof(int) * kMaxNumCars);
hipMemcpyToSymbol(d_Car_active, &h_Car_active, sizeof(int *), 0,
hipMemcpyHostToDevice);
// Car *h_cars_2;
// hipMalloc(&h_cars_2, sizeof(Car) * kMaxNumCars);
// hipMemcpyToSymbol(dev_cars_2, &h_cars_2, sizeof(Car *), 0,
// hipMemcpyHostToDevice);
hipMalloc(&h_Car_active_2, sizeof(int) * kMaxNumCars);
hipMemcpyToSymbol(d_Car_active_2, &h_Car_active_2, sizeof(int *), 0,
hipMemcpyHostToDevice);
hipMalloc(&h_prefix_sum_temp, 3 * sizeof(int) * kMaxNumCars);
hipMemcpyToSymbol(d_prefix_sum_temp, &h_prefix_sum_temp, sizeof(int *), 0,
hipMemcpyHostToDevice);
hipMalloc(&h_prefix_sum_output, sizeof(int) * kMaxNumCars);
hipMemcpyToSymbol(d_prefix_sum_output, &h_prefix_sum_output, sizeof(int *),
0, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_fill_car_indices), dim3(128), dim3(128), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
int zero = 0;
hipMemcpyToSymbol(d_num_cells, &zero, sizeof(int), 0,
hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_num_cars, &zero, sizeof(int), 0, hipMemcpyHostToDevice);
}
__global__ void kernel_compact_initialize() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kMaxNumCars;
i += blockDim.x * gridDim.x) {
d_Car_active_2[i] = 0;
}
}
__global__ void kernel_compact_cars() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
int target = d_prefix_sum_output[i];
// Copy i --> target.
// dev_cars_2[target] = dev_cars[i];
memcpy(dev_cars_2[target], dev_cars[i], sizeof(Car));
d_Car_active_2[target] = 1;
// Update pointer in Cell.
dev_cars[i]->position->car = dev_cars[target];
atomicAdd(&d_num_cars_2, 1);
}
}
}
__global__ void kernel_compact_swap_pointers() {
{
auto *tmp = dev_cars;
dev_cars = dev_cars_2;
dev_cars_2 = tmp;
}
{
auto *tmp = d_Car_active;
d_Car_active = d_Car_active_2;
d_Car_active_2 = tmp;
}
d_num_cars = d_num_cars_2;
}
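// Car compaction: an exclusive prefix sum over d_Car_active yields, for each
// active car, its destination slot in the second buffer; kernel_compact_cars
// scatters the survivors and kernel_compact_swap_pointers swaps the buffers
// and counters. compact_car_array below pre-allocates a fixed scratch buffer
// for hipcub::DeviceScan::ExclusiveSum; the canonical two-call pattern from
// the CUB/hipCUB documentation instead queries the required size first,
// roughly (illustrative sketch only, not part of this program; d_in, d_out
// and n are placeholders):
//
//   void *d_temp = nullptr; size_t temp_bytes = 0;
//   hipcub::DeviceScan::ExclusiveSum(d_temp, temp_bytes, d_in, d_out, n); // size query
//   hipMalloc(&d_temp, temp_bytes);
//   hipcub::DeviceScan::ExclusiveSum(d_temp, temp_bytes, d_in, d_out, n); // actual scan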
void compact_car_array() {
int zero = 0;
hipMemcpyToSymbol(d_num_cars_2, &zero, sizeof(int), 0,
hipMemcpyHostToDevice);
hipMemcpyFromSymbol(&host_num_cars, d_num_cars, sizeof(int), 0,
hipMemcpyDeviceToHost);
// TODO: Prefix sum broken for num_objects < 256.
auto prefix_sum_size = host_num_cars < 256 ? 256 : host_num_cars;
size_t temp_size = 3 * kMaxNumCars;
hipcub::DeviceScan::ExclusiveSum(h_prefix_sum_temp, temp_size, h_Car_active,
h_prefix_sum_output, prefix_sum_size);
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_compact_initialize), dim3((kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_compact_cars), dim3((kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_compact_swap_pointers), dim3(1), dim3(1), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
auto *tmp = h_Car_active;
h_Car_active = h_Car_active_2;
h_Car_active_2 = tmp;
}
int main(int /*argc*/, char ** /*argv*/) {
hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
high_resolution_clock::time_point t1 = high_resolution_clock::now();
allocate_memory();
high_resolution_clock::time_point t2 = high_resolution_clock::now();
duration<double> alloc_time = duration_cast<duration<double>>(t2 - t1);
printf("alloc_time : %f\n",alloc_time.count());
printf("mem alloc done\n");
create_street_network();
auto time_start = std::chrono::system_clock::now();
for (int i = 0; i < kNumIterations; ++i) {
step();
compact_car_array();
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto millis =
std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
#ifndef NDEBUG
printf("Checksum: %i\n", checksum());
#endif // NDEBUG
printf("%lu\n", millis);
} | 945cb5eccc26a1c15cca01872bdefbf26106e554.cu | #include "traffic.h"
static const int kNumBlockSize = 256;
static const char kCellTypeNormal = 1;
static const char kCellTypeProducer = 2;
using IndexT = int;
using CellPointerT = IndexT;
#include "../dataset.h"
__managed__ CellBase **dev_cells;
// Need 2 arrays of both, so we can swap.
__device__ int *d_Car_active;
__device__ int *d_Car_active_2;
__managed__ CarBase **dev_cars;
__managed__ CarBase **dev_cars_2;
// For prefix sum array compaction.
__device__ int *d_prefix_sum_temp;
__device__ int *d_prefix_sum_output;
int *h_prefix_sum_temp;
int *h_prefix_sum_output;
int *h_Car_active;
int *h_Car_active_2;
__device__ int d_num_cells;
__device__ int d_num_cars;
__device__ int d_num_cars_2;
int host_num_cells;
int host_num_cars;
// TODO: Consider migrating to SoaAlloc.
TrafficLight *h_traffic_lights;
__managed__ TrafficLightBase **d_traffic_lights;
// Only for rendering.
__device__ int dev_num_cells;
__device__ float *dev_Cell_pos_x;
__device__ float *dev_Cell_pos_y;
__device__ bool *dev_Cell_occupied;
float *host_Cell_pos_x;
float *host_Cell_pos_y;
bool *host_Cell_occupied;
float *host_data_Cell_pos_x;
float *host_data_Cell_pos_y;
bool *host_data_Cell_occupied;
__device__ void Car_step_extend_path(IndexT self) {
CellBase *cell = dev_cars[self]->get_position();
CellBase *next_cell;
for (int i = 0; i < dev_cars[self]->get_velocity(); ++i) {
bool cond = cell->get_is_target();
if (cell->is_sink() || cond) {
break;
}
next_cell = dev_cars[self]->next_step(cell);
assert(next_cell != cell);
if (!next_cell->is_free())
break;
cell = next_cell;
dev_cars[self]->set_path(cell, i);
int path_len = dev_cars[self]->get_path_length();
dev_cars[self]->set_path_length(path_len + 1);
}
int path_len = dev_cars[self]->get_path_length();
dev_cars[self]->set_velocity(path_len);
}
__device__ void Car_step_constraint_velocity(IndexT self) {
// This is actually only needed for the very first iteration, because a car
// may be positioned on a traffic light cell.
int vel = dev_cars[self]->get_velocity();
CellBase *cell = dev_cars[self]->get_position();
if (vel > cell->get_current_max_velocity()) {
int max_velocity = cell->get_current_max_velocity();
dev_cars[self]->set_velocity(max_velocity);
}
int path_index = 0;
int distance = 1;
while (distance <= dev_cars[self]->get_velocity()) {
// Invariant: Movement of up to `distance - 1` many cells at `velocity_`
// is allowed.
// Now check if next cell can be entered.
CellBase *next_cell = dev_cars[self]->get_path(path_index);
// Avoid collision.
if (!next_cell->is_free()) {
// Cannot enter cell.
--distance;
dev_cars[self]->set_velocity(distance);
break;
} // else: Can enter next cell.
int curr_vel = dev_cars[self]->get_velocity();
if (curr_vel > next_cell->get_current_max_velocity()) {
// Car is too fast for this cell.
if (next_cell->get_current_max_velocity() > distance - 1) {
// Even if we slow down, we would still make progress.
int max = next_cell->get_current_max_velocity();
dev_cars[self]->set_velocity(max);
} else {
// Do not enter the next cell.
--distance;
assert(distance >= 0);
dev_cars[self]->set_velocity(distance);
break;
}
}
++distance;
++path_index;
}
--distance;
#ifndef NDEBUG
for (int i = 0; i < dev_cars[self]->get_velocity(); ++i) {
assert(dev_cars[self]->get_path(i)->is_free());
assert(i == 0 ||
dev_cars[self]->get_path(i - 1) != dev_cars[self]->get_path(i));
}
// TODO: Check why the cast is necessary.
assert(distance <= dev_cars[self]->get_velocity());
#endif // NDEBUG
}
__device__ void Car_step_move(IndexT self) {
CellBase *cell = dev_cars[self]->get_position();
for (int i = 0; i < dev_cars[self]->get_velocity(); ++i) {
assert(dev_cars[self]->get_path(i) != cell);
cell = dev_cars[self]->get_path(i);
assert(cell->is_free());
CellBase *ptr = dev_cars[self]->get_position();
ptr->release();
cell->occupy(dev_cars[self]);
dev_cars[self]->set_position(cell);
}
CellBase *ptr = dev_cars[self]->get_position();
bool cond = ptr->is_sink();
if (cond || ptr->get_is_target()) {
// Remove car from the simulation. Will be added again in the next
// iteration.
ptr->release();
dev_cars[self]->set_position(nullptr);
d_Car_active[self] = 0;
}
}
__device__ void Car_step_slow_down(IndexT self) {
// 20% chance of slowdown.
int vel = dev_cars[self]->get_velocity();
if (dev_cars[self]->random_uni() < 0.2 && vel > 0) {
dev_cars[self]->set_velocity(vel - 1);
}
}
__device__ IndexT new_Car(int seed, IndexT cell, int max_velocity) {
IndexT idx = atomicAdd(&d_num_cars, 1);
assert(idx >= 0 && idx < kMaxNumCars);
assert(!d_Car_active[idx]);
dev_cars[idx]->set_position(dev_cells[cell]);
dev_cars[idx]->set_path_length(0);
dev_cars[idx]->set_velocity(0);
dev_cars[idx]->set_max_velocity(max_velocity);
d_Car_active[idx] = 1;
assert(dev_cells[cell]->is_free());
dev_cells[cell]->occupy(dev_cars[idx]);
curand_init(seed, 0, 0, &dev_cars[idx]->random_state);
return idx;
}
__device__ void ProducerCell_create_car(IndexT self) {
assert(dev_cells[self]->type == kCellTypeProducer);
if (dev_cells[self]->is_free()) {
float r = curand_uniform(&dev_cells[self]->random_state);
if (r < kCarAllocationRatio) {
IndexT new_car = new_Car(
/*seed=*/curand(&dev_cells[self]->random_state), /*cell=*/self,
/*max_velocity=*/curand(&dev_cells[self]->random_state) %
(kMaxVelocity / 2) +
kMaxVelocity / 2);
}
}
}
__device__ IndexT new_Cell(int max_velocity, float x, float y) {
IndexT idx = atomicAdd(&d_num_cells, 1);
dev_cells[idx]->car = nullptr;
dev_cells[idx]->max_velocity = max_velocity;
dev_cells[idx]->current_max_velocity = max_velocity;
dev_cells[idx]->num_incoming = 0;
dev_cells[idx]->num_outgoing = 0;
dev_cells[idx]->x = x;
dev_cells[idx]->y = y;
dev_cells[idx]->is_target = false;
dev_cells[idx]->type = kCellTypeNormal;
return idx;
}
__device__ IndexT new_ProducerCell(int max_velocity, float x, float y,
int seed) {
IndexT idx = new_Cell(max_velocity, x, y);
dev_cells[idx]->type = kCellTypeProducer;
curand_init(seed, 0, 0, &dev_cells[idx]->random_state);
return idx;
}
__global__ void kernel_traffic_light_step() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
if (d_traffic_lights[i]->get_num_cells() > 0) {
int timer = d_traffic_lights[i]->get_timer();
int phase_time = d_traffic_lights[i]->get_phase_time();
d_traffic_lights[i]->set_timer((timer + 1) % phase_time);
if (d_traffic_lights[i]->get_timer() == 0) {
int phase = d_traffic_lights[i]->get_phase();
assert(d_traffic_lights[i]->get_cell(phase) != nullptr);
phase = d_traffic_lights[i]->get_phase();
CellBase *ptr = d_traffic_lights[i]->get_cell(phase);
ptr->set_current_max_velocity(0);
int phase_2 = d_traffic_lights[i]->get_phase();
int num_cells = d_traffic_lights[i]->get_num_cells();
d_traffic_lights[i]->set_phase((phase_2 + 1) % num_cells);
phase_2 = d_traffic_lights[i]->get_phase();
ptr = d_traffic_lights[i]->get_cell(phase_2);
ptr->remove_speed_limit();
}
}
// d_traffic_lights[i]->step();
}
}
__global__ void kernel_create_nodes() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
curandState_t state;
curand_init(i, 0, 0, &state);
assert(d_nodes[i].x >= 0 && d_nodes[i].x <= 1);
assert(d_nodes[i].y >= 0 && d_nodes[i].y <= 1);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
d_nodes[i].cell_out[j] = new_Cell(
/*max_velocity=*/curand(&state) % (kMaxVelocity / 2) +
kMaxVelocity / 2,
d_nodes[i].x, d_nodes[i].y);
}
}
}
__device__ IndexT connect_intersections(IndexT from, Node *target,
int incoming_idx,
curandState_t &state) {
// Create edge.
float dx = target->x - dev_cells[from]->x;
float dy = target->y - dev_cells[from]->y;
float dist = sqrt(dx * dx + dy * dy);
int steps = dist / kCellLength;
float step_x = dx / steps;
float step_y = dy / steps;
IndexT prev = from;
for (int j = 0; j < steps; ++j) {
float new_x = dev_cells[from]->x + j * step_x;
float new_y = dev_cells[from]->y + j * step_y;
assert(new_x >= 0 && new_x <= 1);
assert(new_y >= 0 && new_y <= 1);
IndexT next;
if (curand_uniform(&state) < kProducerRatio) {
next = new_ProducerCell(dev_cells[prev]->max_velocity, new_x, new_y,
curand(&state));
} else {
next = new_Cell(dev_cells[prev]->max_velocity, new_x, new_y);
}
if (curand_uniform(&state) < kTargetRatio) {
dev_cells[next]->set_target();
}
dev_cells[prev]->set_num_outgoing(1);
dev_cells[prev]->set_outgoing(0, dev_cells[next]);
dev_cells[next]->set_num_incoming(1);
dev_cells[next]->set_incoming(0, dev_cells[prev]);
prev = next;
}
// Connect to all outgoing nodes of target.
dev_cells[prev]->set_num_outgoing(target->num_outgoing);
for (int i = 0; i < target->num_outgoing; ++i) {
IndexT next = target->cell_out[i];
// num_incoming set later.
dev_cells[prev]->set_outgoing(i, dev_cells[next]);
dev_cells[next]->set_incoming(incoming_idx, dev_cells[prev]);
}
return prev;
}
__global__ void kernel_create_edges() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
curandState_t state;
curand_init(i, 0, 0, &state);
for (int k = 0; k < d_nodes[i].num_outgoing; ++k) {
int target = d_nodes[i].node_out[k];
int target_pos = d_nodes[i].node_out_pos[k];
IndexT last = connect_intersections(d_nodes[i].cell_out[k],
&d_nodes[target], target_pos, state);
dev_cells[last]->set_current_max_velocity(0);
d_nodes[target].cell_in[target_pos] = last;
}
}
}
__global__ void kernel_create_traffic_lights() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections;
i += blockDim.x * gridDim.x) {
new (d_traffic_lights[i]) TrafficLight(
/*num_cells=*/d_nodes[i].num_incoming,
/*phase_time=*/5);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
dev_cells[d_nodes[i].cell_out[j]]->set_num_incoming(
d_nodes[i].num_incoming);
}
for (int j = 0; j < d_nodes[i].num_incoming; ++j) {
d_traffic_lights[i]->set_cell(j, dev_cells[d_nodes[i].cell_in[j]]);
dev_cells[d_nodes[i].cell_in[j]]->set_current_max_velocity(
0); // Set to "red".
}
}
}
template <class Type, class TypeBase>
__global__ void device_alloc(TypeBase **ptr, int size) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
i += blockDim.x * gridDim.x) {
ptr[i] = new Type();
assert(ptr[i] != nullptr);
}
}
void create_street_network() {
int zero = 0;
cudaMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&h_nodes, sizeof(Node) * kNumIntersections);
cudaMemcpyToSymbol(d_nodes, &h_nodes, sizeof(Node *), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&d_traffic_lights, sizeof(TrafficLight *) * kNumIntersections);
device_alloc<TrafficLight, TrafficLightBase>
<<<(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>(d_traffic_lights, kNumIntersections);
gpuErrchk(cudaDeviceSynchronize());
// Create basic structure on host.
create_network_structure();
kernel_create_nodes<<<(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_create_edges<<<(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_create_traffic_lights<<<(kNumIntersections + kNumBlockSize - 1) /
kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
// Allocate helper data structures for rendering.
cudaMemcpyFromSymbol(&host_num_cells, d_num_cells, sizeof(int), 0,
cudaMemcpyDeviceToHost);
cudaMalloc(&host_Cell_pos_x, sizeof(float) * host_num_cells);
cudaMemcpyToSymbol(dev_Cell_pos_x, &host_Cell_pos_x, sizeof(float *), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&host_Cell_pos_y, sizeof(float) * host_num_cells);
cudaMemcpyToSymbol(dev_Cell_pos_y, &host_Cell_pos_y, sizeof(float *), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&host_Cell_occupied, sizeof(bool) * host_num_cells);
cudaMemcpyToSymbol(dev_Cell_occupied, &host_Cell_occupied, sizeof(bool *), 0,
cudaMemcpyHostToDevice);
host_data_Cell_pos_x = (float *)malloc(sizeof(float) * host_num_cells);
host_data_Cell_pos_y = (float *)malloc(sizeof(float) * host_num_cells);
host_data_Cell_occupied = (bool *)malloc(sizeof(bool) * host_num_cells);
#ifndef NDEBUG
printf("Number of cells: %i\n", host_num_cells);
#endif // NDEBUG
}
void step_traffic_lights() {
// TODO: Consider migrating this to SoaAlloc.
kernel_traffic_light_step<<<(kNumIntersections + kNumBlockSize - 1) /
kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
}
__device__ void Cell_add_to_rendering_array(IndexT self) {
int idx = atomicAdd(&dev_num_cells, 1);
dev_Cell_pos_x[idx] = dev_cells[self]->x;
dev_Cell_pos_y[idx] = dev_cells[self]->y;
dev_Cell_occupied[idx] = !dev_cells[self]->is_free();
}
__global__ void kernel_Cell_add_to_rendering_array() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cells;
i += blockDim.x * gridDim.x) {
Cell_add_to_rendering_array(i);
}
}
void transfer_data() {
int zero = 0;
cudaMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
cudaMemcpyHostToDevice);
kernel_Cell_add_to_rendering_array<<<
(host_num_cells + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
cudaMemcpy(host_data_Cell_pos_x, host_Cell_pos_x,
sizeof(float) * host_num_cells, cudaMemcpyDeviceToHost);
cudaMemcpy(host_data_Cell_pos_y, host_Cell_pos_y,
sizeof(float) * host_num_cells, cudaMemcpyDeviceToHost);
cudaMemcpy(host_data_Cell_occupied, host_Cell_occupied,
sizeof(bool) * host_num_cells, cudaMemcpyDeviceToHost);
gpuErrchk(cudaDeviceSynchronize());
}
__global__ void kernel_ProducerCell_create_car() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cells;
i += blockDim.x * gridDim.x) {
if (dev_cells[i]->type == kCellTypeProducer) {
ProducerCell_create_car(i);
}
}
}
__device__ void Car_step_prepare_path(IndexT self) {
dev_cars[self]->step_initialize_iteration();
dev_cars[self]->step_accelerate();
Car_step_extend_path(self);
Car_step_constraint_velocity(self);
Car_step_slow_down(self);
}
__global__ void kernel_Car_step_prepare_path() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
Car_step_prepare_path(i);
}
}
}
__global__ void kernel_fill_car_indices() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
d_Car_active[i] = 0;
d_Car_active_2[i] = 0;
}
}
__global__ void kernel_Car_step_move() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
Car_step_move(i);
}
}
}
__device__ int d_checksum;
__global__ void kernel_compute_checksum() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
atomicAdd(&d_checksum, 1);
}
}
}
int checksum() {
int zero = 0;
cudaMemcpyToSymbol(d_checksum, &zero, sizeof(int), 0, cudaMemcpyHostToDevice);
kernel_compute_checksum<<<128, 128>>>();
int result;
cudaMemcpyFromSymbol(&result, d_checksum, sizeof(int), 0,
cudaMemcpyDeviceToHost);
return result;
}
void step() {
kernel_ProducerCell_create_car<<<
(host_num_cells + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
cudaMemcpyFromSymbol(&host_num_cars, d_num_cars, sizeof(int), 0,
cudaMemcpyDeviceToHost);
step_traffic_lights();
kernel_Car_step_prepare_path<<<
(host_num_cars + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Car_step_move<<<(host_num_cars + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
}
void allocate_memory() {
cudaMalloc(&dev_cells, sizeof(Cell *) * kMaxNumCells);
device_alloc<Cell, CellBase>
<<<(kMaxNumCells + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>(
dev_cells, kMaxNumCells);
gpuErrchk(cudaDeviceSynchronize());
cudaMalloc(&dev_cars, sizeof(Car *) * kMaxNumCars);
cudaMalloc(&dev_cars_2, sizeof(Car *) * kMaxNumCars);
device_alloc<Car, CarBase>
<<<(kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>(
dev_cars, kMaxNumCars);
gpuErrchk(cudaDeviceSynchronize());
device_alloc<Car>
<<<(kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>(
dev_cars_2, kMaxNumCars);
gpuErrchk(cudaDeviceSynchronize());
cudaMalloc(&h_Car_active, sizeof(int) * kMaxNumCars);
cudaMemcpyToSymbol(d_Car_active, &h_Car_active, sizeof(int *), 0,
cudaMemcpyHostToDevice);
// Car *h_cars_2;
// cudaMalloc(&h_cars_2, sizeof(Car) * kMaxNumCars);
// cudaMemcpyToSymbol(dev_cars_2, &h_cars_2, sizeof(Car *), 0,
// cudaMemcpyHostToDevice);
cudaMalloc(&h_Car_active_2, sizeof(int) * kMaxNumCars);
cudaMemcpyToSymbol(d_Car_active_2, &h_Car_active_2, sizeof(int *), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&h_prefix_sum_temp, 3 * sizeof(int) * kMaxNumCars);
cudaMemcpyToSymbol(d_prefix_sum_temp, &h_prefix_sum_temp, sizeof(int *), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&h_prefix_sum_output, sizeof(int) * kMaxNumCars);
cudaMemcpyToSymbol(d_prefix_sum_output, &h_prefix_sum_output, sizeof(int *),
0, cudaMemcpyHostToDevice);
kernel_fill_car_indices<<<128, 128>>>();
gpuErrchk(cudaDeviceSynchronize());
int zero = 0;
cudaMemcpyToSymbol(d_num_cells, &zero, sizeof(int), 0,
cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_num_cars, &zero, sizeof(int), 0, cudaMemcpyHostToDevice);
}
__global__ void kernel_compact_initialize() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kMaxNumCars;
i += blockDim.x * gridDim.x) {
d_Car_active_2[i] = 0;
}
}
__global__ void kernel_compact_cars() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars;
i += blockDim.x * gridDim.x) {
if (d_Car_active[i]) {
int target = d_prefix_sum_output[i];
// Copy i --> target.
// dev_cars_2[target] = dev_cars[i];
memcpy(dev_cars_2[target], dev_cars[i], sizeof(Car));
d_Car_active_2[target] = 1;
// Update pointer in Cell.
dev_cars[i]->position->car = dev_cars[target];
atomicAdd(&d_num_cars_2, 1);
}
}
}
__global__ void kernel_compact_swap_pointers() {
{
auto *tmp = dev_cars;
dev_cars = dev_cars_2;
dev_cars_2 = tmp;
}
{
auto *tmp = d_Car_active;
d_Car_active = d_Car_active_2;
d_Car_active_2 = tmp;
}
d_num_cars = d_num_cars_2;
}
void compact_car_array() {
int zero = 0;
cudaMemcpyToSymbol(d_num_cars_2, &zero, sizeof(int), 0,
cudaMemcpyHostToDevice);
cudaMemcpyFromSymbol(&host_num_cars, d_num_cars, sizeof(int), 0,
cudaMemcpyDeviceToHost);
// TODO: Prefix sum broken for num_objects < 256.
auto prefix_sum_size = host_num_cars < 256 ? 256 : host_num_cars;
size_t temp_size = 3 * kMaxNumCars;
cub::DeviceScan::ExclusiveSum(h_prefix_sum_temp, temp_size, h_Car_active,
h_prefix_sum_output, prefix_sum_size);
gpuErrchk(cudaDeviceSynchronize());
kernel_compact_initialize<<<(kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_compact_cars<<<(kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_compact_swap_pointers<<<1, 1>>>();
gpuErrchk(cudaDeviceSynchronize());
auto *tmp = h_Car_active;
h_Car_active = h_Car_active_2;
h_Car_active_2 = tmp;
}
int main(int /*argc*/, char ** /*argv*/) {
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
high_resolution_clock::time_point t1 = high_resolution_clock::now();
allocate_memory();
high_resolution_clock::time_point t2 = high_resolution_clock::now();
duration<double> alloc_time = duration_cast<duration<double>>(t2 - t1);
printf("alloc_time : %f\n",alloc_time.count());
printf("mem alloc done\n");
create_street_network();
auto time_start = std::chrono::system_clock::now();
for (int i = 0; i < kNumIterations; ++i) {
step();
compact_car_array();
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto millis =
std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
#ifndef NDEBUG
printf("Checksum: %i\n", checksum());
#endif // NDEBUG
printf("%lu\n", millis);
} |
3259d43520082e11615c42be3fdc6c42db1189ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "layer.hpp"
#include "math_functions.hpp"
#include "channel_scale_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_scale(const int num, const int channels, const int spatial_dim,
Dtype alpha, const Dtype* data, const Dtype* norm_data,
Dtype beta, Dtype* output_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
output_data[index] = alpha * data[index] * norm_data[n * spatial_dim + s] + beta * output_data[index];
}
}
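// kernel_channel_scale runs one thread per (n, c, s) element of an
// N x C x (H*W) blob; norm_data is indexed by (n, s) only, so the same scale
// factor is broadcast across all channels at a spatial position, and alpha /
// beta follow the BLAS-style "out = alpha * x * scale + beta * out"
// convention.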
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim,
const Dtype* data, Dtype* sum_data) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
sum_data[index] = sum;
}
}
template <typename Dtype>
void ChannelScaleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* scale_data = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if (do_forward_) {
if (global_scale_) {
int count = bottom[0]->count();
Dtype* scale = this->blobs_[0]->mutable_cpu_data();
Dtype mean_norm = bottom[1]->asum_data() / (Dtype)bottom[1]->count();
if (this->phase_ == TRAIN) {
if (scale[0] < 0) {
scale[0] = mean_norm;
}
else {
scale[0] = scale[0] * 0.99 + mean_norm * 0.01;
}
scale[0] = ::min(scale[0], max_global_scale_);
scale[0] = ::max(scale[0], min_global_scale_);
}
if (top.size() == 2) {
top[1]->mutable_cpu_data()[0] = scale[0];
}
caffe_gpu_scale(count, scale[0], bottom_data, top_data);
}
else {
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int spatial_dim = bottom[0]->height() * bottom[0]->width();
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, Dtype(1), bottom_data, scale_data, Dtype(0), top_data);
}
}
else {
caffe_copy(bottom[0]->count(), bottom_data, top_data);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ChannelScaleLayer);
} // namespace caffe | 3259d43520082e11615c42be3fdc6c42db1189ab.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "layer.hpp"
#include "math_functions.hpp"
#include "channel_scale_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_scale(const int num, const int channels, const int spatial_dim,
Dtype alpha, const Dtype* data, const Dtype* norm_data,
Dtype beta, Dtype* output_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
output_data[index] = alpha * data[index] * norm_data[n * spatial_dim + s] + beta * output_data[index];
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim,
const Dtype* data, Dtype* sum_data) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
sum_data[index] = sum;
}
}
template <typename Dtype>
void ChannelScaleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* scale_data = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if (do_forward_) {
if (global_scale_) {
int count = bottom[0]->count();
Dtype* scale = this->blobs_[0]->mutable_cpu_data();
Dtype mean_norm = bottom[1]->asum_data() / (Dtype)bottom[1]->count();
if (this->phase_ == TRAIN) {
if (scale[0] < 0) {
scale[0] = mean_norm;
}
else {
scale[0] = scale[0] * 0.99 + mean_norm * 0.01;
}
scale[0] = std::min(scale[0], max_global_scale_);
scale[0] = std::max(scale[0], min_global_scale_);
}
if (top.size() == 2) {
top[1]->mutable_cpu_data()[0] = scale[0];
}
caffe_gpu_scale(count, scale[0], bottom_data, top_data);
}
else {
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int spatial_dim = bottom[0]->height() * bottom[0]->width();
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, Dtype(1), bottom_data, scale_data, Dtype(0), top_data);
}
}
else {
caffe_copy(bottom[0]->count(), bottom_data, top_data);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ChannelScaleLayer);
} // namespace caffe |
c3535a41702b742a0ad023f71e91539cbb11b95c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//xfail:NOT_ALL_VERIFIED
//--blockDim=1024 --gridDim=1 --no-inline
//error: possible null pointer access
__device__ float multiplyByTwo(float *v, unsigned int tid)
{
return v[tid] * 2.0f;
}
__device__ float divideByTwo(float *v, unsigned int tid)
{
return v[tid] * 0.5f;
}
typedef float(*funcType)(float*, unsigned int);
__global__ void foor(float *v, unsigned int size, unsigned int i)
{
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
funcType f;
if (i == 1)
f = multiplyByTwo;
else if (i == 2)
f = divideByTwo;
else
f = NULL;
if (tid < size)
{
float x = (*f)(v, tid);
x += multiplyByTwo(v, tid);
}
}
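// GPUVerify reports the possible null pointer access flagged above because f
// stays NULL whenever i is neither 1 nor 2, yet (*f)(v, tid) is invoked
// unconditionally for every tid < size.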
| c3535a41702b742a0ad023f71e91539cbb11b95c.cu | //xfail:NOT_ALL_VERIFIED
//--blockDim=1024 --gridDim=1 --no-inline
//error: possible null pointer access
__device__ float multiplyByTwo(float *v, unsigned int tid)
{
return v[tid] * 2.0f;
}
__device__ float divideByTwo(float *v, unsigned int tid)
{
return v[tid] * 0.5f;
}
typedef float(*funcType)(float*, unsigned int);
__global__ void foor(float *v, unsigned int size, unsigned int i)
{
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
funcType f;
if (i == 1)
f = multiplyByTwo;
else if (i == 2)
f = divideByTwo;
else
f = NULL;
if (tid < size)
{
float x = (*f)(v, tid);
x += multiplyByTwo(v, tid);
}
}
|
87653125e941921e3510ab18b81cfca2d5a4f9a2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
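// Autotuning harness: for each matrix size (up to the count given on the
// command line) and each of the 20 block shapes above, the problem size is
// padded up to a multiple of the block dimensions, the kernel is warmed up
// with 10 launches, then 1000 launches are timed with steady_clock and
// reported in microseconds as [time,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].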
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((sum), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((sum), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((sum), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 87653125e941921e3510ab18b81cfca2d5a4f9a2.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sum<<<gridBlock,threadBlock>>>(a,b,c);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sum<<<gridBlock,threadBlock>>>(a,b,c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sum<<<gridBlock,threadBlock>>>(a,b,c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bd7d6c0a10bd4c34c70fbb32fe45f73eee19df4f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matmulKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat1 = NULL;
hipMalloc(&mat1, XSIZE*YSIZE);
float *mat2 = NULL;
hipMalloc(&mat2, XSIZE*YSIZE);
float *matP = NULL;
hipMalloc(&matP, XSIZE*YSIZE);
int dim = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((matmulKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, mat1,mat2,matP,dim);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((matmulKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, mat1,mat2,matP,dim);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((matmulKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, mat1,mat2,matP,dim);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | bd7d6c0a10bd4c34c70fbb32fe45f73eee19df4f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matmulKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat1 = NULL;
cudaMalloc(&mat1, XSIZE*YSIZE);
float *mat2 = NULL;
cudaMalloc(&mat2, XSIZE*YSIZE);
float *matP = NULL;
cudaMalloc(&matP, XSIZE*YSIZE);
int dim = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matmulKernel<<<gridBlock,threadBlock>>>(mat1,mat2,matP,dim);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matmulKernel<<<gridBlock,threadBlock>>>(mat1,mat2,matP,dim);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matmulKernel<<<gridBlock,threadBlock>>>(mat1,mat2,matP,dim);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
fb69ed9f2ed73873bd946d3d8b0776d8119938c0.hip | // !!! This is a file automatically generated by hipify!!!
#define LIMIT -999
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <needle.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
// includes, kernels
#include <needle_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
int blosum62[24][24] = {
{ 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4},
{-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4},
{-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4},
{-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{ 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4},
{-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4},
{-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4},
{-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4},
{-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4},
{-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4},
{-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4},
{-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4},
{-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4},
{-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4},
{ 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4},
{ 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4},
{-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4},
{-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4},
{ 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4},
{-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4},
{-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1}
};
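// Needleman-Wunsch benchmark (Rodinia "needle"): fills the dynamic-programming
// score matrix for two random sequences using the blosum62 substitution table
// above. The anti-diagonal wavefront is processed in blocks, first in a
// top-left sweep (needle_cuda_shared_1) and then a bottom-right sweep
// (needle_cuda_shared_2); the optional TRACEBACK block reconstructs one
// alignment path on the host.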
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]);
fprintf(stderr, "\t<dimension> - x and y dimensions\n");
fprintf(stderr, "\t<penalty> - penalty(positive integer)\n");
exit(1);
}
void runTest( int argc, char** argv)
{
int max_rows, max_cols, penalty;
int *input_itemsets, *output_itemsets, *referrence;
int *matrix_cuda, *matrix_cuda_out, *referrence_cuda;
int size;
// The lengths of the two sequences must be divisible by 16.
// At the current stage, max_rows must equal max_cols.
if (argc == 3)
{
max_rows = atoi(argv[1]);
max_cols = atoi(argv[1]);
penalty = atoi(argv[2]);
}
else{
usage(argc, argv);
}
if(atoi(argv[1])%16!=0){
fprintf(stderr,"The dimension values must be a multiple of 16\n");
exit(1);
}
max_rows = max_rows + 1;
max_cols = max_cols + 1;
referrence = (int *)malloc( max_rows * max_cols * sizeof(int) );
input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
if (!input_itemsets)
fprintf(stderr, "error: can not allocate memory");
srand ( 7 );
for (int i = 0 ; i < max_cols; i++){
for (int j = 0 ; j < max_rows; j++){
input_itemsets[i*max_cols+j] = 0;
output_itemsets[i*max_cols+j]=0;
}
}
printf("Start Needleman-Wunsch\n");
for( int i=1; i< max_rows ; i++){ //please define your own sequence.
input_itemsets[i*max_cols] = rand() % 10 + 1;
}
for( int j=1; j< max_cols ; j++){ //please define your own sequence.
input_itemsets[j] = rand() % 10 + 1;
}
for (int i = 1 ; i < max_cols; i++){
for (int j = 1 ; j < max_rows; j++){
referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]];
}
}
for( int i = 1; i< max_rows ; i++)
input_itemsets[i*max_cols] = -i * penalty;
for( int j = 1; j< max_cols ; j++)
input_itemsets[j] = -j * penalty;
size = max_cols * max_rows;
hipMalloc((void**)& referrence_cuda, sizeof(int)*size);
hipMalloc((void**)& matrix_cuda, sizeof(int)*size);
hipMalloc((void**)& matrix_cuda_out, sizeof(int)*size);
hipMemcpy(referrence_cuda, referrence, sizeof(int) * size, hipMemcpyHostToDevice);
hipMemcpy(matrix_cuda, input_itemsets, sizeof(int) * size, hipMemcpyHostToDevice);
dim3 dimGrid;
dim3 dimBlock(BLOCK_SIZE, 1);
int block_width = ( max_cols - 1 )/BLOCK_SIZE;
printf("Processing top-left matrix\n");
//process top-left matrix
for( int i = 1 ; i <= block_width ; i++){
dimGrid.x = i;
dimGrid.y = 1;
hipLaunchKernelGGL(( needle_cuda_shared_1), dim3(dimGrid), dim3(dimBlock), 0, 0, referrence_cuda, matrix_cuda, matrix_cuda_out
,max_cols, penalty, i, block_width);
}
printf("Processing bottom-right matrix\n");
//process bottom-right matrix
for( int i = block_width - 1 ; i >= 1 ; i--){
dimGrid.x = i;
dimGrid.y = 1;
hipLaunchKernelGGL(( needle_cuda_shared_2), dim3(dimGrid), dim3(dimBlock), 0, 0, referrence_cuda, matrix_cuda, matrix_cuda_out
,max_cols, penalty, i, block_width);
}
hipMemcpy(output_itemsets, matrix_cuda, sizeof(int) * size, hipMemcpyDeviceToHost);
//#define TRACEBACK
#ifdef TRACEBACK
FILE *fpo = fopen("result.txt","w");
fprintf(fpo, "print traceback value GPU:\n");
for (int i = max_rows - 2, j = max_rows - 2; i>=0, j>=0;){
int nw, n, w, traceback;
if ( i == max_rows - 2 && j == max_rows - 2 )
fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element
if ( i == 0 && j == 0 )
break;
if ( i > 0 && j > 0 ){
nw = output_itemsets[(i - 1) * max_cols + j - 1];
w = output_itemsets[ i * max_cols + j - 1 ];
n = output_itemsets[(i - 1) * max_cols + j];
}
else if ( i == 0 ){
nw = n = LIMIT;
w = output_itemsets[ i * max_cols + j - 1 ];
}
else if ( j == 0 ){
nw = w = LIMIT;
n = output_itemsets[(i - 1) * max_cols + j];
}
else{
}
//traceback = maximum(nw, w, n);
int new_nw, new_w, new_n;
new_nw = nw + referrence[i * max_cols + j];
new_w = w - penalty;
new_n = n - penalty;
traceback = maximum(new_nw, new_w, new_n);
if(traceback == new_nw)
traceback = nw;
if(traceback == new_w)
traceback = w;
if(traceback == new_n)
traceback = n;
fprintf(fpo, "%d ", traceback);
if(traceback == nw )
{i--; j--; continue;}
else if(traceback == w )
{j--; continue;}
else if(traceback == n )
{i--; continue;}
else
;
}
fclose(fpo);
#endif
hipFree(referrence_cuda);
hipFree(matrix_cuda);
hipFree(matrix_cuda_out);
free(referrence);
free(input_itemsets);
free(output_itemsets);
}
| fb69ed9f2ed73873bd946d3d8b0776d8119938c0.cu | #define LIMIT -999
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <needle.h>
#include <cuda.h>
#include <sys/time.h>
// includes, kernels
#include <needle_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
int blosum62[24][24] = {
{ 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4},
{-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4},
{-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4},
{-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{ 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4},
{-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4},
{-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4},
{-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4},
{-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4},
{-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4},
{-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4},
{-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4},
{-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4},
{-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4},
{ 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4},
{ 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4},
{-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4},
{-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4},
{ 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4},
{-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4},
{-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1}
};
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]);
fprintf(stderr, "\t<dimension> - x and y dimensions\n");
fprintf(stderr, "\t<penalty> - penalty(positive integer)\n");
exit(1);
}
void runTest( int argc, char** argv)
{
int max_rows, max_cols, penalty;
int *input_itemsets, *output_itemsets, *referrence;
int *matrix_cuda, *matrix_cuda_out, *referrence_cuda;
int size;
// The lengths of the two sequences must be divisible by 16.
// At the current stage max_rows needs to equal max_cols.
if (argc == 3)
{
max_rows = atoi(argv[1]);
max_cols = atoi(argv[1]);
penalty = atoi(argv[2]);
}
else{
usage(argc, argv);
}
if(atoi(argv[1])%16!=0){
fprintf(stderr,"The dimension values must be a multiple of 16\n");
exit(1);
}
max_rows = max_rows + 1;
max_cols = max_cols + 1;
referrence = (int *)malloc( max_rows * max_cols * sizeof(int) );
input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
if (!input_itemsets)
fprintf(stderr, "error: can not allocate memory");
srand ( 7 );
for (int i = 0 ; i < max_cols; i++){
for (int j = 0 ; j < max_rows; j++){
input_itemsets[i*max_cols+j] = 0;
output_itemsets[i*max_cols+j]=0;
}
}
printf("Start Needleman-Wunsch\n");
for( int i=1; i< max_rows ; i++){ //please define your own sequence.
input_itemsets[i*max_cols] = rand() % 10 + 1;
}
for( int j=1; j< max_cols ; j++){ //please define your own sequence.
input_itemsets[j] = rand() % 10 + 1;
}
for (int i = 1 ; i < max_cols; i++){
for (int j = 1 ; j < max_rows; j++){
referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]];
}
}
for( int i = 1; i< max_rows ; i++)
input_itemsets[i*max_cols] = -i * penalty;
for( int j = 1; j< max_cols ; j++)
input_itemsets[j] = -j * penalty;
size = max_cols * max_rows;
cudaMalloc((void**)& referrence_cuda, sizeof(int)*size);
cudaMalloc((void**)& matrix_cuda, sizeof(int)*size);
cudaMalloc((void**)& matrix_cuda_out, sizeof(int)*size);
cudaMemcpy(referrence_cuda, referrence, sizeof(int) * size, cudaMemcpyHostToDevice);
cudaMemcpy(matrix_cuda, input_itemsets, sizeof(int) * size, cudaMemcpyHostToDevice);
dim3 dimGrid;
dim3 dimBlock(BLOCK_SIZE, 1);
int block_width = ( max_cols - 1 )/BLOCK_SIZE;
printf("Processing top-left matrix\n");
//process top-left matrix
for( int i = 1 ; i <= block_width ; i++){
dimGrid.x = i;
dimGrid.y = 1;
needle_cuda_shared_1<<<dimGrid, dimBlock>>>(referrence_cuda, matrix_cuda, matrix_cuda_out
,max_cols, penalty, i, block_width);
}
printf("Processing bottom-right matrix\n");
//process bottom-right matrix
for( int i = block_width - 1 ; i >= 1 ; i--){
dimGrid.x = i;
dimGrid.y = 1;
needle_cuda_shared_2<<<dimGrid, dimBlock>>>(referrence_cuda, matrix_cuda, matrix_cuda_out
,max_cols, penalty, i, block_width);
}
cudaMemcpy(output_itemsets, matrix_cuda, sizeof(int) * size, cudaMemcpyDeviceToHost);
//#define TRACEBACK
#ifdef TRACEBACK
FILE *fpo = fopen("result.txt","w");
fprintf(fpo, "print traceback value GPU:\n");
for (int i = max_rows - 2, j = max_rows - 2; i>=0 && j>=0;){
int nw, n, w, traceback;
if ( i == max_rows - 2 && j == max_rows - 2 )
fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element
if ( i == 0 && j == 0 )
break;
if ( i > 0 && j > 0 ){
nw = output_itemsets[(i - 1) * max_cols + j - 1];
w = output_itemsets[ i * max_cols + j - 1 ];
n = output_itemsets[(i - 1) * max_cols + j];
}
else if ( i == 0 ){
nw = n = LIMIT;
w = output_itemsets[ i * max_cols + j - 1 ];
}
else if ( j == 0 ){
nw = w = LIMIT;
n = output_itemsets[(i - 1) * max_cols + j];
}
else{
}
//traceback = maximum(nw, w, n);
int new_nw, new_w, new_n;
new_nw = nw + referrence[i * max_cols + j];
new_w = w - penalty;
new_n = n - penalty;
traceback = maximum(new_nw, new_w, new_n);
if(traceback == new_nw)
traceback = nw;
if(traceback == new_w)
traceback = w;
if(traceback == new_n)
traceback = n;
fprintf(fpo, "%d ", traceback);
if(traceback == nw )
{i--; j--; continue;}
else if(traceback == w )
{j--; continue;}
else if(traceback == n )
{i--; continue;}
else
;
}
fclose(fpo);
#endif
cudaFree(referrence_cuda);
cudaFree(matrix_cuda);
cudaFree(matrix_cuda_out);
free(referrence);
free(input_itemsets);
free(output_itemsets);
}
|
a20d1f867d7829cfc7f2501d068ce9aef496afba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "fhn_mod.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using modified FHN 1961 GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
//the cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) {
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes) {
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; //u dimensionless
*((real * )((char *) sv + pitch * 1) + threadID) = 0.0f; //v dimensionless
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
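// Explicit (forward) Euler integration: each of the num_steps iterations evaluates
// the right-hand side once and advances every state variable by dt * rDY[i].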
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id);
for(int i = 0; i < NEQ; i++) {
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
inline __device__ void RHS_gpu(real *sv_, real *rDY_, real stim_current, int threadID_) {
//State variables
const real u = *((real*)((char*)sv_ + pitch * 0) + threadID_);
const real v = *((real*)((char*)sv_ + pitch * 1) + threadID_);
const real a = 0.2f;
const real b = 0.5f;
const real k = 36.0;
const real epsilon = 0.000150;
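// The two lines below evaluate the modified FitzHugh-Nagumo right-hand side:
// du/dt = k*( u*(1-u)*(u-a) - u*v ) + I_stim
// dv/dt = k*epsilon*( b*u - v )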
rDY_[0] = k*(u*(1.0f - u)*(u - a) - u*v) + stim_current;
rDY_[1] = k*epsilon*(b*u - v);
}
| a20d1f867d7829cfc7f2501d068ce9aef496afba.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "fhn_mod.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using modified FHN 1961 GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) {
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes) {
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; //u dimensionless
*((real * )((char *) sv + pitch * 1) + threadID) = 0.0f; //v dimensionless
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id);
for(int i = 0; i < NEQ; i++) {
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
inline __device__ void RHS_gpu(real *sv_, real *rDY_, real stim_current, int threadID_) {
//State variables
const real u = *((real*)((char*)sv_ + pitch * 0) + threadID_);
const real v = *((real*)((char*)sv_ + pitch * 1) + threadID_);
const real a = 0.2f;
const real b = 0.5f;
const real k = 36.0;
const real epsilon = 0.000150;
rDY_[0] = k*(u*(1.0f - u)*(u - a) - u*v) + stim_current;
rDY_[1] = k*epsilon*(b*u - v);
}
|
7115336a6462755c4a24710c5113aa77fcd9151d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <algorithm>
#include <chrono>
#include <hip/hip_runtime.h>
using namespace std;
__global__
void saxpy(size_t n, float alpha, float *a, float *b)
{
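// One possible implementation, added for illustration (the original exercise leaves
// this kernel empty): each thread updates a single element in place,
// b[i] = alpha * a[i] + b[i], which is what the verification loop in main expects.
size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
    b[i] = alpha * a[i] + b[i];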
}
int main(int argc, char **argv)
{
size_t N = 1024;
float *a, *b, *res, *da, *db;
float alpha = 2.0f;
// Allocate and initialize vectors a and b on the CPU
a = (float *) malloc(N * sizeof(float));
b = (float *) malloc(N * sizeof(float));
res = (float *) malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
a[i] = i;
b[i] = 1.0f;
}
// Allocate device vectors da and db, then copy a and b into them
// TODO
// Launch the CUDA kernel for saxpy
// TODO
// Copy results back to the CPU
// TODO
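// One possible completion of the three TODO steps above, added for illustration;
// the original skeleton leaves them as an exercise. The block size of 256 is an
// arbitrary choice, and error checking is omitted for brevity.
hipMalloc((void **) &da, N * sizeof(float));
hipMalloc((void **) &db, N * sizeof(float));
hipMemcpy(da, a, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(db, b, N * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(saxpy, dim3((N + 255) / 256), dim3(256), 0, 0, N, alpha, da, db);
hipMemcpy(res, db, N * sizeof(float), hipMemcpyDeviceToHost);
hipFree(da);
hipFree(db);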
// Verify results
{
int i;
for (i = 0; i < N; i++) {
float temp = alpha * a[i] + b[i];
if (std::abs(res[i] - temp) / std::max(1e-6f, temp) > 1e-6) {
cout << res[i] << " " << temp << endl;
break;
}
}
if (i == N) {
cout << "saxpy on GPU is correct." << endl;
} else {
cout << "saxpy on GPU is incorrect on element " << i << "." << endl;
}
}
return 0;
}
| 7115336a6462755c4a24710c5113aa77fcd9151d.cu | #include <iostream>
#include <algorithm>
#include <chrono>
#include <cuda.h>
using namespace std;
__global__
void saxpy(size_t n, float alpha, float *a, float *b)
{
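// One possible implementation, added for illustration (the original exercise leaves
// this kernel empty): each thread updates a single element in place,
// b[i] = alpha * a[i] + b[i], which is what the verification loop in main expects.
size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
    b[i] = alpha * a[i] + b[i];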
}
int main(int argc, char **argv)
{
size_t N = 1024;
float *a, *b, *res, *da, *db;
float alpha = 2.0f;
// Allocate and initialize vectors a and b on the CPU
a = (float *) malloc(N * sizeof(float));
b = (float *) malloc(N * sizeof(float));
res = (float *) malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
a[i] = i;
b[i] = 1.0f;
}
// Allocate device vectors da and db, then copy a and b into them
// TODO
// Launch the CUDA kernel for saxpy
// TODO
// Copy results back to the CPU
// TODO
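// One possible completion of the three TODO steps above, added for illustration;
// the original skeleton leaves them as an exercise. The block size of 256 is an
// arbitrary choice, and error checking is omitted for brevity.
cudaMalloc((void **) &da, N * sizeof(float));
cudaMalloc((void **) &db, N * sizeof(float));
cudaMemcpy(da, a, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(db, b, N * sizeof(float), cudaMemcpyHostToDevice);
saxpy<<<(N + 255) / 256, 256>>>(N, alpha, da, db);
cudaMemcpy(res, db, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(da);
cudaFree(db);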
// Verify results
{
int i;
for (i = 0; i < N; i++) {
float temp = alpha * a[i] + b[i];
if (std::abs(res[i] - temp) / std::max(1e-6f, temp) > 1e-6) {
cout << res[i] << " " << temp << endl;
break;
}
}
if (i == N) {
cout << "saxpy on GPU is correct." << endl;
} else {
cout << "saxpy on GPU is incorrect on element " << i << "." << endl;
}
}
return 0;
}
|
cd704ad027be97470c1301e1eb768e3de95be37f.hip | // !!! This is a file automatically generated by hipify!!!
#define CHECK(call) \
{ \
const hipError_t error = call; \
if( error != hipSuccess ) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
} | cd704ad027be97470c1301e1eb768e3de95be37f.cu | #define CHECK(call) \
{ \
const cudaError_t error = call; \
if( error != cudaSuccess ) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
} |
97a6d9877851dd656ee61555b3344d016daf5a5f.hip | // !!! This is a file automatically generated by hipify!!!
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_reduction_cuda
#define EIGEN_USE_GPU
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
// The EIGEN_CUDACC_VER macro is provided by
// unsupported/Eigen/CXX11/Tensor included above
#if defined EIGEN_CUDACC_VER && EIGEN_CUDACC_VER >= 70500
#include <hip/hip_fp16.h>
#endif
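// Each test below follows the same pattern: fill a tensor with random values, compute
// the reduction on the host as a reference, run the corresponding expression on the
// GPU through TensorMaps bound to device memory, copy the result back and compare.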
template<typename Type, int DataLayout>
static void test_full_reductions() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
const int num_rows = internal::random<int>(1024, 5*1024);
const int num_cols = internal::random<int>(1024, 5*1024);
Tensor<Type, 2, DataLayout> in(num_rows, num_cols);
in.setRandom();
Tensor<Type, 0, DataLayout> full_redux;
full_redux = in.sum();
std::size_t in_bytes = in.size() * sizeof(Type);
std::size_t out_bytes = full_redux.size() * sizeof(Type);
Type* gpu_in_ptr = static_cast<Type*>(gpu_device.allocate(in_bytes));
Type* gpu_out_ptr = static_cast<Type*>(gpu_device.allocate(out_bytes));
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<Type, 2, DataLayout> > in_gpu(gpu_in_ptr, num_rows, num_cols);
TensorMap<Tensor<Type, 0, DataLayout> > out_gpu(gpu_out_ptr);
out_gpu.device(gpu_device) = in_gpu.sum();
Tensor<Type, 0, DataLayout> full_redux_gpu;
gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
gpu_device.synchronize();
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
template<typename Type, int DataLayout>
static void test_first_dim_reductions() {
int dim_x = 33;
int dim_y = 1;
int dim_z = 128;
Tensor<Type, 3, DataLayout> in(dim_x, dim_y, dim_z);
in.setRandom();
Eigen::array<int, 1> red_axis;
red_axis[0] = 0;
Tensor<Type, 2, DataLayout> redux = in.sum(red_axis);
// Create device
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice dev(&stream);
// Create data(T)
Type* in_data = (Type*)dev.allocate(dim_x*dim_y*dim_z*sizeof(Type));
Type* out_data = (Type*)dev.allocate(dim_z*dim_y*sizeof(Type));
Eigen::TensorMap<Eigen::Tensor<Type, 3, DataLayout> > gpu_in(in_data, dim_x, dim_y, dim_z);
Eigen::TensorMap<Eigen::Tensor<Type, 2, DataLayout> > gpu_out(out_data, dim_y, dim_z);
// Perform operation
dev.memcpyHostToDevice(in_data, in.data(), in.size()*sizeof(Type));
gpu_out.device(dev) = gpu_in.sum(red_axis);
gpu_out.device(dev) += gpu_in.sum(red_axis);
Tensor<Type, 2, DataLayout> redux_gpu(dim_y, dim_z);
dev.memcpyDeviceToHost(redux_gpu.data(), out_data, gpu_out.size()*sizeof(Type));
dev.synchronize();
// Check that the CPU and GPU reductions return the same result.
for (int i = 0; i < gpu_out.size(); ++i) {
VERIFY_IS_APPROX(2*redux(i), redux_gpu(i));
}
dev.deallocate(in_data);
dev.deallocate(out_data);
}
template<typename Type, int DataLayout>
static void test_last_dim_reductions() {
int dim_x = 128;
int dim_y = 1;
int dim_z = 33;
Tensor<Type, 3, DataLayout> in(dim_x, dim_y, dim_z);
in.setRandom();
Eigen::array<int, 1> red_axis;
red_axis[0] = 2;
Tensor<Type, 2, DataLayout> redux = in.sum(red_axis);
// Create device
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice dev(&stream);
// Create data
Type* in_data = (Type*)dev.allocate(dim_x*dim_y*dim_z*sizeof(Type));
Type* out_data = (Type*)dev.allocate(dim_x*dim_y*sizeof(Type));
Eigen::TensorMap<Eigen::Tensor<Type, 3, DataLayout> > gpu_in(in_data, dim_x, dim_y, dim_z);
Eigen::TensorMap<Eigen::Tensor<Type, 2, DataLayout> > gpu_out(out_data, dim_x, dim_y);
// Perform operation
dev.memcpyHostToDevice(in_data, in.data(), in.size()*sizeof(Type));
gpu_out.device(dev) = gpu_in.sum(red_axis);
gpu_out.device(dev) += gpu_in.sum(red_axis);
Tensor<Type, 2, DataLayout> redux_gpu(dim_x, dim_y);
dev.memcpyDeviceToHost(redux_gpu.data(), out_data, gpu_out.size()*sizeof(Type));
dev.synchronize();
// Check that the CPU and GPU reductions return the same result.
for (int i = 0; i < gpu_out.size(); ++i) {
VERIFY_IS_APPROX(2*redux(i), redux_gpu(i));
}
dev.deallocate(in_data);
dev.deallocate(out_data);
}
void test_cxx11_tensor_reduction_cuda() {
CALL_SUBTEST_1((test_full_reductions<float, ColMajor>()));
CALL_SUBTEST_1((test_full_reductions<double, ColMajor>()));
CALL_SUBTEST_2((test_full_reductions<float, RowMajor>()));
CALL_SUBTEST_2((test_full_reductions<double, RowMajor>()));
CALL_SUBTEST_3((test_first_dim_reductions<float, ColMajor>()));
CALL_SUBTEST_3((test_first_dim_reductions<double, ColMajor>()));
CALL_SUBTEST_4((test_first_dim_reductions<float, RowMajor>()));
// Outer reductions of doubles aren't supported just yet.
// CALL_SUBTEST_4((test_first_dim_reductions<double, RowMajor>()))
CALL_SUBTEST_5((test_last_dim_reductions<float, ColMajor>()));
// Outer reductions of doubles aren't supported just yet.
// CALL_SUBTEST_5((test_last_dim_reductions<double, ColMajor>()));
CALL_SUBTEST_6((test_last_dim_reductions<float, RowMajor>()));
CALL_SUBTEST_6((test_last_dim_reductions<double, RowMajor>()));
}
| 97a6d9877851dd656ee61555b3344d016daf5a5f.cu | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_reduction_cuda
#define EIGEN_USE_GPU
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
// The EIGEN_CUDACC_VER macro is provided by
// unsupported/Eigen/CXX11/Tensor included above
#if defined EIGEN_CUDACC_VER && EIGEN_CUDACC_VER >= 70500
#include <cuda_fp16.h>
#endif
template<typename Type, int DataLayout>
static void test_full_reductions() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
const int num_rows = internal::random<int>(1024, 5*1024);
const int num_cols = internal::random<int>(1024, 5*1024);
Tensor<Type, 2, DataLayout> in(num_rows, num_cols);
in.setRandom();
Tensor<Type, 0, DataLayout> full_redux;
full_redux = in.sum();
std::size_t in_bytes = in.size() * sizeof(Type);
std::size_t out_bytes = full_redux.size() * sizeof(Type);
Type* gpu_in_ptr = static_cast<Type*>(gpu_device.allocate(in_bytes));
Type* gpu_out_ptr = static_cast<Type*>(gpu_device.allocate(out_bytes));
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<Type, 2, DataLayout> > in_gpu(gpu_in_ptr, num_rows, num_cols);
TensorMap<Tensor<Type, 0, DataLayout> > out_gpu(gpu_out_ptr);
out_gpu.device(gpu_device) = in_gpu.sum();
Tensor<Type, 0, DataLayout> full_redux_gpu;
gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
gpu_device.synchronize();
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
template<typename Type, int DataLayout>
static void test_first_dim_reductions() {
int dim_x = 33;
int dim_y = 1;
int dim_z = 128;
Tensor<Type, 3, DataLayout> in(dim_x, dim_y, dim_z);
in.setRandom();
Eigen::array<int, 1> red_axis;
red_axis[0] = 0;
Tensor<Type, 2, DataLayout> redux = in.sum(red_axis);
// Create device
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice dev(&stream);
// Create data(T)
Type* in_data = (Type*)dev.allocate(dim_x*dim_y*dim_z*sizeof(Type));
Type* out_data = (Type*)dev.allocate(dim_z*dim_y*sizeof(Type));
Eigen::TensorMap<Eigen::Tensor<Type, 3, DataLayout> > gpu_in(in_data, dim_x, dim_y, dim_z);
Eigen::TensorMap<Eigen::Tensor<Type, 2, DataLayout> > gpu_out(out_data, dim_y, dim_z);
// Perform operation
dev.memcpyHostToDevice(in_data, in.data(), in.size()*sizeof(Type));
gpu_out.device(dev) = gpu_in.sum(red_axis);
gpu_out.device(dev) += gpu_in.sum(red_axis);
Tensor<Type, 2, DataLayout> redux_gpu(dim_y, dim_z);
dev.memcpyDeviceToHost(redux_gpu.data(), out_data, gpu_out.size()*sizeof(Type));
dev.synchronize();
// Check that the CPU and GPU reductions return the same result.
for (int i = 0; i < gpu_out.size(); ++i) {
VERIFY_IS_APPROX(2*redux(i), redux_gpu(i));
}
dev.deallocate(in_data);
dev.deallocate(out_data);
}
template<typename Type, int DataLayout>
static void test_last_dim_reductions() {
int dim_x = 128;
int dim_y = 1;
int dim_z = 33;
Tensor<Type, 3, DataLayout> in(dim_x, dim_y, dim_z);
in.setRandom();
Eigen::array<int, 1> red_axis;
red_axis[0] = 2;
Tensor<Type, 2, DataLayout> redux = in.sum(red_axis);
// Create device
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice dev(&stream);
// Create data
Type* in_data = (Type*)dev.allocate(dim_x*dim_y*dim_z*sizeof(Type));
Type* out_data = (Type*)dev.allocate(dim_x*dim_y*sizeof(Type));
Eigen::TensorMap<Eigen::Tensor<Type, 3, DataLayout> > gpu_in(in_data, dim_x, dim_y, dim_z);
Eigen::TensorMap<Eigen::Tensor<Type, 2, DataLayout> > gpu_out(out_data, dim_x, dim_y);
// Perform operation
dev.memcpyHostToDevice(in_data, in.data(), in.size()*sizeof(Type));
gpu_out.device(dev) = gpu_in.sum(red_axis);
gpu_out.device(dev) += gpu_in.sum(red_axis);
Tensor<Type, 2, DataLayout> redux_gpu(dim_x, dim_y);
dev.memcpyDeviceToHost(redux_gpu.data(), out_data, gpu_out.size()*sizeof(Type));
dev.synchronize();
// Check that the CPU and GPU reductions return the same result.
for (int i = 0; i < gpu_out.size(); ++i) {
VERIFY_IS_APPROX(2*redux(i), redux_gpu(i));
}
dev.deallocate(in_data);
dev.deallocate(out_data);
}
void test_cxx11_tensor_reduction_cuda() {
CALL_SUBTEST_1((test_full_reductions<float, ColMajor>()));
CALL_SUBTEST_1((test_full_reductions<double, ColMajor>()));
CALL_SUBTEST_2((test_full_reductions<float, RowMajor>()));
CALL_SUBTEST_2((test_full_reductions<double, RowMajor>()));
CALL_SUBTEST_3((test_first_dim_reductions<float, ColMajor>()));
CALL_SUBTEST_3((test_first_dim_reductions<double, ColMajor>()));
CALL_SUBTEST_4((test_first_dim_reductions<float, RowMajor>()));
// Outer reductions of doubles aren't supported just yet.
// CALL_SUBTEST_4((test_first_dim_reductions<double, RowMajor>()))
CALL_SUBTEST_5((test_last_dim_reductions<float, ColMajor>()));
// Outer reductions of doubles aren't supported just yet.
// CALL_SUBTEST_5((test_last_dim_reductions<double, ColMajor>()));
CALL_SUBTEST_6((test_last_dim_reductions<float, RowMajor>()));
CALL_SUBTEST_6((test_last_dim_reductions<double, RowMajor>()));
}
|
79c0833deb1601574eb97c33ab12d1e2112be1e5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* CUDA blur
* Kevin Yuh, 2014
* Revised by Nailen Matschke, 2016
*/
#include <cstdio>
#include <hip/hip_runtime.h>
#include "blur_device.cuh"
__global__
void cudaBlurKernel(const float *raw_data, const float *blur_v, float *out_data,
const unsigned int n_frames, const unsigned int blur_v_size) {
// TODO: Fill in the implementation for the GPU-accelerated convolution.
//
// It may be helpful to use the information in the lecture slides, as well
// as the CPU implementation, as a reference.
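// Each thread produces one output frame of a causal FIR blur:
// out_data[n] = sum_{i=0}^{min(n, blur_v_size-1)} raw_data[n-i] * blur_v[i].
// The grid-stride loop (thread_index += blockDim.x * gridDim.x) lets a fixed launch
// configuration cover all n_frames.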
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
//printf("vsize: %d\n", &blur_v_size);
while (thread_index < n_frames) {
if (thread_index < blur_v_size) {
for (unsigned int i = 0; i <= thread_index; ++i) {
out_data[thread_index] += raw_data[thread_index - i] * blur_v[i];
}
} else {
for (unsigned int j = 0; j < blur_v_size; ++j) {
out_data[thread_index] += raw_data[thread_index - j] * blur_v[j];
}
}
thread_index += blockDim.x * gridDim.x;
//printf("b: %d\n", &blockIdx.x);
//printf("g: %d\n", &gridDim.x);
}
}
void cudaCallBlurKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
const float *raw_data,
const float *blur_v,
float *out_data,
const unsigned int n_frames,
const unsigned int blur_v_size) {
// Call the kernel above this function.
hipLaunchKernelGGL(( cudaBlurKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, raw_data, blur_v, out_data, n_frames, blur_v_size);
}
| 79c0833deb1601574eb97c33ab12d1e2112be1e5.cu | /*
* CUDA blur
* Kevin Yuh, 2014
* Revised by Nailen Matschke, 2016
*/
#include <cstdio>
#include <cuda_runtime.h>
#include "blur_device.cuh"
__global__
void cudaBlurKernel(const float *raw_data, const float *blur_v, float *out_data,
const unsigned int n_frames, const unsigned int blur_v_size) {
// TODO: Fill in the implementation for the GPU-accelerated convolution.
//
// It may be helpful to use the information in the lecture slides, as well
// as the CPU implementation, as a reference.
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
//printf("vsize: %d\n", &blur_v_size);
while (thread_index < n_frames) {
if (thread_index < blur_v_size) {
for (unsigned int i = 0; i <= thread_index; ++i) {
out_data[thread_index] += raw_data[thread_index - i] * blur_v[i];
}
} else {
for (unsigned int j = 0; j < blur_v_size; ++j) {
out_data[thread_index] += raw_data[thread_index - j] * blur_v[j];
}
}
thread_index += blockDim.x * gridDim.x;
//printf("b: %d\n", &blockIdx.x);
//printf("g: %d\n", &gridDim.x);
}
}
void cudaCallBlurKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
const float *raw_data,
const float *blur_v,
float *out_data,
const unsigned int n_frames,
const unsigned int blur_v_size) {
// Call the kernel above this function.
cudaBlurKernel<<<blocks, threadsPerBlock>>>(raw_data, blur_v, out_data, n_frames, blur_v_size);
}
|
363d0f9df5c0499f39869e859b69483a3ce44b24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void spmvCSRKernel(float *out, int *matCols, int *matRows,
float *matData, float *vec, int dim) {
//@@ insert spmv kernel for csr format
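// Standard CSR layout: matRows[iRow]..matRows[iRow+1]-1 index this row's nonzeros in
// matData, with matCols holding their column indices; one thread accumulates one row
// of the matrix-vector product.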
unsigned int iRow = blockIdx.x * blockDim.x + threadIdx.x;
if (iRow<dim){ //stay w/in matrix bounds
int iFlat=matRows[iRow];
float sum=0.0;
for (int iCol=matRows[iRow]; iCol<matRows[iRow+1]; iCol++){ //# elts in row
//every thread touches its own
sum += matData[iFlat]*vec[matCols[iFlat]];
iFlat++;
}
out[iRow] = sum;
} //iRow<dim
}
const unsigned int BLOCK_SIZE = 32;
__global__ void spmvJDSKernel(float *out, int *matColStart, int *matCols,
int *matRowPerm, int *matRows, float *matData,
float *vec, int dim) {
//@@ insert spmv kernel for jds format
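// JDS layout: rows are sorted by nonzero count (permutation in matRowPerm) and stored
// jagged-diagonal by jagged-diagonal, so element iCol of sorted row iRow lives at
// matColStart[iCol] + iRow. Consecutive threads therefore read consecutive matData
// entries, which keeps the global-memory accesses coalesced.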
unsigned int iRow = blockIdx.x * blockDim.x + threadIdx.x;
if (iRow<dim){ //stay w/in matrix bounds
int rowInd = matRowPerm[iRow];
float sum = 0.0;
for (int iCol=0; iCol<matRows[iRow]; iCol++){ //# elts in row
int iFlat = matColStart[iCol]+iRow;
int colInd = matCols[iFlat];
sum += matData[iFlat]*vec[colInd];
}
out[rowInd] = sum;
}
}
static void spmvCSR(float *out, int *matCols, int *matRows, float *matData,
float *vec, int dim) {
//@@ invoke spmv kernel for csr format
dim3 gridDim((dim-1)/BLOCK_SIZE + 1, 1, 1);
dim3 blockDim(BLOCK_SIZE, 1, 1);
// Invoke CUDA kernel -----------------------------------------------------
hipLaunchKernelGGL(( spmvCSRKernel) , dim3(gridDim), dim3(blockDim) , 0, 0, out,matCols,matRows,matData,vec,dim);
}
static void spmvJDS(float *out, int *matColStart, int *matCols, int *matRowPerm,
int *matRows, float *matData, float *vec, int dim) {
//@@ invoke spmv kernel for jds format
dim3 gridDim((dim-1)/BLOCK_SIZE + 1, 1, 1);
dim3 blockDim(BLOCK_SIZE, 1, 1);
// Invoke CUDA kernel -----------------------------------------------------
hipLaunchKernelGGL(( spmvJDSKernel) , dim3(gridDim), dim3(blockDim) , 0, 0, out,matColStart,matCols,matRowPerm,matRows,matData,vec,dim);
}
void cpu_csr(float *out, int *matCols, int *matRows, float *matData,
float *vec, int dim) {
//testing on cpu first
//from lectures:
//data[nNonZeros], columnInds[nNonZeros], rowPointers[nRows+1]
//from class forum (piazza):
//The matrix is square: dim is the number of rows and columns in the matrix and the number of elements in the vectors.
//matRows is the same as jdsRowsNNZ
int iRow, iCol;
int iFlat=0;
for (iRow=0; iRow<dim; iRow++){
//
for (iCol=matRows[iRow]; iCol<matRows[iRow+1]; iCol++){ //# elts in row
out[iRow] += matData[iFlat]*vec[matCols[iFlat]];
iFlat++;
}
}
}
void cpu_jds(float *out, int *matColStart, int *matCols, int *matRowPerm,
int *matRows, float *matData, float *vec, int dim){
//testing on cpu first
//from class forum:
/*
matColStart~jdsColStartIdx
MatCols~jdsColIdx
matRowPerm~jdsRowPerm
matData~jdsData
matRows~jdsRowsNNZ
*/
int iRow, iCol, rowInd, colInd;
int iFlat=0;
for (iRow=0; iRow<dim; iRow++){
//
rowInd = matRowPerm[iRow];
for (iCol=0; iCol<matRows[iRow]; iCol++){ //# elts in row
iFlat = matColStart[iCol]+iRow;
colInd = matCols[iFlat];
out[rowInd] += matData[iFlat]*vec[colInd];
}
}
}
int main(int argc, char **argv) {
wbArg_t args;
bool usingJDSQ;
int *hostCSRCols;
int *hostCSRRows;
float *hostCSRData;
int *hostJDSColStart;
int *hostJDSCols;
int *hostJDSRowPerm;
int *hostJDSRows;
float *hostJDSData;
float *hostVector;
float *hostOutput;
int *deviceCSRCols;
int *deviceCSRRows;
float *deviceCSRData;
int *deviceJDSColStart;
int *deviceJDSCols;
int *deviceJDSRowPerm;
int *deviceJDSRows;
float *deviceJDSData;
float *deviceVector;
float *deviceOutput;
int dim, ncols, nrows, ndata;
int maxRowNNZ;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
usingJDSQ = wbImport_flag(wbArg_getInputFile(args, 0)) == 1;
hostCSRCols = (int *)wbImport(wbArg_getInputFile(args, 1), &ncols, "Integer");
hostCSRRows = (int *)wbImport(wbArg_getInputFile(args, 2), &nrows, "Integer");
hostCSRData = (float *)wbImport(wbArg_getInputFile(args, 3), &ndata, "Real");
hostVector = (float *)wbImport(wbArg_getInputFile(args, 4), &dim, "Real");
//hostOutput = (float *)malloc(sizeof(float) * dim);
hostOutput = (float *)calloc(dim, sizeof(float)); //when just running cpu versions
wbTime_stop(Generic, "Importing data and creating memory on host");
if (usingJDSQ) {
CSRToJDS(dim, hostCSRRows, hostCSRCols, hostCSRData, &hostJDSRowPerm,
&hostJDSRows, &hostJDSColStart, &hostJDSCols, &hostJDSData);
maxRowNNZ = hostJDSRows[0];
}
wbTime_start(GPU, "Allocating GPU memory.");
if (usingJDSQ) {
hipMalloc((void **)&deviceJDSColStart, sizeof(int) * maxRowNNZ);
hipMalloc((void **)&deviceJDSCols, sizeof(int) * ndata);
hipMalloc((void **)&deviceJDSRowPerm, sizeof(int) * dim);
hipMalloc((void **)&deviceJDSRows, sizeof(int) * dim);
hipMalloc((void **)&deviceJDSData, sizeof(float) * ndata);
} else {
hipMalloc((void **)&deviceCSRCols, sizeof(int) * ncols);
hipMalloc((void **)&deviceCSRRows, sizeof(int) * nrows);
hipMalloc((void **)&deviceCSRData, sizeof(float) * ndata);
}
hipMalloc((void **)&deviceVector, sizeof(float) * dim);
hipMalloc((void **)&deviceOutput, sizeof(float) * dim);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
if (usingJDSQ) {
hipMemcpy(deviceJDSColStart, hostJDSColStart, sizeof(int) * maxRowNNZ,
hipMemcpyHostToDevice);
hipMemcpy(deviceJDSCols, hostJDSCols, sizeof(int) * ndata,
hipMemcpyHostToDevice);
hipMemcpy(deviceJDSRowPerm, hostJDSRowPerm, sizeof(int) * dim,
hipMemcpyHostToDevice);
hipMemcpy(deviceJDSRows, hostJDSRows, sizeof(int) * dim,
hipMemcpyHostToDevice);
hipMemcpy(deviceJDSData, hostJDSData, sizeof(float) * ndata,
hipMemcpyHostToDevice);
} else {
hipMemcpy(deviceCSRCols, hostCSRCols, sizeof(int) * ncols,
hipMemcpyHostToDevice);
hipMemcpy(deviceCSRRows, hostCSRRows, sizeof(int) * nrows,
hipMemcpyHostToDevice);
hipMemcpy(deviceCSRData, hostCSRData, sizeof(float) * ndata,
hipMemcpyHostToDevice);
}
hipMemcpy(deviceVector, hostVector, sizeof(float) * dim,
hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
wbTime_start(Compute, "Performing CUDA computation");
if (usingJDSQ) {
spmvJDS(deviceOutput, deviceJDSColStart, deviceJDSCols, deviceJDSRowPerm,
deviceJDSRows, deviceJDSData, deviceVector, dim);
} else {
spmvCSR(deviceOutput, deviceCSRCols, deviceCSRRows, deviceCSRData,
deviceVector, dim);
}
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
hipMemcpy(hostOutput, deviceOutput, sizeof(float) * dim,
hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
hipFree(deviceCSRCols);
hipFree(deviceCSRRows);
hipFree(deviceCSRData);
hipFree(deviceVector);
hipFree(deviceOutput);
if (usingJDSQ) {
hipFree(deviceJDSColStart);
hipFree(deviceJDSCols);
hipFree(deviceJDSRowPerm);
hipFree(deviceJDSRows);
hipFree(deviceJDSData);
}
wbTime_stop(GPU, "Freeing GPU Memory");
// run my cpu versions
#if 0
if (usingJDSQ) {
cpu_jds(hostOutput, hostJDSColStart, hostJDSCols, hostJDSRowPerm,
hostJDSRows, hostJDSData, hostVector, dim);
}
else {
cpu_csr(hostOutput, hostCSRCols, hostCSRRows, hostCSRData, hostVector, dim);
}
#endif
//
wbSolution(args, hostOutput, dim);
free(hostCSRCols);
free(hostCSRRows);
free(hostCSRData);
free(hostVector);
free(hostOutput);
if (usingJDSQ) {
free(hostJDSColStart);
free(hostJDSCols);
free(hostJDSRowPerm);
free(hostJDSRows);
free(hostJDSData);
}
return 0;
}
| 363d0f9df5c0499f39869e859b69483a3ce44b24.cu | #include <wb.h>
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void spmvCSRKernel(float *out, int *matCols, int *matRows,
float *matData, float *vec, int dim) {
//@@ insert spmv kernel for csr format
unsigned int iRow = blockIdx.x * blockDim.x + threadIdx.x;
if (iRow<dim){ //stay w/in matrix bounds
int iFlat=matRows[iRow];
float sum=0.0;
for (int iCol=matRows[iRow]; iCol<matRows[iRow+1]; iCol++){ //# elts in row
//every thread touches its own
sum += matData[iFlat]*vec[matCols[iFlat]];
iFlat++;
}
out[iRow] = sum;
} //iRow<dim
}
const unsigned int BLOCK_SIZE = 32;
__global__ void spmvJDSKernel(float *out, int *matColStart, int *matCols,
int *matRowPerm, int *matRows, float *matData,
float *vec, int dim) {
//@@ insert spmv kernel for jds format
unsigned int iRow = blockIdx.x * blockDim.x + threadIdx.x;
if (iRow<dim){ //stay w/in matrix bounds
int rowInd = matRowPerm[iRow];
float sum = 0.0;
for (int iCol=0; iCol<matRows[iRow]; iCol++){ //# elts in row
int iFlat = matColStart[iCol]+iRow;
int colInd = matCols[iFlat];
sum += matData[iFlat]*vec[colInd];
}
out[rowInd] = sum;
}
}
static void spmvCSR(float *out, int *matCols, int *matRows, float *matData,
float *vec, int dim) {
//@@ invoke spmv kernel for csr format
dim3 gridDim((dim-1)/BLOCK_SIZE + 1, 1, 1);
dim3 blockDim(BLOCK_SIZE, 1, 1);
// Invoke CUDA kernel -----------------------------------------------------
spmvCSRKernel <<< gridDim, blockDim >>> (out,matCols,matRows,matData,vec,dim);
}
static void spmvJDS(float *out, int *matColStart, int *matCols, int *matRowPerm,
int *matRows, float *matData, float *vec, int dim) {
//@@ invoke spmv kernel for jds format
dim3 gridDim((dim-1)/BLOCK_SIZE + 1, 1, 1);
dim3 blockDim(BLOCK_SIZE, 1, 1);
// Invoke CUDA kernel -----------------------------------------------------
spmvJDSKernel <<< gridDim, blockDim >>> (out,matColStart,matCols,matRowPerm,matRows,matData,vec,dim);
}
void cpu_csr(float *out, int *matCols, int *matRows, float *matData,
float *vec, int dim) {
//testing on cpu first
//from lectures:
//data[nNonZeros], columnInds[nNonZeros], rowPointers[nRows+1]
//from class forum (piazza):
//The matrix is square: dim is the number of rows and columns in the matrix and the number of elements in the vectors.
//matRows is the same as jdsRowsNNZ
int iRow, iCol;
int iFlat=0;
for (iRow=0; iRow<dim; iRow++){
//
for (iCol=matRows[iRow]; iCol<matRows[iRow+1]; iCol++){ //# elts in row
out[iRow] += matData[iFlat]*vec[matCols[iFlat]];
iFlat++;
}
}
}
void cpu_jds(float *out, int *matColStart, int *matCols, int *matRowPerm,
int *matRows, float *matData, float *vec, int dim){
//testing on cpu first
//from class forum:
/*
matColStart~jdsColStartIdx
MatCols~jdsColIdx
matRowPerm~jdsRowPerm
matData~jdsData
matRows~jdsRowsNNZ
*/
int iRow, iCol, rowInd, colInd;
int iFlat=0;
for (iRow=0; iRow<dim; iRow++){
//
rowInd = matRowPerm[iRow];
for (iCol=0; iCol<matRows[iRow]; iCol++){ //# elts in row
iFlat = matColStart[iCol]+iRow;
colInd = matCols[iFlat];
out[rowInd] += matData[iFlat]*vec[colInd];
}
}
}
int main(int argc, char **argv) {
wbArg_t args;
bool usingJDSQ;
int *hostCSRCols;
int *hostCSRRows;
float *hostCSRData;
int *hostJDSColStart;
int *hostJDSCols;
int *hostJDSRowPerm;
int *hostJDSRows;
float *hostJDSData;
float *hostVector;
float *hostOutput;
int *deviceCSRCols;
int *deviceCSRRows;
float *deviceCSRData;
int *deviceJDSColStart;
int *deviceJDSCols;
int *deviceJDSRowPerm;
int *deviceJDSRows;
float *deviceJDSData;
float *deviceVector;
float *deviceOutput;
int dim, ncols, nrows, ndata;
int maxRowNNZ;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
usingJDSQ = wbImport_flag(wbArg_getInputFile(args, 0)) == 1;
hostCSRCols = (int *)wbImport(wbArg_getInputFile(args, 1), &ncols, "Integer");
hostCSRRows = (int *)wbImport(wbArg_getInputFile(args, 2), &nrows, "Integer");
hostCSRData = (float *)wbImport(wbArg_getInputFile(args, 3), &ndata, "Real");
hostVector = (float *)wbImport(wbArg_getInputFile(args, 4), &dim, "Real");
//hostOutput = (float *)malloc(sizeof(float) * dim);
hostOutput = (float *)calloc(dim, sizeof(float)); //when just running cpu versions
wbTime_stop(Generic, "Importing data and creating memory on host");
if (usingJDSQ) {
CSRToJDS(dim, hostCSRRows, hostCSRCols, hostCSRData, &hostJDSRowPerm,
&hostJDSRows, &hostJDSColStart, &hostJDSCols, &hostJDSData);
maxRowNNZ = hostJDSRows[0];
}
wbTime_start(GPU, "Allocating GPU memory.");
if (usingJDSQ) {
cudaMalloc((void **)&deviceJDSColStart, sizeof(int) * maxRowNNZ);
cudaMalloc((void **)&deviceJDSCols, sizeof(int) * ndata);
cudaMalloc((void **)&deviceJDSRowPerm, sizeof(int) * dim);
cudaMalloc((void **)&deviceJDSRows, sizeof(int) * dim);
cudaMalloc((void **)&deviceJDSData, sizeof(float) * ndata);
} else {
cudaMalloc((void **)&deviceCSRCols, sizeof(int) * ncols);
cudaMalloc((void **)&deviceCSRRows, sizeof(int) * nrows);
cudaMalloc((void **)&deviceCSRData, sizeof(float) * ndata);
}
cudaMalloc((void **)&deviceVector, sizeof(float) * dim);
cudaMalloc((void **)&deviceOutput, sizeof(float) * dim);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
if (usingJDSQ) {
cudaMemcpy(deviceJDSColStart, hostJDSColStart, sizeof(int) * maxRowNNZ,
cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSCols, hostJDSCols, sizeof(int) * ndata,
cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSRowPerm, hostJDSRowPerm, sizeof(int) * dim,
cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSRows, hostJDSRows, sizeof(int) * dim,
cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSData, hostJDSData, sizeof(float) * ndata,
cudaMemcpyHostToDevice);
} else {
cudaMemcpy(deviceCSRCols, hostCSRCols, sizeof(int) * ncols,
cudaMemcpyHostToDevice);
cudaMemcpy(deviceCSRRows, hostCSRRows, sizeof(int) * nrows,
cudaMemcpyHostToDevice);
cudaMemcpy(deviceCSRData, hostCSRData, sizeof(float) * ndata,
cudaMemcpyHostToDevice);
}
cudaMemcpy(deviceVector, hostVector, sizeof(float) * dim,
cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
wbTime_start(Compute, "Performing CUDA computation");
if (usingJDSQ) {
spmvJDS(deviceOutput, deviceJDSColStart, deviceJDSCols, deviceJDSRowPerm,
deviceJDSRows, deviceJDSData, deviceVector, dim);
} else {
spmvCSR(deviceOutput, deviceCSRCols, deviceCSRRows, deviceCSRData,
deviceVector, dim);
}
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
cudaMemcpy(hostOutput, deviceOutput, sizeof(float) * dim,
cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceCSRCols);
cudaFree(deviceCSRRows);
cudaFree(deviceCSRData);
cudaFree(deviceVector);
cudaFree(deviceOutput);
if (usingJDSQ) {
cudaFree(deviceJDSColStart);
cudaFree(deviceJDSCols);
cudaFree(deviceJDSRowPerm);
cudaFree(deviceJDSRows);
cudaFree(deviceJDSData);
}
wbTime_stop(GPU, "Freeing GPU Memory");
// run my cpu versions
#if 0
if (usingJDSQ) {
cpu_jds(hostOutput, hostJDSColStart, hostJDSCols, hostJDSRowPerm,
hostJDSRows, hostJDSData, hostVector, dim);
}
else {
cpu_csr(hostOutput, hostCSRCols, hostCSRRows, hostCSRData, hostVector, dim);
}
#endif
//
wbSolution(args, hostOutput, dim);
free(hostCSRCols);
free(hostCSRRows);
free(hostCSRData);
free(hostVector);
free(hostOutput);
if (usingJDSQ) {
free(hostJDSColStart);
free(hostJDSCols);
free(hostJDSRowPerm);
free(hostJDSRows);
free(hostJDSData);
}
return 0;
}
|
62fdb19cdde10ff147e7a5c0bb4700060b216435.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "box2d2r-512-8-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 49
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
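// Temporal blocking: the main loop below advances __side0Len = 8 time steps per kernel
// launch; the leftover iterations (__c0Len % __side0LenMax) are handled by the smaller
// kernel0_1..kernel0_4 variants further down, apparently chosen so that the total
// number of launches keeps the two time planes of dev_A in the expected order.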
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.03125f * A[t%2][i-2][j-2] +
0.03126f * A[t%2][i-2][j-1] +
0.03127f * A[t%2][i-2][j] +
0.03128f * A[t%2][i-2][j+1] +
0.03129f * A[t%2][i-2][j+2] +
0.03130f * A[t%2][i-1][j-2] +
0.03131f * A[t%2][i-1][j-1] +
0.03132f * A[t%2][i-1][j] +
0.03133f * A[t%2][i-1][j+1] +
0.03134f * A[t%2][i-1][j+2] +
0.03135f * A[t%2][i][j-2] +
0.03136f * A[t%2][i][j-1] +
0.24712f * A[t%2][i][j] +
0.03138f * A[t%2][i][j+1] +
0.03139f * A[t%2][i][j+2] +
0.03140f * A[t%2][i+1][j-2] +
0.03141f * A[t%2][i+1][j-1] +
0.03142f * A[t%2][i+1][j] +
0.03143f * A[t%2][i+1][j+1] +
0.03144f * A[t%2][i+1][j+2] +
0.03145f * A[t%2][i+2][j-2] +
0.03146f * A[t%2][i+2][j-1] +
0.03147f * A[t%2][i+2][j] +
0.03148f * A[t%2][i+2][j+1] +
0.03149f * A[t%2][i+2][j+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 62fdb19cdde10ff147e7a5c0bb4700060b216435.cu | #include <assert.h>
#include <stdio.h>
#include "box2d2r-512-8-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 49
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.03125f * A[t%2][i-2][j-2] +
0.03126f * A[t%2][i-2][j-1] +
0.03127f * A[t%2][i-2][j] +
0.03128f * A[t%2][i-2][j+1] +
0.03129f * A[t%2][i-2][j+2] +
0.03130f * A[t%2][i-1][j-2] +
0.03131f * A[t%2][i-1][j-1] +
0.03132f * A[t%2][i-1][j] +
0.03133f * A[t%2][i-1][j+1] +
0.03134f * A[t%2][i-1][j+2] +
0.03135f * A[t%2][i][j-2] +
0.03136f * A[t%2][i][j-1] +
0.24712f * A[t%2][i][j] +
0.03138f * A[t%2][i][j+1] +
0.03139f * A[t%2][i][j+2] +
0.03140f * A[t%2][i+1][j-2] +
0.03141f * A[t%2][i+1][j-1] +
0.03142f * A[t%2][i+1][j] +
0.03143f * A[t%2][i+1][j+1] +
0.03144f * A[t%2][i+1][j+2] +
0.03145f * A[t%2][i+2][j-2] +
0.03146f * A[t%2][i+2][j-1] +
0.03147f * A[t%2][i+2][j] +
0.03148f * A[t%2][i+2][j+1] +
0.03149f * A[t%2][i+2][j+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
7758074f25ad2c161405e0800d10d90605f1e1be.hip | // !!! This is a file automatically generated by hipify!!!
#include <fstream>
#include <iostream>
#include <vector>
#include <hip/hip_runtime.h>
#include <mpi.h>
#include "util.hpp"
#include "cuda_stream.hpp"
// 2D diffusion example with mpi
// the grid has a fixed width of nx=128
// the user specifies the height, ny, as a power of two
// note that nx and ny have 2 added to them to account for halos
//
// the domain decomposition is in the vertical
// ny is the height of the local sub-domain
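// for example: with 4 MPI ranks and ny = 2^10 = 1024, each rank owns a
// 128x256 interior block stored in a 130x258 buffer (halo included), and
// global interior row g lives on rank g/256 as local row g%256 + 1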
void write_to_file(int nx, int ny, double* data, int mpi_size, int mpi_rank);
template <typename T>
void fill_gpu(T *v, T value, int n);
__global__
void diffusion(double *x0, double *x1, int nx, int ny, double dt) {
// TODO : copy stencil implemented in diffusion2d.cu
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto j = threadIdx.y + blockDim.y*blockIdx.y;
auto pos = i+1 + (j+1)*nx;
if (i<nx-2 && j<ny-2) { // interior points only: the (i+1, j+1) indexing skips the halo
x1[pos] = x0[pos] + dt * (-4.*x0[pos]
+ x0[pos-nx] + x0[pos+nx]
+ x0[pos-1] + x0[pos+1]);
}
}
int main(int argc, char** argv) {
// set up parameters
// first argument is the y dimension = 2^arg
size_t pow = read_arg(argc, argv, 1, 8);
// second argument is the number of time steps
size_t nsteps = read_arg(argc, argv, 2, 100);
// set domain size
size_t nx = 128;
size_t ny = 1 << pow;
double dt = 0.1;
// initialize MPI
int mpi_rank, mpi_size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
// calculate global domain sizes
if(ny%mpi_size) {
std::cout << "error : global domain dimension " << ny
<< "must be divisible by number of MPI ranks " << mpi_size
<< std::endl;
exit(1);
}
else if(mpi_rank==0) {
std::cout << "\n## " << mpi_size << " MPI ranks" << std::endl;
std::cout << "## " << nx << "x" << ny
<< " : " << nx << "x" << ny/mpi_size << " per rank"
<< " for " << nsteps << " time steps"
<< " (" << nx*ny << " grid points)"
<< std::endl;
}
ny /= mpi_size;
// adjust dimensions for halo
nx += 2;
ny += 2;
// allocate memory on device and host
// note : allocate enough memory for the halo around the boundary
auto buffer_size = nx*ny;
double *x_host = malloc_pinned<double>(buffer_size);
double *x0 = malloc_device<double>(buffer_size);
double *x1 = malloc_device<double>(buffer_size);
// set initial conditions of 0 everywhere
fill_gpu(x0, 0., buffer_size);
fill_gpu(x1, 0., buffer_size);
// set boundary conditions of 1 on the south and north borders
if(mpi_rank==0) { // south boundary
fill_gpu(x0, 1., nx);
fill_gpu(x1, 1., nx);
}
if(mpi_rank==mpi_size-1) { // north boundary
fill_gpu(x0+nx*(ny-1), 1., nx);
fill_gpu(x1+nx*(ny-1), 1., nx);
}
cuda_stream stream;
cuda_stream copy_stream;
auto start_event = stream.enqueue_event();
const dim3 block_dim(16, 16);
const dim3 grid_dim(
(nx-2+block_dim.x-1)/block_dim.x,
(ny-2+block_dim.y-1)/block_dim.y);
MPI_Status status_north;
MPI_Status status_south;
auto recv_buffer = malloc_pinned<double>(nx);
auto send_buffer = malloc_pinned<double>(nx);
// time stepping loop
for(auto step=0; step<nsteps; ++step) {
// perform halo exchange
// x0(:, 0) <- south
// x0(:, 1) -> south
// x0(:, ny-1) <- north
// x0(:, ny-2) -> north
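// note: when WITH_G2G is defined the exchange below is skipped entirely
// (presumably reserved for a GPU-direct implementation); otherwise each halo
// row is staged through the pinned host buffers with MPI_Sendrecv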
#ifdef WITH_G2G
#else
if (mpi_rank>0) {
copy_to_host(x0+nx, send_buffer, nx);
MPI_Sendrecv(send_buffer, nx, MPI_DOUBLE,
mpi_rank-1, 0,
recv_buffer, nx, MPI_DOUBLE,
mpi_rank-1, 1,
MPI_COMM_WORLD, &status_south);
copy_to_device(recv_buffer, x0, nx);
}
if (mpi_rank<mpi_size-1) {
copy_to_host(x0+(ny-2)*nx, send_buffer, nx);
MPI_Sendrecv(send_buffer, nx, MPI_DOUBLE,
mpi_rank+1, 1,
recv_buffer, nx, MPI_DOUBLE,
mpi_rank+1, 0,
MPI_COMM_WORLD, &status_north);
copy_to_device(recv_buffer, x0+(ny-1)*nx, nx);
}
#endif
hipLaunchKernelGGL(( diffusion), dim3(grid_dim), dim3(block_dim), 0, 0, x0, x1, nx, ny, dt);
std::swap(x0, x1);
}
auto stop_event = stream.enqueue_event();
stop_event.wait();
copy_to_host<double>(x0, x_host, buffer_size);
double time = stop_event.time_since(start_event);
if(mpi_rank==0) {
std::cout << "## " << time << "s, "
<< nsteps*(nx-2)*(ny-2)*mpi_size / time << " points/second"
<< std::endl << std::endl;
std::cout << "writing to output.bin/bov" << std::endl;
}
write_to_file(nx, ny, x_host, mpi_size, mpi_rank);
MPI_Finalize();
return 0;
}
template <typename T>
__global__
void fill(T *v, T value, int n) {
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if(tid<n) {
v[tid] = value;
}
}
template <typename T>
void fill_gpu(T *v, T value, int n) {
auto block_dim = 192ul;
auto grid_dim = n/block_dim + (n%block_dim ? 1 : 0);
hipLaunchKernelGGL(( fill<T>), dim3(grid_dim), dim3(block_dim), 0, 0, v, value, n);
}
void write_to_file(int nx, int ny, double* data, int mpi_size, int mpi_rank) {
// collect the global solution to the root rank
auto block_size = nx*(ny-2); // discard first and last rows
std::vector<double> data_global(mpi_size*block_size);
MPI_Gather(data+nx, block_size, MPI_DOUBLE,
&data_global[0], block_size, MPI_DOUBLE,
0, MPI_COMM_WORLD);
if(mpi_rank==0) {
FILE* output = fopen("output.bin", "w");
fwrite(&data_global[0], sizeof(double), mpi_size* nx * (ny-2), output);
fclose(output);
std::ofstream fid("output.bov");
fid << "TIME: 0.0" << std::endl;
fid << "DATA_FILE: output.bin" << std::endl;
fid << "DATA_SIZE: " << nx << ", " << mpi_size*(ny-2) << ", 1" << std::endl;;
fid << "DATA_FORMAT: DOUBLE" << std::endl;
fid << "VARIABLE: phi" << std::endl;
fid << "DATA_ENDIAN: LITTLE" << std::endl;
fid << "CENTERING: nodal" << std::endl;
fid << "BRICK_SIZE: 1.0 1.0 1.0" << std::endl;
}
}
| 7758074f25ad2c161405e0800d10d90605f1e1be.cu | #include <fstream>
#include <iostream>
#include <vector>
#include <cuda.h>
#include <mpi.h>
#include "util.hpp"
#include "cuda_stream.hpp"
// 2D diffusion example with mpi
// the grid has a fixed width of nx=128
// the user specifies the height, ny, as a power of two
// note that nx and ny have 2 added to them to account for halos
//
// the domain decomposition is in the vertical
// ny is the height of the local sub-domain
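// for example: with 4 MPI ranks and ny = 2^10 = 1024, each rank owns a
// 128x256 interior block stored in a 130x258 buffer (halo included), and
// global interior row g lives on rank g/256 as local row g%256 + 1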
void write_to_file(int nx, int ny, double* data, int mpi_size, int mpi_rank);
template <typename T>
void fill_gpu(T *v, T value, int n);
__global__
void diffusion(double *x0, double *x1, int nx, int ny, double dt) {
// TODO : copy stencil implemented in diffusion2d.cu
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto j = threadIdx.y + blockDim.y*blockIdx.y;
auto pos = i+1 + (j+1)*nx;
if (i<nx-2 && j<ny-2) { // interior points only: the (i+1, j+1) indexing skips the halo
x1[pos] = x0[pos] + dt * (-4.*x0[pos]
+ x0[pos-nx] + x0[pos+nx]
+ x0[pos-1] + x0[pos+1]);
}
}
int main(int argc, char** argv) {
// set up parameters
// first argument is the y dimension = 2^arg
size_t pow = read_arg(argc, argv, 1, 8);
// second argument is the number of time steps
size_t nsteps = read_arg(argc, argv, 2, 100);
// set domain size
size_t nx = 128;
size_t ny = 1 << pow;
double dt = 0.1;
// initialize MPI
int mpi_rank, mpi_size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
// calculate global domain sizes
if(ny%mpi_size) {
std::cout << "error : global domain dimension " << ny
<< "must be divisible by number of MPI ranks " << mpi_size
<< std::endl;
exit(1);
}
else if(mpi_rank==0) {
std::cout << "\n## " << mpi_size << " MPI ranks" << std::endl;
std::cout << "## " << nx << "x" << ny
<< " : " << nx << "x" << ny/mpi_size << " per rank"
<< " for " << nsteps << " time steps"
<< " (" << nx*ny << " grid points)"
<< std::endl;
}
ny /= mpi_size;
// adjust dimensions for halo
nx += 2;
ny += 2;
// allocate memory on device and host
// note : allocate enough memory for the halo around the boundary
auto buffer_size = nx*ny;
double *x_host = malloc_pinned<double>(buffer_size);
double *x0 = malloc_device<double>(buffer_size);
double *x1 = malloc_device<double>(buffer_size);
// set initial conditions of 0 everywhere
fill_gpu(x0, 0., buffer_size);
fill_gpu(x1, 0., buffer_size);
// set boundary conditions of 1 on the south and north borders
if(mpi_rank==0) { // south boundary
fill_gpu(x0, 1., nx);
fill_gpu(x1, 1., nx);
}
if(mpi_rank==mpi_size-1) { // north boundary
fill_gpu(x0+nx*(ny-1), 1., nx);
fill_gpu(x1+nx*(ny-1), 1., nx);
}
cuda_stream stream;
cuda_stream copy_stream;
auto start_event = stream.enqueue_event();
const dim3 block_dim(16, 16);
const dim3 grid_dim(
(nx-2+block_dim.x-1)/block_dim.x,
(ny-2+block_dim.y-1)/block_dim.y);
MPI_Status status_north;
MPI_Status status_south;
auto recv_buffer = malloc_pinned<double>(nx);
auto send_buffer = malloc_pinned<double>(nx);
// time stepping loop
for(auto step=0; step<nsteps; ++step) {
// perform halo exchange
// x0(:, 0) <- south
// x0(:, 1) -> south
// x0(:, ny-1) <- north
// x0(:, ny-2) -> north
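// note: when WITH_G2G is defined the exchange below is skipped entirely
// (presumably reserved for a GPU-direct implementation); otherwise each halo
// row is staged through the pinned host buffers with MPI_Sendrecv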
#ifdef WITH_G2G
#else
if (mpi_rank>0) {
copy_to_host(x0+nx, send_buffer, nx);
MPI_Sendrecv(send_buffer, nx, MPI_DOUBLE,
mpi_rank-1, 0,
recv_buffer, nx, MPI_DOUBLE,
mpi_rank-1, 1,
MPI_COMM_WORLD, &status_south);
copy_to_device(recv_buffer, x0, nx);
}
if (mpi_rank<mpi_size-1) {
copy_to_host(x0+(ny-2)*nx, send_buffer, nx);
MPI_Sendrecv(send_buffer, nx, MPI_DOUBLE,
mpi_rank+1, 1,
recv_buffer, nx, MPI_DOUBLE,
mpi_rank+1, 0,
MPI_COMM_WORLD, &status_north);
copy_to_device(recv_buffer, x0+(ny-1)*nx, nx);
}
#endif
diffusion<<<grid_dim, block_dim>>>(x0, x1, nx, ny, dt);
std::swap(x0, x1);
}
auto stop_event = stream.enqueue_event();
stop_event.wait();
copy_to_host<double>(x0, x_host, buffer_size);
double time = stop_event.time_since(start_event);
if(mpi_rank==0) {
std::cout << "## " << time << "s, "
<< nsteps*(nx-2)*(ny-2)*mpi_size / time << " points/second"
<< std::endl << std::endl;
std::cout << "writing to output.bin/bov" << std::endl;
}
write_to_file(nx, ny, x_host, mpi_size, mpi_rank);
MPI_Finalize();
return 0;
}
template <typename T>
__global__
void fill(T *v, T value, int n) {
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if(tid<n) {
v[tid] = value;
}
}
template <typename T>
void fill_gpu(T *v, T value, int n) {
auto block_dim = 192ul;
auto grid_dim = n/block_dim + (n%block_dim ? 1 : 0);
fill<T><<<grid_dim, block_dim>>>(v, value, n);
}
void write_to_file(int nx, int ny, double* data, int mpi_size, int mpi_rank) {
// collect the global solution to the root rank
auto block_size = nx*(ny-2); // discard first and last rows
std::vector<double> data_global(mpi_size*block_size);
MPI_Gather(data+nx, block_size, MPI_DOUBLE,
&data_global[0], block_size, MPI_DOUBLE,
0, MPI_COMM_WORLD);
if(mpi_rank==0) {
FILE* output = fopen("output.bin", "w");
fwrite(&data_global[0], sizeof(double), mpi_size* nx * (ny-2), output);
fclose(output);
std::ofstream fid("output.bov");
fid << "TIME: 0.0" << std::endl;
fid << "DATA_FILE: output.bin" << std::endl;
fid << "DATA_SIZE: " << nx << ", " << mpi_size*(ny-2) << ", 1" << std::endl;;
fid << "DATA_FORMAT: DOUBLE" << std::endl;
fid << "VARIABLE: phi" << std::endl;
fid << "DATA_ENDIAN: LITTLE" << std::endl;
fid << "CENTERING: nodal" << std::endl;
fid << "BRICK_SIZE: 1.0 1.0 1.0" << std::endl;
}
}
|
d051a672b18a3455fdc314fb7592260763f7059e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* This sample is a templatized version of the template project.
* It also shows how to correctly templatize dynamically allocated shared
* memory arrays.
* Device code.
*/
#ifndef _TEMPLATE_KERNEL_H_
#define _TEMPLATE_KERNEL_H_
#include <stdio.h>
#include "sharedmem.cuh"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
template<class T>
__global__ void
testKernel( T* g_idata, T* g_odata)
{
// Shared mem size is determined by the host app at run time
SharedMemory<T> smem;
T* sdata = smem.getPointer();
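// note: the host launch must pass sizeof(T) * num_threads bytes of dynamic
// shared memory for sdata to be valid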
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
// read in input data from global memory
// (the original SDK sample used a bank-checker macro here to detect
// shared-memory bank conflicts under the old device-emulation mode)
sdata[tid] = g_idata[tid];
__syncthreads();
// perform some computations
sdata[tid] = (T) num_threads * sdata[tid];
__syncthreads();
// write data to global memory
g_odata[tid] = sdata[tid];
}
#endif // #ifndef _TEMPLATE_KERNEL_H_
| d051a672b18a3455fdc314fb7592260763f7059e.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* This sample is a templatized version of the template project.
* It also shows how to correctly templatize dynamically allocated shared
* memory arrays.
* Device code.
*/
#ifndef _TEMPLATE_KERNEL_H_
#define _TEMPLATE_KERNEL_H_
#include <stdio.h>
#include "sharedmem.cuh"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
template<class T>
__global__ void
testKernel( T* g_idata, T* g_odata)
{
// Shared mem size is determined by the host app at run time
SharedMemory<T> smem;
T* sdata = smem.getPointer();
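// note: the host launch must pass sizeof(T) * num_threads bytes of dynamic
// shared memory for sdata to be valid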
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
// read in input data from global memory
// (the original SDK sample used a bank-checker macro here to detect
// shared-memory bank conflicts under the old device-emulation mode)
sdata[tid] = g_idata[tid];
__syncthreads();
// perform some computations
sdata[tid] = (T) num_threads * sdata[tid];
__syncthreads();
// write data to global memory
g_odata[tid] = sdata[tid];
}
#endif // #ifndef _TEMPLATE_KERNEL_H_
|
00d7fc5013d2bec4912aeefbda4ee08fb2bafa66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Write GPU kernels to complete the functionality of estimating the integral via the trapezoidal rule. */
#include <stdio.h>
#include <math.h>
#define LEFT_ENDPOINT 10
#define RIGHT_ENDPOINT 1005
#define NUM_TRAPEZOIDS 1000000000
#define THREAD_BLOCK_SIZE 256
/* This function uses a compare and swap technique to acquire a mutex/lock. */
__device__ void lock(int *mutex)
{
while(atomicCAS(mutex, 0, 1) != 0);
}
/* This function uses an atomic exchange operation to release the mutex/lock. */
__device__ void unlock(int *mutex)
{
atomicExch(mutex, 0);
}
__device__ float f_device(float x) {
return (x + 1)/sqrt(x*x + x + 1);
} /* f */
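/* For reference, one antiderivative of f is
F(x) = sqrt(x*x + x + 1) + 0.5*log(2*x + 1 + 2*sqrt(x*x + x + 1)),
so F(RIGHT_ENDPOINT) - F(LEFT_ENDPOINT) gives an exact value to check the estimate against. */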
__global__ void kernel_trap(float a, float b, int n, float h, double * result, int * mutex) {
__shared__ float area_per_thread[THREAD_BLOCK_SIZE]; // Allocate shared memory to hold the partial sums.
unsigned int thread_id = blockIdx.x * blockDim.x + threadIdx.x; // Obtain the thread ID.
unsigned int stride = blockDim.x * gridDim.x;
double sum = 0.0f;
unsigned int i = thread_id;
/* Compute your partial sum. */
while(i < n){
sum += f_device( a + i * (h));
i += stride;
}
sum = sum * (h);
area_per_thread[threadIdx.x] = (float)sum; // Copy sum to shared memory.
__syncthreads(); // Wait for all threads in the thread block to finish up.
/* Reduce the values generated by the thread block to a single value to be sent back to the CPU. */
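/* Note: this halving loop assumes blockDim.x (THREAD_BLOCK_SIZE = 256) is a power of two. */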
i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i)
area_per_thread[threadIdx.x] += area_per_thread[threadIdx.x + i];
__syncthreads();
i /= 2;
}
/* Accumulate the sum computed by this thread block into the global shared variable. */
if(threadIdx.x == 0){
lock(mutex);
*result += area_per_thread[0];
unlock(mutex);
}
}
| 00d7fc5013d2bec4912aeefbda4ee08fb2bafa66.cu | /* Write GPU kernels to compete the functionality of estimating the integral via the trapezoidal rule. */
#include <stdio.h>
#include <math.h>
#define LEFT_ENDPOINT 10
#define RIGHT_ENDPOINT 1005
#define NUM_TRAPEZOIDS 1000000000
#define THREAD_BLOCK_SIZE 256
/* This function uses a compare and swap technique to acquire a mutex/lock. */
__device__ void lock(int *mutex)
{
while(atomicCAS(mutex, 0, 1) != 0);
}
/* This function uses an atomic exchange operation to release the mutex/lock. */
__device__ void unlock(int *mutex)
{
atomicExch(mutex, 0);
}
__device__ float f_device(float x) {
return (x + 1)/sqrt(x*x + x + 1);
} /* f */
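/* For reference, one antiderivative of f is
F(x) = sqrt(x*x + x + 1) + 0.5*log(2*x + 1 + 2*sqrt(x*x + x + 1)),
so F(RIGHT_ENDPOINT) - F(LEFT_ENDPOINT) gives an exact value to check the estimate against. */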
__global__ void kernel_trap(float a, float b, int n, float h, double * result, int * mutex) {
__shared__ float area_per_thread[THREAD_BLOCK_SIZE]; // Allocate shared memory to hold the partial sums.
unsigned int thread_id = blockIdx.x * blockDim.x + threadIdx.x; // Obtain the thread ID.
unsigned int stride = blockDim.x * gridDim.x;
double sum = 0.0f;
unsigned int i = thread_id;
/* Compute your partial sum. */
while(i < n){
sum += f_device( a + i * (h));
i += stride;
}
sum = sum * (h);
area_per_thread[threadIdx.x] = (float)sum; // Copy sum to shared memory.
__syncthreads(); // Wait for all threads in the thread block to finish up.
/* Reduce the values generated by the thread block to a single value to be sent back to the CPU. */
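/* Note: this halving loop assumes blockDim.x (THREAD_BLOCK_SIZE = 256) is a power of two. */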
i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i)
area_per_thread[threadIdx.x] += area_per_thread[threadIdx.x + i];
__syncthreads();
i /= 2;
}
/* Accumulate the sum computed by this thread block into the global shared variable. */
if(threadIdx.x == 0){
lock(mutex);
*result += area_per_thread[0];
unlock(mutex);
}
}
|
d8696fe1755ac63ee24d6483cd97ff6cfec0be26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
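// note: this bitwise form of mod is exact only when y is a power of two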
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+32)*FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+32)*FORMA_BLOCKDIM_X);
int rowy = FORMA_BLOCKDIM_Y+32;
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-4);
for (int __iter_1__ = 0; __iter_1__ <= N-1; __iter_1__ += FORMA_BLOCKDIM_Y) {
int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
__tilevar_0__[__iter_3__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_2__,rowy)] = input[__iter_3__+M*__iter_2__];
}
__syncthreads();
int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)] = __temp_35__;
}
}
__syncthreads();
int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
float __temp_2__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_5__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__var_1__[__iter_11__+(M)*(__iter_10__)] = __temp_35__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(2*(FORMA_BLOCKDIM_Y+32)*FORMA_BLOCKDIM_X);
return SMemSize;
}
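/* Added note: with the 32x32 thread block configured in the host code below, the value
 * computed above is sizeof(float) * 2 * (32 + 32) * 32 = 4 * 4096 = 16384 bytes (16 KB)
 * of dynamic shared memory per block -- one (FORMA_BLOCKDIM_Y+32) x FORMA_BLOCKDIM_X tile
 * each for __tilevar_0__ and __tilevar_1__. */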
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __var_2__;
hipMalloc(&__var_2__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-4);
int __grid_1___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__var_2__);
}
/*Host Free End*/
| d8696fe1755ac63ee24d6483cd97ff6cfec0be26.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+32)*FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+32)*FORMA_BLOCKDIM_X);
int rowy = FORMA_BLOCKDIM_Y+32;
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-4);
for (int __iter_1__ = 0; __iter_1__ <= N-1; __iter_1__ += FORMA_BLOCKDIM_Y) {
int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
__tilevar_0__[__iter_3__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_2__,rowy)] = input[__iter_3__+M*__iter_2__];
}
__syncthreads();
int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)] = __temp_35__;
}
}
__syncthreads();
int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
float __temp_2__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_5__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__var_1__[__iter_11__+(M)*(__iter_10__)] = __temp_35__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(2*(FORMA_BLOCKDIM_Y+32)*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __var_2__;
cudaMalloc(&__var_2__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-4);
int __grid_1___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__var_2__);
}
/*Host Free End*/
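/* Added sketch (not part of the generated code): a hypothetical caller. jacobi() expects a
 * row-major N x M float array and writes the smoothed result into its last argument; a real
 * build must also link a definition of Check_CUDA_Error(), which is only declared above. */
static void jacobi_example()
{
    const int N = 1024, M = 1024;          /* assumption: any sizes of at least 3 x 3 */
    float *in = new float[N * M];
    float *out = new float[N * M];
    for (int i = 0; i < N * M; ++i)
        in[i] = (float)(i % 7);            /* arbitrary test pattern */
    jacobi(in, N, M, out);                 /* runs the stencil passes on the GPU */
    delete[] in;
    delete[] out;
}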
|
9ae96885cec4f9801d5418e277321f21399ec5b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include <cmath>
#include <string>
#include <omp.h>
#include <thread>
#include "diff1d.h"
#include "cuda_helper.h"
#define value_t float
#define index_t int
// constants
__constant__ value_t c_zero, c_one, c_two;
__global__ void kernel(index_t n, value_t r, value_t *u, value_t *u_new)
{
index_t lid = threadIdx.x;
index_t sid = lid + 1;
index_t width = blockDim.x;
index_t gid = blockIdx.x * width + threadIdx.x;
extern __shared__ value_t s_u[]; // width + 2
if (gid < n)
{
s_u[sid] = u[gid];
if (lid == 0 && gid != 0)
s_u[sid - 1] = u[gid - 1];
if (lid == width - 1 && gid != n - 1)
s_u[sid + 1] = u[gid + 1];
}
__syncthreads();
if (gid < n)
{
if (gid == 0)
u_new[gid] = c_zero;
else if (gid == n - 1)
u_new[gid] = c_zero;
else
u_new[gid] = (c_one - c_two * r) * s_u[sid] + r * (s_u[sid - 1] + s_u[sid + 1]);
}
}
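// Added note: the update above is the standard explicit (FTCS) step for 1-D diffusion,
// u_new[i] = (1 - 2r) * u[i] + r * (u[i-1] + u[i+1]),
// with the two end points held at zero. If r is the usual alpha*dt/dx^2 (its definition
// lives in diff1d.h, which is not shown here), the explicit scheme is only stable for r <= 1/2.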
struct diff1d_l2 : public diff1d<value_t, index_t>
{
void benchmark()
{
print_bench();
value_t *u = new value_t[total_size];
value_t *u_new = new value_t[total_size];
initial_condition(u, u_new);
value_t *d_u, *d_u_new;
checkCudaErrors(hipMalloc(&d_u, total_size * sizeof(value_t)));
checkCudaErrors(hipMalloc(&d_u_new, total_size * sizeof(value_t)));
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipMemcpy(d_u, u, total_size * sizeof(value_t), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_u_new, u_new, total_size * sizeof(value_t), hipMemcpyHostToDevice));
value_t zero = 0.0;
value_t one = 1.0;
value_t two = 2.0;
checkCudaErrors(hipMemcpyToSymbol(c_zero, &zero, sizeof(value_t), 0, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyToSymbol(c_one, &one, sizeof(value_t), 0, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyToSymbol(c_two, &two, sizeof(value_t), 0, hipMemcpyHostToDevice));
dim3 blockd3 = dim3(block, 1, 1);
dim3 grid = calc_grid1d(blockd3, total_size);
std::cout << " Block: " << blockd3.x << "(x) X " << blockd3.y << "(y)\n"
<< " Grid size: " << grid.x << "\n";
int sm_memsize = (blockd3.x + 2) * sizeof(value_t);
std::cout << " Shared memory needed: " << sm_memsize << " Byte\n\n";
loops = 0;
auto startcpu = std::chrono::high_resolution_clock::now();
checkCudaErrors(hipEventRecord(start));
while ((std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - startcpu)
.count()) < 1000.0 * benchtime)
{
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), sm_memsize, 0, total_size, r, d_u, d_u_new);
checkCudaErrorsAfterKernels;
// swap u and u_new
value_t *tmp = d_u;
d_u = d_u_new;
d_u_new = tmp;
loops++;
}
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
float du = 0;
checkCudaErrors(hipEventElapsedTime(&du, start, stop));
duration = 1.0e-3 * du;
checkCudaErrors(hipMemcpy(u, d_u, total_size * sizeof(value_t), hipMemcpyDeviceToHost));
value_t t = delta_t * value_t(loops);
test_result(u, t);
print_performance();
delete[] u;
delete[] u_new;
checkCudaErrors(hipFree(d_u));
checkCudaErrors(hipFree(d_u_new));
}
diff1d_l2(int narg, char **arg) : diff1d<value_t, index_t>(narg, arg) {}
};
int main(int narg, char **arg)
{
check_cuda_device();
diff1d_l2 test(narg, arg);
test.benchmark();
} | 9ae96885cec4f9801d5418e277321f21399ec5b8.cu | #include <chrono>
#include <cmath>
#include <string>
#include <omp.h>
#include <thread>
#include "diff1d.h"
#include "cuda_helper.h"
#define value_t float
#define index_t int
// constants
__constant__ value_t c_zero, c_one, c_two;
__global__ void kernel(index_t n, value_t r, value_t *u, value_t *u_new)
{
index_t lid = threadIdx.x;
index_t sid = lid + 1;
index_t width = blockDim.x;
index_t gid = blockIdx.x * width + threadIdx.x;
extern __shared__ value_t s_u[]; // width + 2
if (gid < n)
{
s_u[sid] = u[gid];
if (lid == 0 && gid != 0)
s_u[sid - 1] = u[gid - 1];
if (lid == width - 1 && gid != n - 1)
s_u[sid + 1] = u[gid + 1];
}
__syncthreads();
if (gid < n)
{
if (gid == 0)
u_new[gid] = c_zero;
else if (gid == n - 1)
u_new[gid] = c_zero;
else
u_new[gid] = (c_one - c_two * r) * s_u[sid] + r * (s_u[sid - 1] + s_u[sid + 1]);
}
}
struct diff1d_l2 : public diff1d<value_t, index_t>
{
void benchmark()
{
print_bench();
value_t *u = new value_t[total_size];
value_t *u_new = new value_t[total_size];
initial_condition(u, u_new);
value_t *d_u, *d_u_new;
checkCudaErrors(cudaMalloc(&d_u, total_size * sizeof(value_t)));
checkCudaErrors(cudaMalloc(&d_u_new, total_size * sizeof(value_t)));
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaMemcpy(d_u, u, total_size * sizeof(value_t), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_u_new, u_new, total_size * sizeof(value_t), cudaMemcpyHostToDevice));
value_t zero = 0.0;
value_t one = 1.0;
value_t two = 2.0;
checkCudaErrors(cudaMemcpyToSymbol(c_zero, &zero, sizeof(value_t), 0, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToSymbol(c_one, &one, sizeof(value_t), 0, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToSymbol(c_two, &two, sizeof(value_t), 0, cudaMemcpyHostToDevice));
dim3 blockd3 = dim3(block, 1, 1);
dim3 grid = calc_grid1d(blockd3, total_size);
std::cout << " Block: " << blockd3.x << "(x) X " << blockd3.y << "(y)\n"
<< " Grid size: " << grid.x << "\n";
int sm_memsize = (blockd3.x + 2) * sizeof(value_t);
std::cout << " Shared memory needed: " << sm_memsize << " Byte\n\n";
loops = 0;
auto startcpu = std::chrono::high_resolution_clock::now();
checkCudaErrors(cudaEventRecord(start));
while ((std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - startcpu)
.count()) < 1000.0 * benchtime)
{
kernel<<<grid, block, sm_memsize>>>(total_size, r, d_u, d_u_new);
checkCudaErrorsAfterKernels;
// swap u and u_new
value_t *tmp = d_u;
d_u = d_u_new;
d_u_new = tmp;
loops++;
}
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
float du = 0;
checkCudaErrors(cudaEventElapsedTime(&du, start, stop));
duration = 1.0e-3 * du;
checkCudaErrors(cudaMemcpy(u, d_u, total_size * sizeof(value_t), cudaMemcpyDeviceToHost));
value_t t = delta_t * value_t(loops);
test_result(u, t);
print_performance();
delete[] u;
delete[] u_new;
checkCudaErrors(cudaFree(d_u));
checkCudaErrors(cudaFree(d_u_new));
}
diff1d_l2(int narg, char **arg) : diff1d<value_t, index_t>(narg, arg) {}
};
int main(int narg, char **arg)
{
check_cuda_device();
diff1d_l2 test(narg, arg);
test.benchmark();
} |
3681c095a145f63f194c2cb1470afc05ef65fc2a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
bool verify(int data[], int length)
{
for (int i = 1 ; i < length; ++i)
{
if (data[i] - data [i - 1] != i )
{ printf("error %d\n", i); return false; }
}
return true;
}
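/* Added note: main() below fills data[i] = i, so after an inclusive prefix sum
 * data[i] = 0 + 1 + ... + i = i*(i+1)/2 and the difference of consecutive entries is exactly
 * i, which is the property checked here. (The loop starts at i = 1, so data[0] == 0 itself
 * is not checked.) */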
#define DUMP(x) printf("%s %d\n", #x, (int) props.x)
void dumpCUDAProps(hipDeviceProp_t & props)
{
DUMP(canMapHostMemory);
DUMP(clockRate);
DUMP(computeMode);
DUMP(deviceOverlap);
DUMP(integrated);
DUMP(kernelExecTimeoutEnabled);
DUMP(major);
DUMP(maxGridSize[0]);
DUMP(maxGridSize[1]);
DUMP(maxGridSize[2]);
DUMP(maxThreadsDim[0]);
DUMP(maxThreadsDim[1]);
DUMP(maxThreadsDim[2]);
DUMP(maxThreadsPerBlock);
DUMP(memPitch);
DUMP(minor);
DUMP(multiProcessorCount);
printf("name %s\n", props.name);
DUMP(regsPerBlock);
DUMP(sharedMemPerBlock);
DUMP(textureAlignment);
DUMP(totalConstMem);
DUMP(totalGlobalMem);
DUMP(warpSize);
}
#define BLOCK_SIZE 64
__global__ void prefixsumblock(int *in, int *out, int length)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (x < length)
out[x] = in[x];
__syncthreads();
for ( int i = 1; i < BLOCK_SIZE; i <<= 1)
{
if (threadIdx.x + i < BLOCK_SIZE && x + i < length)
{
out[x + i] = in[x] + in[x + i];
}
__syncthreads();
if (x < length)
in[x] = out[x];
__syncthreads();
}
}
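/* Added note: the kernel above is a Hillis-Steele style inclusive scan written from the
 * lower thread's side -- on each pass the thread at x adds its value into position x + i
 * rather than reading from x - i. Trace for one block with input [a, b, c, d] (using
 * BLOCK_SIZE = 4 just for the example; the real block size is 64):
 *   after i = 1 : [a, a+b, b+c, c+d]
 *   after i = 2 : [a, a+b, a+b+c, a+b+c+d]
 * so out[] ends up holding the inclusive prefix sum of the block. */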
__global__ void correctsumends(int *ends, int *in, int *out, int length)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
int end = ends[blockIdx.x];
if (x < length) // guard the last, possibly partial, block
out[x] = in[x] + end;
}
__global__ void gathersumends(int *in, int *out, int blocks)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (x >= blocks) // only `blocks` block-total slots exist
return;
if (x > 0)
out[x] = in[x * BLOCK_SIZE - 1];
else
out[x] = 0;
}
__global__ void zarro(int *data, int length)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (x < length)
data[x] = 0;
}
void prefixsum(int* in, int *out, int length)
{
int blocks = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(blocks, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( zarro), dim3(dimGrid), dim3(dimBlock), 0, 0, out, length);
hipLaunchKernelGGL(( prefixsumblock), dim3(dimGrid), dim3(dimBlock), 0, 0, in, out, length);
if (blocks > 1) {
int *devEnds;
int *devTmpEnds;
hipMalloc((void**) &devEnds, blocks * sizeof(int));
hipMalloc((void**) &devTmpEnds, blocks * sizeof(int));
int subblocks = (blocks + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 subgrid(subblocks, 1, 1);
dim3 subblock(BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( gathersumends), dim3(subgrid), dim3(subblock), 0, 0, out, devEnds, blocks);
prefixsum(devEnds, devTmpEnds, blocks);
hipFree(devEnds);
hipLaunchKernelGGL(( correctsumends), dim3(dimGrid), dim3(dimBlock), 0, 0, devTmpEnds, in, out, length);
hipFree(devTmpEnds);
}
}
void cudasummer(int data[], int length)
{
int *devIn, *devOut;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipMalloc((void**) &devIn, length * sizeof(int));
hipMalloc((void**) &devOut, length * sizeof(int));
hipMemcpy(devIn, data, length * sizeof(int), hipMemcpyHostToDevice);
prefixsum(devIn, devOut, length);
hipMemcpy(data, devOut, length * sizeof(int), hipMemcpyDeviceToHost);
hipFree(devIn);
hipFree(devOut);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float t;
hipEventElapsedTime(&t, start, stop);
printf("Elapsed time %3fms\n", t);
hipEventDestroy(start);
hipEventDestroy(stop);
}
void devicesDump()
{
int deviceCount;
if( hipGetDeviceCount(&deviceCount) != hipSuccess )
{
printf("Could not get device count\n");
exit(1);
}
int device;
for (device = 0; device < deviceCount; ++device) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
dumpCUDAProps(deviceProp);
}
}
int main(int argc, char *argv[])
{
int length;
if (argc < 2) {
length = 500;
}
else length = atoi(argv[1]);
int *data = (int*) malloc(length * sizeof(int));
for (int i = 0; i < length; ++i) {
data[i] = i; //rand();
}
devicesDump();
cudasummer(data, length);
if (length < 1000)
for (int i = 0 ; i < length; ++i)
{
printf("%d\n", data[i]);
}
verify(data, length);
return 0;
}
| 3681c095a145f63f194c2cb1470afc05ef65fc2a.cu | #include <stdio.h>
#include <stdlib.h>
bool verify(int data[], int length)
{
for (int i = 1 ; i < length; ++i)
{
if (data[i] - data [i - 1] != i )
{ printf("error %d\n", i); return false; }
}
return true;
}
#define DUMP(x) printf("%s %d\n", #x, (int) props.x)
void dumpCUDAProps(cudaDeviceProp & props)
{
DUMP(canMapHostMemory);
DUMP(clockRate);
DUMP(computeMode);
DUMP(deviceOverlap);
DUMP(integrated);
DUMP(kernelExecTimeoutEnabled);
DUMP(major);
DUMP(maxGridSize[0]);
DUMP(maxGridSize[1]);
DUMP(maxGridSize[2]);
DUMP(maxThreadsDim[0]);
DUMP(maxThreadsDim[1]);
DUMP(maxThreadsDim[2]);
DUMP(maxThreadsPerBlock);
DUMP(memPitch);
DUMP(minor);
DUMP(multiProcessorCount);
printf("name %s\n", props.name);
DUMP(regsPerBlock);
DUMP(sharedMemPerBlock);
DUMP(textureAlignment);
DUMP(totalConstMem);
DUMP(totalGlobalMem);
DUMP(warpSize);
}
#define BLOCK_SIZE 64
__global__ void prefixsumblock(int *in, int *out, int length)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (x < length)
out[x] = in[x];
__syncthreads();
for ( int i = 1; i < BLOCK_SIZE; i <<= 1)
{
if (threadIdx.x + i < BLOCK_SIZE && x + i < length)
{
out[x + i] = in[x] + in[x + i];
}
__syncthreads();
if (x < length)
in[x] = out[x];
__syncthreads();
}
}
__global__ void correctsumends(int *ends, int *in, int *out, int length)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
int end = ends[blockIdx.x];
if (x < length) // guard the last, possibly partial, block
out[x] = in[x] + end;
}
__global__ void gathersumends(int *in, int *out, int blocks)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (x >= blocks) // only `blocks` block-total slots exist
return;
if (x > 0)
out[x] = in[x * BLOCK_SIZE - 1];
else
out[x] = 0;
}
__global__ void zarro(int *data, int length)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (x < length)
data[x] = 0;
}
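/* Added walkthrough for the recursive host routine below (illustrative, length = 500):
 * blocks = ceil(500/64) = 8, so prefixsumblock first scans eight 64-element chunks
 * independently. gathersumends then copies each chunk's total (its last scanned element)
 * into devEnds, shifted by one slot with a leading 0; the recursive prefixsum() call turns
 * those 8 totals into running offsets (they fit in a single block, so the recursion stops
 * there); and correctsumends adds devTmpEnds[b] to every element of chunk b, turning the
 * per-chunk scans into one global inclusive prefix sum. */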
void prefixsum(int* in, int *out, int length)
{
int blocks = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(blocks, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
zarro<<<dimGrid, dimBlock>>>(out, length);
prefixsumblock<<<dimGrid, dimBlock>>>(in, out, length);
if (blocks > 1) {
int *devEnds;
int *devTmpEnds;
cudaMalloc((void**) &devEnds, blocks * sizeof(int));
cudaMalloc((void**) &devTmpEnds, blocks * sizeof(int));
int subblocks = (blocks + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 subgrid(subblocks, 1, 1);
dim3 subblock(BLOCK_SIZE, 1, 1);
gathersumends<<<subgrid, subblock>>>(out, devEnds, blocks);
prefixsum(devEnds, devTmpEnds, blocks);
cudaFree(devEnds);
correctsumends<<<dimGrid, dimBlock>>>(devTmpEnds, in, out, length);
cudaFree(devTmpEnds);
}
}
void cudasummer(int data[], int length)
{
int *devIn, *devOut;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaMalloc((void**) &devIn, length * sizeof(int));
cudaMalloc((void**) &devOut, length * sizeof(int));
cudaMemcpy(devIn, data, length * sizeof(int), cudaMemcpyHostToDevice);
prefixsum(devIn, devOut, length);
cudaMemcpy(data, devOut, length * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(devIn);
cudaFree(devOut);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float t;
cudaEventElapsedTime(&t, start, stop);
printf("Elapsed time %3fms\n", t);
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void devicesDump()
{
int deviceCount;
if( cudaGetDeviceCount(&deviceCount) != cudaSuccess )
{
printf("Could not get device count\n");
exit(1);
}
int device;
for (device = 0; device < deviceCount; ++device) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
dumpCUDAProps(deviceProp);
}
}
int main(int argc, char *argv[])
{
int length;
if (argc < 2) {
length = 500;
}
else length = atoi(argv[1]);
int *data = (int*) malloc(length * sizeof(int));
for (int i = 0; i < length; ++i) {
data[i] = i; //rand();
}
devicesDump();
cudasummer(data, length);
if (length < 1000)
for (int i = 0 ; i < length; ++i)
{
printf("%d\n", data[i]);
}
verify(data, length);
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.