hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
1f76776811ee97d15faa331c164794dfc41849ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "../../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* simpleDivergence demonstrates divergent code on the GPU and its impact on
* performance and CUDA metrics.
*/
__global__ void mathKernel1(float *c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = ib = 0.0f;
if (tid % 2 == 0)
{
ia = 100.0f;
}
else
{
ib = 200.0f;
}
c[tid] = ia + ib;
}
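// Illustrative sketch (hypothetical, not from the original source): mathKernel1 diverges
// because even and odd threads of the same warp take different branches, so both paths
// are executed serially within each warp. A branch-free variant selects the value
// arithmetically and avoids the divergence entirely (kernel name is made up here):
__global__ void mathKernel1_predicated(float *c)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    bool even = (tid % 2 == 0);
    // Every thread evaluates the same expression; the ternary typically compiles to a select.
    c[tid] = even ? 100.0f : 200.0f;
}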
__global__ void mathKernel2(float *c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = ib = 0.0f;
if ((tid / warpSize) % 2 == 0)
{
ia = 100.0f;
}
else
{
ib = 200.0f;
}
c[tid] = ia + ib;
}
__global__ void mathKernel3(float *c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = ib = 0.0f;
for (int i = 1; i <= 100; ++i) { // start at 1: "tid % 0" is undefined
bool ipred = (tid % i == 0);
if (ipred)
{
ia = 100.0f;
}
if (!ipred)
{
ib = 200.0f;
}
c[tid] = ia + ib;
}
}
__global__ void mathKernel4(float *c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = ib = 0.0f;
int itid = tid >> 5;
if ((itid & 0x01) == 0)
{
ia = 100.0f;
}
else
{
ib = 200.0f;
}
c[tid] = ia + ib;
}
__global__ void warmingup(float *c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = ib = 0.0f;
if ((tid / warpSize) % 2 == 0)
{
ia = 100.0f;
}
else
{
ib = 200.0f;
}
c[tid] = ia + ib;
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s using Device %d: %s\n", argv[0], dev, deviceProp.name);
// set up data size
int size = 64000;
int blocksize = 64;
if(argc > 1) blocksize = atoi(argv[1]);
if(argc > 2) size = atoi(argv[2]);
printf("Data size %d ", size);
// set up execution configuration
dim3 block (blocksize, 1);
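// The grid size below uses integer ceiling division: (size + block.x - 1) / block.x
// launches just enough blocks of block.x threads to cover all `size` elements.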
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("Execution Configure (block %d grid %d)\n", block.x, grid.x);
// allocate gpu memory
float *d_C;
size_t nBytes = size * sizeof(float);
CHECK(hipMalloc((float**)&d_C, nBytes));
// run a warmup kernel to remove overhead
double iStart, iElaps; // stored as double so the %.8f format below prints correctly
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( warmingup), dim3(grid), dim3(block), 0, 0, d_C);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("warmup <<< %4d %4d >>> elapsed %.8f sec \n", grid.x, block.x,
iElaps );
CHECK(hipGetLastError());
// run kernel 1
iStart = seconds();
hipLaunchKernelGGL(( mathKernel1), dim3(grid), dim3(block), 0, 0, d_C);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("mathKernel1 <<< %4d %4d >>> elapsed %.8f sec \n", grid.x, block.x,
iElaps );
CHECK(hipGetLastError());
// run kernel 2
iStart = seconds();
hipLaunchKernelGGL(( mathKernel2), dim3(grid), dim3(block), 0, 0, d_C);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("mathKernel2 <<< %4d %4d >>> elapsed %.8f sec \n", grid.x, block.x,
iElaps );
CHECK(hipGetLastError());
// run kernel 3
iStart = seconds();
hipLaunchKernelGGL(( mathKernel3), dim3(grid), dim3(block), 0, 0, d_C);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("mathKernel3 <<< %4d %4d >>> elapsed %.8f sec \n", grid.x, block.x,
iElaps);
CHECK(hipGetLastError());
// run kernel 4
iStart = seconds();
hipLaunchKernelGGL(( mathKernel4), dim3(grid), dim3(block), 0, 0, d_C);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("mathKernel4 <<< %4d %4d >>> elapsed %.8f sec \n", grid.x, block.x,
iElaps);
CHECK(hipGetLastError());
// free gpu memory and reset device
CHECK(hipFree(d_C));
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
} | 1f76776811ee97d15faa331c164794dfc41849ac.cu | #include "../../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* simpleDivergence demonstrates divergent code on the GPU and its impact on
* performance and CUDA metrics.
*/
__global__ void mathKernel1(float *c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = ib = 0.0f;
if (tid % 2 == 0)
{
ia = 100.0f;
}
else
{
ib = 200.0f;
}
c[tid] = ia + ib;
}
__global__ void mathKernel2(float *c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = ib = 0.0f;
if ((tid / warpSize) % 2 == 0)
{
ia = 100.0f;
}
else
{
ib = 200.0f;
}
c[tid] = ia + ib;
}
__global__ void mathKernel3(float *c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = ib = 0.0f;
for (int i = 1; i <= 100; ++i) { // start at 1: "tid % 0" is undefined
bool ipred = (tid % i == 0);
if (ipred)
{
ia = 100.0f;
}
if (!ipred)
{
ib = 200.0f;
}
c[tid] = ia + ib;
}
}
__global__ void mathKernel4(float *c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = ib = 0.0f;
int itid = tid >> 5;
if ((itid & 0x01) == 0)
{
ia = 100.0f;
}
else
{
ib = 200.0f;
}
c[tid] = ia + ib;
}
__global__ void warmingup(float *c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float ia, ib;
ia = ib = 0.0f;
if ((tid / warpSize) % 2 == 0)
{
ia = 100.0f;
}
else
{
ib = 200.0f;
}
c[tid] = ia + ib;
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s using Device %d: %s\n", argv[0], dev, deviceProp.name);
// set up data size
int size = 64000;
int blocksize = 64;
if(argc > 1) blocksize = atoi(argv[1]);
if(argc > 2) size = atoi(argv[2]);
printf("Data size %d ", size);
// set up execution configuration
dim3 block (blocksize, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("Execution Configure (block %d grid %d)\n", block.x, grid.x);
// allocate gpu memory
float *d_C;
size_t nBytes = size * sizeof(float);
CHECK(cudaMalloc((float**)&d_C, nBytes));
// run a warmup kernel to remove overhead
double iStart, iElaps; // stored as double so the %.8f format below prints correctly
CHECK(cudaDeviceSynchronize());
iStart = seconds();
warmingup<<<grid, block>>>(d_C);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("warmup <<< %4d %4d >>> elapsed %.8f sec \n", grid.x, block.x,
iElaps );
CHECK(cudaGetLastError());
// run kernel 1
iStart = seconds();
mathKernel1<<<grid, block>>>(d_C);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("mathKernel1 <<< %4d %4d >>> elapsed %.8f sec \n", grid.x, block.x,
iElaps );
CHECK(cudaGetLastError());
// run kernel 2
iStart = seconds();
mathKernel2<<<grid, block>>>(d_C);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("mathKernel2 <<< %4d %4d >>> elapsed %.8f sec \n", grid.x, block.x,
iElaps );
CHECK(cudaGetLastError());
// run kernel 3
iStart = seconds();
mathKernel3<<<grid, block>>>(d_C);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("mathKernel3 <<< %4d %4d >>> elapsed %.8f sec \n", grid.x, block.x,
iElaps);
CHECK(cudaGetLastError());
// run kernel 4
iStart = seconds();
mathKernel4<<<grid, block>>>(d_C);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("mathKernel4 <<< %4d %4d >>> elapsed %.8f sec \n", grid.x, block.x,
iElaps);
CHECK(cudaGetLastError());
// free gpu memory and reset device
CHECK(cudaFree(d_C));
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
} |
0f7a7cb1b5c0d3440f97ed922126a5f2aba2aa6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ---------------------------------------------------------
// Author: Andy Zeng, Princeton University, 2016
// ---------------------------------------------------------
#include <iostream>
#include <fstream>
#include <iomanip>
#include <sstream>
#include <string>
#include "utils.hpp"
// CUDA kernel function to integrate a TSDF voxel volume given depth images
__global__
void Integrate(float * cam_K, float * cam2base, float * depth_im,
int im_height, int im_width, int voxel_grid_dim_x, int voxel_grid_dim_y, int voxel_grid_dim_z,
float voxel_grid_origin_x, float voxel_grid_origin_y, float voxel_grid_origin_z, float voxel_size, float trunc_margin,
float * voxel_grid_TSDF, float * voxel_grid_weight) {
int pt_grid_z = blockIdx.x;
int pt_grid_y = threadIdx.x;
for (int pt_grid_x = 0; pt_grid_x < voxel_grid_dim_x; ++pt_grid_x) {
// Convert voxel center from grid coordinates to base frame camera coordinates
float pt_base_x = voxel_grid_origin_x + pt_grid_x * voxel_size;
float pt_base_y = voxel_grid_origin_y + pt_grid_y * voxel_size;
float pt_base_z = voxel_grid_origin_z + pt_grid_z * voxel_size;
// Convert from base frame camera coordinates to current frame camera coordinates
float tmp_pt[3] = {0};
tmp_pt[0] = pt_base_x - cam2base[0 * 4 + 3];
tmp_pt[1] = pt_base_y - cam2base[1 * 4 + 3];
tmp_pt[2] = pt_base_z - cam2base[2 * 4 + 3];
float pt_cam_x = cam2base[0 * 4 + 0] * tmp_pt[0] + cam2base[1 * 4 + 0] * tmp_pt[1] + cam2base[2 * 4 + 0] * tmp_pt[2];
float pt_cam_y = cam2base[0 * 4 + 1] * tmp_pt[0] + cam2base[1 * 4 + 1] * tmp_pt[1] + cam2base[2 * 4 + 1] * tmp_pt[2];
float pt_cam_z = cam2base[0 * 4 + 2] * tmp_pt[0] + cam2base[1 * 4 + 2] * tmp_pt[1] + cam2base[2 * 4 + 2] * tmp_pt[2];
if (pt_cam_z <= 0)
continue;
int pt_pix_x = roundf(cam_K[0 * 3 + 0] * (pt_cam_x / pt_cam_z) + cam_K[0 * 3 + 2]);
int pt_pix_y = roundf(cam_K[1 * 3 + 1] * (pt_cam_y / pt_cam_z) + cam_K[1 * 3 + 2]);
if (pt_pix_x < 0 || pt_pix_x >= im_width || pt_pix_y < 0 || pt_pix_y >= im_height)
continue;
float depth_val = depth_im[pt_pix_y * im_width + pt_pix_x];
if (depth_val <= 0 || depth_val > 6)
continue;
float diff = depth_val - pt_cam_z;
if (diff <= -trunc_margin)
continue;
// Integrate
int volume_idx = pt_grid_z * voxel_grid_dim_y * voxel_grid_dim_x + pt_grid_y * voxel_grid_dim_x + pt_grid_x;
float dist = fmin(1.0f, diff / trunc_margin);
float weight_old = voxel_grid_weight[volume_idx];
float weight_new = weight_old + 1.0f;
voxel_grid_weight[volume_idx] = weight_new;
voxel_grid_TSDF[volume_idx] = (voxel_grid_TSDF[volume_idx] * weight_old + dist) / weight_new;
}
}
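// Illustrative sketch (hypothetical, not from the original source): the kernel above runs
// one block per z-slice and one thread per y-row, and each thread sweeps the x axis. The
// per-voxel update it performs is a running weighted average of the truncated signed
// distance. A minimal single-voxel equivalent, under the same assumptions, looks like this
// (update_voxel is a made-up helper name):
__host__ __device__ inline void update_voxel(float &tsdf, float &weight, float dist)
{
    float weight_new = weight + 1.0f;             // each observation contributes weight 1
    tsdf = (tsdf * weight + dist) / weight_new;   // incremental weighted mean
    weight = weight_new;
}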
// Loads a binary file with depth data and generates a TSDF voxel volume (5m x 5m x 5m at 1cm resolution)
// Volume is aligned with respect to the camera coordinates of the first frame (a.k.a. base frame)
int main(int argc, char * argv[]) {
std::cout << "running tsdf fusion " << std::endl;
// Location of folder containing RGB-D frames and camera pose files
std::string data_path;
// location of camera intrinsics file
std::string cam_K_file;
float cam_K[3 * 3];
float base2world[4 * 4];
float cam2base[4 * 4];
float cam2world[4 * 4];
int im_width = 640;
int im_height = 480;
float depth_im[im_height * im_width];
// Voxel grid parameters (change these to change voxel grid resolution, etc.)
float voxel_grid_origin_x = 0.4f; // Location of voxel grid origin in base frame camera coordinates
float voxel_grid_origin_y = -0.3f;
float voxel_grid_origin_z = -0.2f;
float voxel_size = 0.002f;
float trunc_margin = voxel_size * 5;
int voxel_grid_dim_x = 500;
int voxel_grid_dim_y = 500;
int voxel_grid_dim_z = 500;
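// Note: with the defaults above (500 x 500 x 500 voxels at 0.002 m) the grid covers a
// 1 m cube, and each float array (TSDF and weight) occupies 500^3 * 4 bytes = 500 MB,
// i.e. about 1 GB on the host and again on the GPU.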
// Manual parameters
if (argc > 1) {
std::cout << "parsing data path\n";
std::cout << "argc " << argc << std::endl;
data_path = argv[1];
cam_K_file = argv[2];
}
if (argc > 3){
std::cout << "parsing additional parameters\n";
int counter = 3;
voxel_size = atof(argv[counter]);
counter++;
voxel_grid_dim_x = std::atoi(argv[counter]);
counter++;
voxel_grid_dim_y = std::atoi(argv[counter]);
counter++;
voxel_grid_dim_z = std::atoi(argv[counter]);
counter++;
voxel_grid_origin_x = std::atof(argv[counter]);
counter++;
voxel_grid_origin_y = std::atof(argv[counter]);
counter++;
voxel_grid_origin_z = std::atof(argv[counter]);
counter++;
std::cout << "finished parsing params\n";
}
trunc_margin = 5 * voxel_size;
std::cout << "data_path "<< data_path << std::endl;
// Read camera intrinsics
std::vector<float> cam_K_vec = LoadMatrixFromFile(cam_K_file, 3, 3);
std::copy(cam_K_vec.begin(), cam_K_vec.end(), cam_K);
// set base2world to be the identity
for(int i = 0; i < 16; i++){
base2world[i] = 0;
}
base2world[0] = 1;
base2world[5] = 1;
base2world[10] = 1;
base2world[15] = 1;
// for(int i = 0; i < 16; i++){
// std::cout << "base2world " << i << " = " << base2world[i] << std::endl;
// }
// Invert base frame camera pose to get world-to-base frame transform
float base2world_inv[16] = {0};
invert_matrix(base2world, base2world_inv);
// Initialize voxel grid
float * voxel_grid_TSDF = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
float * voxel_grid_weight = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
voxel_grid_TSDF[i] = 1.0f;
memset(voxel_grid_weight, 0, sizeof(float) * voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z);
// Load variables to GPU memory
float * gpu_voxel_grid_TSDF;
float * gpu_voxel_grid_weight;
hipMalloc(&gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
hipMalloc(&gpu_voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
checkCUDA(__LINE__, hipGetLastError());
hipMemcpy(gpu_voxel_grid_TSDF, voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_voxel_grid_weight, voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyHostToDevice);
checkCUDA(__LINE__, hipGetLastError());
float * gpu_cam_K;
float * gpu_cam2base;
float * gpu_depth_im;
hipMalloc(&gpu_cam_K, 3 * 3 * sizeof(float));
hipMemcpy(gpu_cam_K, cam_K, 3 * 3 * sizeof(float), hipMemcpyHostToDevice);
hipMalloc(&gpu_cam2base, 4 * 4 * sizeof(float));
hipMalloc(&gpu_depth_im, im_height * im_width * sizeof(float));
checkCUDA(__LINE__, hipGetLastError());
// Loop through each depth frame and integrate TSDF voxel grid
int frame_idx = 0;
while(true){
if (frame_idx > 2000) {
break;
}
std::ostringstream curr_frame_prefix;
curr_frame_prefix << std::setw(6) << std::setfill('0') << frame_idx;
frame_idx++;
// Read base frame camera pose
std::string cam2world_file = data_path + "/" + curr_frame_prefix.str() + "_pose.txt";
// check if the pose file exists; if not, skip this frame
std::ifstream ifile(cam2world_file);
if (ifile.fail()) {
// The file doesn't exist; skip to the next frame index
continue;
}
std::vector<float> cam2world_vec = LoadMatrixFromFile(cam2world_file, 4, 4);
std::copy(cam2world_vec.begin(), cam2world_vec.end(), cam2world);
// Read current frame depth
std::string depth_im_file = data_path + "/" + curr_frame_prefix.str() + "_depth.png";
ReadDepth(depth_im_file, im_height, im_width, depth_im);
// Compute relative camera pose (camera-to-base frame)
multiply_matrix(base2world_inv, cam2world, cam2base);
hipMemcpy(gpu_cam2base, cam2base, 4 * 4 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_depth_im, depth_im, im_height * im_width * sizeof(float), hipMemcpyHostToDevice);
checkCUDA(__LINE__, hipGetLastError());
std::cout << "Fusing: " << depth_im_file << std::endl;
hipLaunchKernelGGL(( Integrate) , dim3(voxel_grid_dim_z), dim3(voxel_grid_dim_y) , 0, 0, gpu_cam_K, gpu_cam2base, gpu_depth_im,
im_height, im_width, voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z, voxel_size, trunc_margin,
gpu_voxel_grid_TSDF, gpu_voxel_grid_weight);
}
// Load TSDF voxel grid from GPU to CPU memory
hipMemcpy(voxel_grid_TSDF, gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(voxel_grid_weight, gpu_voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyDeviceToHost);
checkCUDA(__LINE__, hipGetLastError());
std::string tsdf_ply_filename = data_path + "/tsdf.ply";
std::string tsdf_bin_filename = data_path + "/tsdf.bin";
// Compute surface points from TSDF voxel grid and save to point cloud .ply file
std::cout << "Saving surface point cloud (tsdf.ply)..." << std::endl;
std::cout << "tsdf_bin_filename " << tsdf_bin_filename << std::endl;
SaveVoxelGrid2SurfacePointCloud(tsdf_ply_filename, voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_size, voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z,
voxel_grid_TSDF, voxel_grid_weight, 0.2f, 0.0f);
// Save TSDF voxel grid and its parameters to disk as binary file (float array)
std::cout << "Saving TSDF voxel grid values to disk (tsdf.bin)..." << std::endl;
std::ofstream outFile(tsdf_bin_filename, std::ios::binary | std::ios::out);
float voxel_grid_dim_xf = (float) voxel_grid_dim_x;
float voxel_grid_dim_yf = (float) voxel_grid_dim_y;
float voxel_grid_dim_zf = (float) voxel_grid_dim_z;
outFile.write((char*)&voxel_grid_dim_xf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_yf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_zf, sizeof(float));
outFile.write((char*)&voxel_grid_origin_x, sizeof(float));
outFile.write((char*)&voxel_grid_origin_y, sizeof(float));
outFile.write((char*)&voxel_grid_origin_z, sizeof(float));
outFile.write((char*)&voxel_size, sizeof(float));
outFile.write((char*)&trunc_margin, sizeof(float));
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
outFile.write((char*)&voxel_grid_TSDF[i], sizeof(float));
outFile.close();
return 0;
}
| 0f7a7cb1b5c0d3440f97ed922126a5f2aba2aa6c.cu | // ---------------------------------------------------------
// Author: Andy Zeng, Princeton University, 2016
// ---------------------------------------------------------
#include <iostream>
#include <fstream>
#include <iomanip>
#include <sstream>
#include <string>
#include "utils.hpp"
// CUDA kernel function to integrate a TSDF voxel volume given depth images
__global__
void Integrate(float * cam_K, float * cam2base, float * depth_im,
int im_height, int im_width, int voxel_grid_dim_x, int voxel_grid_dim_y, int voxel_grid_dim_z,
float voxel_grid_origin_x, float voxel_grid_origin_y, float voxel_grid_origin_z, float voxel_size, float trunc_margin,
float * voxel_grid_TSDF, float * voxel_grid_weight) {
int pt_grid_z = blockIdx.x;
int pt_grid_y = threadIdx.x;
for (int pt_grid_x = 0; pt_grid_x < voxel_grid_dim_x; ++pt_grid_x) {
// Convert voxel center from grid coordinates to base frame camera coordinates
float pt_base_x = voxel_grid_origin_x + pt_grid_x * voxel_size;
float pt_base_y = voxel_grid_origin_y + pt_grid_y * voxel_size;
float pt_base_z = voxel_grid_origin_z + pt_grid_z * voxel_size;
// Convert from base frame camera coordinates to current frame camera coordinates
float tmp_pt[3] = {0};
tmp_pt[0] = pt_base_x - cam2base[0 * 4 + 3];
tmp_pt[1] = pt_base_y - cam2base[1 * 4 + 3];
tmp_pt[2] = pt_base_z - cam2base[2 * 4 + 3];
float pt_cam_x = cam2base[0 * 4 + 0] * tmp_pt[0] + cam2base[1 * 4 + 0] * tmp_pt[1] + cam2base[2 * 4 + 0] * tmp_pt[2];
float pt_cam_y = cam2base[0 * 4 + 1] * tmp_pt[0] + cam2base[1 * 4 + 1] * tmp_pt[1] + cam2base[2 * 4 + 1] * tmp_pt[2];
float pt_cam_z = cam2base[0 * 4 + 2] * tmp_pt[0] + cam2base[1 * 4 + 2] * tmp_pt[1] + cam2base[2 * 4 + 2] * tmp_pt[2];
if (pt_cam_z <= 0)
continue;
int pt_pix_x = roundf(cam_K[0 * 3 + 0] * (pt_cam_x / pt_cam_z) + cam_K[0 * 3 + 2]);
int pt_pix_y = roundf(cam_K[1 * 3 + 1] * (pt_cam_y / pt_cam_z) + cam_K[1 * 3 + 2]);
if (pt_pix_x < 0 || pt_pix_x >= im_width || pt_pix_y < 0 || pt_pix_y >= im_height)
continue;
float depth_val = depth_im[pt_pix_y * im_width + pt_pix_x];
if (depth_val <= 0 || depth_val > 6)
continue;
float diff = depth_val - pt_cam_z;
if (diff <= -trunc_margin)
continue;
// Integrate
int volume_idx = pt_grid_z * voxel_grid_dim_y * voxel_grid_dim_x + pt_grid_y * voxel_grid_dim_x + pt_grid_x;
float dist = fmin(1.0f, diff / trunc_margin);
float weight_old = voxel_grid_weight[volume_idx];
float weight_new = weight_old + 1.0f;
voxel_grid_weight[volume_idx] = weight_new;
voxel_grid_TSDF[volume_idx] = (voxel_grid_TSDF[volume_idx] * weight_old + dist) / weight_new;
}
}
// Loads a binary file with depth data and generates a TSDF voxel volume (5m x 5m x 5m at 1cm resolution)
// Volume is aligned with respect to the camera coordinates of the first frame (a.k.a. base frame)
int main(int argc, char * argv[]) {
std::cout << "running tsdf fusion " << std::endl;
// Location of folder containing RGB-D frames and camera pose files
std::string data_path;
// location of camera intrinsics file
std::string cam_K_file;
float cam_K[3 * 3];
float base2world[4 * 4];
float cam2base[4 * 4];
float cam2world[4 * 4];
int im_width = 640;
int im_height = 480;
float depth_im[im_height * im_width];
// Voxel grid parameters (change these to change voxel grid resolution, etc.)
float voxel_grid_origin_x = 0.4f; // Location of voxel grid origin in base frame camera coordinates
float voxel_grid_origin_y = -0.3f;
float voxel_grid_origin_z = -0.2f;
float voxel_size = 0.002f;
float trunc_margin = voxel_size * 5;
int voxel_grid_dim_x = 500;
int voxel_grid_dim_y = 500;
int voxel_grid_dim_z = 500;
// Manual parameters
if (argc > 1) {
std::cout << "parsing data path\n";
std::cout << "argc " << argc << std::endl;
data_path = argv[1];
cam_K_file = argv[2];
}
if (argc > 3){
std::cout << "parsing additional parameters\n";
int counter = 3;
voxel_size = atof(argv[counter]);
counter++;
voxel_grid_dim_x = std::atoi(argv[counter]);
counter++;
voxel_grid_dim_y = std::atoi(argv[counter]);
counter++;
voxel_grid_dim_z = std::atoi(argv[counter]);
counter++;
voxel_grid_origin_x = std::atof(argv[counter]);
counter++;
voxel_grid_origin_y = std::atof(argv[counter]);
counter++;
voxel_grid_origin_z = std::atof(argv[counter]);
counter++;
std::cout << "finished parsing params\n";
}
trunc_margin = 5 * voxel_size;
std::cout << "data_path "<< data_path << std::endl;
// Read camera intrinsics
std::vector<float> cam_K_vec = LoadMatrixFromFile(cam_K_file, 3, 3);
std::copy(cam_K_vec.begin(), cam_K_vec.end(), cam_K);
// set base2world to be the identity
for(int i = 0; i < 16; i++){
base2world[i] = 0;
}
base2world[0] = 1;
base2world[5] = 1;
base2world[10] = 1;
base2world[15] = 1;
// for(int i = 0; i < 16; i++){
// std::cout << "base2world " << i << " = " << base2world[i] << std::endl;
// }
// Invert base frame camera pose to get world-to-base frame transform
float base2world_inv[16] = {0};
invert_matrix(base2world, base2world_inv);
// Initialize voxel grid
float * voxel_grid_TSDF = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
float * voxel_grid_weight = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
voxel_grid_TSDF[i] = 1.0f;
memset(voxel_grid_weight, 0, sizeof(float) * voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z);
// Load variables to GPU memory
float * gpu_voxel_grid_TSDF;
float * gpu_voxel_grid_weight;
cudaMalloc(&gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
cudaMalloc(&gpu_voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
checkCUDA(__LINE__, cudaGetLastError());
cudaMemcpy(gpu_voxel_grid_TSDF, voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_voxel_grid_weight, voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyHostToDevice);
checkCUDA(__LINE__, cudaGetLastError());
float * gpu_cam_K;
float * gpu_cam2base;
float * gpu_depth_im;
cudaMalloc(&gpu_cam_K, 3 * 3 * sizeof(float));
cudaMemcpy(gpu_cam_K, cam_K, 3 * 3 * sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc(&gpu_cam2base, 4 * 4 * sizeof(float));
cudaMalloc(&gpu_depth_im, im_height * im_width * sizeof(float));
checkCUDA(__LINE__, cudaGetLastError());
// Loop through each depth frame and integrate TSDF voxel grid
int frame_idx = 0;
while(true){
if (frame_idx > 2000) {
break;
}
std::ostringstream curr_frame_prefix;
curr_frame_prefix << std::setw(6) << std::setfill('0') << frame_idx;
frame_idx++;
// Read base frame camera pose
std::string cam2world_file = data_path + "/" + curr_frame_prefix.str() + "_pose.txt";
// check if the pose file exists; if not, skip this frame
std::ifstream ifile(cam2world_file);
if (ifile.fail()) {
// The file doesn't exist; skip to the next frame index
continue;
}
std::vector<float> cam2world_vec = LoadMatrixFromFile(cam2world_file, 4, 4);
std::copy(cam2world_vec.begin(), cam2world_vec.end(), cam2world);
// Read current frame depth
std::string depth_im_file = data_path + "/" + curr_frame_prefix.str() + "_depth.png";
ReadDepth(depth_im_file, im_height, im_width, depth_im);
// Compute relative camera pose (camera-to-base frame)
multiply_matrix(base2world_inv, cam2world, cam2base);
cudaMemcpy(gpu_cam2base, cam2base, 4 * 4 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_depth_im, depth_im, im_height * im_width * sizeof(float), cudaMemcpyHostToDevice);
checkCUDA(__LINE__, cudaGetLastError());
std::cout << "Fusing: " << depth_im_file << std::endl;
Integrate <<< voxel_grid_dim_z, voxel_grid_dim_y >>>(gpu_cam_K, gpu_cam2base, gpu_depth_im,
im_height, im_width, voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z, voxel_size, trunc_margin,
gpu_voxel_grid_TSDF, gpu_voxel_grid_weight);
}
// Load TSDF voxel grid from GPU to CPU memory
cudaMemcpy(voxel_grid_TSDF, gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(voxel_grid_weight, gpu_voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDA(__LINE__, cudaGetLastError());
std::string tsdf_ply_filename = data_path + "/tsdf.ply";
std::string tsdf_bin_filename = data_path + "/tsdf.bin";
// Compute surface points from TSDF voxel grid and save to point cloud .ply file
std::cout << "Saving surface point cloud (tsdf.ply)..." << std::endl;
std::cout << "tsdf_bin_filename " << tsdf_bin_filename << std::endl;
SaveVoxelGrid2SurfacePointCloud(tsdf_ply_filename, voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_size, voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z,
voxel_grid_TSDF, voxel_grid_weight, 0.2f, 0.0f);
// Save TSDF voxel grid and its parameters to disk as binary file (float array)
std::cout << "Saving TSDF voxel grid values to disk (tsdf.bin)..." << std::endl;
std::ofstream outFile(tsdf_bin_filename, std::ios::binary | std::ios::out);
float voxel_grid_dim_xf = (float) voxel_grid_dim_x;
float voxel_grid_dim_yf = (float) voxel_grid_dim_y;
float voxel_grid_dim_zf = (float) voxel_grid_dim_z;
outFile.write((char*)&voxel_grid_dim_xf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_yf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_zf, sizeof(float));
outFile.write((char*)&voxel_grid_origin_x, sizeof(float));
outFile.write((char*)&voxel_grid_origin_y, sizeof(float));
outFile.write((char*)&voxel_grid_origin_z, sizeof(float));
outFile.write((char*)&voxel_size, sizeof(float));
outFile.write((char*)&trunc_margin, sizeof(float));
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
outFile.write((char*)&voxel_grid_TSDF[i], sizeof(float));
outFile.close();
return 0;
}
|
9036aaa3e13589f244a92ca7b41f47e728f2a86f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "internal.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "pcl/gpu/utils/device/block.hpp"
#include "utils/vector_operations.hpp"
#include "pcl/gpu/utils/device/funcattrib.hpp"
#include "pcl/gpu/features/device/pair_features.hpp"
using namespace pcl::gpu;
namespace pcl
{
namespace device
{
template<bool pack_rgb>
struct Repack
{
enum
{
CTA_SIZE = 256,
WARPS = CTA_SIZE/Warp::WARP_SIZE
};
const PointType* cloud;
const NormalType* normals;
int work_size;
PtrStep<int> gindices;
const int* sizes;
mutable PtrStep<float> output;
int max_elems;
__device__ void operator()() const
{
int idx = WARPS * blockIdx.x + Warp::id();
if (idx >= work_size)
return;
const int *nbeg = gindices.ptr(idx);
int size = sizes[idx];
int idx_shift = max_elems * idx;
for(int i = Warp::laneId(); i < size; i += Warp::STRIDE)
{
int cloud_index = nbeg[i];
float3 p;
if (pack_rgb)
{
int color;
p = fetchXYZRGB(cloud, cloud_index, color);
output.ptr(6)[i + idx_shift] = __int_as_float(color);
}
else
p = fetch(cloud, cloud_index);
output.ptr(0)[i + idx_shift] = p.x;
output.ptr(1)[i + idx_shift] = p.y;
output.ptr(2)[i + idx_shift] = p.z;
float3 n = fetch(normals, cloud_index);
output.ptr(3)[i + idx_shift] = n.x;
output.ptr(4)[i + idx_shift] = n.y;
output.ptr(5)[i + idx_shift] = n.z;
}
}
template<class It>
__device__ __forceinline__ float3 fetch(It ptr, int index) const
{
//return tr(ptr[index]);
return *(float3*)&ptr[index];
}
__forceinline__ __device__ float3 fetchXYZRGB(const PointXYZRGB* data, int index, int& color) const
{
float4 xyzrgb = data[index];
color = __float_as_int(xyzrgb.w);
return make_float3(xyzrgb.x, xyzrgb.y, xyzrgb.z);
}
};
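// Summary of Repack, added for clarity: one warp processes one query point's
// neighborhood. For neighborhood `idx`, the neighbor attributes are gathered into
// columns [idx * max_elems, idx * max_elems + size) of the scratch buffer: rows 0-2
// hold x/y/z, rows 3-5 hold the normal, and row 6 holds the packed RGB value when
// pack_rgb is true, so the Pfh125 kernel below can address both members of a neighbor
// pair directly by column index.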
template<bool enable_rgb>
struct Pfh125
{
enum
{
CTA_SIZE = 256,
NR_SPLIT = 5,
NR_SPLIT_2 = NR_SPLIT * NR_SPLIT,
NR_SPLIT_3 = NR_SPLIT_2 * NR_SPLIT,
FSize = NR_SPLIT * NR_SPLIT * NR_SPLIT * (enable_rgb ? 2 : 1)
};
std::size_t work_size;
const int* sizes;
PtrStep<float> rpk;
int max_elems;
mutable PtrStep<float> output;
__device__ __forceinline__ void operator()() const
{
int idx = blockIdx.x;
if (idx >= work_size)
return;
int size = sizes[idx];
int size2 = size * size;
int idx_shift = max_elems * idx;
float hist_incr = 100.f / (size2 - 1);
__shared__ float pfh_histogram[FSize];
Block::fill(pfh_histogram, pfh_histogram + FSize, 0.f);
__syncthreads();
// Iterate over all the points in the neighborhood
int i = threadIdx.y * blockDim.x + threadIdx.x;
int stride = Block::stride();
for( ; i < size2; i += stride )
{
int i_idx = i / size + idx_shift;
int j_idx = i % size + idx_shift;
if (i_idx != j_idx)
{
float3 pi, ni, pj, nj;
pi.x = rpk.ptr(0)[i_idx];
pj.x = rpk.ptr(0)[j_idx];
pi.y = rpk.ptr(1)[i_idx];
pj.y = rpk.ptr(1)[j_idx];
pi.z = rpk.ptr(2)[i_idx];
pj.z = rpk.ptr(2)[j_idx];
ni.x = rpk.ptr(3)[i_idx];
nj.x = rpk.ptr(3)[j_idx];
ni.y = rpk.ptr(4)[i_idx];
nj.y = rpk.ptr(4)[j_idx];
ni.z = rpk.ptr(5)[i_idx];
nj.z = rpk.ptr(5)[j_idx];
float f1, f2, f3, f4;
// Compute the pair NNi to NNj
computePairFeatures (pi, ni, pj, nj, f1, f2, f3, f4);
//if (computePairFeatures (pi, ni, pj, nj, f1, f2, f3, f4))
{
// Normalize the f1, f2, f3 features and push them in the histogram
//Using floorf due to changes to MSVC 16.9. See details here: https://devtalk.blender.org/t/cuda-compile-error-windows-10/17886/4
//floorf is without std:: see why here: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79700
int find0 = floorf( NR_SPLIT * ((f1 + PI) * (1.f / (2.f * PI))) );
find0 = min(NR_SPLIT - 1, max(0, find0));
int find1 = floorf( NR_SPLIT * ( (f2 + 1.f) * 0.5f ) );
find1 = min(NR_SPLIT - 1, max(0, find1));
int find2 = floorf( NR_SPLIT * ( (f3 + 1.f) * 0.5f ) );
find2 = min(NR_SPLIT - 1, max(0, find2));
int h_index = find0 + NR_SPLIT * find1 + NR_SPLIT_2 * find2;
atomicAdd(pfh_histogram + h_index, hist_incr);
if (enable_rgb)
{
int ci = __float_as_int(rpk.ptr(6)[i_idx]);
int cj = __float_as_int(rpk.ptr(6)[j_idx]);
float f5, f6, f7;
computeRGBPairFeatures_RGBOnly(ci, cj, f5, f6, f7);
// color ratios are in [-1, 1]
int find4 = floorf(NR_SPLIT * ((f5 + 1.f) * 0.5f));
find4 = min(NR_SPLIT - 1, max(0, find4));
int find5 = floorf(NR_SPLIT * ((f6 + 1.f) * 0.5f));
find5 = min(NR_SPLIT - 1, max(0, find5));
int find6 = floorf(NR_SPLIT * ((f7 + 1.f) * 0.5f));
find6 = min(NR_SPLIT - 1, max(0, find6));
// and the colors
h_index = NR_SPLIT_3 + find4 + NR_SPLIT * find5 + NR_SPLIT_2 * find6;
atomicAdd(pfh_histogram + h_index, hist_incr);
}
}
}
}
__syncthreads();
Block::copy(pfh_histogram, pfh_histogram + FSize, output.ptr(idx));
}
template<class It>
__device__ __forceinline__ float3 fetch(It ptr, int index) const
{
//return tr(ptr[index]);
return *(float3*)&ptr[index];
}
};
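// Summary of Pfh125, added for clarity: one thread block handles one query point. The
// block enumerates all size * size ordered neighbor-pair indices (skipping i == j),
// computes the angular pair features f1..f3, quantizes each into NR_SPLIT = 5 bins, and
// accumulates a shared-memory histogram of 5 * 5 * 5 = 125 bins (plus a second 125 bins
// from the RGB ratio features when enable_rgb is true). Each contribution is weighted by
// 100 / (size^2 - 1) before the histogram is copied to the output row for that point.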
__global__ void repackKernel(const Repack<false> repack) { repack(); }
__global__ void pfhKernel(const Pfh125<false> pfh125) { pfh125(); }
}
}
void pcl::device::repackToAosForPfh(const PointCloud& cloud, const Normals& normals, const NeighborIndices& neighbours, DeviceArray2D<float>& data_rpk, int& max_elems_rpk)
{
max_elems_rpk = (neighbours.max_elems/32 + 1) * 32;
data_rpk.create(6, (int)neighbours.sizes.size() * max_elems_rpk);
Repack<false> rpk;
rpk.sizes = neighbours.sizes;
rpk.gindices = neighbours;
rpk.cloud = cloud;
rpk.normals = normals;
rpk.work_size = (int)neighbours.sizes.size();
rpk.output = data_rpk;
rpk.max_elems = max_elems_rpk;
int block = Repack<false>::CTA_SIZE;
int grid = divUp(rpk.work_size, Repack<false>::WARPS);
hipLaunchKernelGGL(( device::repackKernel), dim3(grid), dim3(block), 0, 0, rpk);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
//printFuncAttrib(repackKernel);
}
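// Launch shape above, noted for clarity: Repack uses CTA_SIZE = 256 threads per block,
// i.e. 8 warps, with one warp per neighborhood, so the grid is
// divUp(number_of_neighborhoods, 8) blocks.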
void pcl::device::computePfh125(const DeviceArray2D<float>& data_rpk, int max_elems_rpk, const NeighborIndices& neighbours, DeviceArray2D<PFHSignature125>& features)
{
Pfh125<false> fph;
fph.work_size = neighbours.sizes.size();
fph.sizes = neighbours.sizes;
fph.rpk = data_rpk;
fph.max_elems = max_elems_rpk;
fph.output = features;
int block = Pfh125<false>::CTA_SIZE;
int grid = (int)fph.work_size;
hipLaunchKernelGGL(( device::pfhKernel), dim3(grid), dim3(block), 0, 0, fph);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
//printFuncAttrib(pfhKernel);
}
namespace pcl
{
namespace device
{
__global__ void repackRgbKernel(const Repack<true> repack) { repack(); }
__global__ void pfhRgbKernel(const Pfh125<true> pfhrgb125) { pfhrgb125(); }
}
}
void pcl::device::repackToAosForPfhRgb(const PointCloud& cloud, const Normals& normals, const NeighborIndices& neighbours, DeviceArray2D<float>& data_rpk, int& max_elems_rpk)
{
max_elems_rpk = (neighbours.max_elems/32 + 1) * 32;
data_rpk.create(7, (int)neighbours.sizes.size() * max_elems_rpk);
Repack<true> rpk;
rpk.sizes = neighbours.sizes;
rpk.gindices = neighbours;
rpk.cloud = cloud;
rpk.normals = normals;
rpk.work_size = (int)neighbours.sizes.size();
rpk.output = data_rpk;
rpk.max_elems = max_elems_rpk;
int block = Repack<true>::CTA_SIZE;
int grid = divUp(rpk.work_size, Repack<true>::WARPS);
hipLaunchKernelGGL(( device::repackRgbKernel), dim3(grid), dim3(block), 0, 0, rpk);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
//printFuncAttrib(repackRgbKernel);
}
void pcl::device::computePfhRgb250(const DeviceArray2D<float>& data_rpk, int max_elems_rpk, const NeighborIndices& neighbours, DeviceArray2D<PFHRGBSignature250>& features)
{
Pfh125<true> pfhrgb;
pfhrgb.work_size = neighbours.sizes.size();
pfhrgb.sizes = neighbours.sizes;
pfhrgb.rpk = data_rpk;
pfhrgb.max_elems = max_elems_rpk;
pfhrgb.output = features;
int block = Pfh125<true>::CTA_SIZE;
int grid = (int)pfhrgb.work_size;
hipLaunchKernelGGL(( device::pfhRgbKernel), dim3(grid), dim3(block), 0, 0, pfhrgb);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
//printFuncAttrib(pfhRgbKernel);
} | 9036aaa3e13589f244a92ca7b41f47e728f2a86f.cu | /*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "internal.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "pcl/gpu/utils/device/block.hpp"
#include "utils/vector_operations.hpp"
#include "pcl/gpu/utils/device/funcattrib.hpp"
#include "pcl/gpu/features/device/pair_features.hpp"
using namespace pcl::gpu;
namespace pcl
{
namespace device
{
template<bool pack_rgb>
struct Repack
{
enum
{
CTA_SIZE = 256,
WARPS = CTA_SIZE/Warp::WARP_SIZE
};
const PointType* cloud;
const NormalType* normals;
int work_size;
PtrStep<int> gindices;
const int* sizes;
mutable PtrStep<float> output;
int max_elems;
__device__ void operator()() const
{
int idx = WARPS * blockIdx.x + Warp::id();
if (idx >= work_size)
return;
const int *nbeg = gindices.ptr(idx);
int size = sizes[idx];
int idx_shift = max_elems * idx;
for(int i = Warp::laneId(); i < size; i += Warp::STRIDE)
{
int cloud_index = nbeg[i];
float3 p;
if (pack_rgb)
{
int color;
p = fetchXYZRGB(cloud, cloud_index, color);
output.ptr(6)[i + idx_shift] = __int_as_float(color);
}
else
p = fetch(cloud, cloud_index);
output.ptr(0)[i + idx_shift] = p.x;
output.ptr(1)[i + idx_shift] = p.y;
output.ptr(2)[i + idx_shift] = p.z;
float3 n = fetch(normals, cloud_index);
output.ptr(3)[i + idx_shift] = n.x;
output.ptr(4)[i + idx_shift] = n.y;
output.ptr(5)[i + idx_shift] = n.z;
}
}
template<class It>
__device__ __forceinline__ float3 fetch(It ptr, int index) const
{
//return tr(ptr[index]);
return *(float3*)&ptr[index];
}
__forceinline__ __device__ float3 fetchXYZRGB(const PointXYZRGB* data, int index, int& color) const
{
float4 xyzrgb = data[index];
color = __float_as_int(xyzrgb.w);
return make_float3(xyzrgb.x, xyzrgb.y, xyzrgb.z);
}
};
template<bool enable_rgb>
struct Pfh125
{
enum
{
CTA_SIZE = 256,
NR_SPLIT = 5,
NR_SPLIT_2 = NR_SPLIT * NR_SPLIT,
NR_SPLIT_3 = NR_SPLIT_2 * NR_SPLIT,
FSize = NR_SPLIT * NR_SPLIT * NR_SPLIT * (enable_rgb ? 2 : 1)
};
std::size_t work_size;
const int* sizes;
PtrStep<float> rpk;
int max_elems;
mutable PtrStep<float> output;
__device__ __forceinline__ void operator()() const
{
int idx = blockIdx.x;
if (idx >= work_size)
return;
int size = sizes[idx];
int size2 = size * size;
int idx_shift = max_elems * idx;
float hist_incr = 100.f / (size2 - 1);
__shared__ float pfh_histogram[FSize];
Block::fill(pfh_histogram, pfh_histogram + FSize, 0.f);
__syncthreads();
// Iterate over all the points in the neighborhood
int i = threadIdx.y * blockDim.x + threadIdx.x;
int stride = Block::stride();
for( ; i < size2; i += stride )
{
int i_idx = i / size + idx_shift;
int j_idx = i % size + idx_shift;
if (i_idx != j_idx)
{
float3 pi, ni, pj, nj;
pi.x = rpk.ptr(0)[i_idx];
pj.x = rpk.ptr(0)[j_idx];
pi.y = rpk.ptr(1)[i_idx];
pj.y = rpk.ptr(1)[j_idx];
pi.z = rpk.ptr(2)[i_idx];
pj.z = rpk.ptr(2)[j_idx];
ni.x = rpk.ptr(3)[i_idx];
nj.x = rpk.ptr(3)[j_idx];
ni.y = rpk.ptr(4)[i_idx];
nj.y = rpk.ptr(4)[j_idx];
ni.z = rpk.ptr(5)[i_idx];
nj.z = rpk.ptr(5)[j_idx];
float f1, f2, f3, f4;
// Compute the pair NNi to NNj
computePairFeatures (pi, ni, pj, nj, f1, f2, f3, f4);
//if (computePairFeatures (pi, ni, pj, nj, f1, f2, f3, f4))
{
// Normalize the f1, f2, f3 features and push them in the histogram
//Using floorf due to changes to MSVC 16.9. See details here: https://devtalk.blender.org/t/cuda-compile-error-windows-10/17886/4
//floorf is without std:: see why here: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79700
int find0 = floorf( NR_SPLIT * ((f1 + PI) * (1.f / (2.f * PI))) );
find0 = min(NR_SPLIT - 1, max(0, find0));
int find1 = floorf( NR_SPLIT * ( (f2 + 1.f) * 0.5f ) );
find1 = min(NR_SPLIT - 1, max(0, find1));
int find2 = floorf( NR_SPLIT * ( (f3 + 1.f) * 0.5f ) );
find2 = min(NR_SPLIT - 1, max(0, find2));
int h_index = find0 + NR_SPLIT * find1 + NR_SPLIT_2 * find2;
atomicAdd(pfh_histogram + h_index, hist_incr);
if (enable_rgb)
{
int ci = __float_as_int(rpk.ptr(6)[i_idx]);
int cj = __float_as_int(rpk.ptr(6)[j_idx]);
float f5, f6, f7;
computeRGBPairFeatures_RGBOnly(ci, cj, f5, f6, f7);
// color ratios are in [-1, 1]
int find4 = floorf(NR_SPLIT * ((f5 + 1.f) * 0.5f));
find4 = min(NR_SPLIT - 1, max(0, find4));
int find5 = floorf(NR_SPLIT * ((f6 + 1.f) * 0.5f));
find5 = min(NR_SPLIT - 1, max(0, find5));
int find6 = floorf(NR_SPLIT * ((f7 + 1.f) * 0.5f));
find6 = min(NR_SPLIT - 1, max(0, find6));
// and the colors
h_index = NR_SPLIT_3 + find4 + NR_SPLIT * find5 + NR_SPLIT_2 * find6;
atomicAdd(pfh_histogram + h_index, hist_incr);
}
}
}
}
__syncthreads();
Block::copy(pfh_histogram, pfh_histogram + FSize, output.ptr(idx));
}
template<class It>
__device__ __forceinline__ float3 fetch(It ptr, int index) const
{
//return tr(ptr[index]);
return *(float3*)&ptr[index];
}
};
__global__ void repackKernel(const Repack<false> repack) { repack(); }
__global__ void pfhKernel(const Pfh125<false> pfh125) { pfh125(); }
}
}
void pcl::device::repackToAosForPfh(const PointCloud& cloud, const Normals& normals, const NeighborIndices& neighbours, DeviceArray2D<float>& data_rpk, int& max_elems_rpk)
{
max_elems_rpk = (neighbours.max_elems/32 + 1) * 32;
data_rpk.create(6, (int)neighbours.sizes.size() * max_elems_rpk);
Repack<false> rpk;
rpk.sizes = neighbours.sizes;
rpk.gindices = neighbours;
rpk.cloud = cloud;
rpk.normals = normals;
rpk.work_size = (int)neighbours.sizes.size();
rpk.output = data_rpk;
rpk.max_elems = max_elems_rpk;
int block = Repack<false>::CTA_SIZE;
int grid = divUp(rpk.work_size, Repack<false>::WARPS);
device::repackKernel<<<grid, block>>>(rpk);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
//printFuncAttrib(repackKernel);
}
void pcl::device::computePfh125(const DeviceArray2D<float>& data_rpk, int max_elems_rpk, const NeighborIndices& neighbours, DeviceArray2D<PFHSignature125>& features)
{
Pfh125<false> fph;
fph.work_size = neighbours.sizes.size();
fph.sizes = neighbours.sizes;
fph.rpk = data_rpk;
fph.max_elems = max_elems_rpk;
fph.output = features;
int block = Pfh125<false>::CTA_SIZE;
int grid = (int)fph.work_size;
device::pfhKernel<<<grid, block>>>(fph);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
//printFuncAttrib(pfhKernel);
}
namespace pcl
{
namespace device
{
__global__ void repackRgbKernel(const Repack<true> repack) { repack(); }
__global__ void pfhRgbKernel(const Pfh125<true> pfhrgb125) { pfhrgb125(); }
}
}
void pcl::device::repackToAosForPfhRgb(const PointCloud& cloud, const Normals& normals, const NeighborIndices& neighbours, DeviceArray2D<float>& data_rpk, int& max_elems_rpk)
{
max_elems_rpk = (neighbours.max_elems/32 + 1) * 32;
data_rpk.create(7, (int)neighbours.sizes.size() * max_elems_rpk);
Repack<true> rpk;
rpk.sizes = neighbours.sizes;
rpk.gindices = neighbours;
rpk.cloud = cloud;
rpk.normals = normals;
rpk.work_size = (int)neighbours.sizes.size();
rpk.output = data_rpk;
rpk.max_elems = max_elems_rpk;
int block = Repack<true>::CTA_SIZE;
int grid = divUp(rpk.work_size, Repack<true>::WARPS);
device::repackRgbKernel<<<grid, block>>>(rpk);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
//printFuncAttrib(repackRgbKernel);
}
void pcl::device::computePfhRgb250(const DeviceArray2D<float>& data_rpk, int max_elems_rpk, const NeighborIndices& neighbours, DeviceArray2D<PFHRGBSignature250>& features)
{
Pfh125<true> pfhrgb;
pfhrgb.work_size = neighbours.sizes.size();
pfhrgb.sizes = neighbours.sizes;
pfhrgb.rpk = data_rpk;
pfhrgb.max_elems = max_elems_rpk;
pfhrgb.output = features;
int block = Pfh125<true>::CTA_SIZE;
int grid = (int)pfhrgb.work_size;
device::pfhRgbKernel<<<grid, block>>>(pfhrgb);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
//printFuncAttrib(pfhRgbKernel);
} |
83adac0b87efe4ed3774a717b50cebc0cbd684c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <text/subword/detail/hash_utils.cuh>
#include <text/subword/detail/tokenizer_utils.cuh>
#include <text/subword/detail/wordpiece_tokenizer.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/utilities/error.hpp>
#include <nvtext/subword_tokenize.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/for_each.h>
#include <thrust/remove.h>
#include <thrust/transform_scan.h>
namespace nvtext {
namespace detail {
namespace {
/**
* @brief Initializes the token-ids, word-indices, and token counts vectors.
*
* Each thread process a single code point from `code_points`.
* This also locates the start and end of each word within the `code_points` buffer.
* A word start is identified as a non-space character that appears right after a space.
* A word end is identified as a space character that appears right after a non-space one.
* If the code point at this thread does not represent a word start or word end,
* a max uint32_t value is written to the appropriate vector instead.
* A post processing step is required to filter the relevant values in these
* vectors.
*
* It is guaranteed that the same number of valid values will be written to both the
* start and end indices and that after the select step, the two arrays will be aligned.
* That is, `start_word_indices[word]` and `end_word_indices[word]` are the start and
* end for the same word.
*
* Memory required is 13 bytes per code point values:
* - 4 bytes each for `start_word_indices` and `end_word_indices`
* - 4 bytes for each `token_ids`
* - 1 byte for each `tokens_per_word`
* Also, there is a code point value for each byte in the input strings.
*
* @param code_points[in] A pointer to the code points in the strings after normalization.
* @param start_word_indices[out] An array of size `num_code_points` which will contain the
* starting index for each word.
* @param end_word_indices[out] An array of size `num_code_points` which will contain the
* ending index for each word.
* @param num_code_points The total number of code_points.
* @param token_ids[out] An array of size `num_code_points` which will hold the token ids.
* This kernel just sets all the values to max uint32_t.
* @param tokens_per_word[out] An array of size `num_code_points` which will hold the number of
* tokens. This kernel just sets all the values to 0.
*/
__global__ void init_data_and_mark_word_start_and_ends(uint32_t const* code_points,
uint32_t* start_word_indices,
uint32_t* end_word_indices,
size_t num_code_points,
uint32_t* token_ids,
uint8_t* tokens_per_word)
{
uint32_t char_for_thread = blockDim.x * blockIdx.x + threadIdx.x;
// Deal with the start_word_indices array
if (char_for_thread < num_code_points) {
uint32_t val_to_write = std::numeric_limits<uint32_t>::max();
if ((code_points[char_for_thread] != SPACE_CODE_POINT) && (char_for_thread > 0) &&
(code_points[char_for_thread - 1] == SPACE_CODE_POINT)) {
val_to_write = char_for_thread;
}
start_word_indices[char_for_thread] = val_to_write;
// Deal with the end_word_indices_array
val_to_write = std::numeric_limits<uint32_t>::max();
if ((code_points[char_for_thread] != SPACE_CODE_POINT) &&
(char_for_thread + 1 < num_code_points) &&
(code_points[char_for_thread + 1] == SPACE_CODE_POINT)) {
val_to_write = char_for_thread + 1;
}
end_word_indices[char_for_thread] = val_to_write;
token_ids[char_for_thread] = std::numeric_limits<uint32_t>::max();
tokens_per_word[char_for_thread] = 0;
}
}
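// Worked example, added for clarity: for the code points "ab c" (indices 0..3 with a
// space at index 2), this kernel records a word-end value of 2 (one past 'b') and a
// word-start value of 3 ('c'). The string-boundary kernel below then contributes the
// start value 0 and the end value 4, because starts and ends at string boundaries
// cannot be detected from a neighboring code point alone.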
/**
* @brief Resolves the string boundaries for the start and end words.
*
* This kernel should be called after `mark_word_start_and_ends` with at
* least `num_strings` total threads.
*
* The start and end indices are updated to honor the string boundaries
* within the strings array. This corrects any word ranges that span across
* individual strings.
*
* @param code_points A pointer to the code points in the strings.
* @param strings_offsets An array containing the index of the starting character of each string
* with an extra space at the end containing the total number of characters. As a result,
* this array is of length num_strings + 1.
* @param start_word_indices An array which will contain the starting index for each word scattered
* throughout. If an index does not represent a word start, the max-uint32_t value is written
* to indicate this.
* @param end_word_indices An array which will contain the one past the end index for each word
* scattered throughout. If an index does not represent a word end, the max uint32_t value is
* written to indicate this.
* @param num_strings The total number of strings to be processed.
*/
__global__ void mark_string_start_and_ends(uint32_t const* code_points,
uint32_t const* strings_offsets,
uint32_t* start_word_indices,
uint32_t* end_word_indices,
uint32_t num_strings)
{
uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x;
// Ensure the starting character of each strings is written to the word start array.
if (idx <= num_strings) {
auto const offset = strings_offsets[idx];
if ((idx < num_strings) && (code_points[offset] != SPACE_CODE_POINT)) {
start_word_indices[offset] = offset;
}
if ((offset > 0) && (code_points[offset - 1] != SPACE_CODE_POINT)) {
end_word_indices[offset - 1] = offset;
}
}
}
/**
* @brief Converts words into token ids.
*
* Each thread is assigned a word to convert based on the `hash_table`. Each thread converts
* its word and writes the number of tokens it found in the `tokens_per_word` array.
*
* The `tokens_per_word` array is kept to the length `num_code_points + 1`. This means each thread
* can write its number of tokens to the `tokens_per_word` corresponding to the starting
* character of each word. Since strings must start at some word, we can prefix sum this array
* and use the strings_lengths code point offsets to directly index the number of tokens in each
* string.
*
* The `token_ids` array should be initialized to the max uint32_t before calling this kernel.
*
* @param code_points An array containing all of the code points to be processed
* @param hash_table An array containing the flattened hash table with key, value pairs
* packed in 64-bits
* @param bin_coefficients A pointer to the GPU pointer containing the hashing parameters for
* each hash bin on the GPU.
* @param bin_offsets: A pointer to the GPU pointer containing the start index of each bin in
* the flattened hash table.
* @param token_ids The index for each token found during tokenization. This is of length
* num_code_points. In most cases, multiple characters will collapse to one token. In these
* cases, the max uint32_t will be in place. Cub will be used later to filter out these
* invalid ids later.
* @param word_starts An array of length `num_code_points`. The first total_words elements contain
* the index of the first character for each word.
* @param word_ends An array of length num_code_points. The first total_words elements contains the
* past the end index for each word. This array is kept aligned with the initial
* token_ids array containing the word start code points.
* `word_ends[word] - filtered_start_indices[word] = word_length`
* @param tokens_per_word An array of size num_code_points that will contain the number of tokens in
* each word in a string. This array can be exclusive summed and the result used in
* conjunction with the strings lengths array to find the tokens in each string. This is
* possible since the number of tokens in each word will be placed at the index corresponding
* to the start character of a word. If we assume prefix_summed is the prefix sum of the
* tokens_per_word array, then `prefix_summed[strings_lengths[string_idx] - 1]` is the number
* of tokens found before the start of string.
* @param unk_token_id The token id to be place for unknown tokens
* @param max_word_length The maximum length of a word. Any word longer than this length is
* replaced by the unknown token.
* @param total_words The total number of white space separated words
* @param outer_hash_a_param The a parameter for the outer hash
* @param outer_hash_b_param: The b parameter for the outer hash
* @param num_outer_bins: The number of bins for the outer hash
*/
__global__ void kernel_wordpiece_tokenizer(uint32_t const* code_points,
uint64_t const* hash_table,
uint64_t const* bin_coefficients,
uint16_t const* bin_offsets,
uint16_t unk_token_id,
uint32_t outer_hash_a_param,
uint32_t outer_hash_b_param,
uint16_t num_outer_bins,
uint32_t const* word_starts,
uint32_t const* word_ends,
uint32_t max_word_length,
uint32_t total_words,
uint32_t* token_ids,
uint8_t* tokens_per_word)
{
uint32_t const word_to_tokenize = blockDim.x * blockIdx.x + threadIdx.x;
if (word_to_tokenize >= total_words) return;
// Each thread gets the start code_point offset for each word and resets the token_id memory to
// the default value. In a post processing step, all of these values will be removed.
auto const token_start = word_starts[word_to_tokenize];
auto const token_end = word_ends[word_to_tokenize];
auto const word_length = token_end - token_start;
// The sdbm hash of "##"
constexpr uint32_t hashtag_hash = 2296000;
uint16_t num_values_tokenized = 0;
// initialize start, end
uint32_t start = token_start;
uint32_t end = token_end;
if (word_length > max_word_length) {
start = token_end;
num_values_tokenized = 1;
token_ids[token_start] = unk_token_id;
tokens_per_word[token_start] = num_values_tokenized;
}
while (start < token_end) {
end = token_end;
// init token_id to no token
int token_id = -1;
// compute current length
uint32_t const length = token_end - start;
uint64_t substr_hash =
sdbm_hash(code_points + start, length, start == token_start ? 0 : hashtag_hash);
while (start < end) {
token_id = retrieve(substr_hash,
outer_hash_a_param,
outer_hash_b_param,
num_outer_bins,
hash_table,
bin_coefficients,
bin_offsets);
if (token_id != -1) { break; }
--end;
// Pop off the last value from the substr hash
substr_hash = prev_sdbm_hash(substr_hash, code_points[end]);
}
if (token_id == -1) {
end = token_end;
token_id = unk_token_id;
// We need to clean up the global array. This case is very uncommon.
// Only 0.016% of words cannot be resolved to a token from the squad dev set.
for (uint32_t i = 1; i < num_values_tokenized; ++i) {
token_ids[token_start + i] = std::numeric_limits<uint32_t>::max();
}
num_values_tokenized = 0;
}
token_ids[token_start + num_values_tokenized] = token_id;
++num_values_tokenized;
start = end;
}
tokens_per_word[token_start] = num_values_tokenized;
}
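// Illustrative walk-through of the greedy longest-match loop above, with a hypothetical
// vocabulary containing "un" and "##able":
//   word = "unable"
//   outer pass 1: hash "unable", "unabl", ... shrinking `end` until "un" matches
//                 -> token_ids[token_start + 0] = id("un"), start advances to end
//   outer pass 2: the remainder is hashed with the "##" seed, so lookups correspond to
//                 "##able", "##abl", ... -> token_ids[token_start + 1] = id("##able")
// If no prefix of a pass matches at all, the whole word collapses to a single unk_token_id.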
} // namespace
wordpiece_tokenizer::wordpiece_tokenizer(hashed_vocabulary const& vocab_table,
uint32_t max_rows_final_tensor,
uint32_t max_sequence_length,
uint32_t stride,
bool do_truncate,
bool do_lower_case,
rmm::cuda_stream_view stream,
uint32_t max_word_length)
: vocab_table(vocab_table),
normalizer(stream, do_lower_case),
max_sequence_length{max_sequence_length},
stride(stride),
do_truncate(do_truncate),
max_word_length{max_word_length}
{
}
uvector_pair wordpiece_tokenizer::tokenize(char const* d_strings,
uint32_t const* d_offsets,
uint32_t num_strings,
rmm::cuda_stream_view stream)
{
auto cps_and_offsets = normalizer.normalize(d_strings, d_offsets, num_strings, stream);
tokenize(cps_and_offsets, stream);
return uvector_pair(std::move(cps_and_offsets.first), std::move(cps_and_offsets.second));
}
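// Hypothetical usage sketch (caller-side names below are illustrative, not defined here):
//   wordpiece_tokenizer tokenizer(vocab, max_rows, max_seq_len, stride,
//                                 /*do_truncate=*/true, /*do_lower_case=*/true,
//                                 stream, /*max_word_length=*/200);
//   auto ids_and_offsets = tokenizer.tokenize(d_chars, d_offsets, num_strings, stream);
//   // first vector: buffer now holding the contiguous token ids (repurposed code points),
//   // second vector: per-string offsets rewritten in terms of tokens.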
struct copy_if_fn { // inline lambda not allowed in private or protected member function
__device__ bool operator()(uint32_t cp) { return cp != std::numeric_limits<uint32_t>::max(); }
};
struct tranform_fn { // just converting uint8 value to uint32
__device__ uint32_t operator()(uint8_t count) { return count; }
};
void wordpiece_tokenizer::tokenize(uvector_pair& cps_and_offsets, rmm::cuda_stream_view stream)
{
uint32_t* device_code_points = cps_and_offsets.first->data();
size_t const num_code_points = cps_and_offsets.first->size();
uint32_t* device_strings_offsets = cps_and_offsets.second->data();
uint32_t const num_strings = cps_and_offsets.second->size() - 1;
const size_t four_byte_cp_chunks = 1 + (num_code_points - 1) / sizeof(uint32_t);
const size_t rounded_num_cps = sizeof(uint32_t) * four_byte_cp_chunks;
rmm::device_uvector<uint8_t> device_tokens_per_word(rounded_num_cps, stream);
rmm::device_uvector<uint32_t> device_token_ids(num_code_points, stream);
rmm::device_uvector<uint32_t> device_word_indices(2 * num_code_points, stream);
// make device_start_word_indices and device_end_word_indices contiguous
uint32_t* device_start_word_indices = device_word_indices.data();
uint32_t* device_end_word_indices = device_start_word_indices + num_code_points;
cudf::detail::grid_1d const grid_init{static_cast<cudf::size_type>(num_code_points),
THREADS_PER_BLOCK};
hipLaunchKernelGGL(( detail::init_data_and_mark_word_start_and_ends), dim3(grid_init.num_blocks),
dim3(grid_init.num_threads_per_block),
0,
stream.value(), device_code_points,
device_start_word_indices,
device_end_word_indices,
num_code_points,
device_token_ids.data(),
device_tokens_per_word.data());
CHECK_CUDA(stream.value());
cudf::detail::grid_1d const grid_mark{static_cast<cudf::size_type>(num_strings + 1),
THREADS_PER_BLOCK};
hipLaunchKernelGGL(( detail::mark_string_start_and_ends), dim3(grid_mark.num_blocks),
dim3(grid_mark.num_threads_per_block),
0,
stream.value(), device_code_points,
device_strings_offsets,
device_start_word_indices,
device_end_word_indices,
num_strings);
CHECK_CUDA(stream.value());
// Now start_word_indices has the word starts scattered throughout the array. We need to select
// all values not equal to the max uint32_t and place them at the start of the array. We leverage
// the fact that the start_word_indices and the end_word indices are contiguous to only launch one
// device select kernel.
auto const execpol = rmm::exec_policy(stream);
auto itr_end = thrust::remove(execpol->on(stream.value()),
device_word_indices.begin(),
device_word_indices.end(),
std::numeric_limits<uint32_t>::max());
// The number of tokens selected will be double the number of words since we
// select from both the start and end index arrays.
uint32_t const num_words = thrust::distance(device_word_indices.begin(), itr_end) / 2;
// We need to change the end_word_indices pointer after the selection is complete
device_end_word_indices = device_start_word_indices + num_words;
cudf::detail::grid_1d const grid{static_cast<cudf::size_type>(num_words), THREADS_PER_BLOCK};
hipLaunchKernelGGL((detail::kernel_wordpiece_tokenizer), dim3(grid.num_blocks), dim3(grid.num_threads_per_block), 0, stream.value(),
device_code_points,
vocab_table.table->view().data<uint64_t>(),
vocab_table.bin_coefficients->view().data<uint64_t>(),
vocab_table.bin_offsets->view().data<uint16_t>(),
vocab_table.unknown_token_id,
vocab_table.outer_hash_a,
vocab_table.outer_hash_b,
vocab_table.num_bins,
device_start_word_indices,
device_end_word_indices,
max_word_length,
num_words,
device_token_ids.data(),
device_tokens_per_word.data());
CHECK_CUDA(stream.value());
// Repurpose the input array for the token ids. In the worst case, each code point ends up being a
// token so this will always have enough memory to store the contiguous tokens.
uint32_t* contiguous_token_ids = device_code_points;
thrust::copy_if(execpol->on(stream.value()),
device_token_ids.begin(),
device_token_ids.end(),
contiguous_token_ids,
copy_if_fn{});
// Repurpose start word indices since it is the same size and type as the required output.
uint32_t* token_id_counts = device_start_word_indices;
thrust::transform_inclusive_scan(execpol->on(stream.value()),
device_tokens_per_word.data(),
device_tokens_per_word.data() + num_code_points,
token_id_counts,
tranform_fn{},
thrust::plus<uint32_t>());
// Update the device_strings_offsets using the token_id_counts
thrust::for_each_n(rmm::exec_policy(stream)->on(stream.value()),
thrust::make_counting_iterator<uint32_t>(1),
num_strings,
update_strings_lengths_fn{token_id_counts, device_strings_offsets});
}
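// Worked example of the tokens_per_word scan above (illustrative values):
//   tokens_per_word : [2, 0, 0, 0, 1, 0, 0]   (counts live at each word-start code point)
//   inclusive scan  : [2, 2, 2, 2, 3, 3, 3]   (token_id_counts)
// For a string whose code points begin at offset 4, token_id_counts[4 - 1] == 2, i.e. two
// tokens were emitted before that string starts; update_strings_lengths_fn (defined in the
// tokenizer utilities) uses these values to rewrite device_strings_offsets in terms of tokens.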
} // namespace detail
} // namespace nvtext
| 83adac0b87efe4ed3774a717b50cebc0cbd684c2.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <text/subword/detail/hash_utils.cuh>
#include <text/subword/detail/tokenizer_utils.cuh>
#include <text/subword/detail/wordpiece_tokenizer.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/utilities/error.hpp>
#include <nvtext/subword_tokenize.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/for_each.h>
#include <thrust/remove.h>
#include <thrust/transform_scan.h>
namespace nvtext {
namespace detail {
namespace {
/**
* @brief Initializes the token-ids, word-indices, and token counts vectors.
*
* Each thread process a single code point from `code_points`.
* This also locates the start and end of each word within the `code_points` buffer.
* A word start is identified as a non-space character that appears right after a space.
* A word end is identified as a space character that appears right after a non-space one.
* If the code point at this thread does not represent a word start or word end,
* a max uint32_t value is written to the appropriate vector instead.
* A post processing step is required to filter the relevant values in these
* vectors.
*
* It is guaranteed that the same number of valid values will be written to both the
* start and end indices and that after the select step, the two arrays will be aligned.
* That is, `start_word_indices[word]` and `end_word_indices[word]` are the start and
* end for the same word.
*
* Memory required is 13 bytes per code point:
* - 4 bytes each for `start_word_indices` and `end_word_indices`
* - 4 bytes for each `token_ids`
* - 1 byte for each `tokens_per_word`
* Also, there is a code point value for each byte in the input strings.
*
* @param code_points[in] A pointer to the code points in the strings after normalization.
* @param start_word_indices[out] An array of size `num_code_points` which will contain the
* starting index for each word.
* @param end_word_indices[out] An array of size `num_code_points` which will contain the
* ending index for each word.
* @param num_code_points The total number of code_points.
* @param token_ids[out] An array of size `num_code_points` which will hold the token ids.
* This kernel just sets all the values to max uint32_t.
* @param tokens_per_word[out] An array of size `num_code_points` which hold the number of
* tokens. This kernel just sets all the values to 0.
*/
__global__ void init_data_and_mark_word_start_and_ends(uint32_t const* code_points,
uint32_t* start_word_indices,
uint32_t* end_word_indices,
size_t num_code_points,
uint32_t* token_ids,
uint8_t* tokens_per_word)
{
uint32_t char_for_thread = blockDim.x * blockIdx.x + threadIdx.x;
// Deal with the start_word_indices array
if (char_for_thread < num_code_points) {
uint32_t val_to_write = std::numeric_limits<uint32_t>::max();
if ((code_points[char_for_thread] != SPACE_CODE_POINT) && (char_for_thread > 0) &&
(code_points[char_for_thread - 1] == SPACE_CODE_POINT)) {
val_to_write = char_for_thread;
}
start_word_indices[char_for_thread] = val_to_write;
// Deal with the end_word_indices_array
val_to_write = std::numeric_limits<uint32_t>::max();
if ((code_points[char_for_thread] != SPACE_CODE_POINT) &&
(char_for_thread + 1 < num_code_points) &&
(code_points[char_for_thread + 1] == SPACE_CODE_POINT)) {
val_to_write = char_for_thread + 1;
}
end_word_indices[char_for_thread] = val_to_write;
token_ids[char_for_thread] = std::numeric_limits<uint32_t>::max();
tokens_per_word[char_for_thread] = 0;
}
}
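// Example of the marking scheme above, with code_points = [SP, 'a', 'b', SP, 'c']
// (SP = SPACE_CODE_POINT, "max" = std::numeric_limits<uint32_t>::max()):
//   start_word_indices = [max, 1, max, max, 4 ]   (non-space preceded by a space)
//   end_word_indices   = [max, max, 3, max, max]  (one past a non-space followed by a space)
// The trailing word "c" gets no end marker here because no space follows it;
// mark_string_start_and_ends below patches that up at the string boundary.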
/**
* @brief Resolves the string boundaries for the start and end words.
*
* This kernel should be called after `init_data_and_mark_word_start_and_ends` with at
* least `num_strings + 1` total threads.
*
* The start and end indices are updated to honor the string boundaries
* within the strings array. This corrects any word ranges that span across
* individual strings.
*
* @param code_points A pointer to the code points in the strings.
* @param strings_offsets An array containing the index of the starting character of each string
* with an extra entry at the end containing the total number of characters. As a result,
* this array is of length num_strings + 1.
* @param start_word_indices An array which will contain the starting index for each word scattered
* throughout. If an index does not represent a word start, the max-uint32_t value is written
* to indicate this.
* @param end_word_indices An array which will contain the one past the end index for each word
* scattered throughout. If an index does not represent a word end, the max uint32_t value is
* written to indicate this.
* @param num_strings The total number of strings to be processed.
*/
__global__ void mark_string_start_and_ends(uint32_t const* code_points,
uint32_t const* strings_offsets,
uint32_t* start_word_indices,
uint32_t* end_word_indices,
uint32_t num_strings)
{
uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x;
// Ensure the starting character of each strings is written to the word start array.
if (idx <= num_strings) {
auto const offset = strings_offsets[idx];
if ((idx < num_strings) && (code_points[offset] != SPACE_CODE_POINT)) {
start_word_indices[offset] = offset;
}
if ((offset > 0) && (code_points[offset - 1] != SPACE_CODE_POINT)) {
end_word_indices[offset - 1] = offset;
}
}
}
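// Continuing the example above with strings_offsets = [0, 5]: the thread for idx == 1 reads
// offset 5, sees code_points[4] != SPACE_CODE_POINT and writes end_word_indices[4] = 5,
// closing the word that ends at the string boundary.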
/**
* @brief Converts words into token ids.
*
* Each thread is assigned a word to convert based on the `hash_table`. Each thread converts
* its word and writes the number of tokens it found in the `tokens_per_word` array.
*
* The `tokens_per_word` array is kept to the length `num_code_points + 1`. This means each thread
* can write its number of tokens to the `tokens_per_word` corresponding to the starting
* character of each word. Since strings must start at some word, we can prefix sum this array
* and use the strings_lengths code point offsets to directly index the number of tokens in each
* string.
*
* The `token_ids` array should be initialized to the max uint32_t before calling this kernel.
*
* @param code_points An array containing all of the code points to be processed
* @param hash_table An array containing the flattened hash table with key, value pairs
* packed in 64-bits
* @param bin_coefficients A pointer to the GPU pointer containing the hashing parameters for
* each hash bin on the GPU.
* @param bin_offsets: A pointer to the GPU pointer containing the start index of each bin in
* the flattened hash table.
* @param token_ids The index for each token found during tokenization. This is of length
* num_code_points. In most cases, multiple characters will collapse to one token. In these
* cases, the max uint32_t will be in place. Cub will be used later to filter out these
* invalid ids.
* @param word_starts An array of length `num_code_points`. The first `total_words` elements
* contain the index of the first character of each word.
* @param word_ends An array of length `num_code_points`. The first `total_words` elements
* contain the past-the-end index for each word. This array is kept aligned with the
* word_starts array, so
* `word_ends[word] - word_starts[word] = word_length`
* @param tokens_per_word An array of size num_code_points that will contain the number of tokens in
* each word in a string. This array can be exclusive summed and the result used in
* conjunction with the strings lengths array to find the tokens in each string. This is
* possible since the number of tokens in each word will be placed at the index corresponding
* to the start character of a word. If we assume prefix_summed is the prefix sum of the
* tokens_per_word array, then `prefix_summed[strings_lengths[string_idx] - 1]` is the number
* of tokens found before the start of string.
* @param unk_token_id The token id to be placed for unknown tokens
* @param max_word_length The maximum length of a word. Any word longer than this length is
* replaced by the unknown token.
* @param total_words The total number of white space separated words
* @param outer_hash_a_param The a parameter for the outer hash
* @param outer_hash_b_param The b parameter for the outer hash
* @param num_outer_bins The number of bins for the outer hash
*/
__global__ void kernel_wordpiece_tokenizer(uint32_t const* code_points,
uint64_t const* hash_table,
uint64_t const* bin_coefficients,
uint16_t const* bin_offsets,
uint16_t unk_token_id,
uint32_t outer_hash_a_param,
uint32_t outer_hash_b_param,
uint16_t num_outer_bins,
uint32_t const* word_starts,
uint32_t const* word_ends,
uint32_t max_word_length,
uint32_t total_words,
uint32_t* token_ids,
uint8_t* tokens_per_word)
{
uint32_t const word_to_tokenize = blockDim.x * blockIdx.x + threadIdx.x;
if (word_to_tokenize >= total_words) return;
// Each thread gets the start code_point offset for each word and resets the token_id memory to
// the default value. In a post processing step, all of these values will be removed.
auto const token_start = word_starts[word_to_tokenize];
auto const token_end = word_ends[word_to_tokenize];
auto const word_length = token_end - token_start;
// The sdbm hash of "##"
constexpr uint32_t hashtag_hash = 2296000;
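// For reference: with the usual sdbm recurrence h = c + (h << 6) + (h << 16) - h and
// '#' == 35, hashing "##" from seed 0 gives 35 on the first step and then
// 35 + (35 << 6) + (35 << 16) - 35 == 2296000, matching the constant above
// (assuming sdbm_hash in hash_utils.cuh uses that recurrence).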
uint16_t num_values_tokenized = 0;
// initialize start, end
uint32_t start = token_start;
uint32_t end = token_end;
if (word_length > max_word_length) {
start = token_end;
num_values_tokenized = 1;
token_ids[token_start] = unk_token_id;
tokens_per_word[token_start] = num_values_tokenized;
}
while (start < token_end) {
end = token_end;
// init token_id to no token
int token_id = -1;
// compute current length
uint32_t const length = token_end - start;
uint64_t substr_hash =
sdbm_hash(code_points + start, length, start == token_start ? 0 : hashtag_hash);
while (start < end) {
token_id = retrieve(substr_hash,
outer_hash_a_param,
outer_hash_b_param,
num_outer_bins,
hash_table,
bin_coefficients,
bin_offsets);
if (token_id != -1) { break; }
--end;
// Pop off the last value from the substr hash
substr_hash = prev_sdbm_hash(substr_hash, code_points[end]);
}
if (token_id == -1) {
end = token_end;
token_id = unk_token_id;
// We need to clean up the global array. This case is very uncommon.
// Only 0.016% of words cannot be resolved to a token from the squad dev set.
for (uint32_t i = 1; i < num_values_tokenized; ++i) {
token_ids[token_start + i] = std::numeric_limits<uint32_t>::max();
}
num_values_tokenized = 0;
}
token_ids[token_start + num_values_tokenized] = token_id;
++num_values_tokenized;
start = end;
}
tokens_per_word[token_start] = num_values_tokenized;
}
} // namespace
wordpiece_tokenizer::wordpiece_tokenizer(hashed_vocabulary const& vocab_table,
uint32_t max_rows_final_tensor,
uint32_t max_sequence_length,
uint32_t stride,
bool do_truncate,
bool do_lower_case,
rmm::cuda_stream_view stream,
uint32_t max_word_length)
: vocab_table(vocab_table),
normalizer(stream, do_lower_case),
max_sequence_length{max_sequence_length},
stride(stride),
do_truncate(do_truncate),
max_word_length{max_word_length}
{
}
uvector_pair wordpiece_tokenizer::tokenize(char const* d_strings,
uint32_t const* d_offsets,
uint32_t num_strings,
rmm::cuda_stream_view stream)
{
auto cps_and_offsets = normalizer.normalize(d_strings, d_offsets, num_strings, stream);
tokenize(cps_and_offsets, stream);
return uvector_pair(std::move(cps_and_offsets.first), std::move(cps_and_offsets.second));
}
struct copy_if_fn { // inline lambda not allowed in private or protected member function
__device__ bool operator()(uint32_t cp) { return cp != std::numeric_limits<uint32_t>::max(); }
};
struct tranform_fn { // just converting uint8 value to uint32
__device__ uint32_t operator()(uint8_t count) { return count; }
};
void wordpiece_tokenizer::tokenize(uvector_pair& cps_and_offsets, rmm::cuda_stream_view stream)
{
uint32_t* device_code_points = cps_and_offsets.first->data();
size_t const num_code_points = cps_and_offsets.first->size();
uint32_t* device_strings_offsets = cps_and_offsets.second->data();
uint32_t const num_strings = cps_and_offsets.second->size() - 1;
const size_t four_byte_cp_chunks = 1 + (num_code_points - 1) / sizeof(uint32_t);
const size_t rounded_num_cps = sizeof(uint32_t) * four_byte_cp_chunks;
rmm::device_uvector<uint8_t> device_tokens_per_word(rounded_num_cps, stream);
rmm::device_uvector<uint32_t> device_token_ids(num_code_points, stream);
rmm::device_uvector<uint32_t> device_word_indices(2 * num_code_points, stream);
// make device_start_word_indices and device_end_word_indices contiguous
uint32_t* device_start_word_indices = device_word_indices.data();
uint32_t* device_end_word_indices = device_start_word_indices + num_code_points;
cudf::detail::grid_1d const grid_init{static_cast<cudf::size_type>(num_code_points),
THREADS_PER_BLOCK};
detail::init_data_and_mark_word_start_and_ends<<<grid_init.num_blocks,
grid_init.num_threads_per_block,
0,
stream.value()>>>(device_code_points,
device_start_word_indices,
device_end_word_indices,
num_code_points,
device_token_ids.data(),
device_tokens_per_word.data());
CHECK_CUDA(stream.value());
cudf::detail::grid_1d const grid_mark{static_cast<cudf::size_type>(num_strings + 1),
THREADS_PER_BLOCK};
detail::mark_string_start_and_ends<<<grid_mark.num_blocks,
grid_mark.num_threads_per_block,
0,
stream.value()>>>(device_code_points,
device_strings_offsets,
device_start_word_indices,
device_end_word_indices,
num_strings);
CHECK_CUDA(stream.value());
// Now start_word_indices has the word starts scattered throughout the array. We need to select
// all values not equal to the max uint32_t and place them at the start of the array. We leverage
// the fact that the start_word_indices and the end_word indices are contiguous to only launch one
// device select kernel.
auto const execpol = rmm::exec_policy(stream);
auto itr_end = thrust::remove(execpol->on(stream.value()),
device_word_indices.begin(),
device_word_indices.end(),
std::numeric_limits<uint32_t>::max());
// The number of tokens selected will be double the number of words since we
// select from both the start and end index arrays.
uint32_t const num_words = thrust::distance(device_word_indices.begin(), itr_end) / 2;
// We need to change the end_word_indices pointer after the selection is complete
device_end_word_indices = device_start_word_indices + num_words;
cudf::detail::grid_1d const grid{static_cast<cudf::size_type>(num_words), THREADS_PER_BLOCK};
detail::
kernel_wordpiece_tokenizer<<<grid.num_blocks, grid.num_threads_per_block, 0, stream.value()>>>(
device_code_points,
vocab_table.table->view().data<uint64_t>(),
vocab_table.bin_coefficients->view().data<uint64_t>(),
vocab_table.bin_offsets->view().data<uint16_t>(),
vocab_table.unknown_token_id,
vocab_table.outer_hash_a,
vocab_table.outer_hash_b,
vocab_table.num_bins,
device_start_word_indices,
device_end_word_indices,
max_word_length,
num_words,
device_token_ids.data(),
device_tokens_per_word.data());
CHECK_CUDA(stream.value());
// Repurpose the input array for the token ids. In the worst case, each code point ends up being a
// token so this will always have enough memory to store the contiguous tokens.
uint32_t* contiguous_token_ids = device_code_points;
thrust::copy_if(execpol->on(stream.value()),
device_token_ids.begin(),
device_token_ids.end(),
contiguous_token_ids,
copy_if_fn{});
// Repurpose start word indices since it is the same size and type as the required output.
uint32_t* token_id_counts = device_start_word_indices;
thrust::transform_inclusive_scan(execpol->on(stream.value()),
device_tokens_per_word.data(),
device_tokens_per_word.data() + num_code_points,
token_id_counts,
tranform_fn{},
thrust::plus<uint32_t>());
// Update the device_strings_offsets using the token_id_counts
thrust::for_each_n(rmm::exec_policy(stream)->on(stream.value()),
thrust::make_counting_iterator<uint32_t>(1),
num_strings,
update_strings_lengths_fn{token_id_counts, device_strings_offsets});
}
} // namespace detail
} // namespace nvtext
|
e5f09aba2b1bc69c5f7bddc1f527aa664c330e4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "training/graph_group_multinode.h"
#include "kernels/tensor_operators.h"
#include "graph_group_multinode.h"
namespace marian {
/**
* Set given scheduler to register training observers on the shard optimizers.
*/
void MultiNodeGraphGroup::setScheduler(Ptr<Scheduler> scheduler) {
scheduler_ = scheduler;
// optimizer has to be registered last to see a change of learning rate
scheduler_->registerTrainingObserver(scheduler_);
for (auto opt : shardOptimizers_) {
scheduler_->registerTrainingObserver(opt);
}
}
/**
* Allocate new tensor on given GPU and store allocator.
*/
Tensor MultiNodeGraphGroup::newTensor(int size, int device) {
Tensor t;
Ptr<TensorAllocator> allocator = New<TensorAllocator>(device);
allocator->reserveExact(size * sizeof(float));
allocator->allocate(t, {1, size});
allocators_.push_back(allocator);
return t;
}
/**
* Setup training environment and launch server thread and (if enabled) client communication overlap threads.
* Includes setting up MPI, node and shard sizes, clients, server shards and communication overlap stuff.
*/
void MultiNodeGraphGroup::init(Ptr<data::Batch> batch) {
// Setup clients and shards
setupMPI();
setupClients(batch);
setupServerShards();
if (clientCommOverlap) {
initClientCommOverlapVars();
initClientCommOverlapGpuTensors();
}
// Launch threads
launchServerThread(); // For receiving and processing gradients and sending back parameters
if (clientCommOverlap) {
launchCommOverlapThreads(); // For communicating with server shards while other threads do computations
}
}
/**
* Setup MPI world size and rank of this node.
*/
void MultiNodeGraphGroup::setupMPI() {
#if MPI_FOUND
MPI_Comm_size(MPI_COMM_WORLD, &mpi_comm_world_size_);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_my_rank_);
#endif
}
/**
* Setup clients that will compute gradients and communicate them with the server shards.
* There is one client per GPU.
*/
void MultiNodeGraphGroup::setupClients(Ptr<data::Batch> batch) {
runBatchThroughClientGraphs(batch);
calculateNodeSizes();
initClientCpuBuffers();
if(clientCommOverlap) {
initClientCommOverlapVars();
initClientCommOverlapGpuTensors();
}
clientThreadPool_ = new marian::ThreadPool(devices_.size(), devices_.size());
}
/**
* Initialize the graphs (models) of all clients on this node with the given batch.
*/
void MultiNodeGraphGroup::runBatchThroughClientGraphs(Ptr<data::Batch> batch) {
for(int i = 0; i < devices_.size(); i++) {
THREAD_GUARD(
clientBuilders_[i]->build(clientGraphs_[i], batch);
clientGraphs_[i]->forward();
);
}
hipStreamSynchronize(0);
}
/**
* Calculate the size of each node in the MPI world (cluster).
* Account for the edge case where the last node has fewer parameters because the model size is not perfectly divisible by the number of nodes.
*/
void MultiNodeGraphGroup::calculateNodeSizes() {
size_t modelSize = clientGraphs_[0]->params()->vals()->size();
size_t nodeSize = ceilf(((float) modelSize) / mpi_comm_world_size_);
for (int node = 0; node < mpi_comm_world_size_; node++) {
size_t remainingModelSize = modelSize - (nodeSize * node);
nodeSizes_.push_back(std::min(nodeSize, remainingModelSize)); // Takes care of edge case where last node is smaller than the others
}
}
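// Worked example: modelSize = 10 with mpi_comm_world_size_ = 3 gives
// nodeSize = ceil(10 / 3) = 4 and nodeSizes_ = {4, 4, 2}; the last node simply
// takes whatever remains.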
/**
* Initialize a CPU buffer for each client on this node for storing gradients or parameters.
* Required for sending GPU data through MPI to other nodes (GPU -> CPU -> MPI network).
*/
void MultiNodeGraphGroup::initClientCpuBuffers() {
// Initialize CPU buffers used to send GPU data through MPI (can't send directly from GPUs)
for (int i = 0; i < devices_.size(); i++) {
size_t size = nodeSizes_[mpi_my_rank_]; // @TODO Optimization: Use full size to copy in one go, then send gradients and receive parameters in parallel
clientCommBuffersCPU_.push_back(std::vector<float>(size));
}
}
/**
* Initialize variables required for overlapping client computations and communication.
* Includes summed and committed word counts, buffer flags, mutexes and condition variables.
*/
void MultiNodeGraphGroup::initClientCommOverlapVars() {
clientSummedWordCounts_ = std::vector<size_t>(devices_.size(), 0);
clientCommittedWordCounts_ = std::vector<size_t>(devices_.size(), 0);
clientCommOverlapBuffersFilled_ = std::vector<bool>(devices_.size(), false);
mutexClientCommOverlapBuffersFilled_ = std::vector<std::mutex>{devices_.size()};
cvClientCommOverlapBuffersFilled_ = std::vector<std::condition_variable>(devices_.size());
}
/**
* Initialize GPU tensors required for overlapping client computations and communication.
* Includes secondary buffers for params/grads, buffers for locally summing gradients, and local optimizers to apply received gradients to client parameters.
*/
void MultiNodeGraphGroup::initClientCommOverlapGpuTensors() {
size_t modelSize = clientGraphs_[0]->params()->vals()->size();
for (int client = 0; client < devices_.size(); client++) {
// Communication overlap buffer (for grads + params)
Tensor commOverlapBuffer = newTensor(modelSize, devices_[client]);
commOverlapBuffer->copyFrom(clientGraphs_[0]->params()->vals());
clientCommOverlapBuffersGPU_.push_back(commOverlapBuffer );
// Gradients local sum buffer
Tensor sumGrads = newTensor(modelSize, devices_[client]);
sumGrads->set(0);
clientSummedGradsGPU.push_back(sumGrads);
// Local optimizer to apply summed gradients
clientLocalOptimizers_.push_back(Optimizer(options_)); // => for simple SGD opt: clientLocalOptimizers_.push_back(Optimizer<Sgd>(0.0001, keywords::clip=Clipper<Norm>(1)));
}
}
/**
* Setup server shards that will receive gradients from clients, apply them to their part of the global parameters, and send them back to the same clients.
* There is one server shard per GPU. (Each GPU acts both as a client and as a server shard.)
*/
void MultiNodeGraphGroup::setupServerShards() {
calculateShardSizes();
initShardGpuTensors();
// CPU buffer for receiving/sending grads/params
serverShardBufferCPU_ = std::vector<float>(nodeSizes_[mpi_my_rank_]);
// Shard optimizers
for (int shard = 0; shard < devices_.size(); shard++) {
shardOptimizers_.push_back(Optimizer(options_));
}
// Mutexes to prevent simultaneous access to tensors and/or optimizers
shardMutex_ = std::vector<std::mutex>(devices_.size());
}
/**
* Calculate the size of each shard on this node.
* Account for the edge case where the last shard has fewer parameters because the node size is not perfectly divisible by the number of shards.
*/
void MultiNodeGraphGroup::calculateShardSizes() {
size_t nodeSize = nodeSizes_[mpi_my_rank_];
size_t shardSize = ceilf(((float) nodeSize) / devices_.size());
for (int shard = 0; shard < devices_.size(); shard++) {
size_t remainingNodeSize = nodeSize - (shardSize * shard);
shardSizes_.push_back(std::min(shardSize, remainingNodeSize)); // Takes care of edge case where last shard is smaller than the others
}
}
/**
* Initialize the GPU tensors for storing the parameters and gradients of each server shard.
*/
void MultiNodeGraphGroup::initShardGpuTensors() {
size_t offset = 0;
for (int shard = 0; shard < devices_.size(); shard++) {
Tensor gpuParams = newTensor(shardSizes_[shard], devices_[shard]);
gpuParams->copyFrom(clientGraphs_[0]->params()->vals()->subtensor(offset, shardSizes_[shard]));
shardParams_.push_back(gpuParams);
shardGrads_.push_back(newTensor(shardSizes_[shard], devices_[shard]));
}
}
/**
* Launch independent thread which continually receives gradients assigned to this shard from any client, runs the shard optimizer and sends back the updated parameters.
*/
void MultiNodeGraphGroup::launchServerThread() {
#if MPI_FOUND
serverShardThread_ = new std::thread([this] {
int nCommunicatingNodes = mpi_comm_world_size_; // keep track of number of nodes still communicating with this shard
MPI_Status status;
do {
// Receive grads from any client
unsigned long messageInfo[4];
MPI_Recv(&messageInfo, 4, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, MPI_TAG_GRAD_PUSH_, MPI_COMM_WORLD, &status);
if (messageInfo[MSG_INFO_STATUS_] == STATUS_NODE_FINISHED_) {
nCommunicatingNodes--;
continue;
} // register finished node and skip to next loop iteration
MPI_Recv(serverShardBufferCPU_.data(), nodeSizes_[mpi_my_rank_], MPI_FLOAT, status.MPI_SOURCE, MPI_TAG_GRAD_PUSH_, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
// Update shard params asynchronously over GPUs
std::vector<std::thread> threads;
size_t offset = 0;
for (int gpu = 0; gpu < devices_.size(); gpu++) {
size_t size = shardSizes_[gpu];
threads.emplace_back(std::thread([=](int gpu, size_t offset, size_t size, size_t batchWords) {
std::lock_guard<std::mutex> guard(shardMutex_[gpu]);
// Copy grads to appropriate GPU
hipMemcpy(shardGrads_[gpu]->data(), &serverShardBufferCPU_.at(offset), size * sizeof(float), hipMemcpyHostToDevice);
hipStreamSynchronize(0);
// Run optimizer on GPU
if (scaleLearningRate_ && batchWords > 0) {
shardOptimizers_[gpu]->update(shardParams_[gpu], shardGrads_[gpu], batchWords / avgBatchWords_);
} else {
shardOptimizers_[gpu]->update(shardParams_[gpu], shardGrads_[gpu]);
}
hipStreamSynchronize(0);
// Copy params from GPU
hipMemcpy(&serverShardBufferCPU_.at(offset), shardParams_[gpu]->data(), size * sizeof(float), hipMemcpyDeviceToHost);
hipStreamSynchronize(0);
}, gpu, offset, size, messageInfo[MSG_INFO_BATCHWORDS_]));
offset += size;
}
for (auto &&t : threads) { t.join(); }
// Send updated params to same client
MPI_Ssend(serverShardBufferCPU_.data(), nodeSizes_[mpi_my_rank_], MPI_FLOAT, status.MPI_SOURCE,
MPI_TAG_PARAM_PUSH_, MPI_COMM_WORLD);
} while (nCommunicatingNodes != 0);
});
#endif
}
/**
* Safely shut down the launched server shard thread.
*/
void MultiNodeGraphGroup::shutDownServerThread() {
serverShardThread_->join(); // Wait for server thread to finish communicating (with unfinished nodes)
}
/**
* Launch independent threads which continually synchronize their client's gradients/parameters whenever the respective communication buffers are full.
*/
void MultiNodeGraphGroup::launchCommOverlapThreads() {
#if MPI_FOUND
for (int gpu = 0; gpu < devices_.size(); gpu++) {
clientCommThreads_.emplace_back(new std::thread([this](int gpu) {
do {
// Wait for the client (GPU) to fill the communication buffers
std::unique_lock<std::mutex> uniqueLock(mutexClientCommOverlapBuffersFilled_[gpu]);
while (!clientCommOverlapBuffersFilled_[gpu]) {
cvClientCommOverlapBuffersFilled_[gpu].wait(uniqueLock);
}
if (stopClientCommThreads_) { break; }
// Synchronize with server shards
synchronizeWithServerShards(clientCommOverlapBuffersGPU_[gpu], clientCommOverlapBuffersGPU_[gpu], gpu, scaleLearningRate_ ? clientCommittedWordCounts_[gpu] : 0);
// Indicate that buffers can be read from and filled again
clientCommOverlapBuffersFilled_[gpu] = false;
} while (!stopClientCommThreads_);
}, gpu));
}
#endif
}
/**
* Safely shut down the launched communication overlap threads
*/
void MultiNodeGraphGroup::shutDownCommOverlapThreads() {
stopClientCommThreads_ = true;
for (int gpu = 0; gpu < devices_.size(); gpu++) {
clientCommOverlapBuffersFilled_[gpu] = true;
cvClientCommOverlapBuffersFilled_[gpu].notify_one(); // Unblock thread from lock, then join it
clientCommThreads_[gpu]->join();
}
}
/**
* Send new gradients to the server shards and receive the updated (global) parameters.
*
* @param newGrads Gradients to send
* @param oldParams Parameters to replace
* @param gpu GPU/client performing synchronize (to access appropriate buffers etc.)
* @param batchWords Number of batch words to pass to server shard optimizers
*/
void MultiNodeGraphGroup::synchronizeWithServerShards(Tensor newGrads, Tensor oldParams, int gpu, size_t batchWords) {
#if MPI_FOUND
size_t offset = 0;
for (int node = 0; node < mpi_comm_world_size_; node++) {
size_t nodeSize = nodeSizes_[node];
// Update remotely if node != this node
if (node != mpi_my_rank_) {
// Copy grads from GPU to CPU (for MPI sending)
hipMemcpy(clientCommBuffersCPU_[gpu].data(), newGrads->subtensor(offset, nodeSize)->data(), nodeSize * sizeof(float), hipMemcpyDeviceToHost);
hipStreamSynchronize(0);
// Send grads to server node
size_t messageInfo[4];
messageInfo[MSG_INFO_SIZE_] = nodeSize;
messageInfo[MSG_INFO_CLIENT_] = gpu;
messageInfo[MSG_INFO_BATCHWORDS_] = batchWords;
messageInfo[MSG_INFO_STATUS_] = STATUS_NODE_TRAINING_;
MPI_Ssend(&messageInfo, 4, MPI_UNSIGNED_LONG, node, MPI_TAG_GRAD_PUSH_, MPI_COMM_WORLD);
MPI_Ssend(clientCommBuffersCPU_[gpu].data(), nodeSize, MPI_FLOAT, node, MPI_TAG_GRAD_PUSH_, MPI_COMM_WORLD);
// Receive updated params from server node
MPI_Recv(clientCommBuffersCPU_[gpu].data(), nodeSize, MPI_FLOAT, node, MPI_TAG_PARAM_PUSH_, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
// Copy params from CPU back to GPU
hipMemcpy(oldParams->subtensor(offset, nodeSize)->data(), clientCommBuffersCPU_[gpu].data(), nodeSize * sizeof(float), hipMemcpyHostToDevice);
hipStreamSynchronize(0);
// Else update locally if node == this node
} else {
size_t localOffset = offset;
std::vector<std::thread> threads;
for (int gpu = 0; gpu < devices_.size(); gpu++) {
size_t gpuSize = shardSizes_[gpu];
threads.emplace_back(std::thread([=](int gpu, size_t offset, size_t size) {
std::lock_guard<std::mutex> guard(shardMutex_[gpu]);
// Copy grads to appropriate GPU
shardGrads_[gpu]->copyFrom(newGrads->subtensor(offset, size));
// Run optimizer on GPU
if (scaleLearningRate_ && batchWords > 0) {
shardOptimizers_[gpu]->update(shardParams_[gpu], shardGrads_[gpu], batchWords / avgBatchWords_);
} else {
shardOptimizers_[gpu]->update(shardParams_[gpu], shardGrads_[gpu]);
}
hipStreamSynchronize(0);
// Copy params back to current GPU
oldParams->subtensor(offset, size)->copyFrom(shardParams_[gpu]);
}, gpu, localOffset, gpuSize));
localOffset += gpuSize;
}
for (auto &&t : threads) { t.join(); }
}
offset += nodeSize;
}
#endif
}
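// Wire protocol used above (per remote node): the client first sends the 4-element
// messageInfo header (size, client id, batch words, status) with tag MPI_TAG_GRAD_PUSH_,
// then the raw gradient floats with the same tag, and finally blocks on an MPI_Recv with
// tag MPI_TAG_PARAM_PUSH_ for the updated parameters. The server side of this handshake
// lives in launchServerThread().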
/**
* Execute given batch on this node, pushing/pulling the resulting gradients/parameters to/from the server shards
* or -- if comm. overlap enabled -- to/from the communication buffers, summing gradients locally if the communication thread is busy
*
* @param batch Batch on which to perform forward and backward passes.
*/
void MultiNodeGraphGroup::execute(Ptr<data::Batch> batch) {
if (!initialized_) {
init(batch);
initialized_ = true;
}
auto task = [this](Ptr<data::Batch> batch) {
static size_t i = 0;
thread_local Ptr<ExpressionGraph> graph;
thread_local Ptr<models::ModelBase> builder;
thread_local size_t my_id = 0;
if (!graph) {
std::lock_guard<std::mutex> lock(mutexClientInit_);
my_id = i;
graph = clientGraphs_[i];
builder = clientBuilders_[i++];
}
auto costNode = builder->build(graph, batch);
graph->forward();
float cost = costNode->scalar();
graph->backward();
hipStreamSynchronize(0);
if(!clientCommOverlap) {
synchronizeWithServerShards(graph->params()->grads(), graph->params()->vals(), my_id, batch->words());
}
// Overlapping computations with communication
if (clientCommOverlap) {
// Add computed gradients to local running sum
Element(functional::_1 = functional::_1 + functional::_2, clientSummedGradsGPU[my_id], graph->params()->grads());
hipStreamSynchronize(0);
// Sum up word counts if batch flexible learning rate is enabled
if (scaleLearningRate_) {
clientSummedWordCounts_[my_id] += batch->words();
}
// If communication channel ready, swap graph's pointers with secondary buffers
if (!clientCommOverlapBuffersFilled_[my_id]) {
std::unique_lock<std::mutex> tryLock(mutexClientCommOverlapBuffersFilled_[my_id], std::try_to_lock);
if (tryLock.owns_lock()) {
// Copy parameters from communication buffer
graph->params()->vals()->copyFrom(clientCommOverlapBuffersGPU_[my_id]);
// Copy summed grads to communication buffer
clientCommOverlapBuffersGPU_[my_id]->copyFrom(clientSummedGradsGPU[my_id]);
// Commit summed word counts if batch-flexible-lr enabled
if (scaleLearningRate_) {
clientCommittedWordCounts_[my_id] = clientSummedWordCounts_[my_id];
clientSummedWordCounts_[my_id] = 0;
}
// Notify communication thread that buffers have been read and filled
clientCommOverlapBuffersFilled_[my_id] = true;
cvClientCommOverlapBuffersFilled_[my_id].notify_one();
// Apply summed gradients to new parameters
clientLocalOptimizers_[my_id]->update(graph->params()->vals(), clientSummedGradsGPU[my_id]);
// Clear summed gradients
clientSummedGradsGPU[my_id]->set(0);
}
}
}
// Run scheduler (if enabled)
if(scheduler_) {
std::unique_lock<std::mutex> lock(schedulerMutex_);
// Wait until the thread that wants to do validation is finished.
clientThreadPool_->wait_for_one(lock);
scheduler_->update(cost, batch);
if(scheduler_->saving() || scheduler_->validating()) {
// Wait with validation or saving until all other threads are done with update.
// We want to reuse the graphs for validation, so they need to be in
// a safe state.
clientThreadPool_->wait_for_others(lock);
if(scheduler_->saving())
this->save(graph);
if(scheduler_->validating())
scheduler_->validate(clientGraphs_);
// Validation or saving is done, tell other threads to continue work.
clientThreadPool_->notify_others();
}
}
};
clientThreadPool_->enqueue(task, batch);
}
/**
* Notify server shards that this node has finished training.
*/
void MultiNodeGraphGroup::signalFinishedToServerShards() {
#if MPI_FOUND
unsigned long messageInfo[4];
messageInfo[MSG_INFO_STATUS_] = STATUS_NODE_FINISHED_;
for (int node = 0; node < mpi_comm_world_size_; node++) {
MPI_Ssend(&messageInfo, 4, MPI_UNSIGNED_LONG, node, MPI_TAG_GRAD_PUSH_, MPI_COMM_WORLD);
}
#endif
}
}
| e5f09aba2b1bc69c5f7bddc1f527aa664c330e4e.cu | #include "training/graph_group_multinode.h"
#include "kernels/tensor_operators.h"
#include "graph_group_multinode.h"
namespace marian {
/**
* Set given scheduler to register training observers on the shard optimizers.
*/
void MultiNodeGraphGroup::setScheduler(Ptr<Scheduler> scheduler) {
scheduler_ = scheduler;
// optimizer has to be registered last to see a change of learning rate
scheduler_->registerTrainingObserver(scheduler_);
for (auto opt : shardOptimizers_) {
scheduler_->registerTrainingObserver(opt);
}
}
/**
* Allocate new tensor on given GPU and store allocator.
*/
Tensor MultiNodeGraphGroup::newTensor(int size, int device) {
Tensor t;
Ptr<TensorAllocator> allocator = New<TensorAllocator>(device);
allocator->reserveExact(size * sizeof(float));
allocator->allocate(t, {1, size});
allocators_.push_back(allocator);
return t;
}
/**
* Setup training environment and launch server thread and (if enabled) client communication overlap threads.
* Includes setting up MPI, node and shard sizes, clients, server shards and communication overlap stuff.
*/
void MultiNodeGraphGroup::init(Ptr<data::Batch> batch) {
// Setup clients and shards
setupMPI();
setupClients(batch);
setupServerShards();
if (clientCommOverlap) {
initClientCommOverlapVars();
initClientCommOverlapGpuTensors();
}
// Launch threads
launchServerThread(); // For receiving and processing gradients and sending back parameters
if (clientCommOverlap) {
launchCommOverlapThreads(); // For communicating with server shards while other threads do computations
}
}
/**
* Setup MPI world size and rank of this node.
*/
void MultiNodeGraphGroup::setupMPI() {
#if MPI_FOUND
MPI_Comm_size(MPI_COMM_WORLD, &mpi_comm_world_size_);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_my_rank_);
#endif
}
/**
* Setup clients that will compute gradients and communicate them with the server shards.
* There is one client per GPU.
*/
void MultiNodeGraphGroup::setupClients(Ptr<data::Batch> batch) {
runBatchThroughClientGraphs(batch);
calculateNodeSizes();
initClientCpuBuffers();
if(clientCommOverlap) {
initClientCommOverlapVars();
initClientCommOverlapGpuTensors();
}
clientThreadPool_ = new marian::ThreadPool(devices_.size(), devices_.size());
}
/**
* Initialize the graphs (models) of all clients on this node with the given batch.
*/
void MultiNodeGraphGroup::runBatchThroughClientGraphs(Ptr<data::Batch> batch) {
for(int i = 0; i < devices_.size(); i++) {
THREAD_GUARD(
clientBuilders_[i]->build(clientGraphs_[i], batch);
clientGraphs_[i]->forward();
);
}
cudaStreamSynchronize(0);
}
/**
* Calculate the size of each node in the MPI world (cluster).
* Account for the edge case where the last node has fewer parameters because the model size is not perfectly divisible by the number of nodes.
*/
void MultiNodeGraphGroup::calculateNodeSizes() {
size_t modelSize = clientGraphs_[0]->params()->vals()->size();
size_t nodeSize = ceilf(((float) modelSize) / mpi_comm_world_size_);
for (int node = 0; node < mpi_comm_world_size_; node++) {
size_t remainingModelSize = modelSize - (nodeSize * node);
nodeSizes_.push_back(std::min(nodeSize, remainingModelSize)); // Takes care of edge case where last node is smaller than the others
}
}
/**
* Initialize a CPU buffer for each client on this node for storing gradients or parameters.
* Required for sending GPU data through MPI to other nodes (GPU -> CPU -> MPI network).
*/
void MultiNodeGraphGroup::initClientCpuBuffers() {
// Initialize CPU buffers used to send GPU data through MPI (can't send directly from GPUs)
for (int i = 0; i < devices_.size(); i++) {
size_t size = nodeSizes_[mpi_my_rank_]; // @TODO Optimization: Use full size to copy in one go, then send gradients and receive parameters in parallel
clientCommBuffersCPU_.push_back(std::vector<float>(size));
}
}
/**
* Initialize variables required for overlapping client computations and communication.
* Includes summed and committed word counts, buffer flags, mutexes and condition variables.
*/
void MultiNodeGraphGroup::initClientCommOverlapVars() {
clientSummedWordCounts_ = std::vector<size_t>(devices_.size(), 0);
clientCommittedWordCounts_ = std::vector<size_t>(devices_.size(), 0);
clientCommOverlapBuffersFilled_ = std::vector<bool>(devices_.size(), false);
mutexClientCommOverlapBuffersFilled_ = std::vector<std::mutex>{devices_.size()};
cvClientCommOverlapBuffersFilled_ = std::vector<std::condition_variable>(devices_.size());
}
/**
* Initialize GPU tensors required for overlapping client computations and communication.
* Includes secondary buffers for params/grads, buffers for locally summing gradients, and local optimizers to apply received gradients to client parameters.
*/
void MultiNodeGraphGroup::initClientCommOverlapGpuTensors() {
size_t modelSize = clientGraphs_[0]->params()->vals()->size();
for (int client = 0; client < devices_.size(); client++) {
// Communication overlap buffer (for grads + params)
Tensor commOverlapBuffer = newTensor(modelSize, devices_[client]);
commOverlapBuffer->copyFrom(clientGraphs_[0]->params()->vals());
clientCommOverlapBuffersGPU_.push_back(commOverlapBuffer );
// Gradients local sum buffer
Tensor sumGrads = newTensor(modelSize, devices_[client]);
sumGrads->set(0);
clientSummedGradsGPU.push_back(sumGrads);
// Local optimizer to apply summed gradients
clientLocalOptimizers_.push_back(Optimizer(options_)); // => for simple SGD opt: clientLocalOptimizers_.push_back(Optimizer<Sgd>(0.0001, keywords::clip=Clipper<Norm>(1)));
}
}
/**
* Setup server shards that will receive gradients from clients, apply them to their part of the global parameters, and send them back to the same clients.
* There is one server shard per GPU. (Each GPU acts both as a client and as a server shard.)
*/
void MultiNodeGraphGroup::setupServerShards() {
calculateShardSizes();
initShardGpuTensors();
// CPU buffer for receiving/sending grads/params
serverShardBufferCPU_ = std::vector<float>(nodeSizes_[mpi_my_rank_]);
// Shard optimizers
for (int shard = 0; shard < devices_.size(); shard++) {
shardOptimizers_.push_back(Optimizer(options_));
}
// Mutexes to prevent simultaneous access to tensors and/or optimizers
shardMutex_ = std::vector<std::mutex>(devices_.size());
}
/**
* Calculate the size of each shard on this node.
* Account for the edge case where the last shard has fewer parameters because the node size is not perfectly divisibly by the number of shards.
*/
void MultiNodeGraphGroup::calculateShardSizes() {
size_t nodeSize = nodeSizes_[mpi_my_rank_];
size_t shardSize = ceilf(((float) nodeSize) / devices_.size());
for (int shard = 0; shard < devices_.size(); shard++) {
size_t remainingNodeSize = nodeSize - (shardSize * shard);
shardSizes_.push_back(std::min(shardSize, remainingNodeSize)); // Takes care of edge case where last shard is smaller than the others
}
}
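// Worked example: a nodeSize of 10 split across 4 local GPUs gives
// shardSize = ceil(10 / 4) = 3 and shardSizes_ = {3, 3, 3, 1}.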
/**
* Initialize the GPU tensors for storing the parameters and gradients of each server shard.
*/
void MultiNodeGraphGroup::initShardGpuTensors() {
size_t offset = 0;
for (int shard = 0; shard < devices_.size(); shard++) {
Tensor gpuParams = newTensor(shardSizes_[shard], devices_[shard]);
gpuParams->copyFrom(clientGraphs_[0]->params()->vals()->subtensor(offset, shardSizes_[shard]));
shardParams_.push_back(gpuParams);
shardGrads_.push_back(newTensor(shardSizes_[shard], devices_[shard]));
}
}
/**
* Launch independent thread which continually receives gradients assigned to this shard from any client, runs the shard optimizer and sends back the updated parameters.
*/
void MultiNodeGraphGroup::launchServerThread() {
#if MPI_FOUND
serverShardThread_ = new std::thread([this] {
int nCommunicatingNodes = mpi_comm_world_size_; // keep track of number of nodes still communicating with this shard
MPI_Status status;
do {
// Receive grads from any client
unsigned long messageInfo[4];
MPI_Recv(&messageInfo, 4, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, MPI_TAG_GRAD_PUSH_, MPI_COMM_WORLD, &status);
if (messageInfo[MSG_INFO_STATUS_] == STATUS_NODE_FINISHED_) {
nCommunicatingNodes--;
continue;
} // register finished node and skip to next loop iteration
MPI_Recv(serverShardBufferCPU_.data(), nodeSizes_[mpi_my_rank_], MPI_FLOAT, status.MPI_SOURCE, MPI_TAG_GRAD_PUSH_, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
// Update shard params asynchronously over GPUs
std::vector<std::thread> threads;
size_t offset = 0;
for (int gpu = 0; gpu < devices_.size(); gpu++) {
size_t size = shardSizes_[gpu];
threads.emplace_back(std::thread([=](int gpu, size_t offset, size_t size, size_t batchWords) {
std::lock_guard<std::mutex> guard(shardMutex_[gpu]);
// Copy grads to appropriate GPU
cudaMemcpy(shardGrads_[gpu]->data(), &serverShardBufferCPU_.at(offset), size * sizeof(float), cudaMemcpyHostToDevice);
cudaStreamSynchronize(0);
// Run optimizer on GPU
if (scaleLearningRate_ && batchWords > 0) {
shardOptimizers_[gpu]->update(shardParams_[gpu], shardGrads_[gpu], batchWords / avgBatchWords_);
} else {
shardOptimizers_[gpu]->update(shardParams_[gpu], shardGrads_[gpu]);
}
cudaStreamSynchronize(0);
// Copy params from GPU
cudaMemcpy(&serverShardBufferCPU_.at(offset), shardParams_[gpu]->data(), size * sizeof(float), cudaMemcpyDeviceToHost);
cudaStreamSynchronize(0);
}, gpu, offset, size, messageInfo[MSG_INFO_BATCHWORDS_]));
offset += size;
}
for (auto &&t : threads) { t.join(); }
// Send updated params to same client
MPI_Ssend(serverShardBufferCPU_.data(), nodeSizes_[mpi_my_rank_], MPI_FLOAT, status.MPI_SOURCE,
MPI_TAG_PARAM_PUSH_, MPI_COMM_WORLD);
} while (nCommunicatingNodes != 0);
});
#endif
}
/**
* Safely shut down the launched server shard thread.
*/
void MultiNodeGraphGroup::shutDownServerThread() {
serverShardThread_->join(); // Wait for server thread to finish communicating (with unfinished nodes)
}
/**
* Launch independent threads which continually synchronize their client's gradients/parameters whenever the respective communication buffers are full.
*/
void MultiNodeGraphGroup::launchCommOverlapThreads() {
#if MPI_FOUND
for (int gpu = 0; gpu < devices_.size(); gpu++) {
clientCommThreads_.emplace_back(new std::thread([this](int gpu) {
do {
// Wait for the client (GPU) to fill the communication buffers
std::unique_lock<std::mutex> uniqueLock(mutexClientCommOverlapBuffersFilled_[gpu]);
while (!clientCommOverlapBuffersFilled_[gpu]) {
cvClientCommOverlapBuffersFilled_[gpu].wait(uniqueLock);
}
if (stopClientCommThreads_) { break; }
// Synchronize with server shards
synchronizeWithServerShards(clientCommOverlapBuffersGPU_[gpu], clientCommOverlapBuffersGPU_[gpu], gpu, scaleLearningRate_ ? clientCommittedWordCounts_[gpu] : 0);
// Indicate that buffers can be read from and filled again
clientCommOverlapBuffersFilled_[gpu] = false;
} while (!stopClientCommThreads_);
}, gpu));
}
#endif
}
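// Hand-off between the compute thread (execute) and these communication threads is a simple
// double-buffer handshake: execute() copies the summed gradients into
// clientCommOverlapBuffersGPU_[gpu], sets clientCommOverlapBuffersFilled_[gpu] = true and
// notifies the condition variable; the communication thread then synchronizes with the server
// shards using that same buffer (gradients out, updated parameters back in) and clears the
// flag so the client can swap buffers again.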
/**
* Safely shut down the launched communication overlap threads
*/
void MultiNodeGraphGroup::shutDownCommOverlapThreads() {
stopClientCommThreads_ = true;
for (int gpu = 0; gpu < devices_.size(); gpu++) {
clientCommOverlapBuffersFilled_[gpu] = true;
cvClientCommOverlapBuffersFilled_[gpu].notify_one(); // Unblock thread from lock, then join it
clientCommThreads_[gpu]->join();
}
}
/**
* Send new gradients to the server shards and receive the updated (global) parameters.
*
* @param newGrads Gradients to send
* @param oldParams Parameters to replace
* @param gpu GPU/client performing synchronize (to access appropriate buffers etc.)
* @param batchWords Number of batch words to pass to server shard optimizers
*/
void MultiNodeGraphGroup::synchronizeWithServerShards(Tensor newGrads, Tensor oldParams, int gpu, size_t batchWords) {
#if MPI_FOUND
size_t offset = 0;
for (int node = 0; node < mpi_comm_world_size_; node++) {
size_t nodeSize = nodeSizes_[node];
// Update remotely if node != this node
if (node != mpi_my_rank_) {
// Copy grads from GPU to CPU (for MPI sending)
cudaMemcpy(clientCommBuffersCPU_[gpu].data(), newGrads->subtensor(offset, nodeSize)->data(), nodeSize * sizeof(float), cudaMemcpyDeviceToHost);
cudaStreamSynchronize(0);
// Send grads to server node
size_t messageInfo[4];
messageInfo[MSG_INFO_SIZE_] = nodeSize;
messageInfo[MSG_INFO_CLIENT_] = gpu;
messageInfo[MSG_INFO_BATCHWORDS_] = batchWords;
messageInfo[MSG_INFO_STATUS_] = STATUS_NODE_TRAINING_;
MPI_Ssend(&messageInfo, 4, MPI_UNSIGNED_LONG, node, MPI_TAG_GRAD_PUSH_, MPI_COMM_WORLD);
MPI_Ssend(clientCommBuffersCPU_[gpu].data(), nodeSize, MPI_FLOAT, node, MPI_TAG_GRAD_PUSH_, MPI_COMM_WORLD);
// Receive updated params from server node
MPI_Recv(clientCommBuffersCPU_[gpu].data(), nodeSize, MPI_FLOAT, node, MPI_TAG_PARAM_PUSH_, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
// Copy params from CPU back to GPU
cudaMemcpy(oldParams->subtensor(offset, nodeSize)->data(), clientCommBuffersCPU_[gpu].data(), nodeSize * sizeof(float), cudaMemcpyHostToDevice);
cudaStreamSynchronize(0);
// Else update locally if node == this node
} else {
size_t localOffset = offset;
std::vector<std::thread> threads;
for (int gpu = 0; gpu < devices_.size(); gpu++) {
size_t gpuSize = shardSizes_[gpu];
threads.emplace_back(std::thread([=](int gpu, size_t offset, size_t size) {
std::lock_guard<std::mutex> guard(shardMutex_[gpu]);
// Copy grads to appropriate GPU
shardGrads_[gpu]->copyFrom(newGrads->subtensor(offset, size));
// Run optimizer on GPU
if (scaleLearningRate_ && batchWords > 0) {
shardOptimizers_[gpu]->update(shardParams_[gpu], shardGrads_[gpu], batchWords / avgBatchWords_);
} else {
shardOptimizers_[gpu]->update(shardParams_[gpu], shardGrads_[gpu]);
}
cudaStreamSynchronize(0);
// Copy params back to current GPU
oldParams->subtensor(offset, size)->copyFrom(shardParams_[gpu]);
}, gpu, localOffset, gpuSize));
localOffset += gpuSize;
}
for (auto &&t : threads) { t.join(); }
}
offset += nodeSize;
}
#endif
}
/**
* Execute given batch on this node, pushing/pulling the resulting gradients/parameters to/from the server shards
* or -- if comm. overlap enabled -- to/from the communication buffers, summing gradients locally if the communication thread is busy
*
* @param batch Batch on which to perform forward and backward passes.
*/
void MultiNodeGraphGroup::execute(Ptr<data::Batch> batch) {
if (!initialized_) {
init(batch);
initialized_ = true;
}
auto task = [this](Ptr<data::Batch> batch) {
static size_t i = 0;
thread_local Ptr<ExpressionGraph> graph;
thread_local Ptr<models::ModelBase> builder;
thread_local size_t my_id = 0;
if (!graph) {
std::lock_guard<std::mutex> lock(mutexClientInit_);
my_id = i;
graph = clientGraphs_[i];
builder = clientBuilders_[i++];
}
auto costNode = builder->build(graph, batch);
graph->forward();
float cost = costNode->scalar();
graph->backward();
cudaStreamSynchronize(0);
if(!clientCommOverlap) {
synchronizeWithServerShards(graph->params()->grads(), graph->params()->vals(), my_id, batch->words());
}
// Overlapping computations with communication
if (clientCommOverlap) {
// Add computed gradients to local running sum
Element(functional::_1 = functional::_1 + functional::_2, clientSummedGradsGPU[my_id], graph->params()->grads());
cudaStreamSynchronize(0);
// Sum up word counts if batch flexible learning rate is enabled
if (scaleLearningRate_) {
clientSummedWordCounts_[my_id] += batch->words();
}
// If communication channel ready, swap graph's pointers with secondary buffers
if (!clientCommOverlapBuffersFilled_[my_id]) {
std::unique_lock<std::mutex> tryLock(mutexClientCommOverlapBuffersFilled_[my_id], std::try_to_lock);
if (tryLock.owns_lock()) {
// Copy parameters from communication buffer
graph->params()->vals()->copyFrom(clientCommOverlapBuffersGPU_[my_id]);
// Copy summed grads to communication buffer
clientCommOverlapBuffersGPU_[my_id]->copyFrom(clientSummedGradsGPU[my_id]);
// Commit summed word counts if batch-flexible-lr enabled
if (scaleLearningRate_) {
clientCommittedWordCounts_[my_id] = clientSummedWordCounts_[my_id];
clientSummedWordCounts_[my_id] = 0;
}
// Notify communication thread that buffers have been read and filled
clientCommOverlapBuffersFilled_[my_id] = true;
cvClientCommOverlapBuffersFilled_[my_id].notify_one();
// Apply summed gradients to new parameters
clientLocalOptimizers_[my_id]->update(graph->params()->vals(), clientSummedGradsGPU[my_id]);
// Clear summed gradients
clientSummedGradsGPU[my_id]->set(0);
}
}
}
// Run scheduler (if enabled)
if(scheduler_) {
std::unique_lock<std::mutex> lock(schedulerMutex_);
// Wait until the thread that wants to do validation is finished.
clientThreadPool_->wait_for_one(lock);
scheduler_->update(cost, batch);
if(scheduler_->saving() || scheduler_->validating()) {
// Wait with validation or saving until all other threads are done with update.
// We want to reuse the graphs for validation, so they need to be in
// a safe state.
clientThreadPool_->wait_for_others(lock);
if(scheduler_->saving())
this->save(graph);
if(scheduler_->validating())
scheduler_->validate(clientGraphs_);
// Validation or saving is done, tell other threads to continue work.
clientThreadPool_->notify_others();
}
}
};
clientThreadPool_->enqueue(task, batch);
}
/**
* Notify server shards that this node has finished training.
*/
void MultiNodeGraphGroup::signalFinishedToServerShards() {
#if MPI_FOUND
unsigned long messageInfo[4];
messageInfo[MSG_INFO_STATUS_] = STATUS_NODE_FINISHED_;
for (int node = 0; node < mpi_comm_world_size_; node++) {
MPI_Ssend(&messageInfo, 4, MPI_UNSIGNED_LONG, node, MPI_TAG_GRAD_PUSH_, MPI_COMM_WORLD);
}
#endif
}
}
|
7e8f50cf651fa609fca6f4ec5742b507358c8c2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gputests.h"
/***********************************************************************************
* Test 8 [Modulo 20, random pattern]
*
* A random pattern is generated. This pattern is used to set every 20th memory location
 * in memory. The rest of the memory locations are set to the complement of the pattern.
 * This is repeated 20 times, and each time the location at which the pattern is set is shifted right.
*
*
**********************************************************************************/
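/* Illustrative CPU-side reference of the pattern (a sketch, not part of the original
 * test; the helper name is ours): for a given offset, word i of a block is expected to
 * hold p1 when i % MOD_SZ == offset and the complement p2 == ~p1 otherwise, which is
 * what the write/read kernel pair below enforces and checks. */
static inline unsigned int expected_mod_word(unsigned int i, unsigned int offset,
                                             unsigned int p1, unsigned int p2)
{
    return (i % MOD_SZ == offset) ? p1 : p2;
}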
__global__ void
kernel_modtest_write(char* _ptr, char* end_ptr, unsigned int offset, unsigned int p1, unsigned int p2)
{
unsigned int i;
unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);
if (ptr >= (unsigned int*) end_ptr)
{
return;
}
for (i = offset; i < BLOCKSIZE/sizeof(unsigned int); i+=MOD_SZ)
{
ptr[i] =p1;
}
for (i = 0; i < BLOCKSIZE/sizeof(unsigned int); i++)
{
if (i % MOD_SZ != offset)
{
ptr[i] =p2;
}
}
return;
}
__global__ void
kernel_modtest_read(char* _ptr, char* end_ptr, unsigned int offset, unsigned int p1, MemoryError* local_error, int* local_count)
{
unsigned int i;
unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);
if (ptr >= (unsigned int*) end_ptr)
{
return;
}
for (i = offset; i < BLOCKSIZE/sizeof(unsigned int); i+=MOD_SZ)
{
if (ptr[i] !=p1)
{
record_error(local_error, local_count, &ptr[i], p1);
}
}
return;
}
unsigned int
modtest(char* ptr, unsigned int tot_num_blocks, unsigned int offset, unsigned int p1, unsigned int p2, MemoryError *local_error, int *local_count, bool* term)
{
unsigned int i;
char* end_ptr = ptr + tot_num_blocks* BLOCKSIZE;
unsigned int err = 0;
for (i= 0; i < tot_num_blocks; i+= GRIDSIZE)
{
if(*term == true) break;
dim3 grid;
grid.x= GRIDSIZE;
hipLaunchKernelGGL(( kernel_modtest_write), dim3(grid), dim3(1), 0, 0, ptr + i*BLOCKSIZE, end_ptr, offset, p1, p2); SYNC_CUERR;
//SHOW_PROGRESS("test8[mod test, write]", i, tot_num_blocks);
}
for (i= 0; i < tot_num_blocks; i+= GRIDSIZE)
{
if(*term == true) break;
dim3 grid;
grid.x= GRIDSIZE;
hipLaunchKernelGGL(( kernel_modtest_read), dim3(grid), dim3(1), 0, 0, ptr + i*BLOCKSIZE, end_ptr, offset, p1, local_error, local_count); SYNC_CUERR;
//err += error_checking("test8[mod test, read", i);
//SHOW_PROGRESS("test8[mod test, read]", i, tot_num_blocks);
}
return err;
}
int
test8(TestInputParams *tip, TestOutputParams *top, bool *term)
{
unsigned int i;
unsigned int err = 0;
unsigned int iteration = 0;
unsigned int p1;
//if (global_pattern){
//p1 = global_pattern;
//}else{
p1= get_random_num();
//}
unsigned int p2 = ~p1;
repeat:
//PRINTF("test8[mod test]: p1=0x%x, p2=0x%x\n", p1,p2);
for (i = 0; i < MOD_SZ; i++)
{
err += modtest(tip->ptr, tip->tot_num_blocks,i, p1, p2, top->err_vector, top->err_count, term);
}
if (err == 0 && iteration == 0)
{
return hipSuccess;
}
if (iteration < tip->num_iterations)
{
//PRINTF("%dth repeating test8 because there are %d errors found in last run, p1=%x, p2=%p\n", iteration, err, p1, p2);
iteration++;
err = 0;
if(*term == false) goto repeat;
}
return hipSuccess;
} | 7e8f50cf651fa609fca6f4ec5742b507358c8c2b.cu | #include "gputests.h"
/***********************************************************************************
* Test 8 [Modulo 20, random pattern]
*
* A random pattern is generated. This pattern is used to set every 20th memory location
 * in memory. The rest of the memory locations are set to the complement of the pattern.
 * This is repeated 20 times, and each time the location at which the pattern is set is shifted right.
*
*
**********************************************************************************/
__global__ void
kernel_modtest_write(char* _ptr, char* end_ptr, unsigned int offset, unsigned int p1, unsigned int p2)
{
unsigned int i;
unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);
if (ptr >= (unsigned int*) end_ptr)
{
return;
}
for (i = offset; i < BLOCKSIZE/sizeof(unsigned int); i+=MOD_SZ)
{
ptr[i] =p1;
}
for (i = 0; i < BLOCKSIZE/sizeof(unsigned int); i++)
{
if (i % MOD_SZ != offset)
{
ptr[i] =p2;
}
}
return;
}
__global__ void
kernel_modtest_read(char* _ptr, char* end_ptr, unsigned int offset, unsigned int p1, MemoryError* local_error, int* local_count)
{
unsigned int i;
unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);
if (ptr >= (unsigned int*) end_ptr)
{
return;
}
for (i = offset; i < BLOCKSIZE/sizeof(unsigned int); i+=MOD_SZ)
{
if (ptr[i] !=p1)
{
record_error(local_error, local_count, &ptr[i], p1);
}
}
return;
}
unsigned int
modtest(char* ptr, unsigned int tot_num_blocks, unsigned int offset, unsigned int p1, unsigned int p2, MemoryError *local_error, int *local_count, bool* term)
{
unsigned int i;
char* end_ptr = ptr + tot_num_blocks* BLOCKSIZE;
unsigned int err = 0;
for (i= 0; i < tot_num_blocks; i+= GRIDSIZE)
{
if(*term == true) break;
dim3 grid;
grid.x= GRIDSIZE;
kernel_modtest_write<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, offset, p1, p2); SYNC_CUERR;
//SHOW_PROGRESS("test8[mod test, write]", i, tot_num_blocks);
}
for (i= 0; i < tot_num_blocks; i+= GRIDSIZE)
{
if(*term == true) break;
dim3 grid;
grid.x= GRIDSIZE;
kernel_modtest_read<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, offset, p1, local_error, local_count); SYNC_CUERR;
//err += error_checking("test8[mod test, read", i);
//SHOW_PROGRESS("test8[mod test, read]", i, tot_num_blocks);
}
return err;
}
int
test8(TestInputParams *tip, TestOutputParams *top, bool *term)
{
unsigned int i;
unsigned int err = 0;
unsigned int iteration = 0;
unsigned int p1;
//if (global_pattern){
//p1 = global_pattern;
//}else{
p1= get_random_num();
//}
unsigned int p2 = ~p1;
repeat:
//PRINTF("test8[mod test]: p1=0x%x, p2=0x%x\n", p1,p2);
for (i = 0; i < MOD_SZ; i++)
{
err += modtest(tip->ptr, tip->tot_num_blocks,i, p1, p2, top->err_vector, top->err_count, term);
}
if (err == 0 && iteration == 0)
{
return cudaSuccess;
}
if (iteration < tip->num_iterations)
{
//PRINTF("%dth repeating test8 because there are %d errors found in last run, p1=%x, p2=%p\n", iteration, err, p1, p2);
iteration++;
err = 0;
if(*term == false) goto repeat;
}
return cudaSuccess;
} |
da3ce6309b65e27e09c6511b09a68c619ee1451a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;
const int NUM_REPS = 100;
// Check errors and print GB/s
void postprocess(const float *ref, const float *res, int n, float ms)
{
bool passed = true;
for (int i = 0; i < n; i++)
if (res[i] != ref[i]) {
printf("%d %f %f\n", i, res[i], ref[i]);
printf("%25s\n", "*** FAILED ***");
passed = false;
break;
}
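  // Effective bandwidth in GB/s: each repetition reads and writes n floats
  // (2 * n * sizeof(float) bytes); the 1e-6 factor converts bytes/ms to GB/s.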
if (passed)
printf("%20.2f\n", 2 * n * sizeof(float) * 1e-6 * NUM_REPS / ms );
}
// simple copy kernel
// Used as reference case representing best effective bandwidth.
__global__ void copy(float *odata, const float *idata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
odata[(y+j)*width + x] = idata[(y+j)*width + x];
}
// copy kernel using shared memory
// Also used as reference case, demonstrating effect of using shared memory.
__global__ void copySharedMem(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM * TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x];
}
// naive transpose
// Simplest transpose; doesn't use shared memory.
// Global memory reads are coalesced but writes are not.
__global__ void transposeNaive(float *odata, const float *idata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
odata[x*width + (y+j)] = idata[(y+j)*width + x];
}
// coalesced transpose
// Uses shared memory to achieve coalescing in both reads and writes
// Tile width == #banks causes shared memory bank conflicts.
__global__ void transposeCoalesced(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
// No bank-conflict transpose
// Same as transposeCoalesced except the first tile dimension is padded
// to avoid shared memory bank conflicts.
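// With TILE_DIM == 32 and 32 shared-memory banks, the column read
// tile[threadIdx.x][...] in transposeCoalesced maps every thread of a warp to the
// same bank; padding each row to TILE_DIM+1 floats places successive rows in
// different banks, so the column read becomes conflict-free.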
__global__ void transposeNoBankConflicts(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
int main(int argc, char **argv)
{
const int nx = 1024;
const int ny = 1024;
const int mem_size = nx*ny*sizeof(float);
dim3 dimGrid(nx/TILE_DIM, ny/TILE_DIM, 1);
dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
int devId = 0;
if (argc > 1) devId = atoi(argv[1]);
hipDeviceProp_t prop;
checkCuda( hipGetDeviceProperties(&prop, devId));
printf("\nDevice : %s\n", prop.name);
printf("Matrix size: %d %d, Block size: %d %d, Tile size: %d %d\n",
nx, ny, TILE_DIM, BLOCK_ROWS, TILE_DIM, TILE_DIM);
printf("dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
checkCuda( hipSetDevice(devId) );
float *h_idata = (float*)malloc(mem_size);
float *h_cdata = (float*)malloc(mem_size);
float *h_tdata = (float*)malloc(mem_size);
float *gold = (float*)malloc(mem_size);
float *d_idata, *d_cdata, *d_tdata;
checkCuda( hipMalloc(&d_idata, mem_size) );
checkCuda( hipMalloc(&d_cdata, mem_size) );
checkCuda( hipMalloc(&d_tdata, mem_size) );
// check parameters and calculate execution configuration
if (nx % TILE_DIM || ny % TILE_DIM) {
printf("nx and ny must be a multiple of TILE_DIM\n");
goto error_exit;
}
if (TILE_DIM % BLOCK_ROWS) {
printf("TILE_DIM must be a multiple of BLOCK_ROWS\n");
goto error_exit;
}
// host
for (int j = 0; j < ny; j++)
for (int i = 0; i < nx; i++)
h_idata[j*nx + i] = j*nx + i;
// correct result for error checking
for (int j = 0; j < ny; j++)
for (int i = 0; i < nx; i++)
gold[j*nx + i] = h_idata[i*nx + j];
// device
checkCuda( hipMemcpy(d_idata, h_idata, mem_size, hipMemcpyHostToDevice) );
// events for timing
hipEvent_t startEvent, stopEvent;
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
float ms;
// ------------
// time kernels
// ------------
printf("%25s%25s\n", "Routine", "Bandwidth (GB/s)");
// ----
// copy
// ----
printf("%25s", "copy");
checkCuda( hipMemset(d_cdata, 0, mem_size) );
// warm up
hipLaunchKernelGGL(( copy), dim3(dimGrid), dim3(dimBlock), 0, 0, d_cdata, d_idata);
checkCuda( hipEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
hipLaunchKernelGGL(( copy), dim3(dimGrid), dim3(dimBlock), 0, 0, d_cdata, d_idata);
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( hipMemcpy(h_cdata, d_cdata, mem_size, hipMemcpyDeviceToHost) );
postprocess(h_idata, h_cdata, nx*ny, ms);
// -------------
// copySharedMem
// -------------
printf("%25s", "shared memory copy");
checkCuda( hipMemset(d_cdata, 0, mem_size) );
// warm up
hipLaunchKernelGGL(( copySharedMem), dim3(dimGrid), dim3(dimBlock), 0, 0, d_cdata, d_idata);
checkCuda( hipEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
hipLaunchKernelGGL(( copySharedMem), dim3(dimGrid), dim3(dimBlock), 0, 0, d_cdata, d_idata);
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( hipMemcpy(h_cdata, d_cdata, mem_size, hipMemcpyDeviceToHost) );
postprocess(h_idata, h_cdata, nx * ny, ms);
// --------------
// transposeNaive
// --------------
printf("%25s", "naive transpose");
checkCuda( hipMemset(d_tdata, 0, mem_size) );
// warmup
hipLaunchKernelGGL(( transposeNaive), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tdata, d_idata);
checkCuda( hipEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
hipLaunchKernelGGL(( transposeNaive), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tdata, d_idata);
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( hipMemcpy(h_tdata, d_tdata, mem_size, hipMemcpyDeviceToHost) );
postprocess(gold, h_tdata, nx * ny, ms);
// ------------------
// transposeCoalesced
// ------------------
printf("%25s", "coalesced transpose");
checkCuda( hipMemset(d_tdata, 0, mem_size) );
// warmup
hipLaunchKernelGGL(( transposeCoalesced), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tdata, d_idata);
checkCuda( hipEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
hipLaunchKernelGGL(( transposeCoalesced), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tdata, d_idata);
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( hipMemcpy(h_tdata, d_tdata, mem_size, hipMemcpyDeviceToHost) );
postprocess(gold, h_tdata, nx * ny, ms);
// ------------------------
// transposeNoBankConflicts
// ------------------------
printf("%25s", "conflict-free transpose");
checkCuda( hipMemset(d_tdata, 0, mem_size) );
// warmup
hipLaunchKernelGGL(( transposeNoBankConflicts), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tdata, d_idata);
checkCuda( hipEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
hipLaunchKernelGGL(( transposeNoBankConflicts), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tdata, d_idata);
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( hipMemcpy(h_tdata, d_tdata, mem_size, hipMemcpyDeviceToHost) );
postprocess(gold, h_tdata, nx * ny, ms);
error_exit:
// cleanup
checkCuda( hipEventDestroy(startEvent) );
checkCuda( hipEventDestroy(stopEvent) );
checkCuda( hipFree(d_tdata) );
checkCuda( hipFree(d_cdata) );
checkCuda( hipFree(d_idata) );
free(h_idata);
free(h_tdata);
free(h_cdata);
free(gold);
}
| da3ce6309b65e27e09c6511b09a68c619ee1451a.cu |
#include <stdio.h>
#include <assert.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;
const int NUM_REPS = 100;
// Check errors and print GB/s
void postprocess(const float *ref, const float *res, int n, float ms)
{
bool passed = true;
for (int i = 0; i < n; i++)
if (res[i] != ref[i]) {
printf("%d %f %f\n", i, res[i], ref[i]);
printf("%25s\n", "*** FAILED ***");
passed = false;
break;
}
if (passed)
printf("%20.2f\n", 2 * n * sizeof(float) * 1e-6 * NUM_REPS / ms );
}
// simple copy kernel
// Used as reference case representing best effective bandwidth.
__global__ void copy(float *odata, const float *idata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
odata[(y+j)*width + x] = idata[(y+j)*width + x];
}
// copy kernel using shared memory
// Also used as reference case, demonstrating effect of using shared memory.
__global__ void copySharedMem(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM * TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x];
}
// naive transpose
// Simplest transpose; doesn't use shared memory.
// Global memory reads are coalesced but writes are not.
__global__ void transposeNaive(float *odata, const float *idata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
odata[x*width + (y+j)] = idata[(y+j)*width + x];
}
// coalesced transpose
// Uses shared memory to achieve coalescing in both reads and writes
// Tile width == #banks causes shared memory bank conflicts.
__global__ void transposeCoalesced(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
// No bank-conflict transpose
// Same as transposeCoalesced except the first tile dimension is padded
// to avoid shared memory bank conflicts.
__global__ void transposeNoBankConflicts(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
int main(int argc, char **argv)
{
const int nx = 1024;
const int ny = 1024;
const int mem_size = nx*ny*sizeof(float);
dim3 dimGrid(nx/TILE_DIM, ny/TILE_DIM, 1);
dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
int devId = 0;
if (argc > 1) devId = atoi(argv[1]);
cudaDeviceProp prop;
checkCuda( cudaGetDeviceProperties(&prop, devId));
printf("\nDevice : %s\n", prop.name);
printf("Matrix size: %d %d, Block size: %d %d, Tile size: %d %d\n",
nx, ny, TILE_DIM, BLOCK_ROWS, TILE_DIM, TILE_DIM);
printf("dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
checkCuda( cudaSetDevice(devId) );
float *h_idata = (float*)malloc(mem_size);
float *h_cdata = (float*)malloc(mem_size);
float *h_tdata = (float*)malloc(mem_size);
float *gold = (float*)malloc(mem_size);
float *d_idata, *d_cdata, *d_tdata;
checkCuda( cudaMalloc(&d_idata, mem_size) );
checkCuda( cudaMalloc(&d_cdata, mem_size) );
checkCuda( cudaMalloc(&d_tdata, mem_size) );
// check parameters and calculate execution configuration
if (nx % TILE_DIM || ny % TILE_DIM) {
printf("nx and ny must be a multiple of TILE_DIM\n");
goto error_exit;
}
if (TILE_DIM % BLOCK_ROWS) {
printf("TILE_DIM must be a multiple of BLOCK_ROWS\n");
goto error_exit;
}
// host
for (int j = 0; j < ny; j++)
for (int i = 0; i < nx; i++)
h_idata[j*nx + i] = j*nx + i;
// correct result for error checking
for (int j = 0; j < ny; j++)
for (int i = 0; i < nx; i++)
gold[j*nx + i] = h_idata[i*nx + j];
// device
checkCuda( cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice) );
// events for timing
cudaEvent_t startEvent, stopEvent;
checkCuda( cudaEventCreate(&startEvent) );
checkCuda( cudaEventCreate(&stopEvent) );
float ms;
// ------------
// time kernels
// ------------
printf("%25s%25s\n", "Routine", "Bandwidth (GB/s)");
// ----
// copy
// ----
printf("%25s", "copy");
checkCuda( cudaMemset(d_cdata, 0, mem_size) );
// warm up
copy<<<dimGrid, dimBlock>>>(d_cdata, d_idata);
checkCuda( cudaEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
copy<<<dimGrid, dimBlock>>>(d_cdata, d_idata);
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( cudaMemcpy(h_cdata, d_cdata, mem_size, cudaMemcpyDeviceToHost) );
postprocess(h_idata, h_cdata, nx*ny, ms);
// -------------
// copySharedMem
// -------------
printf("%25s", "shared memory copy");
checkCuda( cudaMemset(d_cdata, 0, mem_size) );
// warm up
copySharedMem<<<dimGrid, dimBlock>>>(d_cdata, d_idata);
checkCuda( cudaEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
copySharedMem<<<dimGrid, dimBlock>>>(d_cdata, d_idata);
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( cudaMemcpy(h_cdata, d_cdata, mem_size, cudaMemcpyDeviceToHost) );
postprocess(h_idata, h_cdata, nx * ny, ms);
// --------------
// transposeNaive
// --------------
printf("%25s", "naive transpose");
checkCuda( cudaMemset(d_tdata, 0, mem_size) );
// warmup
transposeNaive<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
checkCuda( cudaEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
transposeNaive<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost) );
postprocess(gold, h_tdata, nx * ny, ms);
// ------------------
// transposeCoalesced
// ------------------
printf("%25s", "coalesced transpose");
checkCuda( cudaMemset(d_tdata, 0, mem_size) );
// warmup
transposeCoalesced<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
checkCuda( cudaEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
transposeCoalesced<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost) );
postprocess(gold, h_tdata, nx * ny, ms);
// ------------------------
// transposeNoBankConflicts
// ------------------------
printf("%25s", "conflict-free transpose");
checkCuda( cudaMemset(d_tdata, 0, mem_size) );
// warmup
transposeNoBankConflicts<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
checkCuda( cudaEventRecord(startEvent, 0) );
for (int i = 0; i < NUM_REPS; i++)
transposeNoBankConflicts<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
checkCuda( cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost) );
postprocess(gold, h_tdata, nx * ny, ms);
error_exit:
// cleanup
checkCuda( cudaEventDestroy(startEvent) );
checkCuda( cudaEventDestroy(stopEvent) );
checkCuda( cudaFree(d_tdata) );
checkCuda( cudaFree(d_cdata) );
checkCuda( cudaFree(d_idata) );
free(h_idata);
free(h_tdata);
free(h_cdata);
free(gold);
}
|
10a4b5a25fba7dfce33bf85ea3dfcc0422961f59.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <opencv2/core/cuda.hpp>
#include <opencv2/core/cuda_stream_accessor.hpp>
// http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __func__, __LINE__, false); }
inline void gpuAssert(hipError_t code, const char *file, const char *func, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"\e[1;31mGPUassert: %s %s %s %d\n\e[1;0m", hipGetErrorString(code), file, func, line);
if (abort) exit(code);
}
}
// http://stackoverflow.com/questions/24613637/custom-kernel-gpumat-with-float
__global__ void gpuNumDifferentKernel(const cv::cuda::PtrStepSz<uchar4> img1,
cv::cuda::PtrStepSz<uchar4> img2,
int* d_diff)
{
// which thread is this and in which block is it?
// These variables won't be used, but they are here for illustrative purposes
// http://www.martinpeniak.com/index.php?option=com_content&view=article&catid=17:updates&id=288:cuda-thread-indexing-explained
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
/*if(blockIdx.x < 20 && blockIdx.y < 10)
printf("Coord (%d, %d) thread(%d, %d, %d) bldim(%d, %d, %d) block(%d, %d, %d) grdim(%d, %d, %d)\n", y, x,
threadIdx.x, threadIdx.y, threadIdx.z, blockDim.x, blockDim.y, blockDim.z,
blockIdx.x, blockIdx.y, blockIdx.z, gridDim.x, gridDim.y, gridDim.z);*/
__shared__ int sh_diff;
// One thread per block must initialize the shared memory for that block
if(threadIdx.x == 0 && threadIdx.y == 0) {
sh_diff = 0;
}
__syncthreads();
if (x < img1.cols && y < img1.rows && y >= 0 && x >= 0)
{
if(img1(y, x).x != img2(y, x).x ||
img1(y, x).y != img2(y, x).y ||
img1(y, x).z != img2(y, x).z)
{
atomicAdd(&sh_diff, 1);
}
img2(y, x).z = 0;
}
__syncthreads();
// Same thread adds the contribution of the block to global count
if(threadIdx.x == 0 && threadIdx.y == 0)
{
atomicAdd(d_diff, sh_diff);
}
}
/**
* Demo function to illustrate how to create a new kernel for GpuMat,
 * it counts the number of pixels whose values differ between
* img1 and img2.
*/
int gpuNumDifferent(cv::InputArray _img1,
cv::InputArray _img2)
{
const cv::cuda::GpuMat img1 = _img1.getGpuMat();
const cv::cuda::GpuMat img2 = _img2.getGpuMat();
dim3 cthreads_blockDim(32, 32);
dim3 cblocks_gridDim(
static_cast<int>(::ceil(img1.size().width /
static_cast<double>(cthreads_blockDim.x))),
static_cast<int>(::ceil(img1.size().height /
static_cast<double>(cthreads_blockDim.y))));
int h_diff = 0;
int *d_diff;
gpuErrchk( hipMalloc((void**)&d_diff, sizeof(int)) );
gpuErrchk( hipMemcpy((void*)d_diff, (void*)&h_diff, sizeof(int), hipMemcpyHostToDevice) );
cv::cuda::Stream _stream = cv::cuda::Stream();
hipStream_t stream = cv::cuda::StreamAccessor::getStream(_stream);
hipLaunchKernelGGL(( gpuNumDifferentKernel), dim3(cblocks_gridDim), dim3(cthreads_blockDim), 0, stream, img1, img2, d_diff);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
gpuErrchk( hipMemcpy((void*)&h_diff, (void*)d_diff, sizeof(int), hipMemcpyDeviceToHost) );
hipFree(d_diff);
return h_diff;
}
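// Illustrative usage sketch (not part of the original file; the helper name is ours).
// Assumes both inputs are CV_8UC4 cv::Mat images of the same size and that OpenCV was
// built with CUDA support.
int exampleCountDifferingPixels(const cv::Mat& a, const cv::Mat& b)
{
    cv::cuda::GpuMat d_a, d_b;
    d_a.upload(a); // host -> device copies
    d_b.upload(b);
    return gpuNumDifferent(d_a, d_b);
}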
| 10a4b5a25fba7dfce33bf85ea3dfcc0422961f59.cu | #include <cuda_runtime.h>
#include <opencv2/core/cuda.hpp>
#include <opencv2/core/cuda_stream_accessor.hpp>
// http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __func__, __LINE__, false); }
inline void gpuAssert(cudaError_t code, const char *file, const char *func, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"\e[1;31mGPUassert: %s %s %s %d\n\e[1;0m", cudaGetErrorString(code), file, func, line);
if (abort) exit(code);
}
}
// http://stackoverflow.com/questions/24613637/custom-kernel-gpumat-with-float
__global__ void gpuNumDifferentKernel(const cv::cuda::PtrStepSz<uchar4> img1,
cv::cuda::PtrStepSz<uchar4> img2,
int* d_diff)
{
// which thread is this and in which block is it?
// These variables won't be used, but they are here for illustrative purposes
// http://www.martinpeniak.com/index.php?option=com_content&view=article&catid=17:updates&id=288:cuda-thread-indexing-explained
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
/*if(blockIdx.x < 20 && blockIdx.y < 10)
printf("Coord (%d, %d) thread(%d, %d, %d) bldim(%d, %d, %d) block(%d, %d, %d) grdim(%d, %d, %d)\n", y, x,
threadIdx.x, threadIdx.y, threadIdx.z, blockDim.x, blockDim.y, blockDim.z,
blockIdx.x, blockIdx.y, blockIdx.z, gridDim.x, gridDim.y, gridDim.z);*/
__shared__ int sh_diff;
// One thread per block must initialize the shared memory for that block
if(threadIdx.x == 0 && threadIdx.y == 0) {
sh_diff = 0;
}
__syncthreads();
if (x < img1.cols && y < img1.rows && y >= 0 && x >= 0)
{
if(img1(y, x).x != img2(y, x).x ||
img1(y, x).y != img2(y, x).y ||
img1(y, x).z != img2(y, x).z)
{
atomicAdd(&sh_diff, 1);
}
img2(y, x).z = 0;
}
__syncthreads();
// Same thread adds the contribution of the block to global count
if(threadIdx.x == 0 && threadIdx.y == 0)
{
atomicAdd(d_diff, sh_diff);
}
}
/**
* Demo function to illustrate how to create a new kernel for GpuMat,
* it counts the number of pixels with different value beetween
* img1 and img2.
*/
int gpuNumDifferent(cv::InputArray _img1,
cv::InputArray _img2)
{
const cv::cuda::GpuMat img1 = _img1.getGpuMat();
const cv::cuda::GpuMat img2 = _img2.getGpuMat();
dim3 cthreads_blockDim(32, 32);
dim3 cblocks_gridDim(
static_cast<int>(std::ceil(img1.size().width /
static_cast<double>(cthreads_blockDim.x))),
static_cast<int>(std::ceil(img1.size().height /
static_cast<double>(cthreads_blockDim.y))));
int h_diff = 0;
int *d_diff;
gpuErrchk( cudaMalloc((void**)&d_diff, sizeof(int)) );
gpuErrchk( cudaMemcpy((void*)d_diff, (void*)&h_diff, sizeof(int), cudaMemcpyHostToDevice) );
cv::cuda::Stream _stream = cv::cuda::Stream();
cudaStream_t stream = cv::cuda::StreamAccessor::getStream(_stream);
gpuNumDifferentKernel<<<cblocks_gridDim, cthreads_blockDim, 0, stream>>>(img1, img2, d_diff);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
gpuErrchk( cudaMemcpy((void*)&h_diff, (void*)d_diff, sizeof(int), cudaMemcpyDeviceToHost) );
cudaFree(d_diff);
return h_diff;
}
|
f9779bbcdde2bf7eb3fdf2f319964b5074b2b65a.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <vector>
#include "hip/hip_runtime.h"
#include "caffe/layer.hpp"
#include "caffe/custom_layers.hpp"
namespace caffe {
// compute each Vi
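// For every target pixel, apply the 2x3 affine theta to the pixel's normalized [-1,1]
// target coordinates, convert the result to source pixel units, and record the bounding
// integer coordinates (w_min..w_max, h_min..h_max) later used for bilinear sampling.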
template <typename Dtype>
__global__ void ComputeSource(const int total, const int num, const int height, const int width,
const Dtype* target_data, const Dtype* theta, Dtype* source_data, int* source_range_data) {
// total = num * height * width
CUDA_KERNEL_LOOP(index, total) {
int div = height * width;
int n = index / div;
int n_rem = index % div;
div /= height;
int h = n_rem / div;
int w = n_rem % div;
Dtype x_target = target_data[h * width + w];
Dtype y_target = target_data[h * width + w + width * height];
int offset_theta = 6 * n;
Dtype x = x_target * theta[offset_theta] + y_target * theta[offset_theta + 1] + theta[offset_theta + 2];
Dtype y = x_target * theta[offset_theta + 3] + y_target * theta[offset_theta + 4] + theta[offset_theta + 5];
x = (x + (Dtype) 1.) / (Dtype) 2. * (width - 1);
y = (y + (Dtype) 1.) / (Dtype) 2. * (height - 1);
int offset_source = n * height * width * 2 + h * width + w;
source_data[offset_source] = x;
source_data[offset_source + height * width] = y;
int w_min = (floor(x) >= 0) ? floor(x) : 0;
int w_max = (ceil(x) < width) ? ceil(x) : (width - 1);
int h_min = (floor(y) >= 0) ? floor(y) : 0;
int h_max = (ceil(y) < height) ? ceil(y) : (height - 1);
int offset_range = (n * height * width + h * width + w) * 4;
source_range_data[offset_range] = w_min;
source_range_data[offset_range + 1] = w_max;
source_range_data[offset_range + 2] = h_min;
source_range_data[offset_range + 3] = h_max;
}
}
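// Bilinear sampling step of the spatial transformer: each output value is the sum of
// U(n,c,hh,ww) * (1 - |x - ww|) * (1 - |y - hh|) over the neighbouring source pixels
// recorded in source_range_data by ComputeSource above.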
template <typename Dtype>
__global__ void AffineForward(const int count, const int channels, const int height, const int width,
const Dtype* in, const Dtype* source_data, const int* source_range_data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
int div = channels * height * width;
int n = index / div;
int n_rem = index % div;
div /= channels;
int c = n_rem / div;
int c_rem = n_rem % div;
div /= height;
int h = c_rem / div;
int w = c_rem % div;
int offset_source = n * 2 * height * width + h * width + w;
Dtype x = source_data[offset_source];
Dtype y = source_data[offset_source + height * width];
int offset_range = (n * height * width + h * width + w) * 4;
int w_min = source_range_data[offset_range];
int w_max = source_range_data[offset_range + 1];
int h_min = source_range_data[offset_range + 2];
int h_max = source_range_data[offset_range + 3];
int offset_nc = n * channels * height * width + c * height*width;
Dtype tmp = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset_nc + hh * width + ww]*(1 - fabs(x - ww)) * (1 - fabs(y - hh));
}
}
out[offset_nc + h * width + w] = tmp;
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* theta_data = bottom[1]->gpu_data();
const Dtype* target_data = target_.gpu_data();
Dtype* source_data = source_.mutable_gpu_data();
int* range_data = source_range_.mutable_gpu_data();
int count = top[0]->count();
caffe_gpu_set<Dtype>(count, 0, top_data);
ComputeSource<Dtype> << <CAFFE_GET_BLOCKS(num_ * height_ * width_),
CAFFE_CUDA_NUM_THREADS >> >(num_ * height_ * width_, num_, height_, width_,
target_data, theta_data, source_data, range_data);
AffineForward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channel_, height_, width_,
bottom_data, source_data, range_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
__device__ inline void atomic_add(float * address, float val) {
atomicAdd(address, val);
}
__device__ inline void atomic_add(double * address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
}
// compute (1) d{V_i} / d{x_i}, then (2) d{V_i} / d{theta}
// compute sum_{i} d{V_i} / d{U_nm}
template <typename Dtype>
__global__ void AffineBackward(const int count, const int num, const int channels, const int height, const int width,
const Dtype* data, const Dtype* source_data, int* source_range_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* source_grad_cache) {
// count = num * channel * height * width
CUDA_KERNEL_LOOP(index, count) {
int div = channels * height * width;
int n = index / div;
int n_rem = index % div;
div /= channels;
int c = n_rem / div;
int c_rem = n_rem % div;
div /= height;
int h = c_rem / div;
int w = c_rem % div;
int offset_source = n * 2 * height * width + h * width + w;
Dtype x = source_data[offset_source];
Dtype y = source_data[offset_source + height * width];
int offset_range = (n * height * width + h * width + w) * 4;
int w_min = source_range_data[offset_range];
int w_max = source_range_data[offset_range + 1];
int h_min = source_range_data[offset_range + 2];
int h_max = source_range_data[offset_range + 3];
int source_diff_x = c * num * 2 * height * width + n * 2 * height * width + h * width + w;
int source_diff_y = source_diff_x + height * width;
Dtype tmp_source_x = 0;
Dtype tmp_source_y = 0;
Dtype buffer = top_diff[n * channels * height * width + c * height * width + h * width + w];
for (int hh = h_min; hh <= h_max; ++hh) {
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype buffer2 = buffer * data[n * channels * height * width + c * height * width + hh * width + ww];
Dtype tmp_hh = 1 - fabs(y - hh);
Dtype tmp_ww = 1 - fabs(x - ww);
tmp_source_x += buffer2 * tmp_hh * sign_x;
tmp_source_y += buffer2 * tmp_ww * sign_y;
Dtype inc = buffer * tmp_hh * tmp_ww;
int offset = n * channels * height * width + c * height * width + hh * width + ww;
atomic_add(data_diff + offset, inc);
}
}
source_grad_cache[source_diff_x] = tmp_source_x * (width - 1) / (Dtype) 2.;
source_grad_cache[source_diff_y] = tmp_source_y * (height - 1) / (Dtype) 2.;
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* data_diff = bottom[0]->mutable_gpu_diff();
Dtype* theta_diff = bottom[1]->mutable_gpu_diff();
Dtype* source_grad_cache = source_grad_cache_.mutable_gpu_data();
const Dtype* target_data = target_.gpu_data();
const Dtype* source_data = source_.gpu_data();
Dtype* source_diff = source_.mutable_gpu_diff();
int* source_range_data = source_range_.mutable_gpu_data();
caffe_gpu_set<Dtype>(bottom[0]->count(), 0, data_diff);
int count = bottom[0]->count();
// compute gradient with respect to theta
AffineBackward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, num_, channel_, height_, width_,
bottom_data, source_data, source_range_data, top_diff,
data_diff, source_grad_cache);
// merge gradient for theta
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, num_ * 2 * map_size_, channel_,
Dtype(1), source_grad_op_.gpu_data(), source_grad_cache, Dtype(0), source_diff);
for (int index = 0; index < num_; ++index) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, 2, 3, map_size_,
Dtype(1), source_diff + index * 2 * map_size_, target_data, Dtype(0), theta_diff + index * 6);
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer);
} // namespace caffe
| f9779bbcdde2bf7eb3fdf2f319964b5074b2b65a.cu | #include <algorithm>
#include <vector>
#include "cuda.h"
#include "caffe/layer.hpp"
#include "caffe/custom_layers.hpp"
namespace caffe {
// compute each Vi
template <typename Dtype>
__global__ void ComputeSource(const int total, const int num, const int height, const int width,
const Dtype* target_data, const Dtype* theta, Dtype* source_data, int* source_range_data) {
// total = num * height * width
CUDA_KERNEL_LOOP(index, total) {
int div = height * width;
int n = index / div;
int n_rem = index % div;
div /= height;
int h = n_rem / div;
int w = n_rem % div;
Dtype x_target = target_data[h * width + w];
Dtype y_target = target_data[h * width + w + width * height];
int offset_theta = 6 * n;
Dtype x = x_target * theta[offset_theta] + y_target * theta[offset_theta + 1] + theta[offset_theta + 2];
Dtype y = x_target * theta[offset_theta + 3] + y_target * theta[offset_theta + 4] + theta[offset_theta + 5];
x = (x + (Dtype) 1.) / (Dtype) 2. * (width - 1);
y = (y + (Dtype) 1.) / (Dtype) 2. * (height - 1);
int offset_source = n * height * width * 2 + h * width + w;
source_data[offset_source] = x;
source_data[offset_source + height * width] = y;
int w_min = (floor(x) >= 0) ? floor(x) : 0;
int w_max = (ceil(x) < width) ? ceil(x) : (width - 1);
int h_min = (floor(y) >= 0) ? floor(y) : 0;
int h_max = (ceil(y) < height) ? ceil(y) : (height - 1);
int offset_range = (n * height * width + h * width + w) * 4;
source_range_data[offset_range] = w_min;
source_range_data[offset_range + 1] = w_max;
source_range_data[offset_range + 2] = h_min;
source_range_data[offset_range + 3] = h_max;
}
}
template <typename Dtype>
__global__ void AffineForward(const int count, const int channels, const int height, const int width,
const Dtype* in, const Dtype* source_data, const int* source_range_data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
int div = channels * height * width;
int n = index / div;
int n_rem = index % div;
div /= channels;
int c = n_rem / div;
int c_rem = n_rem % div;
div /= height;
int h = c_rem / div;
int w = c_rem % div;
int offset_source = n * 2 * height * width + h * width + w;
Dtype x = source_data[offset_source];
Dtype y = source_data[offset_source + height * width];
int offset_range = (n * height * width + h * width + w) * 4;
int w_min = source_range_data[offset_range];
int w_max = source_range_data[offset_range + 1];
int h_min = source_range_data[offset_range + 2];
int h_max = source_range_data[offset_range + 3];
int offset_nc = n * channels * height * width + c * height*width;
Dtype tmp = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset_nc + hh * width + ww]*(1 - fabs(x - ww)) * (1 - fabs(y - hh));
}
}
out[offset_nc + h * width + w] = tmp;
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* theta_data = bottom[1]->gpu_data();
const Dtype* target_data = target_.gpu_data();
Dtype* source_data = source_.mutable_gpu_data();
int* range_data = source_range_.mutable_gpu_data();
int count = top[0]->count();
caffe_gpu_set<Dtype>(count, 0, top_data);
ComputeSource<Dtype> << <CAFFE_GET_BLOCKS(num_ * height_ * width_),
CAFFE_CUDA_NUM_THREADS >> >(num_ * height_ * width_, num_, height_, width_,
target_data, theta_data, source_data, range_data);
AffineForward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channel_, height_, width_,
bottom_data, source_data, range_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
__device__ inline void atomic_add(float * address, float val) {
atomicAdd(address, val);
}
__device__ inline void atomic_add(double * address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
}
// compute (1) d{V_i} / d{x_i}, then (2) d{V_i} / d{theta}
// compute sum_{i} d{V_i} / d{U_nm}
template <typename Dtype>
__global__ void AffineBackward(const int count, const int num, const int channels, const int height, const int width,
const Dtype* data, const Dtype* source_data, int* source_range_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* source_grad_cache) {
// count = num * channel * height * width
CUDA_KERNEL_LOOP(index, count) {
int div = channels * height * width;
int n = index / div;
int n_rem = index % div;
div /= channels;
int c = n_rem / div;
int c_rem = n_rem % div;
div /= height;
int h = c_rem / div;
int w = c_rem % div;
int offset_source = n * 2 * height * width + h * width + w;
Dtype x = source_data[offset_source];
Dtype y = source_data[offset_source + height * width];
int offset_range = (n * height * width + h * width + w) * 4;
int w_min = source_range_data[offset_range];
int w_max = source_range_data[offset_range + 1];
int h_min = source_range_data[offset_range + 2];
int h_max = source_range_data[offset_range + 3];
int source_diff_x = c * num * 2 * height * width + n * 2 * height * width + h * width + w;
int source_diff_y = source_diff_x + height * width;
Dtype tmp_source_x = 0;
Dtype tmp_source_y = 0;
Dtype buffer = top_diff[n * channels * height * width + c * height * width + h * width + w];
for (int hh = h_min; hh <= h_max; ++hh) {
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype buffer2 = buffer * data[n * channels * height * width + c * height * width + hh * width + ww];
Dtype tmp_hh = 1 - fabs(y - hh);
Dtype tmp_ww = 1 - fabs(x - ww);
tmp_source_x += buffer2 * tmp_hh * sign_x;
tmp_source_y += buffer2 * tmp_ww * sign_y;
Dtype inc = buffer * tmp_hh * tmp_ww;
int offset = n * channels * height * width + c * height * width + hh * width + ww;
atomic_add(data_diff + offset, inc);
}
}
source_grad_cache[source_diff_x] = tmp_source_x * (width - 1) / (Dtype) 2.;
source_grad_cache[source_diff_y] = tmp_source_y * (height - 1) / (Dtype) 2.;
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* data_diff = bottom[0]->mutable_gpu_diff();
Dtype* theta_diff = bottom[1]->mutable_gpu_diff();
Dtype* source_grad_cache = source_grad_cache_.mutable_gpu_data();
const Dtype* target_data = target_.gpu_data();
const Dtype* source_data = source_.gpu_data();
Dtype* source_diff = source_.mutable_gpu_diff();
int* source_range_data = source_range_.mutable_gpu_data();
caffe_gpu_set<Dtype>(bottom[0]->count(), 0, data_diff);
int count = bottom[0]->count();
// compute gradient with respect to theta
AffineBackward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, num_, channel_, height_, width_,
bottom_data, source_data, source_range_data, top_diff,
data_diff, source_grad_cache);
// merge gradient for theta
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, num_ * 2 * map_size_, channel_,
Dtype(1), source_grad_op_.gpu_data(), source_grad_cache, Dtype(0), source_diff);
for (int index = 0; index < num_; ++index) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, 2, 3, map_size_,
Dtype(1), source_diff + index * 2 * map_size_, target_data, Dtype(0), theta_diff + index * 6);
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer);
} // namespace caffe
|
c7abbe8a84eb232027d96adc62857ae387fdd83a.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <hip/hip_runtime.h>
#include <util/checkCUDAError.h>
#include <glm/gtx/transform.hpp>
#include "simulate.h"
#include <vector>
#include "fluid.h"
#define H_KERNAL_WIDTH (0.1f)
#define NEIGHBOUR_R (2)
#define HITTEST_R (1)
#define LAMBDA_EPSILON (0.0f)
#define SPLEEFING_COFF (1000.0f)
#define RHO0 (1.0f)
static int num_rigidBodies;
static int num_particles;
static Particle * dev_particles;
static glm::vec3 * dev_predictPosition;
static glm::vec3 * dev_deltaPosition;
static int * dev_n;
static float * dev_positions;
// Pre-calculated local hit test loop indices
static int loopSize_fluid = (NEIGHBOUR_R * 2 + 1)*(NEIGHBOUR_R * 2 + 1)*(NEIGHBOUR_R * 2 + 1);
static int loopSize_hit = (HITTEST_R * 2 + 1)*(HITTEST_R * 2 + 1)*(HITTEST_R * 2 + 1);
static int * dev_loopIdx_fluid;
static int * dev_loopIdx_hit;
static float grid_length;
static glm::ivec3 grid_resolution;
static glm::vec3 grid_min_x;
static glm::vec3 grid_max_x;
static Voxel * dev_grid;
static int * dev_particle_voxel_id;
static float * dev_lambda;
//lock per particle
//static int * dev_mutex;
//--------data for shape rematching--------------
//struct RigidBodyWrapper
//{
// int base; // first particle id
// int size; // size of particle
// glm::vec3 cm_0; //center of mass of original
//};
//__constant__ static RigidBodyWrapper* dev_rigidBodyWrappers;
glm::vec3 * hst_cm0 = NULL;
//__constant__ static glm::vec3* dev_rigid_body_cm_0; //center mass origin
__constant__ static glm::vec3* dev_particle_x0;
//static glm::vec3* dev_particle_x0;
//-----------------------------------------------
//void initSimulate(int num_rigidBody, RigidBody * rigidbodys, glm::vec3 bmin, glm::vec3 bmax, float particle_diameter)
//{
// assembleParticleArray(num_rigidBody, rigidbodys);
// initUniformGrid(bmin, bmax, particle_diameter);
//}
// Particle transformation
__global__
void transformParticlePositionPerRigidBody(int base,int size, Particle * particles, glm::vec3* x0,glm::mat4 mat){
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < size){
threadId += base;
x0[threadId] = particles[threadId].x;
glm::vec4 tmp = mat * glm::vec4(particles[threadId].x, 1.0f);
tmp /= tmp.w;
particles[threadId].x = glm::vec3(tmp.x, tmp.y, tmp.z);
}
}
// Initial assembly of information
void assembleParticleArray(int num_rigidBody, RigidBody * rigidbodys)
{
hst_cm0 = new glm::vec3[num_rigidBody];
num_rigidBodies = num_rigidBody;
num_particles = 0;
for (int i = 0; i < num_rigidBody; i++)
{
num_particles += rigidbodys[i].m_particles.size();
}
hipMalloc(&dev_particles, num_particles * sizeof(Particle));
hipMalloc(&dev_predictPosition, num_particles * sizeof(glm::vec3));
hipMemset(dev_predictPosition, 0, num_particles * sizeof(glm::vec3));
hipMalloc(&dev_deltaPosition, num_particles * sizeof(glm::vec3));
hipMalloc(&dev_n, num_particles * sizeof(int));
hipMalloc(&dev_positions, 3 * num_particles * sizeof(float));
hipMemset(dev_positions, 0, 3 * num_particles * sizeof(float));
checkCUDAError("ERROR: hipMalloc");
hipMalloc(&dev_particle_x0, num_particles * sizeof(glm::vec3));
//lambda
hipMalloc(&dev_lambda, num_particles * sizeof(float));
hipMemset(dev_lambda, 0, num_particles * sizeof(float));
//lock
//hipMalloc(&dev_mutex, num_particles * sizeof(int));
//hipMemset(dev_mutex, 0, num_particles * sizeof(int));
int cur = 0;
//glm::vec3 * hst_cm0 = new glm::vec3[num_rigidBody];
for (int i = 0; i < num_rigidBody; i++)
{
hst_cm0[i] = rigidbodys[i].getCenterOfMass();
// Particle objects
int size = rigidbodys[i].m_particles.size();
hipMemcpy(dev_particles + cur, rigidbodys[i].m_particles.data(), size * sizeof(Particle), hipMemcpyHostToDevice);
const int blockSizer = 192;
dim3 blockCountr((size + blockSizer - 1) / blockSizer);
transformParticlePositionPerRigidBody << <blockCountr, blockSizer >> >(cur, size, dev_particles, dev_particle_x0, rigidbodys[i].getTransformMatrix());
// Initialize rest config positions
//hipMemcpy(dev_particle_x0 + cur, rigidbodys[i].m_x0.data(), size * sizeof(glm::vec3), hipMemcpyHostToDevice);
cur += size;
}
//hipMalloc(&dev_rigid_body_cm_0, num_rigidBody * sizeof(glm::vec3));
//hipMemcpy(dev_rigid_body_cm_0, hst_cm0, num_rigidBody * sizeof(glm::vec3), hipMemcpyHostToDevice);
//delete []hst_cm0;
checkCUDAError("ERROR: assemble particle array");
}
// Initialize hit test loop indices
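// Each table entry is a flattened voxel-index delta for one cell of a (2R+1)^3
// neighbourhood, so a neighbour's voxel id can later be computed as
// current_voxel_id + offset without re-deriving 3D grid coordinates.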
__global__
void initTestLoopIdx(int * fluidLoop, int * hitLoop, const glm::ivec3 resolution){
int x, y, z, idSum2, idSum3, yTimesZ = resolution.y * resolution.z, j = 0;
for (x = -NEIGHBOUR_R; x <= NEIGHBOUR_R; x++)
{
idSum2 = x * yTimesZ;
for (y = -NEIGHBOUR_R; y <= NEIGHBOUR_R; y++)
{
idSum3 = idSum2 + y * resolution.z;
for (z = -NEIGHBOUR_R; z <= NEIGHBOUR_R; z++)
{
fluidLoop[j] = idSum3 + z;
j++;
}
}
}
j = 0;
for (x = -HITTEST_R; x <= HITTEST_R; x++)
{
idSum2 = x * yTimesZ;
for (y = -HITTEST_R; y <= HITTEST_R; y++)
{
idSum3 = idSum2 + y * resolution.z;
for (z = -HITTEST_R; z <= HITTEST_R; z++)
{
hitLoop[j] = idSum3 + z;
j++;
}
}
}
}
// Initialize voxel grid
void initUniformGrid(glm::vec3 bmin, glm::vec3 bmax, float diameter)
{
//init size
grid_min_x = bmin;
grid_max_x = bmax;
grid_length = diameter;
grid_resolution = ceil((grid_max_x - grid_min_x) / grid_length);
int grid_size = grid_resolution.x * grid_resolution.y * grid_resolution.z;
hipMalloc(&dev_grid, grid_size * sizeof(Voxel));
hipMemset(dev_grid, 0, grid_size * sizeof(Voxel));
hipMalloc(&dev_particle_voxel_id, num_particles *sizeof(int));
hipMalloc(&dev_loopIdx_fluid, loopSize_fluid *sizeof(int));
hipMalloc(&dev_loopIdx_hit, loopSize_hit *sizeof(int));
initTestLoopIdx << <1, 1 >> >(dev_loopIdx_fluid, dev_loopIdx_hit, grid_resolution);
}
void endSimulation()
{
hipFree(dev_particles);
hipFree(dev_predictPosition);
hipFree(dev_deltaPosition);
hipFree(dev_n);
hipFree(dev_positions);
hipFree(dev_grid);
hipFree(dev_particle_voxel_id);
hipFree(dev_particle_x0);
hipFree(dev_lambda);
hipFree(dev_loopIdx_fluid);
hipFree(dev_loopIdx_hit);
//lock
//hipFree(dev_mutex);
if (hst_cm0 != NULL)
{
delete []hst_cm0;
hst_cm0 = NULL;
}
}
__device__
glm::ivec3 gridMap(glm::vec3 x, glm::vec3 min_x, float grid_length)
{
return (glm::ivec3)(floor((x - min_x) / grid_length));
}
__global__
void updateVoxelIndex(int N , glm::ivec3 grid_resolution, glm::vec3 min_x, float grid_length, glm::vec3 * particlePositions, Voxel * grid, int * ids )
{
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < N)
{
glm::ivec3 coordinate = gridMap(particlePositions[threadId], min_x, grid_length);
		// outside the simulated area
if (coordinate.x >= grid_resolution.x || coordinate.x < 0
|| coordinate.y >= grid_resolution.y || coordinate.y < 0
|| coordinate.z >= grid_resolution.z || coordinate.z < 0)
{
//don't assign to vertex
//printf("out of simulation region\n"); //test
return;
}
int voxel_id = coordinate.x * grid_resolution.y * grid_resolution.z
+ coordinate.y * grid_resolution.z
+ coordinate.z;
		// Reserve a slot atomically: several particles can land in the same voxel in
		// parallel, so a plain read-modify-write of grid[voxel_id].num would race.
		// Overflow beyond the voxel's fixed particle capacity is still not handled here.
		int slot = atomicAdd(&(grid[voxel_id].num), 1);
		grid[voxel_id].particle_id[slot] = threadId;
ids[threadId] = voxel_id;
}
}
// Prediction using simple force-based translation
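// (semi-implicit Euler: v += invmass * f * dt, then the predicted position x* = x + v * dt)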
__global__
void kernApplyForces(int N, Particle * particles, glm::vec3 * predictPosition, const glm::vec3 forces, const float delta_t)
{
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < N)
{
//apply forces
particles[threadId].v += particles[threadId].invmass * forces * delta_t;
//predict positions
predictPosition[threadId] = particles[threadId].x + particles[threadId].v * delta_t;
}
}
// Particle hit test for a single particle
__device__
void hitTestVoxelSolid(const int num_voxel, const float diameter, const int particle_id, const int particlePhase, const float particleInvmass, const glm::vec3 particlePos,
const int voxel_id, const glm::vec3 * predict_positions, glm::vec3 * delta_positions,
const Particle * particles, const Voxel * grid, int * dev_n)
{
if (voxel_id < 0 || voxel_id >= num_voxel)
{
//voxel_id is invalid
return;
}
// Delta X for collision
glm::vec3 delta_pos(0.0), d;
// Average count
int n = 0;
float delta;
for (int i = 0; i < grid[voxel_id].num; i++)
{
if (particles[grid[voxel_id].particle_id[i]].phase == particlePhase)
{
continue;
}
// Distance vector from particle i to particle j (on particle centers)
d = predict_positions[grid[voxel_id].particle_id[i]] - particlePos;
delta = diameter - glm::length(d);
		// Skip this pair if the particles do not overlap (a negative delta means there is a gap)
if (0 > delta)
{
continue;
}
		// Momentum-style weighting based on particle mass
		// (not true momentum conservation, but an approximation)
delta = particles[particle_id].type == FLUID ? -delta : delta * (-particleInvmass / (particleInvmass + particles[grid[voxel_id].particle_id[i]].invmass));
n++;
// Move particle i along the vector so that i and j are in the post-collision states
delta_pos += glm::normalize(d) * delta;
}
	// Accumulate the collision correction and contact count (averaged later when the correction is applied)
delta_positions[particle_id] += delta_pos;
dev_n[particle_id] += n;
}
// ------------------fluid---------------------
__device__
inline float getH(float diameter)
{
//return ((float)NEIGHBOUR_R + 0.5f) * diameter;
return 2.5f * diameter;
//return (float)NEIGHBOUR_R * diameter;
}
__device__
inline float getRHO0(float diameter)
{
return 0.7f * 1.0f / powf(diameter / 0.99f, 3.0f);
}
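// Poly6 density kernel: W(r, h) = 315 / (64 * pi * h^9) * (h^2 - r^2)^3 for 0 <= r <= h, else 0.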
__device__
inline float SmoothKernel(float r, float h)
{
//poly 6 kernel
return r > h ? 0.0f : 315.0f / 64.0f / (float)PI / powf(h, 9.0f) * powf(h*h - r*r, 3.0f);
//for test
//float res = r > h ? 0.0f : 315.0f / 64.0f / (float)PI / powf(h, 9.0f) * powf(h*h - r*r, 3.0f);
//printf("%f,%f\tres:%f\n", r, h, res);
//return res;
//nearest neighbour
//return 1.0f / glm::dot(r,r);
}
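// Spiky kernel gradient: grad W(r, h) = -45 / (pi * h^6) * (h - |r|)^2 * r_hat for |r| <= h, else 0.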
__device__
inline glm::vec3 gradientSmoothKernel(glm::vec3 vec_r, float h)
{
//r = || pi - pj||
float r = glm::length(vec_r);
//spiky kernel gradient
return r>h ? glm::vec3(0.0f) : (-45.0f) / (float)PI / powf(h, 6.0f) * powf(h-r,2.0f) * glm::normalize(vec_r);
//return glm::normalize(vec_r);
//tmp
//return 1.0f;
}
__device__
float fluidInfoSum(glm::vec3 & gradient, float & gradient2,
int num_voxel, float diameter, float H, int particle_id, glm::vec3 particlePos, int voxel_id, glm::vec3 * predict_positions, glm::vec3 * delta_positions,
Particle * particles, Voxel * grid, int * dev_n)
{
if (voxel_id < 0 || voxel_id >= num_voxel)
{
//voxel_id is invalid
		return 0.0f;
}
glm::vec3 g;
float density = 0.0f; //for particles in this voxel
float distance;
for (int i = 0; i < grid[voxel_id].num; i++)
{
if (grid[voxel_id].particle_id[i] == particle_id)
{
continue;
}
//pi - pj
g = particlePos - predict_positions[grid[voxel_id].particle_id[i]];
distance = glm::length(g);
if (distance > H)
{
continue;
}
density += SmoothKernel(distance, H);
g = gradientSmoothKernel(g, H);
gradient += g;
gradient2 += glm::dot(g,g);
}
	//a more accurate version would update based on the gradient matrix of the kernel
	//and update all positions together
return density;
}
__device__
void fluidNeighbourEnforce(float* lambda, //int * mutex,
int num_voxel, float diameter, float H, float oneOverRho, int particle_id, glm::vec3 particlePos, float particleLambda, int voxel_id, glm::vec3 * predict_positions, glm::vec3 * delta_positions,
Particle * particles, Voxel * grid, int * dev_n)
{
if (voxel_id < 0 || voxel_id >= num_voxel)
{
//voxel_id is invalid
return;
}
int gridSize = grid[voxel_id].num, gridPartId;
glm::vec3 d;
float delta_w;
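	//PBF position correction: delta_p_i = (1/rho_0) * sum_j (lambda_i + lambda_j) * gradW(p_i - p_j)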
for (int i = 0; i < gridSize; i++)
{
gridPartId = grid[voxel_id].particle_id[i];
if (gridPartId == particle_id)
{
continue;
}
// Distance vector from particle i to particle j (on particle centers)
d = particlePos - predict_positions[gridPartId];
if (glm::length(d) > H)
{
continue;
}
delta_w = oneOverRho * (particleLambda + lambda[gridPartId]);
dev_n[particle_id] += 1;
		//gradient of W(pi - pj)
delta_positions[particle_id] += delta_w * (gradientSmoothKernel(d, H));
}
}
//---------------------------------------------
// Collision constraints handler
__global__
void handleCollision(int N, int num_voxel, float diameter, int * fluidLoop, int fluidLoopSize, int * hitLoop, int hitLoopSize, float* lambda,//int * mutex,
glm::vec3 * predictPositions, glm::vec3 * deltaPositions, Particle * particles,Voxel * grid, int * ids, float delta_t, int * dev_n)
{
int particle_id = blockDim.x * blockIdx.x + threadIdx.x;
if (particle_id < N)
{
// Collision detection & reaction; simplified SDF constraint
// hitTest particles in neighbour voxel
// float particleInvmass
float density = particles[particle_id].invmass;
int particlePhase = particles[particle_id].phase;
int i, idSum = ids[particle_id];
glm::vec3 particlePos = predictPositions[particle_id];
for (i = 0; i < hitLoopSize; i++)
{
hitTestVoxelSolid(num_voxel, diameter, particle_id, particlePhase, density, particlePos,
idSum + hitLoop[i],
predictPositions, deltaPositions, particles, grid, dev_n);
}
if (particles[particle_id].type == FLUID)
{
density = 0.0f;
float H = getH(diameter);
glm::vec3 sum_gradient(0.0f);
float sum_gradient2 = 0.0f;
//first loop used to get the sum of density rho_i, sum of gradient
//fluid density constraint
for (i = 0; i < fluidLoopSize; i++)
{
density += fluidInfoSum(sum_gradient, sum_gradient2,
num_voxel, diameter, H, particle_id, particlePos,
idSum + fluidLoop[i],
predictPositions, deltaPositions, particles, grid, dev_n);
}
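			// Position Based Fluids (Macklin & Mueller) density constraint:
			//   C_i = rho_i / rho_0 - 1,   lambda_i = -C_i / (sum_k |grad_k C_i|^2 + epsilon)
			// sum_gradient2 carries the j-terms and dot(sum_gradient, sum_gradient) the i-term;
			// the 20.0f below is an additional stiffness/relaxation scale used by this implementation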
// when density / rho_0 -1.0f > 0 , move
// i.e. when lambda < 0, move
// float ci
H = density / getRHO0(diameter) - 1.0f;
// float denominator
density = sum_gradient2 + glm::dot(sum_gradient, sum_gradient) + LAMBDA_EPSILON;
// float lambda_i
H = -20.0f * H / density;
lambda[particle_id] = min(0.0f, H);
}
}
}
// fluid apply delta; retargeting
__global__
void FluidApplyLambdaDelta(int N, int num_voxel, float diameter, int * fluidLoop, int loopSize, float* lambda,//int * mutex,
glm::vec3 * predictPositions, glm::vec3 * deltaPositions, Particle * particles, Voxel * grid, int * ids, float delta_t, int * dev_n)
{
int particle_id = blockDim.x * blockIdx.x + threadIdx.x;
if (particle_id < N)
{
// Collision detection & reaction; simplified SDF constraint
// hitTest particles in neighbour voxel
if (particles[particle_id].type == FLUID)
{
int idSum = ids[particle_id];
glm::vec3 particlePos = predictPositions[particle_id];
float particleLambda = lambda[particle_id];
float H = getH(diameter);
float oneOverRho = 1.0f / getRHO0(diameter);
for (int i = 0; i < loopSize; i++)
{
fluidNeighbourEnforce(lambda,//mutex,
num_voxel, diameter, H, oneOverRho, particle_id, particlePos, particleLambda,
idSum + fluidLoop[i],
predictPositions, deltaPositions, particles, grid, dev_n);
}
}
}
}
// Shape matching matrix A components
__global__
void setAValue(int base, int N, glm::mat3 * Apq, glm::vec3 * x0 , glm::vec3 * predict_x, glm::vec3 cm, glm::vec3 cm0)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < N)
{
		//treat every particle in one rigid body as having the same mass
Apq[tid] = glm::outerProduct(predict_x[tid + base] - cm
, x0[tid + base] - cm0);
}
}
__global__
void shapeMatching(int base, int size, glm::vec3 * delta_positions, glm::vec3 * predictions, glm::vec3 *x0, glm::vec3 cm0, glm::vec3 cm, glm::mat3 Apq, int * dev_n){
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < size){
threadId += base;
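		// Shape matching (Mueller et al. 2005): recover the rotation R from the polar decomposition
		// Apq = R * S with S = sqrt(Apq^T * Apq); each particle's goal position is R*(x0 - cm0) + cm,
		// and the difference to the current prediction is accumulated as the correction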
glm::mat3 R(0.0f), ROOT(0.0f);
glm::mat3 A = glm::transpose(Apq)*Apq;
		// Denman-Beavers iteration
// https://en.wikipedia.org/wiki/Square_root_of_a_matrix
glm::mat3 Y = A, Z(1.0f);
		//previously 8 iterations
for (int i = 0; i < 16; i++){
Y = 0.5f*(Y + glm::inverse(Z));
Z = 0.5f*(Z + glm::inverse(Y));
}
ROOT = Y;
// https://en.wikipedia.org/wiki/Polar_decomposition
R = Apq * glm::inverse(ROOT);
// Delta X for shape matching
delta_positions[threadId] += R * (x0[threadId] - cm0) + cm - predictions[threadId];
dev_n[threadId]++;
}
}
__global__
void applyDelta(glm::vec3 * predictions, const glm::vec3 * delta, const int * n, const int num_particles){
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < num_particles){
// Average and update
float nd = n[threadId] == 0 ? 1.0 : (float)n[threadId];
predictions[threadId] += delta[threadId] / nd;
}
}
// Only need for center-of-mass calculation
__global__
void applyDeltaForCM(glm::vec3 * predictions, const glm::vec3 * delta, const int * n, const int num_particles, int base){
//for one rigid body
	//predictions here are temporary and are used only to compute the center of mass
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < num_particles){
// Average and update
int offset = threadId + base;
float nd = n[offset] == 0 ? 1.0 : (float)n[offset];
predictions[threadId] += delta[offset] / nd;
}
}
__global__
void updatePositionFloatArray(int N, glm::vec3 * predictions, Particle * particles, float * positions, const float delta_t)
{
//N = num of particles
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < N)
{
glm::vec3 delta_d = predictions[threadId] - particles[threadId].x;
// Update velocity
particles[threadId].v = delta_d / delta_t;
// Particle sleeping
		// Truncate very small values to avoid an if-statement
particles[threadId].x = particles[threadId].x + glm::trunc(delta_d*SPLEEFING_COFF) / SPLEEFING_COFF;
//if (particles[threadId].type == SOLID)
//{
// particles[threadId].x = particles[threadId].x + glm::trunc(delta_d*SPLEEFING_COFF) / SPLEEFING_COFF;
//}
//else
//{
// particles[threadId].x = particles[threadId].x + delta_d;
//}
// Update positions
positions[3 * threadId] = particles[threadId].x.x;
positions[3 * threadId + 1] = particles[threadId].x.y;
positions[3 * threadId + 2] = particles[threadId].x.z;
}
}
void simulate(const glm::vec3 forces, const float delta_t, float * opengl_buffer, RigidBody * rigidBody)
{
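	// One position-based-dynamics step:
	//  1. apply external forces and predict positions
	//  2. rebuild the uniform grid from the predicted positions
	//  3. solve collision constraints and the fluid density (lambda) constraints, Jacobi style
	//  4. apply the fluid lambda position corrections
	//  5. shape matching for every dynamic rigid body
	//  6. average the accumulated deltas and integrate velocities/positions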
hipMemset(dev_deltaPosition, 0, num_particles * sizeof(glm::vec3));
hipMemset(dev_n, 0, num_particles * sizeof(int));
const int blockSizer = 192;
dim3 blockCountr((num_particles + blockSizer - 1) / blockSizer);
checkCUDAError("ERROR: LOL");
//apply forces
kernApplyForces << <blockCountr, blockSizer >> >(num_particles, dev_particles, dev_predictPosition, forces, delta_t);
checkCUDAError("ERROR: apply forces update");
//update voxel index
//clean
int num_voxel = grid_resolution.x * grid_resolution.y * grid_resolution.z;
hipMemset(dev_grid, 0, num_voxel * sizeof(Voxel));
hipMemset(dev_particle_voxel_id, 0, num_particles * sizeof(int));
//update
updateVoxelIndex << <blockCountr, blockSizer >> >(num_particles, grid_resolution, grid_min_x, grid_length, dev_predictPosition, dev_grid, dev_particle_voxel_id);
//updateVoxelIndexBefore << <blockCountr, blockSizer >> >(num_particles, grid_resolution, grid_min_x, grid_length, dev_positions, dev_grid, dev_particle_voxel_id);
checkCUDAError("ERROR: updateVoxelIndex");
const int blockSizer2 = 128;
//detect collisions and generate collision constraints
//for fluid, get density and constraints
handleCollision << <blockCountr, blockSizer >> >(num_particles, num_voxel, grid_length * 0.99f, dev_loopIdx_fluid, loopSize_fluid, dev_loopIdx_hit, loopSize_hit, dev_lambda,//dev_mutex,
dev_predictPosition, dev_deltaPosition, dev_particles, dev_grid, dev_particle_voxel_id, delta_t, dev_n);
checkCUDAError("ERROR: handle collision");
//hipDeviceSynchronize();
FluidApplyLambdaDelta << <blockCountr, blockSizer >> >(num_particles, num_voxel, grid_length * 0.99f, dev_loopIdx_fluid, loopSize_fluid, dev_lambda,//dev_mutex,
dev_predictPosition, dev_deltaPosition, dev_particles, dev_grid, dev_particle_voxel_id, delta_t, dev_n);
checkCUDAError("ERROR: handle collision");
//hipDeviceSynchronize();
//---- Shape matching constraint --------
int base = 0;
for (int i = 0; i < num_rigidBodies; i++)
{
ParticleType body_type = rigidBody[i].getType();
int size = rigidBody[i].m_particles.size();
dim3 blockCountrPerRigidBody((size + blockSizer - 1) / blockSizer);
dim3 blockCountrPerRigidBody2((size + blockSizer2 - 1) / blockSizer2);
//generate constraints
if (body_type == SOLID)
{
//Rigid solid body part
//TODO: turn into standard constraint method
if (rigidBody[i].getInvMassScale() < FLT_EPSILON)
{
//static object, no need for shape matching
base += size;
continue;
}
thrust::device_vector<glm::mat3> dev_Apq(size);
glm::mat3 * dev_Apq_ptr = thrust::raw_pointer_cast(&dev_Apq[0]);
//calculate current cm
thrust::device_vector<glm::vec3> dev_px(size); //predict position
//glm::vec3 * dev_px_ptr = thrust::raw_pointer_cast(&dev_px[0]);
//hipMemcpy(dev_px_ptr, dev_predictPosition + base, size * sizeof(glm::vec3), hipMemcpyDeviceToDevice);
thrust::device_ptr<glm::vec3> dev_predict_base(dev_predictPosition);
thrust::copy_n(dev_predict_base + base, size, dev_px.begin());
//update collision delta for calculating cm
glm::vec3 * dev_px_ptr = thrust::raw_pointer_cast(&dev_px[0]);
applyDeltaForCM << <blockCountrPerRigidBody, blockSizer >> >(dev_px_ptr, dev_deltaPosition, dev_n, size, base);
glm::vec3 cm = thrust::reduce(dev_px.begin(), dev_px.end(), glm::vec3(0.0), thrust::plus<glm::vec3>());
cm = cm / ((float)size);
//calculate A matrix
// Pre-process; calculate individual outer products
setAValue << <blockCountrPerRigidBody, blockSizer >> >(base, size, dev_Apq_ptr, dev_particle_x0, dev_predictPosition,
cm, hst_cm0[i]);
glm::mat3 Apq = thrust::reduce(dev_Apq.begin(), dev_Apq.end(), glm::mat3(0.0), thrust::plus<glm::mat3>());
//modify predict positions
// Also find A and R within the kernel
shapeMatching << <blockCountrPerRigidBody2, blockSizer2 >> >(base, size, dev_deltaPosition, dev_predictPosition, dev_particle_x0, hst_cm0[i], cm, Apq, dev_n);
}
//next body
base += size;
}
applyDelta << <blockCountr, blockSizer >> >(dev_predictPosition, dev_deltaPosition, dev_n, num_particles);
//update to position float array
updatePositionFloatArray << <blockCountr, blockSizer >> >(num_particles, dev_predictPosition, dev_particles, dev_positions, delta_t);
checkCUDAError("ERROR: copy to dev_position");
hipMemcpy(opengl_buffer, dev_positions, 3 * num_particles * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAError("ERROR: copy to opengl_buffer");
} | c7abbe8a84eb232027d96adc62857ae387fdd83a.cu | #include <cmath>
#include <cstdio>
#include <cuda.h>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <cuda_runtime.h>
#include <util/checkCUDAError.h>
#include <glm/gtx/transform.hpp>
#include "simulate.h"
#include <vector>
#include "fluid.h"
#define H_KERNAL_WIDTH (0.1f)
#define NEIGHBOUR_R (2)
#define HITTEST_R (1)
#define LAMBDA_EPSILON (0.0f)
#define SPLEEFING_COFF (1000.0f)
#define RHO0 (1.0f)
static int num_rigidBodies;
static int num_particles;
static Particle * dev_particles;
static glm::vec3 * dev_predictPosition;
static glm::vec3 * dev_deltaPosition;
static int * dev_n;
static float * dev_positions;
// Pre-calculated local hit test loop indices
static int loopSize_fluid = (NEIGHBOUR_R * 2 + 1)*(NEIGHBOUR_R * 2 + 1)*(NEIGHBOUR_R * 2 + 1);
static int loopSize_hit = (HITTEST_R * 2 + 1)*(HITTEST_R * 2 + 1)*(HITTEST_R * 2 + 1);
static int * dev_loopIdx_fluid;
static int * dev_loopIdx_hit;
static float grid_length;
static glm::ivec3 grid_resolution;
static glm::vec3 grid_min_x;
static glm::vec3 grid_max_x;
static Voxel * dev_grid;
static int * dev_particle_voxel_id;
static float * dev_lambda;
//lock per particle
//static int * dev_mutex;
//--------data for shape rematching--------------
//struct RigidBodyWrapper
//{
// int base; // first particle id
// int size; // size of particle
// glm::vec3 cm_0; //center of mass of original
//};
//__constant__ static RigidBodyWrapper* dev_rigidBodyWrappers;
glm::vec3 * hst_cm0 = NULL;
//__constant__ static glm::vec3* dev_rigid_body_cm_0; //center mass origin
__constant__ static glm::vec3* dev_particle_x0;
//static glm::vec3* dev_particle_x0;
//-----------------------------------------------
//void initSimulate(int num_rigidBody, RigidBody * rigidbodys, glm::vec3 bmin, glm::vec3 bmax, float particle_diameter)
//{
// assembleParticleArray(num_rigidBody, rigidbodys);
// initUniformGrid(bmin, bmax, particle_diameter);
//}
// Particle transformation
__global__
void transformParticlePositionPerRigidBody(int base,int size, Particle * particles, glm::vec3* x0,glm::mat4 mat){
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < size){
threadId += base;
x0[threadId] = particles[threadId].x;
glm::vec4 tmp = mat * glm::vec4(particles[threadId].x, 1.0f);
tmp /= tmp.w;
particles[threadId].x = glm::vec3(tmp.x, tmp.y, tmp.z);
}
}
// Initial assembly of information
void assembleParticleArray(int num_rigidBody, RigidBody * rigidbodys)
{
hst_cm0 = new glm::vec3[num_rigidBody];
num_rigidBodies = num_rigidBody;
num_particles = 0;
for (int i = 0; i < num_rigidBody; i++)
{
num_particles += rigidbodys[i].m_particles.size();
}
cudaMalloc(&dev_particles, num_particles * sizeof(Particle));
cudaMalloc(&dev_predictPosition, num_particles * sizeof(glm::vec3));
cudaMemset(dev_predictPosition, 0, num_particles * sizeof(glm::vec3));
cudaMalloc(&dev_deltaPosition, num_particles * sizeof(glm::vec3));
cudaMalloc(&dev_n, num_particles * sizeof(int));
cudaMalloc(&dev_positions, 3 * num_particles * sizeof(float));
cudaMemset(dev_positions, 0, 3 * num_particles * sizeof(float));
checkCUDAError("ERROR: cudaMalloc");
cudaMalloc(&dev_particle_x0, num_particles * sizeof(glm::vec3));
//lambda
cudaMalloc(&dev_lambda, num_particles * sizeof(float));
cudaMemset(dev_lambda, 0, num_particles * sizeof(float));
//lock
//cudaMalloc(&dev_mutex, num_particles * sizeof(int));
//cudaMemset(dev_mutex, 0, num_particles * sizeof(int));
int cur = 0;
//glm::vec3 * hst_cm0 = new glm::vec3[num_rigidBody];
for (int i = 0; i < num_rigidBody; i++)
{
hst_cm0[i] = rigidbodys[i].getCenterOfMass();
// Particle objects
int size = rigidbodys[i].m_particles.size();
cudaMemcpy(dev_particles + cur, rigidbodys[i].m_particles.data(), size * sizeof(Particle), cudaMemcpyHostToDevice);
const int blockSizer = 192;
dim3 blockCountr((size + blockSizer - 1) / blockSizer);
transformParticlePositionPerRigidBody << <blockCountr, blockSizer >> >(cur, size, dev_particles, dev_particle_x0, rigidbodys[i].getTransformMatrix());
// Initialize rest config positions
//cudaMemcpy(dev_particle_x0 + cur, rigidbodys[i].m_x0.data(), size * sizeof(glm::vec3), cudaMemcpyHostToDevice);
cur += size;
}
//cudaMalloc(&dev_rigid_body_cm_0, num_rigidBody * sizeof(glm::vec3));
//cudaMemcpy(dev_rigid_body_cm_0, hst_cm0, num_rigidBody * sizeof(glm::vec3), cudaMemcpyHostToDevice);
//delete []hst_cm0;
checkCUDAError("ERROR: assemble particle array");
}
// Initialize hit test loop indices
__global__
void initTestLoopIdx(int * fluidLoop, int * hitLoop, const glm::ivec3 resolution){
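	// precompute the flat voxel-index offsets of the (2*R+1)^3 neighbourhoods (fluid and hit test)
	// so kernels can visit a neighbour voxel by adding an offset to the centre voxel's flat id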
int x, y, z, idSum2, idSum3, yTimesZ = resolution.y * resolution.z, j = 0;
for (x = -NEIGHBOUR_R; x <= NEIGHBOUR_R; x++)
{
idSum2 = x * yTimesZ;
for (y = -NEIGHBOUR_R; y <= NEIGHBOUR_R; y++)
{
idSum3 = idSum2 + y * resolution.z;
for (z = -NEIGHBOUR_R; z <= NEIGHBOUR_R; z++)
{
fluidLoop[j] = idSum3 + z;
j++;
}
}
}
j = 0;
for (x = -HITTEST_R; x <= HITTEST_R; x++)
{
idSum2 = x * yTimesZ;
for (y = -HITTEST_R; y <= HITTEST_R; y++)
{
idSum3 = idSum2 + y * resolution.z;
for (z = -HITTEST_R; z <= HITTEST_R; z++)
{
hitLoop[j] = idSum3 + z;
j++;
}
}
}
}
// Initialize voxel grid
void initUniformGrid(glm::vec3 bmin, glm::vec3 bmax, float diameter)
{
//init size
grid_min_x = bmin;
grid_max_x = bmax;
grid_length = diameter;
grid_resolution = ceil((grid_max_x - grid_min_x) / grid_length);
int grid_size = grid_resolution.x * grid_resolution.y * grid_resolution.z;
cudaMalloc(&dev_grid, grid_size * sizeof(Voxel));
cudaMemset(dev_grid, 0, grid_size * sizeof(Voxel));
cudaMalloc(&dev_particle_voxel_id, num_particles *sizeof(int));
cudaMalloc(&dev_loopIdx_fluid, loopSize_fluid *sizeof(int));
cudaMalloc(&dev_loopIdx_hit, loopSize_hit *sizeof(int));
initTestLoopIdx << <1, 1 >> >(dev_loopIdx_fluid, dev_loopIdx_hit, grid_resolution);
}
void endSimulation()
{
cudaFree(dev_particles);
cudaFree(dev_predictPosition);
cudaFree(dev_deltaPosition);
cudaFree(dev_n);
cudaFree(dev_positions);
cudaFree(dev_grid);
cudaFree(dev_particle_voxel_id);
cudaFree(dev_particle_x0);
cudaFree(dev_lambda);
cudaFree(dev_loopIdx_fluid);
cudaFree(dev_loopIdx_hit);
//lock
//cudaFree(dev_mutex);
if (hst_cm0 != NULL)
{
delete []hst_cm0;
hst_cm0 = NULL;
}
}
__device__
glm::ivec3 gridMap(glm::vec3 x, glm::vec3 min_x, float grid_length)
{
return (glm::ivec3)(floor((x - min_x) / grid_length));
}
__global__
void updateVoxelIndex(int N , glm::ivec3 grid_resolution, glm::vec3 min_x, float grid_length, glm::vec3 * particlePositions, Voxel * grid, int * ids )
{
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < N)
{
glm::ivec3 coordinate = gridMap(particlePositions[threadId], min_x, grid_length);
		//out of the simulation area
if (coordinate.x >= grid_resolution.x || coordinate.x < 0
|| coordinate.y >= grid_resolution.y || coordinate.y < 0
|| coordinate.z >= grid_resolution.z || coordinate.z < 0)
{
			//don't assign this particle to any voxel
//printf("out of simulation region\n"); //test
return;
}
int voxel_id = coordinate.x * grid_resolution.y * grid_resolution.z
+ coordinate.y * grid_resolution.z
+ coordinate.z;
		//particles beyond the per-voxel capacity (NUM_PRIACTICLE_VOXEL) are still not handled here
		//atomic increment (assuming Voxel::num is an int) so concurrent threads do not race on the same slot
		int slot = atomicAdd(&grid[voxel_id].num, 1);
		grid[voxel_id].particle_id[slot] = threadId;
ids[threadId] = voxel_id;
}
}
// Prediction using simple force-based translation
__global__
void kernApplyForces(int N, Particle * particles, glm::vec3 * predictPosition, const glm::vec3 forces, const float delta_t)
{
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < N)
{
//apply forces
particles[threadId].v += particles[threadId].invmass * forces * delta_t;
//predict positions
predictPosition[threadId] = particles[threadId].x + particles[threadId].v * delta_t;
}
}
// Particle hit test for a single particle
__device__
void hitTestVoxelSolid(const int num_voxel, const float diameter, const int particle_id, const int particlePhase, const float particleInvmass, const glm::vec3 particlePos,
const int voxel_id, const glm::vec3 * predict_positions, glm::vec3 * delta_positions,
const Particle * particles, const Voxel * grid, int * dev_n)
{
if (voxel_id < 0 || voxel_id >= num_voxel)
{
//voxel_id is invalid
return;
}
// Delta X for collision
glm::vec3 delta_pos(0.0), d;
// Average count
int n = 0;
float delta;
for (int i = 0; i < grid[voxel_id].num; i++)
{
if (particles[grid[voxel_id].particle_id[i]].phase == particlePhase)
{
continue;
}
// Distance vector from particle i to particle j (on particle centers)
d = predict_positions[grid[voxel_id].particle_id[i]] - particlePos;
delta = diameter - glm::length(d);
		// Skip this pair if the particles do not overlap (delta < 0 means no contact)
if (0 > delta)
{
continue;
}
// Momentum weighing based on particle mass
// Not true momentum, but approximation
delta = particles[particle_id].type == FLUID ? -delta : delta * (-particleInvmass / (particleInvmass + particles[grid[voxel_id].particle_id[i]].invmass));
n++;
// Move particle i along the vector so that i and j are in the post-collision states
delta_pos += glm::normalize(d) * delta;
}
// Apply average delta X position (treat as results of constraint solver)
delta_positions[particle_id] += delta_pos;
dev_n[particle_id] += n;
}
// ------------------fluid---------------------
__device__
inline float getH(float diameter)
{
//return ((float)NEIGHBOUR_R + 0.5f) * diameter;
return 2.5f * diameter;
//return (float)NEIGHBOUR_R * diameter;
}
__device__
inline float getRHO0(float diameter)
{
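	//rest density heuristic: roughly 0.7 particles per cube of side "diameter"
	//(the /0.99f undoes the slight shrink applied to the diameter passed in from simulate())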
return 0.7f * 1.0f / powf(diameter / 0.99f, 3.0f);
}
__device__
inline float SmoothKernel(float r, float h)
{
//poly 6 kernel
return r > h ? 0.0f : 315.0f / 64.0f / (float)PI / powf(h, 9.0f) * powf(h*h - r*r, 3.0f);
//for test
//float res = r > h ? 0.0f : 315.0f / 64.0f / (float)PI / powf(h, 9.0f) * powf(h*h - r*r, 3.0f);
//printf("%f,%f\tres:%f\n", r, h, res);
//return res;
//nearest neighbour
//return 1.0f / glm::dot(r,r);
}
__device__
inline glm::vec3 gradientSmoothKernel(glm::vec3 vec_r, float h)
{
//r = || pi - pj||
float r = glm::length(vec_r);
//spiky kernel gradient
return r>h ? glm::vec3(0.0f) : (-45.0f) / (float)PI / powf(h, 6.0f) * powf(h-r,2.0f) * glm::normalize(vec_r);
//return glm::normalize(vec_r);
//tmp
//return 1.0f;
}
__device__
float fluidInfoSum(glm::vec3 & gradient, float & gradient2,
int num_voxel, float diameter, float H, int particle_id, glm::vec3 particlePos, int voxel_id, glm::vec3 * predict_positions, glm::vec3 * delta_positions,
Particle * particles, Voxel * grid, int * dev_n)
{
if (voxel_id < 0 || voxel_id >= num_voxel)
{
//voxel_id is invalid
		return 0.0f;
}
glm::vec3 g;
float density = 0.0f; //for particles in this voxel
float distance;
for (int i = 0; i < grid[voxel_id].num; i++)
{
if (grid[voxel_id].particle_id[i] == particle_id)
{
continue;
}
//pi - pj
g = particlePos - predict_positions[grid[voxel_id].particle_id[i]];
distance = glm::length(g);
if (distance > H)
{
continue;
}
density += SmoothKernel(distance, H);
g = gradientSmoothKernel(g, H);
gradient += g;
gradient2 += glm::dot(g,g);
}
	//a more accurate version would update based on the gradient matrix of the kernel
	//and update all positions together
return density;
}
__device__
void fluidNeighbourEnforce(float* lambda, //int * mutex,
int num_voxel, float diameter, float H, float oneOverRho, int particle_id, glm::vec3 particlePos, float particleLambda, int voxel_id, glm::vec3 * predict_positions, glm::vec3 * delta_positions,
Particle * particles, Voxel * grid, int * dev_n)
{
if (voxel_id < 0 || voxel_id >= num_voxel)
{
//voxel_id is invalid
return;
}
int gridSize = grid[voxel_id].num, gridPartId;
glm::vec3 d;
float delta_w;
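	//PBF position correction: delta_p_i = (1/rho_0) * sum_j (lambda_i + lambda_j) * gradW(p_i - p_j)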
for (int i = 0; i < gridSize; i++)
{
gridPartId = grid[voxel_id].particle_id[i];
if (gridPartId == particle_id)
{
continue;
}
// Distance vector from particle i to particle j (on particle centers)
d = particlePos - predict_positions[gridPartId];
if (glm::length(d) > H)
{
continue;
}
delta_w = oneOverRho * (particleLambda + lambda[gridPartId]);
dev_n[particle_id] += 1;
		//gradient of W(pi - pj)
delta_positions[particle_id] += delta_w * (gradientSmoothKernel(d, H));
}
}
//---------------------------------------------
// Collision constraints handler
__global__
void handleCollision(int N, int num_voxel, float diameter, int * fluidLoop, int fluidLoopSize, int * hitLoop, int hitLoopSize, float* lambda,//int * mutex,
glm::vec3 * predictPositions, glm::vec3 * deltaPositions, Particle * particles,Voxel * grid, int * ids, float delta_t, int * dev_n)
{
int particle_id = blockDim.x * blockIdx.x + threadIdx.x;
if (particle_id < N)
{
// Collision detection & reaction; simplified SDF constraint
// hitTest particles in neighbour voxel
// float particleInvmass
float density = particles[particle_id].invmass;
int particlePhase = particles[particle_id].phase;
int i, idSum = ids[particle_id];
glm::vec3 particlePos = predictPositions[particle_id];
for (i = 0; i < hitLoopSize; i++)
{
hitTestVoxelSolid(num_voxel, diameter, particle_id, particlePhase, density, particlePos,
idSum + hitLoop[i],
predictPositions, deltaPositions, particles, grid, dev_n);
}
if (particles[particle_id].type == FLUID)
{
density = 0.0f;
float H = getH(diameter);
glm::vec3 sum_gradient(0.0f);
float sum_gradient2 = 0.0f;
//first loop used to get the sum of density rho_i, sum of gradient
//fluid density constraint
for (i = 0; i < fluidLoopSize; i++)
{
density += fluidInfoSum(sum_gradient, sum_gradient2,
num_voxel, diameter, H, particle_id, particlePos,
idSum + fluidLoop[i],
predictPositions, deltaPositions, particles, grid, dev_n);
}
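			// Position Based Fluids (Macklin & Mueller) density constraint:
			//   C_i = rho_i / rho_0 - 1,   lambda_i = -C_i / (sum_k |grad_k C_i|^2 + epsilon)
			// sum_gradient2 carries the j-terms and dot(sum_gradient, sum_gradient) the i-term;
			// the 20.0f below is an additional stiffness/relaxation scale used by this implementation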
// when density / rho_0 -1.0f > 0 , move
// i.e. when lambda < 0, move
// float ci
H = density / getRHO0(diameter) - 1.0f;
// float denominator
density = sum_gradient2 + glm::dot(sum_gradient, sum_gradient) + LAMBDA_EPSILON;
// float lambda_i
H = -20.0f * H / density;
lambda[particle_id] = min(0.0f, H);
}
}
}
// fluid apply delta; retargeting
__global__
void FluidApplyLambdaDelta(int N, int num_voxel, float diameter, int * fluidLoop, int loopSize, float* lambda,//int * mutex,
glm::vec3 * predictPositions, glm::vec3 * deltaPositions, Particle * particles, Voxel * grid, int * ids, float delta_t, int * dev_n)
{
int particle_id = blockDim.x * blockIdx.x + threadIdx.x;
if (particle_id < N)
{
// Collision detection & reaction; simplified SDF constraint
// hitTest particles in neighbour voxel
if (particles[particle_id].type == FLUID)
{
int idSum = ids[particle_id];
glm::vec3 particlePos = predictPositions[particle_id];
float particleLambda = lambda[particle_id];
float H = getH(diameter);
float oneOverRho = 1.0f / getRHO0(diameter);
for (int i = 0; i < loopSize; i++)
{
fluidNeighbourEnforce(lambda,//mutex,
num_voxel, diameter, H, oneOverRho, particle_id, particlePos, particleLambda,
idSum + fluidLoop[i],
predictPositions, deltaPositions, particles, grid, dev_n);
}
}
}
}
// Shape matching matrix A components
__global__
void setAValue(int base, int N, glm::mat3 * Apq, glm::vec3 * x0 , glm::vec3 * predict_x, glm::vec3 cm, glm::vec3 cm0)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < N)
{
		//treat every particle in one rigid body as having the same mass
Apq[tid] = glm::outerProduct(predict_x[tid + base] - cm
, x0[tid + base] - cm0);
}
}
__global__
void shapeMatching(int base, int size, glm::vec3 * delta_positions, glm::vec3 * predictions, glm::vec3 *x0, glm::vec3 cm0, glm::vec3 cm, glm::mat3 Apq, int * dev_n){
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < size){
threadId += base;
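		// Shape matching (Mueller et al. 2005): recover the rotation R from the polar decomposition
		// Apq = R * S with S = sqrt(Apq^T * Apq); each particle's goal position is R*(x0 - cm0) + cm,
		// and the difference to the current prediction is accumulated as the correction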
glm::mat3 R(0.0f), ROOT(0.0f);
glm::mat3 A = glm::transpose(Apq)*Apq;
		// Denman-Beavers iteration
// https://en.wikipedia.org/wiki/Square_root_of_a_matrix
glm::mat3 Y = A, Z(1.0f);
		//previously 8 iterations
for (int i = 0; i < 16; i++){
Y = 0.5f*(Y + glm::inverse(Z));
Z = 0.5f*(Z + glm::inverse(Y));
}
ROOT = Y;
// https://en.wikipedia.org/wiki/Polar_decomposition
R = Apq * glm::inverse(ROOT);
// Delta X for shape matching
delta_positions[threadId] += R * (x0[threadId] - cm0) + cm - predictions[threadId];
dev_n[threadId]++;
}
}
__global__
void applyDelta(glm::vec3 * predictions, const glm::vec3 * delta, const int * n, const int num_particles){
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < num_particles){
// Average and update
float nd = n[threadId] == 0 ? 1.0 : (float)n[threadId];
predictions[threadId] += delta[threadId] / nd;
}
}
// Only need for center-of-mass calculation
__global__
void applyDeltaForCM(glm::vec3 * predictions, const glm::vec3 * delta, const int * n, const int num_particles, int base){
//for one rigid body
	//predictions here are temporary and are used only to compute the center of mass
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < num_particles){
// Average and update
int offset = threadId + base;
float nd = n[offset] == 0 ? 1.0 : (float)n[offset];
predictions[threadId] += delta[offset] / nd;
}
}
__global__
void updatePositionFloatArray(int N, glm::vec3 * predictions, Particle * particles, float * positions, const float delta_t)
{
//N = num of particles
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < N)
{
glm::vec3 delta_d = predictions[threadId] - particles[threadId].x;
// Update velocity
particles[threadId].v = delta_d / delta_t;
// Particle sleeping
		// Truncate very small values to avoid an if-statement
particles[threadId].x = particles[threadId].x + glm::trunc(delta_d*SPLEEFING_COFF) / SPLEEFING_COFF;
//if (particles[threadId].type == SOLID)
//{
// particles[threadId].x = particles[threadId].x + glm::trunc(delta_d*SPLEEFING_COFF) / SPLEEFING_COFF;
//}
//else
//{
// particles[threadId].x = particles[threadId].x + delta_d;
//}
// Update positions
positions[3 * threadId] = particles[threadId].x.x;
positions[3 * threadId + 1] = particles[threadId].x.y;
positions[3 * threadId + 2] = particles[threadId].x.z;
}
}
void simulate(const glm::vec3 forces, const float delta_t, float * opengl_buffer, RigidBody * rigidBody)
{
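	// One position-based-dynamics step:
	//  1. apply external forces and predict positions
	//  2. rebuild the uniform grid from the predicted positions
	//  3. solve collision constraints and the fluid density (lambda) constraints, Jacobi style
	//  4. apply the fluid lambda position corrections
	//  5. shape matching for every dynamic rigid body
	//  6. average the accumulated deltas and integrate velocities/positions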
cudaMemset(dev_deltaPosition, 0, num_particles * sizeof(glm::vec3));
cudaMemset(dev_n, 0, num_particles * sizeof(int));
const int blockSizer = 192;
dim3 blockCountr((num_particles + blockSizer - 1) / blockSizer);
checkCUDAError("ERROR: LOL");
//apply forces
kernApplyForces << <blockCountr, blockSizer >> >(num_particles, dev_particles, dev_predictPosition, forces, delta_t);
checkCUDAError("ERROR: apply forces update");
//update voxel index
//clean
int num_voxel = grid_resolution.x * grid_resolution.y * grid_resolution.z;
cudaMemset(dev_grid, 0, num_voxel * sizeof(Voxel));
cudaMemset(dev_particle_voxel_id, 0, num_particles * sizeof(int));
//update
updateVoxelIndex << <blockCountr, blockSizer >> >(num_particles, grid_resolution, grid_min_x, grid_length, dev_predictPosition, dev_grid, dev_particle_voxel_id);
//updateVoxelIndexBefore << <blockCountr, blockSizer >> >(num_particles, grid_resolution, grid_min_x, grid_length, dev_positions, dev_grid, dev_particle_voxel_id);
checkCUDAError("ERROR: updateVoxelIndex");
const int blockSizer2 = 128;
//detect collisions and generate collision constraints
//for fluid, get density and constraints
handleCollision << <blockCountr, blockSizer >> >(num_particles, num_voxel, grid_length * 0.99f, dev_loopIdx_fluid, loopSize_fluid, dev_loopIdx_hit, loopSize_hit, dev_lambda,//dev_mutex,
dev_predictPosition, dev_deltaPosition, dev_particles, dev_grid, dev_particle_voxel_id, delta_t, dev_n);
checkCUDAError("ERROR: handle collision");
//cudaDeviceSynchronize();
FluidApplyLambdaDelta << <blockCountr, blockSizer >> >(num_particles, num_voxel, grid_length * 0.99f, dev_loopIdx_fluid, loopSize_fluid, dev_lambda,//dev_mutex,
dev_predictPosition, dev_deltaPosition, dev_particles, dev_grid, dev_particle_voxel_id, delta_t, dev_n);
checkCUDAError("ERROR: handle collision");
//cudaDeviceSynchronize();
//---- Shape matching constraint --------
int base = 0;
for (int i = 0; i < num_rigidBodies; i++)
{
ParticleType body_type = rigidBody[i].getType();
int size = rigidBody[i].m_particles.size();
dim3 blockCountrPerRigidBody((size + blockSizer - 1) / blockSizer);
dim3 blockCountrPerRigidBody2((size + blockSizer2 - 1) / blockSizer2);
//generate constraints
if (body_type == SOLID)
{
//Rigid solid body part
//TODO: turn into standard constraint method
if (rigidBody[i].getInvMassScale() < FLT_EPSILON)
{
//static object, no need for shape matching
base += size;
continue;
}
thrust::device_vector<glm::mat3> dev_Apq(size);
glm::mat3 * dev_Apq_ptr = thrust::raw_pointer_cast(&dev_Apq[0]);
//calculate current cm
thrust::device_vector<glm::vec3> dev_px(size); //predict position
//glm::vec3 * dev_px_ptr = thrust::raw_pointer_cast(&dev_px[0]);
//cudaMemcpy(dev_px_ptr, dev_predictPosition + base, size * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
thrust::device_ptr<glm::vec3> dev_predict_base(dev_predictPosition);
thrust::copy_n(dev_predict_base + base, size, dev_px.begin());
//update collision delta for calculating cm
glm::vec3 * dev_px_ptr = thrust::raw_pointer_cast(&dev_px[0]);
applyDeltaForCM << <blockCountrPerRigidBody, blockSizer >> >(dev_px_ptr, dev_deltaPosition, dev_n, size, base);
glm::vec3 cm = thrust::reduce(dev_px.begin(), dev_px.end(), glm::vec3(0.0), thrust::plus<glm::vec3>());
cm = cm / ((float)size);
//calculate A matrix
// Pre-process; calculate individual outer products
setAValue << <blockCountrPerRigidBody, blockSizer >> >(base, size, dev_Apq_ptr, dev_particle_x0, dev_predictPosition,
cm, hst_cm0[i]);
glm::mat3 Apq = thrust::reduce(dev_Apq.begin(), dev_Apq.end(), glm::mat3(0.0), thrust::plus<glm::mat3>());
//modify predict positions
// Also find A and R within the kernel
shapeMatching << <blockCountrPerRigidBody2, blockSizer2 >> >(base, size, dev_deltaPosition, dev_predictPosition, dev_particle_x0, hst_cm0[i], cm, Apq, dev_n);
}
//next body
base += size;
}
applyDelta << <blockCountr, blockSizer >> >(dev_predictPosition, dev_deltaPosition, dev_n, num_particles);
//update to position float array
updatePositionFloatArray << <blockCountr, blockSizer >> >(num_particles, dev_predictPosition, dev_particles, dev_positions, delta_t);
checkCUDAError("ERROR: copy to dev_position");
cudaMemcpy(opengl_buffer, dev_positions, 3 * num_particles * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAError("ERROR: copy to opengl_buffer");
} |
cee57508f39a2a34998143905f3938fad9f25415.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "get_hausdorff_dis_gpu.h"
#include "cuda_utils.h"
#define gt_num 42
#define voxel_dim 31
#define dict_grid_num (voxel_dim*voxel_dim*voxel_dim)
#define prior_point_num 9
__global__ void get_hausdorff_dis_kernel_fast(const float *__restrict__ neighbor_points,
float *__restrict__ features, float radius,
int batch_size, int whole_point_num,
int keypoint_num, int neighbor_point_num,
const float* __restrict__ prior_points,
const float* __restrict__ dis_dicts,
float voxel_len, hipStream_t stream){
// whole_points: B N C
// keypoints: B M C
// neighbor_points: B M nsample C
// prior_points: Nshapes Npoints_per_shape Cxyz
// dis_dicts: Nshapes Ngrid Cxyz
// output:
// features: batch_size Nshapes point_num
// dim3 blocks(DIVUP(point_num, THREADS_PER_BLOCK), batch_size); // blockIdx.x(col), blockIdx.y(row)
// dim3 threads(THREADS_PER_BLOCK);
int batch_idx = blockIdx.y;
    int point_idx = blockIdx.x * blockDim.x + threadIdx.x;
    // guard: the grid is rounded up, so threads past the last keypoint must not read or write
    if (point_idx >= keypoint_num) return;
    neighbor_points += batch_idx * keypoint_num * neighbor_point_num * 3 + point_idx * neighbor_point_num * 3;
    features += batch_idx * keypoint_num * gt_num + point_idx * gt_num;
float to_prior_dis = 0;
float tmp_dis;
int xth, yth, zth;
int i, j;
int prior_hash_idx;
float prior_to_dis = 0;
float min_point_pair_dis = radius;
float hsdf_dis = radius;
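    // For each of the gt_num template shapes:
    //  - the directed distance (neighbourhood -> prior) is read per point from the precomputed
    //    per-shape distance grid (dis_dicts), indexed by the point's local voxel
    //  - the directed distance (prior -> neighbourhood) is a brute-force nearest-neighbour search
    //  - the Hausdorff distance is the larger of the two, normalised by the search radius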
for(int gt_idx = 0; gt_idx < gt_num; gt_idx++ ){
to_prior_dis = 0;
for( i = 0; i < neighbor_point_num; i++ ){
xth = floor(abs(neighbor_points[i*3 + 0] + radius) / voxel_len);
yth = floor(abs(neighbor_points[i*3 + 1] + radius) / voxel_len);
zth = floor(abs(neighbor_points[i*3 + 2] + radius) / voxel_len);
prior_hash_idx = xth + yth * voxel_dim + zth * voxel_dim * voxel_dim;
tmp_dis = dis_dicts[gt_idx*dict_grid_num + prior_hash_idx];
if( to_prior_dis < tmp_dis ){
to_prior_dis = tmp_dis;
}
}
prior_to_dis = 0;
for( i = 0; i < prior_point_num; i++ ){
min_point_pair_dis = 99.9;
for( j = 0; j < neighbor_point_num; j++ ){
tmp_dis = ( pow(prior_points[gt_idx*prior_point_num*3 + i*3 + 0]
- neighbor_points[j*3 + 0], 2) +
pow(prior_points[gt_idx*prior_point_num*3 + i*3 + 1]
- neighbor_points[j*3 + 1], 2) +
pow(prior_points[gt_idx*prior_point_num*3 + i*3 + 2]
- neighbor_points[j*3 + 2], 2) );
if( min_point_pair_dis > tmp_dis ){
min_point_pair_dis = tmp_dis;
}
}
if( min_point_pair_dis > prior_to_dis ){
prior_to_dis = min_point_pair_dis;
}
}
prior_to_dis = sqrt(prior_to_dis);
hsdf_dis = (prior_to_dis > to_prior_dis? prior_to_dis : to_prior_dis) / radius;
        hsdf_dis = hsdf_dis > 1.0f ? 1.0f : hsdf_dis; // clamp to [0, 1]
        features[gt_idx] = hsdf_dis < 0.1f ? 0.0f : hsdf_dis; // treat very small distances as an exact match
}
}
void get_hausdorff_dis_kernel_launcher_fast(const float* neighbor_points,
float* features, float radius,
int batch_size, int whole_point_num, int keypoint_num,
int neighbor_point_num,
const float* prior_points, const float* dis_dicts,
float voxel_len, hipStream_t stream){
// whole_points: B N C
// keypoints: B N C
// neighbor_points: B N nsample C
// prior_points: Nshapes Npoints_per_shape Cxyz
// dis_dicts: Nshapes N_hash_grid_per_shape Cxyz
// output:
// features: batch_size point_num Nshapes
hipError_t err;
dim3 blocks(DIVUP(keypoint_num, THREADS_PER_BLOCK), batch_size); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( get_hausdorff_dis_kernel_fast), dim3(blocks), dim3(threads), 0, stream,
neighbor_points, features, radius, batch_size, whole_point_num,
keypoint_num, neighbor_point_num, prior_points, dis_dicts, voxel_len, stream);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| cee57508f39a2a34998143905f3938fad9f25415.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "get_hausdorff_dis_gpu.h"
#include "cuda_utils.h"
#define gt_num 42
#define voxel_dim 31
#define dict_grid_num (voxel_dim*voxel_dim*voxel_dim)
#define prior_point_num 9
__global__ void get_hausdorff_dis_kernel_fast(const float *__restrict__ neighbor_points,
float *__restrict__ features, float radius,
int batch_size, int whole_point_num,
int keypoint_num, int neighbor_point_num,
const float* __restrict__ prior_points,
const float* __restrict__ dis_dicts,
float voxel_len, cudaStream_t stream){
// whole_points: B N C
// keypoints: B M C
// neighbor_points: B M nsample C
// prior_points: Nshapes Npoints_per_shape Cxyz
// dis_dicts: Nshapes Ngrid Cxyz
// output:
// features: batch_size Nshapes point_num
// dim3 blocks(DIVUP(point_num, THREADS_PER_BLOCK), batch_size); // blockIdx.x(col), blockIdx.y(row)
// dim3 threads(THREADS_PER_BLOCK);
int batch_idx = blockIdx.y;
    int point_idx = blockIdx.x * blockDim.x + threadIdx.x;
    // guard: the grid is rounded up, so threads past the last keypoint must not read or write
    if (point_idx >= keypoint_num) return;
    neighbor_points += batch_idx * keypoint_num * neighbor_point_num * 3 + point_idx * neighbor_point_num * 3;
    features += batch_idx * keypoint_num * gt_num + point_idx * gt_num;
float to_prior_dis = 0;
float tmp_dis;
int xth, yth, zth;
int i, j;
int prior_hash_idx;
float prior_to_dis = 0;
float min_point_pair_dis = radius;
float hsdf_dis = radius;
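    // For each of the gt_num template shapes:
    //  - the directed distance (neighbourhood -> prior) is read per point from the precomputed
    //    per-shape distance grid (dis_dicts), indexed by the point's local voxel
    //  - the directed distance (prior -> neighbourhood) is a brute-force nearest-neighbour search
    //  - the Hausdorff distance is the larger of the two, normalised by the search radius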
for(int gt_idx = 0; gt_idx < gt_num; gt_idx++ ){
to_prior_dis = 0;
for( i = 0; i < neighbor_point_num; i++ ){
xth = floor(abs(neighbor_points[i*3 + 0] + radius) / voxel_len);
yth = floor(abs(neighbor_points[i*3 + 1] + radius) / voxel_len);
zth = floor(abs(neighbor_points[i*3 + 2] + radius) / voxel_len);
prior_hash_idx = xth + yth * voxel_dim + zth * voxel_dim * voxel_dim;
tmp_dis = dis_dicts[gt_idx*dict_grid_num + prior_hash_idx];
if( to_prior_dis < tmp_dis ){
to_prior_dis = tmp_dis;
}
}
prior_to_dis = 0;
for( i = 0; i < prior_point_num; i++ ){
min_point_pair_dis = 99.9;
for( j = 0; j < neighbor_point_num; j++ ){
tmp_dis = ( pow(prior_points[gt_idx*prior_point_num*3 + i*3 + 0]
- neighbor_points[j*3 + 0], 2) +
pow(prior_points[gt_idx*prior_point_num*3 + i*3 + 1]
- neighbor_points[j*3 + 1], 2) +
pow(prior_points[gt_idx*prior_point_num*3 + i*3 + 2]
- neighbor_points[j*3 + 2], 2) );
if( min_point_pair_dis > tmp_dis ){
min_point_pair_dis = tmp_dis;
}
}
if( min_point_pair_dis > prior_to_dis ){
prior_to_dis = min_point_pair_dis;
}
}
prior_to_dis = sqrt(prior_to_dis);
hsdf_dis = (prior_to_dis > to_prior_dis? prior_to_dis : to_prior_dis) / radius;
        hsdf_dis = hsdf_dis > 1.0f ? 1.0f : hsdf_dis; // clamp to [0, 1]
        features[gt_idx] = hsdf_dis < 0.1f ? 0.0f : hsdf_dis; // treat very small distances as an exact match
}
}
void get_hausdorff_dis_kernel_launcher_fast(const float* neighbor_points,
float* features, float radius,
int batch_size, int whole_point_num, int keypoint_num,
int neighbor_point_num,
const float* prior_points, const float* dis_dicts,
float voxel_len, cudaStream_t stream){
// whole_points: B N C
// keypoints: B N C
// neighbor_points: B N nsample C
// prior_points: Nshapes Npoints_per_shape Cxyz
// dis_dicts: Nshapes N_hash_grid_per_shape Cxyz
// output:
// features: batch_size point_num Nshapes
cudaError_t err;
dim3 blocks(DIVUP(keypoint_num, THREADS_PER_BLOCK), batch_size); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
get_hausdorff_dis_kernel_fast<<<blocks, threads, 0, stream>>>(
neighbor_points, features, radius, batch_size, whole_point_num,
keypoint_num, neighbor_point_num, prior_points, dis_dicts, voxel_len, stream);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
a9ddc9c93c0417980b63a2587b510f3601e73861.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2019 Jij Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef USE_ROCM
#include <hip/hip_runtime.h>
#include <system/gpu/chimera_cuda/kernel.hpp>
#include <iostream>
namespace openjij {
namespace system {
//for cuda device (kernel)
namespace chimera_cuda {
template<
typename FloatType,
std::size_t block_row,
std::size_t block_col,
std::size_t block_trot,
std::size_t unitsize
>
__global__ void metropolis(
int32_t sw,
int32_t* spin, const FloatType* rand,
FloatType* dE,
const FloatType* J_out_p,
const FloatType* J_out_n,
const FloatType* J_in_04,
const FloatType* J_in_15,
const FloatType* J_in_26,
const FloatType* J_in_37,
const FloatType* h,
ChimeraInfo info,
double beta, FloatType gamma, double s
){
static_assert(block_row*block_col*block_trot*unitsize <= 1024, "max limit of the number of thread for each block is 1024.");
//switch
//(0 -> 1st chimera unit (t==0, i==0, j==0) -> (0...3))
//(1 -> 1st chimera unit (t==0, i==0, j==0) -> (4...7))
//shared memory
//spin with boundaries
__shared__ int32_t spincache[(block_row+2) * (block_col+2) * (block_trot+2) * unitsize];
__shared__ FloatType randcache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_out_p_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_out_n_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_04_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_15_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_26_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_37_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType h_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType dE_cache[block_row * block_col * block_trot * unitsize];
FloatType J_trot = 0;
//know who and where we are
int32_t r = idx_r(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t c = idx_c(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t i = idx_i(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t t = idx_t(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t b_r = r%block_row;
int32_t b_c = c%block_col;
int32_t b_t = t%block_trot;
int32_t global_index = glIdx(info, r, c, i, t);
int32_t local_index = glIdx(info, r, c, i);
int32_t block_index = bkIdx<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t);
int32_t spin_index = bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t);
if(info.trotters > 1){
J_trot = 0.5*log(tanh(beta*gamma*(1-s)/(FloatType)info.trotters)); //-(1/2)log(coth(beta*gamma/M))
}
J_out_p_cache[block_index] = J_out_p[local_index];
J_out_n_cache[block_index] = J_out_n[local_index];
J_in_04_cache[block_index] = J_in_04[local_index];
J_in_15_cache[block_index] = J_in_15[local_index];
J_in_26_cache[block_index] = J_in_26[local_index];
J_in_37_cache[block_index] = J_in_37[local_index];
h_cache[block_index] = h[local_index];
randcache[block_index] = rand[global_index];
spincache[spin_index] = spin[global_index];
//be sure that dE_cache is initialized with zero
dE_cache[block_index] = 0;
//copy boundary spins to shared memory
//row
if(r%block_row == 0 && r != 0){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,-1,b_c,i,b_t)]
= spin[glIdx(info,r-1,c,i,t)];
}
if(r%block_row == block_row-1 && r != info.rows-1){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,block_row,b_c,i,b_t)]
= spin[glIdx(info,r+1,c,i,t)];
}
//col
if(c%block_col == 0 && c != 0){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,-1,i,b_t)]
= spin[glIdx(info,r,c-1,i,t)];
}
if(c%block_col == block_col-1 && c != info.cols-1){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,block_col,i,b_t)]
= spin[glIdx(info,r,c+1,i,t)];
}
//trotter slices
if(info.trotters > 1){
if(t%block_trot == 0){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,-1)]
= spin[glIdx(info,r,c,i,(t!=0)?t-1:info.trotters-1)];
}
if(t%block_trot == block_trot-1){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,block_trot)]
= spin[glIdx(info,r,c,i,(t!=info.trotters-1)?t+1:0)];
}
}
__syncthreads();
//do metropolis
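    //checkerboard update: each call touches only half of the spins, selected by the parity of
    //(r+c+t) relative to sw and by whether the in-cell index i is in 0..3 or 4..7, so directly
    //coupled spins are never updated in the same pass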
if(((r+c+t)%2 == sw && i <= 3) || ((r+c+t)%2 != sw && 4 <= i)){
FloatType temp_dE =
-2*s*spincache[spin_index]*beta/(FloatType)(info.trotters)*(
//outside chimera unit
J_out_p_cache[block_index]
//0 to 3 -> go up 4 to 7 -> go left
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,(i<=3)?b_r-1:b_r, (4<=i)?b_c-1:b_c,i,b_t)]+
J_out_n_cache[block_index]
//0 to 3 -> go down 4 to 7 -> go right
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,(i<=3)?b_r+1:b_r, (4<=i)?b_c+1:b_c,i,b_t)]+
//inside chimera unit
J_in_04_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?4:0,b_t)]+
J_in_15_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?5:1,b_t)]+
J_in_26_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?6:2,b_t)]+
J_in_37_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?7:3,b_t)]+
//local magnetization
h_cache[block_index]);
//trotter slices
if(info.trotters > 1){
temp_dE +=
-2*spincache[spin_index]*J_trot*(
//trotter slices
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t+1)]+
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t-1)]
);
}
//update
spin[global_index] = ((exp(-temp_dE) > randcache[block_index])?-1:1)*spincache[spin_index];
}
__syncthreads();
// reduction for calculating dE
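    // note: temp_dE is not written back into dE_cache anywhere in this kernel, so the block
    // reduction below currently accumulates zeros into dE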
uint32_t count = block_row * block_col * block_trot * unitsize; // <= 1024
// thread index
uint32_t ti = threadIdx.z*(blockDim.y*blockDim.x)+threadIdx.y*(blockDim.x)+threadIdx.x;
count = count/2; //1024 -> 512
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //512 -> 256
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //256 -> 128
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //128 -> 64
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //64 -> 32
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //32 -> 16
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //16 -> 8
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //8 -> 4
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //4 -> 2
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //2 -> 1
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
if(ti == 0){
//add 'em
atomicAdd(&dE[0], dE_cache[ti]);
}
}
template<
typename FloatType,
std::size_t block_row,
std::size_t block_col,
std::size_t block_trot>
void metropolis_interface(
int32_t sw,
int32_t* spin, const FloatType* rand,
FloatType* dE,
const FloatType* J_out_p,
const FloatType* J_out_n,
const FloatType* J_in_04,
const FloatType* J_in_15,
const FloatType* J_in_26,
const FloatType* J_in_37,
const FloatType* h,
const ChimeraInfo& info, const dim3& grid, const dim3& block,
double beta, FloatType gamma, double s){
hipLaunchKernelGGL(( metropolis<FloatType, block_row, block_col, block_trot, info.chimera_unitsize>), dim3(grid), dim3(block), 0, 0,
sw,
spin, rand,
dE,
J_out_p,
J_out_n,
J_in_04,
J_in_15,
J_in_26,
J_in_37,
h,
info,
beta, gamma, s
);
}
//template instantiation
#define FLOAT_ARGTYPE int32_t,int32_t*,const float*,float*,const float*,const float*,const float*,const float*,const float*,const float*,const float*,const ChimeraInfo&,const dim3&,const dim3&,double,float,double
#define DOUBLE_ARGTYPE int32_t,int32_t*,const double*,double*,const double*,const double*,const double*,const double*,const double*,const double*,const double*,const ChimeraInfo&,const dim3&,const dim3&,double,double,double
template void metropolis_interface<float,1,1,1>(FLOAT_ARGTYPE);
template void metropolis_interface<float,2,2,2>(FLOAT_ARGTYPE);
template void metropolis_interface<float,3,3,3>(FLOAT_ARGTYPE);
template void metropolis_interface<float,4,4,4>(FLOAT_ARGTYPE);
template void metropolis_interface<float,2,2,1>(FLOAT_ARGTYPE);
template void metropolis_interface<float,3,3,1>(FLOAT_ARGTYPE);
template void metropolis_interface<float,4,4,1>(FLOAT_ARGTYPE);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600
template void metropolis_interface<double,1,1,1>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,2,2,2>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,3,3,3>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,4,4,4>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,2,2,1>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,3,3,1>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,4,4,1>(DOUBLE_ARGTYPE);
#endif
} // namespace chimera_cuda
} // namespace system
} // namespace openjij
#endif
| a9ddc9c93c0417980b63a2587b510f3601e73861.cu | // Copyright 2019 Jij Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef USE_CUDA
#include <cuda_runtime.h>
#include <system/gpu/chimera_cuda/kernel.hpp>
#include <iostream>
namespace openjij {
namespace system {
//for cuda device (kernel)
namespace chimera_cuda {
template<
typename FloatType,
std::size_t block_row,
std::size_t block_col,
std::size_t block_trot,
std::size_t unitsize
>
__global__ void metropolis(
int32_t sw,
int32_t* spin, const FloatType* rand,
FloatType* dE,
const FloatType* J_out_p,
const FloatType* J_out_n,
const FloatType* J_in_04,
const FloatType* J_in_15,
const FloatType* J_in_26,
const FloatType* J_in_37,
const FloatType* h,
ChimeraInfo info,
double beta, FloatType gamma, double s
){
static_assert(block_row*block_col*block_trot*unitsize <= 1024, "max limit of the number of thread for each block is 1024.");
//switch
//(0 -> 1st chimera unit (t==0, i==0, j==0) -> (0...3))
//(1 -> 1st chimera unit (t==0, i==0, j==0) -> (4...7))
//shared memory
//spin with boundaries
__shared__ int32_t spincache[(block_row+2) * (block_col+2) * (block_trot+2) * unitsize];
__shared__ FloatType randcache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_out_p_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_out_n_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_04_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_15_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_26_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType J_in_37_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType h_cache[block_row * block_col * block_trot * unitsize];
__shared__ FloatType dE_cache[block_row * block_col * block_trot * unitsize];
FloatType J_trot = 0;
//know who and where we are
int32_t r = idx_r(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t c = idx_c(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t i = idx_i(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t t = idx_t(info, blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z);
int32_t b_r = r%block_row;
int32_t b_c = c%block_col;
int32_t b_t = t%block_trot;
int32_t global_index = glIdx(info, r, c, i, t);
int32_t local_index = glIdx(info, r, c, i);
int32_t block_index = bkIdx<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t);
int32_t spin_index = bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t);
if(info.trotters > 1){
J_trot = 0.5*log(tanh(beta*gamma*(1-s)/(FloatType)info.trotters)); //-(1/2)log(coth(beta*gamma/M))
}
J_out_p_cache[block_index] = J_out_p[local_index];
J_out_n_cache[block_index] = J_out_n[local_index];
J_in_04_cache[block_index] = J_in_04[local_index];
J_in_15_cache[block_index] = J_in_15[local_index];
J_in_26_cache[block_index] = J_in_26[local_index];
J_in_37_cache[block_index] = J_in_37[local_index];
h_cache[block_index] = h[local_index];
randcache[block_index] = rand[global_index];
spincache[spin_index] = spin[global_index];
//be sure that dE_cache is initialized with zero
dE_cache[block_index] = 0;
//copy boundary spins to shared memory
//row
if(r%block_row == 0 && r != 0){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,-1,b_c,i,b_t)]
= spin[glIdx(info,r-1,c,i,t)];
}
if(r%block_row == block_row-1 && r != info.rows-1){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,block_row,b_c,i,b_t)]
= spin[glIdx(info,r+1,c,i,t)];
}
//col
if(c%block_col == 0 && c != 0){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,-1,i,b_t)]
= spin[glIdx(info,r,c-1,i,t)];
}
if(c%block_col == block_col-1 && c != info.cols-1){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,block_col,i,b_t)]
= spin[glIdx(info,r,c+1,i,t)];
}
//trotter slices
if(info.trotters > 1){
if(t%block_trot == 0){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,-1)]
= spin[glIdx(info,r,c,i,(t!=0)?t-1:info.trotters-1)];
}
if(t%block_trot == block_trot-1){
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,block_trot)]
= spin[glIdx(info,r,c,i,(t!=info.trotters-1)?t+1:0)];
}
}
__syncthreads();
//do metropolis
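    //checkerboard update: each call touches only half of the spins, selected by the parity of
    //(r+c+t) relative to sw and by whether the in-cell index i is in 0..3 or 4..7, so directly
    //coupled spins are never updated in the same pass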
if(((r+c+t)%2 == sw && i <= 3) || ((r+c+t)%2 != sw && 4 <= i)){
FloatType temp_dE =
-2*s*spincache[spin_index]*beta/(FloatType)(info.trotters)*(
//outside chimera unit
J_out_p_cache[block_index]
//0 to 3 -> go up 4 to 7 -> go left
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,(i<=3)?b_r-1:b_r, (4<=i)?b_c-1:b_c,i,b_t)]+
J_out_n_cache[block_index]
//0 to 3 -> go down 4 to 7 -> go right
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,(i<=3)?b_r+1:b_r, (4<=i)?b_c+1:b_c,i,b_t)]+
//inside chimera unit
J_in_04_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?4:0,b_t)]+
J_in_15_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?5:1,b_t)]+
J_in_26_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?6:2,b_t)]+
J_in_37_cache[block_index]
*spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,(i<=3)?7:3,b_t)]+
//local magnetization
h_cache[block_index]);
//trotter slices
if(info.trotters > 1){
temp_dE +=
-2*spincache[spin_index]*J_trot*(
//trotter slices
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t+1)]+
spincache[bkIdx_ext<block_row,block_col,block_trot>(info,b_r,b_c,i,b_t-1)]
);
}
//update
spin[global_index] = ((exp(-temp_dE) > randcache[block_index])?-1:1)*spincache[spin_index];
}
__syncthreads();
// reduction for calculating dE
uint32_t count = block_row * block_col * block_trot * unitsize; // <= 1024
// thread index
uint32_t ti = threadIdx.z*(blockDim.y*blockDim.x)+threadIdx.y*(blockDim.x)+threadIdx.x;
count = count/2; //1024 -> 512
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //512 -> 256
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //256 -> 128
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //128 -> 64
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //64 -> 32
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //32 -> 16
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //16 -> 8
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //8 -> 4
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //4 -> 2
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
count = count/2; //2 -> 1
if(ti < count){
dE_cache[ti] += dE_cache[ti+count];
}
__syncthreads();
if(ti == 0){
//add 'em
atomicAdd(&dE[0], dE_cache[ti]);
}
}
template<
typename FloatType,
std::size_t block_row,
std::size_t block_col,
std::size_t block_trot>
void metropolis_interface(
int32_t sw,
int32_t* spin, const FloatType* rand,
FloatType* dE,
const FloatType* J_out_p,
const FloatType* J_out_n,
const FloatType* J_in_04,
const FloatType* J_in_15,
const FloatType* J_in_26,
const FloatType* J_in_37,
const FloatType* h,
const ChimeraInfo& info, const dim3& grid, const dim3& block,
double beta, FloatType gamma, double s){
metropolis<FloatType, block_row, block_col, block_trot, info.chimera_unitsize><<<grid, block>>>(
sw,
spin, rand,
dE,
J_out_p,
J_out_n,
J_in_04,
J_in_15,
J_in_26,
J_in_37,
h,
info,
beta, gamma, s
);
}
//template instantiation
#define FLOAT_ARGTYPE int32_t,int32_t*,const float*,float*,const float*,const float*,const float*,const float*,const float*,const float*,const float*,const ChimeraInfo&,const dim3&,const dim3&,double,float,double
#define DOUBLE_ARGTYPE int32_t,int32_t*,const double*,double*,const double*,const double*,const double*,const double*,const double*,const double*,const double*,const ChimeraInfo&,const dim3&,const dim3&,double,double,double
template void metropolis_interface<float,1,1,1>(FLOAT_ARGTYPE);
template void metropolis_interface<float,2,2,2>(FLOAT_ARGTYPE);
template void metropolis_interface<float,3,3,3>(FLOAT_ARGTYPE);
template void metropolis_interface<float,4,4,4>(FLOAT_ARGTYPE);
template void metropolis_interface<float,2,2,1>(FLOAT_ARGTYPE);
template void metropolis_interface<float,3,3,1>(FLOAT_ARGTYPE);
template void metropolis_interface<float,4,4,1>(FLOAT_ARGTYPE);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600
template void metropolis_interface<double,1,1,1>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,2,2,2>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,3,3,3>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,4,4,4>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,2,2,1>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,3,3,1>(DOUBLE_ARGTYPE);
template void metropolis_interface<double,4,4,1>(DOUBLE_ARGTYPE);
#endif
} // namespace chimera_cuda
} // namespace system
} // namespace openjij
#endif
|
d9a31db376e7d2ef6d01bd2092db12fc0ef4a842.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
#include <vector>
#include<hiprand/hiprand_kernel.h>
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true){
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
constexpr int MAX_SIZE = 300;
constexpr int GENERATIONS = 100;
constexpr int ISLANDS = 32;
constexpr int POPULATION = 64;
__device__ __forceinline__ void swap(int& a, int& b){
int temp = a;
a = b;
b = temp;
}
__device__ __forceinline__ int cal_fitness(int* weights, int* values, char* genom, int W, int n){
int fitness = 0, weight = 0;
for(int i = 0; i < n; i++){
fitness+=values[i]*(int)genom[i];
weight+=weights[i]*(int)genom[i];
}
if(weight>W)
fitness = 0;
return fitness;
}
__device__ __forceinline__ void selection(char* a, char* b, int n){
for(int i = 0; i < n; i++){
a[i] = b[i];
}
}
__device__ __forceinline__ void crossover(char* mother, char* father, char* child, hiprandState_t* state, int n){
for(int i = 0; i < n; i++){
double p = hiprand_uniform(state);
if(p < 0.45)
child[i] = father[i];
else if(p < 0.90)
child[i] = mother[i];
else
child[i] = hiprand(state)%2;
}
}
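//block-wide bitonic sort: orders fitness[] in descending order (fittest first) and permutes pos[] alongside;
//assumes POPULATION is a power of two and that every thread of the block reaches the __syncthreads() together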
__device__ __forceinline__ void sort(int* fitness, int* pos, int n){
int tid = threadIdx.x;
for(unsigned int k = 2; k <= POPULATION; k*=2){
for(unsigned int j = k/2; j > 0; j/=2){
unsigned int ixj = tid^j;
if(ixj>tid){
            if((tid & k) == 0){
                if(fitness[tid] < fitness[ixj]){
                    swap(fitness[tid], fitness[ixj]);
                    swap(pos[tid], pos[ixj]);
                }
            }
            else if(fitness[tid] > fitness[ixj]){
                swap(fitness[tid], fitness[ixj]);
                swap(pos[tid], pos[ixj]);
            }
}
__syncthreads();
}
}
}
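//parallel prefix-max (Hillis-Steele style): sweeps the running maximum and its pos[] entry toward higher
//indices so the best fitness ends up at the last element; note the number of doubling steps is driven by n,
//so full propagation across all POPULATION entries needs n > POPULATION/2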
__device__ __forceinline__ void prefixmax(int* arr, int* pos, int n){
int x, p;
int tid = threadIdx.x;
for(int i = 1; i < n; i*=2){
if(tid>=i){
x = arr[tid-i];
p = pos[tid-i];
}
__syncthreads();
if(tid>=i&&x>arr[tid]){
arr[tid] = x;
pos[tid] = p;
}
__syncthreads();
}
}
__global__ void kernel(int* w, int* v, int n, int W, char* result, int* profit, hiprandState_t* states) {
__shared__ int weights[MAX_SIZE];
__shared__ int values[MAX_SIZE];
__shared__ char population[POPULATION][MAX_SIZE];
__shared__ char new_population[POPULATION][MAX_SIZE];
__shared__ int fitness[POPULATION];
__shared__ int pos[POPULATION];
int tid = threadIdx.x;
int bid = blockIdx.x;
int id = blockDim.x*bid + tid;
int frac = POPULATION/10;
int p1, p2;
hiprandState_t state = states[id];
for(int i = tid; i < n; i+=POPULATION){
weights[i] = w[i];
values[i] = v[i];
}
__syncthreads();
for(int i = 0; i < n; i++){
population[tid][i] = hiprand(&state)%2;
}
int not_changed = 0;
int prev = 0;
int iter = 0;
for(int g = 0; g < GENERATIONS+1; g++)
//while(not_changed<GENERATIONS)
{
iter++;
fitness[tid] = cal_fitness(weights, values, population[tid], W, n);
pos[tid] = tid;
sort(fitness, pos, n);
__syncthreads();
// if(prev == fitness[0])
// not_changed++;
// else
// not_changed = 0;
// prev = fitness[0];
// __syncthreads();
if(tid < frac){
selection(new_population[tid], population[pos[tid]], n);
}
if(tid >= frac){
p1 = ceilf(hiprand_uniform(&state) * (POPULATION/2));
p2 = ceilf(hiprand_uniform(&state) * (POPULATION/2));
crossover(population[pos[p1]], population[pos[p2]], new_population[tid], &state, n);
}
__syncthreads();
for(int i = 0; i < n; i++)
population[tid][i] = new_population[tid][i];
__syncthreads();
}
fitness[tid] = cal_fitness(weights, values, population[tid], W, n);
pos[tid] = tid;
__syncthreads();
prefixmax(fitness, pos, n);
if(tid == 0){
profit[bid] = fitness[POPULATION-1];
// stats[bid] = iter;
}
__syncthreads();
for(int i = tid; i < n; i+=POPULATION)
result[bid*n+i] = population[pos[POPULATION-1]][i];
}
__global__ void init(hiprandState_t* states, unsigned int seed){
int id = blockDim.x*blockIdx.x + threadIdx.x;
hiprand_init(seed, id, 0, &states[id]);
}
int main(){
hipSetDevice(0);
int *d_weights, *d_values, *d_profit;
// int* d_stats;
char* d_result;
hiprandState_t* states;
int n, W;
cin>>n>>W;
vector<int> weights(n), values(n), profit(ISLANDS);
vector<char> result(ISLANDS*n);
// vector<int> stats(ISLANDS);
for(int i = 0; i < n; i++){
cin>>weights[i]>>values[i];
}
gpuErrchk(hipMalloc(&d_weights, n*sizeof(int)));
gpuErrchk(hipMalloc(&d_values, n*sizeof(int)));
gpuErrchk(hipMalloc(&d_result, ISLANDS*n*sizeof(int)));
gpuErrchk(hipMalloc(&d_profit, ISLANDS*sizeof(int)));
// gpuErrchk(hipMalloc(&d_stats, ISLANDS*sizeof(int)));
gpuErrchk(hipMalloc(&states, ISLANDS*POPULATION*sizeof(hiprandState_t)));
gpuErrchk(hipMemcpy(d_weights, weights.data(), n*sizeof(int), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_values, values.data(), n*sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( init), dim3(ISLANDS), dim3(POPULATION), 0, 0, states, time(0));
gpuErrchk(hipDeviceSynchronize());
auto start = chrono::steady_clock::now();
hipLaunchKernelGGL(( kernel), dim3(ISLANDS), dim3(POPULATION), 0, 0, d_weights, d_values, n, W, d_result, d_profit, states);
gpuErrchk(hipDeviceSynchronize());
auto stop = chrono::steady_clock::now();
gpuErrchk(hipMemcpy(profit.data(), d_profit, ISLANDS*sizeof(int), hipMemcpyDeviceToHost));
// gpuErrchk(hipMemcpy(stats.data(), d_stats, ISLANDS*sizeof(int), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(result.data(), d_result, n*ISLANDS, hipMemcpyDeviceToHost));
int best = 0;
// int worst = 0;
for(int i = 0; i < ISLANDS; i++){
if(profit[i]>profit[best])
best = i;
// if(stats[i]>stats[worst])
// worst = i;
}
cout<<"Best island: "<<best<<endl;
cout<<"Profit: "<<profit[best]<<endl;
// cout<<"Max generations:"<<stats[worst]<<endl;
for(int i = 0; i < n; i++)
cout<<+result[best*n+i]<<" ";
cout<<endl;
cerr << "Elapsed time: " << chrono::duration_cast<chrono::microseconds>(stop - start).count() << "s\n";
gpuErrchk(hipFree(states));
gpuErrchk(hipFree(d_weights));
gpuErrchk(hipFree(d_values));
gpuErrchk(hipFree(d_profit));
gpuErrchk(hipFree(d_result));
// gpuErrchk(hipFree(d_stats));
return 0;
} | d9a31db376e7d2ef6d01bd2092db12fc0ef4a842.cu | #include <iostream>
#include <chrono>
#include <vector>
#include<curand_kernel.h>
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
constexpr int MAX_SIZE = 300;
constexpr int GENERATIONS = 100;
constexpr int ISLANDS = 32;
constexpr int POPULATION = 64;
__device__ __forceinline__ void swap(int& a, int& b){
int temp = a;
a = b;
b = temp;
}
__device__ __forceinline__ int cal_fitness(int* weights, int* values, char* genom, int W, int n){
int fitness = 0, weight = 0;
for(int i = 0; i < n; i++){
fitness+=values[i]*(int)genom[i];
weight+=weights[i]*(int)genom[i];
}
if(weight>W)
fitness = 0;
return fitness;
}
__device__ __forceinline__ void selection(char* a, char* b, int n){
for(int i = 0; i < n; i++){
a[i] = b[i];
}
}
__device__ __forceinline__ void crossover(char* mother, char* father, char* child, curandState_t* state, int n){
for(int i = 0; i < n; i++){
double p = curand_uniform(state);
if(p < 0.45)
child[i] = father[i];
else if(p < 0.90)
child[i] = mother[i];
else
child[i] = curand(state)%2;
}
}
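//block-wide bitonic sort: orders fitness[] in descending order (fittest first) and permutes pos[] alongside;
//assumes POPULATION is a power of two and that every thread of the block reaches the __syncthreads() together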
__device__ __forceinline__ void sort(int* fitness, int* pos, int n){
int tid = threadIdx.x;
for(unsigned int k = 2; k <= POPULATION; k*=2){
for(unsigned int j = k/2; j > 0; j/=2){
unsigned int ixj = tid^j;
if(ixj>tid){
            if((tid & k) == 0){
                if(fitness[tid] < fitness[ixj]){
                    swap(fitness[tid], fitness[ixj]);
                    swap(pos[tid], pos[ixj]);
                }
            }
            else if(fitness[tid] > fitness[ixj]){
                swap(fitness[tid], fitness[ixj]);
                swap(pos[tid], pos[ixj]);
            }
}
__syncthreads();
}
}
}
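//parallel prefix-max (Hillis-Steele style): sweeps the running maximum and its pos[] entry toward higher
//indices so the best fitness ends up at the last element; note the number of doubling steps is driven by n,
//so full propagation across all POPULATION entries needs n > POPULATION/2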
__device__ __forceinline__ void prefixmax(int* arr, int* pos, int n){
int x, p;
int tid = threadIdx.x;
for(int i = 1; i < n; i*=2){
if(tid>=i){
x = arr[tid-i];
p = pos[tid-i];
}
__syncthreads();
if(tid>=i&&x>arr[tid]){
arr[tid] = x;
pos[tid] = p;
}
__syncthreads();
}
}
__global__ void kernel(int* w, int* v, int n, int W, char* result, int* profit, curandState_t* states) {
__shared__ int weights[MAX_SIZE];
__shared__ int values[MAX_SIZE];
__shared__ char population[POPULATION][MAX_SIZE];
__shared__ char new_population[POPULATION][MAX_SIZE];
__shared__ int fitness[POPULATION];
__shared__ int pos[POPULATION];
int tid = threadIdx.x;
int bid = blockIdx.x;
int id = blockDim.x*bid + tid;
int frac = POPULATION/10;
int p1, p2;
curandState_t state = states[id];
for(int i = tid; i < n; i+=POPULATION){
weights[i] = w[i];
values[i] = v[i];
}
__syncthreads();
for(int i = 0; i < n; i++){
population[tid][i] = curand(&state)%2;
}
int not_changed = 0;
int prev = 0;
int iter = 0;
for(int g = 0; g < GENERATIONS+1; g++)
//while(not_changed<GENERATIONS)
{
iter++;
fitness[tid] = cal_fitness(weights, values, population[tid], W, n);
pos[tid] = tid;
sort(fitness, pos, n);
__syncthreads();
// if(prev == fitness[0])
// not_changed++;
// else
// not_changed = 0;
// prev = fitness[0];
// __syncthreads();
if(tid < frac){
selection(new_population[tid], population[pos[tid]], n);
}
if(tid >= frac){
p1 = ceilf(curand_uniform(&state) * (POPULATION/2));
p2 = ceilf(curand_uniform(&state) * (POPULATION/2));
crossover(population[pos[p1]], population[pos[p2]], new_population[tid], &state, n);
}
__syncthreads();
for(int i = 0; i < n; i++)
population[tid][i] = new_population[tid][i];
__syncthreads();
}
fitness[tid] = cal_fitness(weights, values, population[tid], W, n);
pos[tid] = tid;
__syncthreads();
prefixmax(fitness, pos, n);
if(tid == 0){
profit[bid] = fitness[POPULATION-1];
// stats[bid] = iter;
}
__syncthreads();
for(int i = tid; i < n; i+=POPULATION)
result[bid*n+i] = population[pos[POPULATION-1]][i];
}
__global__ void init(curandState_t* states, unsigned int seed){
int id = blockDim.x*blockIdx.x + threadIdx.x;
curand_init(seed, id, 0, &states[id]);
}
int main(){
cudaSetDevice(0);
int *d_weights, *d_values, *d_profit;
// int* d_stats;
char* d_result;
curandState_t* states;
int n, W;
cin>>n>>W;
vector<int> weights(n), values(n), profit(ISLANDS);
vector<char> result(ISLANDS*n);
// vector<int> stats(ISLANDS);
for(int i = 0; i < n; i++){
cin>>weights[i]>>values[i];
}
gpuErrchk(cudaMalloc(&d_weights, n*sizeof(int)));
gpuErrchk(cudaMalloc(&d_values, n*sizeof(int)));
gpuErrchk(cudaMalloc(&d_result, ISLANDS*n*sizeof(int)));
gpuErrchk(cudaMalloc(&d_profit, ISLANDS*sizeof(int)));
// gpuErrchk(cudaMalloc(&d_stats, ISLANDS*sizeof(int)));
gpuErrchk(cudaMalloc(&states, ISLANDS*POPULATION*sizeof(curandState_t)));
gpuErrchk(cudaMemcpy(d_weights, weights.data(), n*sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_values, values.data(), n*sizeof(int), cudaMemcpyHostToDevice));
init<<<ISLANDS, POPULATION>>>(states, time(0));
gpuErrchk(cudaDeviceSynchronize());
auto start = chrono::steady_clock::now();
kernel<<<ISLANDS, POPULATION>>>(d_weights, d_values, n, W, d_result, d_profit, states);
gpuErrchk(cudaDeviceSynchronize());
auto stop = chrono::steady_clock::now();
gpuErrchk(cudaMemcpy(profit.data(), d_profit, ISLANDS*sizeof(int), cudaMemcpyDeviceToHost));
// gpuErrchk(cudaMemcpy(stats.data(), d_stats, ISLANDS*sizeof(int), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(result.data(), d_result, n*ISLANDS, cudaMemcpyDeviceToHost));
int best = 0;
// int worst = 0;
for(int i = 0; i < ISLANDS; i++){
if(profit[i]>profit[best])
best = i;
// if(stats[i]>stats[worst])
// worst = i;
}
cout<<"Best island: "<<best<<endl;
cout<<"Profit: "<<profit[best]<<endl;
// cout<<"Max generations:"<<stats[worst]<<endl;
for(int i = 0; i < n; i++)
cout<<+result[best*n+i]<<" ";
cout<<endl;
cerr << "Elapsed time: " << chrono::duration_cast<chrono::microseconds>(stop - start).count() << "μs\n";
gpuErrchk(cudaFree(states));
gpuErrchk(cudaFree(d_weights));
gpuErrchk(cudaFree(d_values));
gpuErrchk(cudaFree(d_profit));
gpuErrchk(cudaFree(d_result));
// gpuErrchk(cudaFree(d_stats));
return 0;
} |
e866cb19ebbe73d35861534907ef7ceba0a53e91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <iomanip>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
#include <chrono>
using namespace std::chrono;
#define CSC(call) \
do { \
hipError_t res = call; \
if (res != hipSuccess) { \
fprintf(stderr, "ERROR: in %s:%d. Message: %s\n", \
__FILE__, __LINE__, hipGetErrorString(res)); \
exit(0); \
} \
} while(0)
struct comparator {
__host__ __device__ double fabs(double a){
return a < 0.0 ? -a : a;
}
__host__ __device__ bool operator()(double a, double b)
{
return fabs(a) < fabs(b);
}
};
__host__ void Printer(double* matrix, int height, int width)
{
std::cout << "Printer\n";
for (int i = 0; i < width; ++i)
{
for (int j = 0; j < height; ++j)
{
printf("a[i=%d, j=%d->%d] = %.1f ", i, j, j * width + i, matrix[j * width + i]);
}
printf("\n");
}
}
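//the augmented system is stored column by column: element (row r, column c) lives at matrix[c * width + r],
//with the right-hand side appended as the extra column; "row" in the kernels below is the current pivot index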
__global__ void SwapGPU(double* matrix, int width, int height, int row, int rowWithMax)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int xOffset = gridDim.x * blockDim.x;
double tmp;
for (int i = idx + row; i < height; i += xOffset)
{
tmp = matrix[i * width + row];
matrix[i * width + row] = matrix[i * width + rowWithMax];
matrix[i * width + rowWithMax] = tmp;
}
}
__global__ void Normalization(double* matrix, int width, int height, int row)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int xOffset = gridDim.x * blockDim.x;
for (int i = idx + row + 1; i < height; i += xOffset)
{
matrix[i * width + row] /= matrix[row * width + row];
}
}
__global__ void ForwardGauss(double* matrix, int width, int height, int row)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int xOffset = gridDim.x * blockDim.x;
int yOffset = gridDim.y * blockDim.y;
for (int i = idx + row + 1; i < width; i += xOffset)
{
for (int j = idy + row + 1; j < height; j += yOffset)
{
matrix[j * width + i] -= matrix[j * width + row] * matrix[row * width + i];
}
}
}
__global__ void BackwardGauss(double* matrix, double* x, int size, int row)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int xOffset = gridDim.x * blockDim.x;
for (int i = row - 1 - idx; i >= 0; i -= xOffset)
{
x[i] -= matrix[row * size + i] * x[row];
}
}
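//CPU reference: Gaussian elimination with partial pivoting followed by back-substitution, run only to time
//the sequential version against the GPU path (its solution vector is discarded)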
void CustomGauss(double* matrix, int width, int height)
{
for (int row = 0; row < width - 1; ++row)
{
double max = 0.0;
int rowWithMax = 0;
for (int i = row; i < width; ++i) {
if (abs(matrix[row * width + i]) > max) {
max = abs(matrix[row * width + i]);
rowWithMax = i;
}
}
if (rowWithMax != row)
{
double tmp = 0.0;
for (int i = row; i < height; ++i)
{
tmp = matrix[i * width + row];
matrix[i * width + row] = matrix[i * width + rowWithMax];
matrix[i * width + rowWithMax] = tmp;
}
}
for (int i = row + 1; i < height; ++i)
{
matrix[i * width + row] /= matrix[row * width + row];
}
for (int i = row + 1; i < width; ++i)
{
for (int j = row + 1; j < height; ++j)
{
matrix[j * width + i] -= matrix[j * width + row] * matrix[row * width + i];
}
}
}
double* x = new double[width];
for (int i = 0; i < width; ++i)
{
x[i] = matrix[width * width + i];
}
x[width - 1] /= matrix[(width - 1) * width + (width - 1)];
for (int row = width - 1; row > 0; --row)
{
for (int i = row - 1; i >= 0; --i)
{
x[i] -= matrix[row * width + i] * x[row];
}
}
    delete[] x;
}
int main(int argc, const char* argv[])
{
std::ios_base::sync_with_stdio(false);
std::cin.tie(nullptr);
int size;
std::cin >> size;
int height = size + 1;
int width = size;
double* matrix = new double[height * width];
for (int i = 0; i < size; ++i)
{
for (int j = 0; j < size; ++j)
{
std::cin >> matrix[j * width + i];
}
}
for (int i = 0; i < size; ++i)
{
std::cin >> matrix[size * size + i];
}
double* matrixGPU;
CSC(hipMalloc(&matrixGPU, sizeof(double) * height * width));
CSC(hipMemcpy(matrixGPU, matrix, sizeof(double) * height * width, hipMemcpyHostToDevice));
int xThreadCount = 32;
int yThreadCount = 32;
int xBlockCount = 32;
int yBlockCount = 32;
comparator comp;
thrust::device_ptr<double> ptr, ptrMax;
int rowWithMax;
auto startt = steady_clock::now();
//custom
CustomGauss(matrix, width, height);
auto endd = steady_clock::now();
std::cout << "CPU" << std::endl;
std::cout << "time = " << ((double)duration_cast<microseconds>(endd - startt).count()) / 1000.0 << std::endl;
hipEvent_t start, end;
CSC(hipEventCreate(&start));
CSC(hipEventCreate(&end));
CSC(hipEventRecord(start));
for (int row = 0; row < size - 1; ++row)
{
ptr = thrust::device_pointer_cast(matrixGPU + row * size);
ptrMax = thrust::max_element(ptr + row, ptr + size, comp);
rowWithMax = ptrMax - ptr;
if (rowWithMax != row)
{
hipLaunchKernelGGL(( SwapGPU), dim3(dim3(xBlockCount * yBlockCount)), dim3(dim3(xThreadCount * yThreadCount)), 0, 0, matrixGPU, width, height, row, rowWithMax);
CSC(hipGetLastError());
}
hipLaunchKernelGGL(( Normalization), dim3(dim3(xBlockCount * yBlockCount)), dim3(dim3(xThreadCount * yThreadCount)), 0, 0, matrixGPU, width, height, row);
CSC(hipGetLastError());
hipLaunchKernelGGL(( ForwardGauss), dim3(dim3(xBlockCount, yBlockCount)), dim3(dim3(xThreadCount, yThreadCount)), 0, 0, matrixGPU, width, height, row);
CSC(hipGetLastError());
}
CSC(hipMemcpy(matrix, matrixGPU, sizeof(double) * width * height, hipMemcpyDeviceToHost));
double* x = new double[size];
for (int i = 0; i < size; ++i)
{
x[i] = matrix[width * width + i];
}
x[size - 1] /= matrix[(width - 1) * width + (width - 1)];
double* xGPU;
CSC(hipMalloc(&xGPU, sizeof(double) * size));
CSC(hipMemcpy(xGPU, x, sizeof(double) * size, hipMemcpyHostToDevice));
for (int row = size - 1; row > 0; --row)
{
hipLaunchKernelGGL(( BackwardGauss), dim3(dim3(xBlockCount * yBlockCount)), dim3(dim3(xThreadCount * yThreadCount)), 0, 0, matrixGPU, xGPU, size, row);
CSC(hipGetLastError());
}
CSC(hipEventRecord(end));
CSC(hipEventSynchronize(end));
float t;
CSC(hipEventElapsedTime(&t, start, end));
CSC(hipEventDestroy(start));
CSC(hipEventDestroy(end));
printf("GPU\n");
printf("time = %f\n", t);
printf("blocks = (%d, %d)\n", xBlockCount, yBlockCount);
printf("threads = (%d, %d)\n", xThreadCount, yThreadCount);
CSC(hipMemcpy(x, xGPU, sizeof(double) * size, hipMemcpyDeviceToHost));
// const int accuracy = 10;
// for (int i = 0; i < size - 1; ++i)
// {
// std::cout << std::scientific << std::setprecision(accuracy) << x[i] << " ";
// }
// std::cout << std::scientific << std::setprecision(accuracy) << x[size - 1];
CSC(hipFree(matrixGPU));
CSC(hipFree(xGPU));
delete[] matrix;
delete[] x;
return 0;
} | e866cb19ebbe73d35861534907ef7ceba0a53e91.cu | #include <iostream>
#include <iomanip>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
#include <chrono>
using namespace std::chrono;
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR: in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
struct comparator {
__host__ __device__ double fabs(double a){
return a < 0.0 ? -a : a;
}
__host__ __device__ bool operator()(double a, double b)
{
return fabs(a) < fabs(b);
}
};
__host__ void Printer(double* matrix, int height, int width)
{
std::cout << "Printer\n";
for (int i = 0; i < width; ++i)
{
for (int j = 0; j < height; ++j)
{
printf("a[i=%d, j=%d->%d] = %.1f ", i, j, j * width + i, matrix[j * width + i]);
}
printf("\n");
}
}
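//the augmented system is stored column by column: element (row r, column c) lives at matrix[c * width + r],
//with the right-hand side appended as the extra column; "row" in the kernels below is the current pivot index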
__global__ void SwapGPU(double* matrix, int width, int height, int row, int rowWithMax)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int xOffset = gridDim.x * blockDim.x;
double tmp;
for (int i = idx + row; i < height; i += xOffset)
{
tmp = matrix[i * width + row];
matrix[i * width + row] = matrix[i * width + rowWithMax];
matrix[i * width + rowWithMax] = tmp;
}
}
__global__ void Normalization(double* matrix, int width, int height, int row)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int xOffset = gridDim.x * blockDim.x;
for (int i = idx + row + 1; i < height; i += xOffset)
{
matrix[i * width + row] /= matrix[row * width + row];
}
}
__global__ void ForwardGauss(double* matrix, int width, int height, int row)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int xOffset = gridDim.x * blockDim.x;
int yOffset = gridDim.y * blockDim.y;
for (int i = idx + row + 1; i < width; i += xOffset)
{
for (int j = idy + row + 1; j < height; j += yOffset)
{
matrix[j * width + i] -= matrix[j * width + row] * matrix[row * width + i];
}
}
}
__global__ void BackwardGauss(double* matrix, double* x, int size, int row)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int xOffset = gridDim.x * blockDim.x;
for (int i = row - 1 - idx; i >= 0; i -= xOffset)
{
x[i] -= matrix[row * size + i] * x[row];
}
}
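//CPU reference: Gaussian elimination with partial pivoting followed by back-substitution, run only to time
//the sequential version against the GPU path (its solution vector is discarded)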
void CustomGauss(double* matrix, int width, int height)
{
for (int row = 0; row < width - 1; ++row)
{
double max = 0.0;
int rowWithMax = 0;
for (int i = row; i < width; ++i) {
if (abs(matrix[row * width + i]) > max) {
max = abs(matrix[row * width + i]);
rowWithMax = i;
}
}
if (rowWithMax != row)
{
double tmp = 0.0;
for (int i = row; i < height; ++i)
{
tmp = matrix[i * width + row];
matrix[i * width + row] = matrix[i * width + rowWithMax];
matrix[i * width + rowWithMax] = tmp;
}
}
for (int i = row + 1; i < height; ++i)
{
matrix[i * width + row] /= matrix[row * width + row];
}
for (int i = row + 1; i < width; ++i)
{
for (int j = row + 1; j < height; ++j)
{
matrix[j * width + i] -= matrix[j * width + row] * matrix[row * width + i];
}
}
}
double* x = new double[width];
for (int i = 0; i < width; ++i)
{
x[i] = matrix[width * width + i];
}
x[width - 1] /= matrix[(width - 1) * width + (width - 1)];
for (int row = width - 1; row > 0; --row)
{
for (int i = row - 1; i >= 0; --i)
{
x[i] -= matrix[row * width + i] * x[row];
}
}
    delete[] x;
}
int main(int argc, const char* argv[])
{
std::ios_base::sync_with_stdio(false);
std::cin.tie(nullptr);
int size;
std::cin >> size;
int height = size + 1;
int width = size;
double* matrix = new double[height * width];
for (int i = 0; i < size; ++i)
{
for (int j = 0; j < size; ++j)
{
std::cin >> matrix[j * width + i];
}
}
for (int i = 0; i < size; ++i)
{
std::cin >> matrix[size * size + i];
}
double* matrixGPU;
CSC(cudaMalloc(&matrixGPU, sizeof(double) * height * width));
CSC(cudaMemcpy(matrixGPU, matrix, sizeof(double) * height * width, cudaMemcpyHostToDevice));
int xThreadCount = 32;
int yThreadCount = 32;
int xBlockCount = 32;
int yBlockCount = 32;
comparator comp;
thrust::device_ptr<double> ptr, ptrMax;
int rowWithMax;
auto startt = steady_clock::now();
//custom
CustomGauss(matrix, width, height);
auto endd = steady_clock::now();
std::cout << "CPU" << std::endl;
std::cout << "time = " << ((double)duration_cast<microseconds>(endd - startt).count()) / 1000.0 << std::endl;
cudaEvent_t start, end;
CSC(cudaEventCreate(&start));
CSC(cudaEventCreate(&end));
CSC(cudaEventRecord(start));
for (int row = 0; row < size - 1; ++row)
{
ptr = thrust::device_pointer_cast(matrixGPU + row * size);
ptrMax = thrust::max_element(ptr + row, ptr + size, comp);
rowWithMax = ptrMax - ptr;
if (rowWithMax != row)
{
SwapGPU<<<dim3(xBlockCount * yBlockCount), dim3(xThreadCount * yThreadCount)>>>(matrixGPU, width, height, row, rowWithMax);
CSC(cudaGetLastError());
}
Normalization<<<dim3(xBlockCount * yBlockCount), dim3(xThreadCount * yThreadCount)>>>(matrixGPU, width, height, row);
CSC(cudaGetLastError());
ForwardGauss<<<dim3(xBlockCount, yBlockCount), dim3(xThreadCount, yThreadCount)>>>(matrixGPU, width, height, row);
CSC(cudaGetLastError());
}
CSC(cudaMemcpy(matrix, matrixGPU, sizeof(double) * width * height, cudaMemcpyDeviceToHost));
double* x = new double[size];
for (int i = 0; i < size; ++i)
{
x[i] = matrix[width * width + i];
}
x[size - 1] /= matrix[(width - 1) * width + (width - 1)];
double* xGPU;
CSC(cudaMalloc(&xGPU, sizeof(double) * size));
CSC(cudaMemcpy(xGPU, x, sizeof(double) * size, cudaMemcpyHostToDevice));
for (int row = size - 1; row > 0; --row)
{
BackwardGauss<<<dim3(xBlockCount * yBlockCount), dim3(xThreadCount * yThreadCount)>>>(matrixGPU, xGPU, size, row);
CSC(cudaGetLastError());
}
CSC(cudaEventRecord(end));
CSC(cudaEventSynchronize(end));
float t;
CSC(cudaEventElapsedTime(&t, start, end));
CSC(cudaEventDestroy(start));
CSC(cudaEventDestroy(end));
printf("GPU\n");
printf("time = %f\n", t);
printf("blocks = (%d, %d)\n", xBlockCount, yBlockCount);
printf("threads = (%d, %d)\n", xThreadCount, yThreadCount);
CSC(cudaMemcpy(x, xGPU, sizeof(double) * size, cudaMemcpyDeviceToHost));
// const int accuracy = 10;
// for (int i = 0; i < size - 1; ++i)
// {
// std::cout << std::scientific << std::setprecision(accuracy) << x[i] << " ";
// }
// std::cout << std::scientific << std::setprecision(accuracy) << x[size - 1];
CSC(cudaFree(matrixGPU));
CSC(cudaFree(xGPU));
delete[] matrix;
delete[] x;
return 0;
} |
e385d9602bc1d4242450911b59a62e34953cc40a.hip | // !!! This is a file automatically generated by hipify!!!
#include "cal.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cuda_gl_interop.h>
#include <cudnn.h>
#define Dt 0.000005f
float *act[8] = { NULL };
float *para[6] = { NULL };
float *grad[4] = { NULL };
unsigned char *data = NULL;
float *buffer = NULL;
float *bufferd = NULL;
int n;
cudaGraphicsResource *cuda_vbo_resource;
cudnnHandle_t dnnh;
cudnnActivationDescriptor_t dnnad;
cudnnTensorDescriptor_t dnntd[9];
cudnnFilterDescriptor_t dnnfd[3];
cudnnConvolutionDescriptor_t dnncd[2];
cudnnConvolutionFwdAlgo_t dnnalgo[3];
cudnnPoolingDescriptor_t dnnpd;
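//network described by the descriptors set up below (batch size 128):
//  28x28x1 -> conv 5x5, 8 maps, pad 2 -> ReLU -> 2x2 max-pool -> 14x14x8
//  -> conv 5x5, 16 maps, pad 2 -> ReLU -> 2x2 max-pool -> 7x7x16
//  -> "fully connected" layer expressed as a 7x7 convolution to 10 outputs -> softmax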
int cudainit(HWND hWnd) {
hipError_t cudaStatus;
cudnnStatus_t cudnnStatus;
int i, j, k, l;
float x;
float alpha, beta;
alpha = 1.0;
beta = 0.0;
FILE *fi;
cudaStatus = hipSetDevice(0);
cudaStatus = hipMalloc(act, (28 * 28 + 8 * 28 * 28 + 8 * 28 * 28 + 8 * 14 * 14 + 16 * 14 * 14 + 16 * 14 * 14 + 16 * 7 * 7 + 10) * sizeof(float) * 128);
act[1] = act[0] + 128 * 28 * 28;
act[2] = act[1] + 128 * 8 * 28 * 28;
act[3] = act[2] + 128 * 8 * 28 * 28;
act[4] = act[3] + 128 * 8 * 14 * 14;
act[5] = act[4] + 128 * 16 * 14 * 14;
act[6] = act[5] + 128 * 16 * 14 * 14;
act[7] = act[6] + 128 * 16 * 7 * 7;
cudaStatus = hipMalloc(para, (8 * 5 * 5 + 8 * 1 * 1 + 16 * 8 * 5 * 5 + 16 * 1 * 1 + 10 * 16 * 7 * 7 + 10) * sizeof(float));
para[1] = para[0] + 8 * 5 * 5;
para[2] = para[1] + 8 * 1 * 1;
para[3] = para[2] + 16 * 8 * 5 * 5;
para[4] = para[3] + 16 * 1 * 1;
para[5] = para[4] + 10 * 16 * 7 * 7;
cudaStatus = hipMalloc(grad, (8 * 28 * 28 + 8 * 14 * 14 + 16 * 14 * 14 + 16 * 7 * 7) * 128 * sizeof(float));
grad[1] = grad[0] + 128 * 8 * 28 * 28;
grad[2] = grad[1] + 128 * 8 * 14 * 14;
grad[3] = grad[2] + 128 * 16 * 14 * 14;
cudaStatus = hipMalloc(&bufferd, (28 * 28 + 10) * 60000 * sizeof(float));
data = (unsigned char*)malloc((28 * 28 * 60000 + 60000) * sizeof(unsigned char));
if (!fopen_s(&fi, "D:/download/train-images.idx3-ubyte", "r")) {
fseek(fi, 16, SEEK_SET);
fread(data, 1, 28 * 28 * 60000, fi);
fclose(fi);
}
if (!fopen_s(&fi, "D:/download/train-labels.idx1-ubyte", "r")) {
fseek(fi, 8, SEEK_SET);
fread(data + 28 * 28 * 60000, 1, 60000, fi);
fclose(fi);
}
buffer = (float*)malloc((28 * 28 + 10) * 60000 * sizeof(float));
for (i = 0; i < 28 * 28 * 60000; i++) {
buffer[i] = (float)(data[i])/256.0f;
}
memset(buffer + 28 * 28 * 60000, 0, 60000 * 10 * sizeof(float));
for (i = 0; i < 60000; i++) {
buffer[28 * 28 * 60000 + i * 10 + data[28 * 28 * 60000 + i]] = 1.0f;
}
for (i = 0; i < 32; i++) {
for (j = 0; j < 32; j++) {
for (k = 0; k < 28; k++) {
for (l = 0; l < 28; l++) {
data[((i * 28 + k) * 1024 + j * 28 + l) * 3] = buffer[(i * 32 + j) * 28 * 28 + k * 28 + l] * 256;
data[((i * 28 + k) * 1024 + j * 28 + l) * 3 + 1] = buffer[(i * 32 + j) * 28 * 28 + k * 28 + l] * 256;
data[((i * 28 + k) * 1024 + j * 28 + l) * 3 + 2] = buffer[(i * 32 + j) * 28 * 28 + k * 28 + l] * 256;
}
}
}
}
cudaStatus = hipMemcpy(bufferd, buffer, (28 * 28 + 10) * 60000 * sizeof(float), hipMemcpyHostToDevice);
if (!fopen_s(&fi, "D:/files/data/fig", "r")) {
fread(buffer, sizeof(float), (8 * 5 * 5 + 8 + 16 * 8 * 5 * 5 + 16 + 10 * 16 * 7 * 7 + 10), fi);
fclose(fi);
}
else {
for (i = 0; i < 8 * 5 * 5 + 8 + 16 * 8 * 5 * 5 + 16 + 10 * 16 * 7 * 7 + 10; i++) {
buffer[i] = (rand() / 32768.0f - 0.5f)*0.25f;
}
for (i = 0; i < 8 * 5 * 5; i++) {
buffer[i] = (rand() / 32768.0f - 0.5f)*1.0f;
}
}
cudaStatus = hipMemcpy(para[0], buffer, (8 * 5 * 5 + 8 + 16 * 8 * 5 * 5 + 16 + 10 * 16 * 7 * 7 + 10) * sizeof(float), hipMemcpyHostToDevice);
cudnnStatus = cudnnCreate(&dnnh);
cudnnStatus = cudnnCreateActivationDescriptor(&dnnad);
cudnnStatus = cudnnSetActivationDescriptor(dnnad, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN, 0.0);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 0);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 1);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 2);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 3);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 4);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 5);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 6);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 7);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 8);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[0], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 128, 1, 28, 28);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[1], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 128, 8, 28, 28);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[2], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 128, 8, 14, 14);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[3], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 128, 16, 14, 14);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[4], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 128, 16, 7, 7);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[5], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 128, 10, 1, 1);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[6], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 8, 1, 1);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[7], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 16, 1, 1);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[8], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 10, 1, 1);
cudnnStatus = cudnnCreateFilterDescriptor(dnnfd + 0);
cudnnStatus = cudnnCreateFilterDescriptor(dnnfd + 1);
cudnnStatus = cudnnCreateFilterDescriptor(dnnfd + 2);
cudnnStatus = cudnnSetFilter4dDescriptor(dnnfd[0], CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 8, 1, 5, 5);
cudnnStatus = cudnnSetFilter4dDescriptor(dnnfd[1], CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 16, 8, 5, 5);
cudnnStatus = cudnnSetFilter4dDescriptor(dnnfd[2], CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 10, 16, 7, 7);
cudnnStatus = cudnnCreateConvolutionDescriptor(dnncd + 0);
cudnnStatus = cudnnCreateConvolutionDescriptor(dnncd + 1);
cudnnStatus = cudnnSetConvolution2dDescriptor(dnncd[0], 2, 2, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);
cudnnStatus = cudnnSetConvolution2dDescriptor(dnncd[1], 0, 0, 1, 1, 1, 1, CUDNN_CONVOLUTION, CUDNN_DATA_FLOAT);
cudnnStatus = cudnnCreatePoolingDescriptor(&dnnpd);
cudnnStatus = cudnnSetPooling2dDescriptor(dnnpd, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 2, 2);//AVERAGE_COUNT_INCLUDE_PADDING
cudnnStatus = cudnnGetConvolutionForwardAlgorithm(dnnh, dnntd[0], dnnfd[0], dnncd[0], dnntd[1], CUDNN_CONVOLUTION_FWD_NO_WORKSPACE, 0, dnnalgo + 0);
cudnnStatus = cudnnGetConvolutionForwardAlgorithm(dnnh, dnntd[2], dnnfd[1], dnncd[0], dnntd[3], CUDNN_CONVOLUTION_FWD_NO_WORKSPACE, 0, dnnalgo + 1);
cudnnStatus = cudnnGetConvolutionForwardAlgorithm(dnnh, dnntd[4], dnnfd[2], dnncd[1], dnntd[5], CUDNN_CONVOLUTION_FWD_NO_WORKSPACE, 0, dnnalgo + 2);
n = 0;
return 0;
Error:
	hipFree(act[0]);
	hipFree(para[0]);
return 1;
}
int cudacalc(float rate,float decay,float *loss) {
hipError_t cudaStatus;
cudnnStatus_t cudnnStatus;
FILE *fi;
float alpha, beta, alpha2;
alpha = 1.0f;
beta = 0.0f;
alpha2 = -1.0f;
*loss = 0.0f;
int i, j, k, l;
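	//training pass over 450 mini-batches of 128 MNIST images; the softmax output minus the one-hot label
	//(computed by the cudnnAddTensor call with alpha2 = -1) is fed straight into the cuDNN backward calls,
	//and the parameter update is folded into their alpha/beta scaling: W <- decay * W + rate * dW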
for (i = n * 10 + 0; i < n * 10 + 450; i++) {
cudaStatus = hipMemcpy(act[0], bufferd + 28 * 28 * 128 * i, (28 * 28 * 128) * sizeof(float), hipMemcpyDeviceToDevice);
cudnnStatus = cudnnConvolutionForward(dnnh, &alpha, dnntd[0], act[0], dnnfd[0], para[0], dnncd[0], dnnalgo[0], NULL, 0, &beta, dnntd[1], act[1]);
cudnnStatus = cudnnAddTensor(dnnh, &alpha, dnntd[6], para[1], &alpha, dnntd[1], act[1]);
cudnnStatus = cudnnActivationForward(dnnh, dnnad, &alpha, dnntd[1], act[1], &beta, dnntd[1], act[2]);
cudnnStatus = cudnnPoolingForward(dnnh, dnnpd, &alpha, dnntd[1], act[2], &beta, dnntd[2], act[3]);
cudnnStatus = cudnnConvolutionForward(dnnh, &alpha, dnntd[2], act[3], dnnfd[1], para[2], dnncd[0], dnnalgo[1], NULL, 0, &beta, dnntd[3], act[4]);
cudnnStatus = cudnnAddTensor(dnnh, &alpha, dnntd[7], para[3], &alpha, dnntd[3], act[4]);
cudnnStatus = cudnnActivationForward(dnnh, dnnad, &alpha, dnntd[3], act[4], &beta, dnntd[3], act[5]);
cudnnStatus = cudnnPoolingForward(dnnh, dnnpd, &alpha, dnntd[3], act[5], &beta, dnntd[4], act[6]);
cudnnStatus = cudnnConvolutionForward(dnnh, &alpha, dnntd[4], act[6], dnnfd[2], para[4], dnncd[1], dnnalgo[2], NULL, 0, &beta, dnntd[5], act[7]);
cudnnStatus = cudnnAddTensor(dnnh, &alpha, dnntd[8], para[5], &alpha, dnntd[5], act[7]);
cudnnStatus = cudnnSoftmaxForward(dnnh, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, dnntd[5], act[7], &beta, dnntd[5], act[7]);
cudnnStatus = cudnnAddTensor(dnnh, &alpha2, dnntd[5], bufferd + 28 * 28 * 60000 + i * 128 * 10, &alpha, dnntd[5], act[7]);
cudnnStatus = cudnnConvolutionBackwardBias(dnnh, &rate, dnntd[5], act[7], &decay, dnntd[8], para[5]);
cudnnStatus = cudnnConvolutionBackwardData(dnnh, &alpha, dnnfd[2], para[4], dnntd[5], act[7], dnncd[1], CUDNN_CONVOLUTION_BWD_DATA_ALGO_0, NULL, 0, &beta, dnntd[4], grad[3]);
cudnnStatus = cudnnConvolutionBackwardFilter(dnnh, &rate, dnntd[4], act[6], dnntd[5], act[7], dnncd[1], CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0, NULL, 0, &decay, dnnfd[2], para[4]);
cudnnStatus = cudnnPoolingBackward(dnnh, dnnpd, &alpha, dnntd[4], act[6], dnntd[4], grad[3], dnntd[3], act[5], &beta, dnntd[3], grad[2]);
cudnnStatus = cudnnActivationBackward(dnnh, dnnad, &alpha, dnntd[3], act[5], dnntd[3], grad[2], dnntd[3], act[4], &beta, dnntd[3], grad[2]);
cudnnStatus = cudnnConvolutionBackwardBias(dnnh, &rate, dnntd[3], grad[2], &decay, dnntd[7], para[3]);
cudnnStatus = cudnnConvolutionBackwardData(dnnh, &alpha, dnnfd[1], para[2], dnntd[3], grad[2], dnncd[0], CUDNN_CONVOLUTION_BWD_DATA_ALGO_0, NULL, 0, &beta, dnntd[2], grad[1]);
cudnnStatus = cudnnConvolutionBackwardFilter(dnnh, &rate, dnntd[2], act[3], dnntd[3], grad[2], dnncd[0], CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0, NULL, 0, &decay, dnnfd[1], para[2]);
cudnnStatus = cudnnPoolingBackward(dnnh, dnnpd, &alpha, dnntd[2], act[3], dnntd[2], grad[1], dnntd[1], act[2], &beta, dnntd[1], grad[0]);
cudnnStatus = cudnnActivationBackward(dnnh, dnnad, &alpha, dnntd[1], act[2], dnntd[1], grad[0], dnntd[1], act[1], &beta, dnntd[1], grad[0]);
cudnnStatus = cudnnConvolutionBackwardBias(dnnh, &rate, dnntd[1], grad[0], &decay, dnntd[6], para[1]);
cudnnStatus = cudnnConvolutionBackwardFilter(dnnh, &rate, dnntd[0], act[0], dnntd[1], grad[0], dnncd[0], CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0, NULL, 0, &decay, dnnfd[0], para[0]);
// cudaStatus = hipMemcpy(buffer, para[0], (8 * 5 * 5 + 8 + 16 * 8 * 5 * 5 + 16 + 10 * 16 * 7 * 7 + 10) * sizeof(float), hipMemcpyDeviceToHost);
}
n++;
//if (n == 45)
n = 0;
cudaStatus = hipMemcpy(act[0], bufferd + 28 * 28 * 128 * 450, (28 * 28 * 128) * sizeof(float), hipMemcpyDeviceToDevice);
cudnnStatus = cudnnConvolutionForward(dnnh, &alpha, dnntd[0], act[0], dnnfd[0], para[0], dnncd[0], dnnalgo[0], NULL, 0, &beta, dnntd[1], act[1]);
cudnnStatus = cudnnAddTensor(dnnh, &alpha, dnntd[6], para[1], &alpha, dnntd[1], act[1]);
cudnnStatus = cudnnActivationForward(dnnh, dnnad, &alpha, dnntd[1], act[1], &beta, dnntd[1], act[2]);
cudnnStatus = cudnnPoolingForward(dnnh, dnnpd, &alpha, dnntd[1], act[2], &beta, dnntd[2], act[3]);
cudnnStatus = cudnnConvolutionForward(dnnh, &alpha, dnntd[2], act[3], dnnfd[1], para[2], dnncd[0], dnnalgo[1], NULL, 0, &beta, dnntd[3], act[4]);
cudnnStatus = cudnnAddTensor(dnnh, &alpha, dnntd[7], para[3], &alpha, dnntd[3], act[4]);
cudnnStatus = cudnnActivationForward(dnnh, dnnad, &alpha, dnntd[3], act[4], &beta, dnntd[3], act[5]);
cudnnStatus = cudnnPoolingForward(dnnh, dnnpd, &alpha, dnntd[3], act[5], &beta, dnntd[4], act[6]);
cudnnStatus = cudnnConvolutionForward(dnnh, &alpha, dnntd[4], act[6], dnnfd[2], para[4], dnncd[1], dnnalgo[2], NULL, 0, &beta, dnntd[5], act[7]);
cudnnStatus = cudnnAddTensor(dnnh, &alpha, dnntd[8], para[5], &alpha, dnntd[5], act[7]);
cudnnStatus = cudnnSoftmaxForward(dnnh, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, dnntd[5], act[7], &beta, dnntd[5], act[7]);
// cudnnStatus = cudnnAddTensor(dnnh, &alpha2, dnntd[5], bufferd + 28 * 28 * 60000 + i * 128 * 10, &alpha, dnntd[5], act[7]);
cudaStatus = hipMemcpy(buffer, para[0], (8 * 5 * 5 + 8 + 16 * 8 * 5 * 5 + 16 + 10 * 16 * 7 * 7 + 10) * sizeof(float), hipMemcpyDeviceToHost);
if (!fopen_s(&fi, "D:/files/data/fig", "wb")) {
fwrite(buffer, sizeof(float), (8 * 5 * 5 + 8 + 16 * 8 * 5 * 5 + 16 + 10 * 16 * 7 * 7 + 10), fi);
fclose(fi);
}
cudaStatus = hipMemcpy(buffer, act[7], (10) * 128 * sizeof(float), hipMemcpyDeviceToHost);
for (i = 0; i < 128 * 10; i++) {
*loss -= buffer[28 * 28 * 60000 + 450 * 128 * 10 + i] * log10(buffer[i]);
}
for (i = 0; i < 32; i++) {
for (j = 0; j < 32; j++) {
for (l = 0; l < 10; l++) {
data[((i * 30 + 1) * 1024 + j * 28 + l * 2) * 3 + 0] = buffer[(i * 4 + j / 8) * 10 + l] * 255;
data[((i * 30 + 1) * 1024 + j * 28 + l * 2) * 3 + 1] = buffer[(i * 4 + j / 8) * 10 + l] * 0;
data[((i * 30 + 1) * 1024 + j * 28 + l * 2) * 3 + 2] = buffer[(i * 4 + j / 8) * 10 + l] * 255;
data[((i * 30 + 1) * 1024 + j * 28 + l * 2) * 3 + 3] = buffer[(i * 4 + j / 8) * 10 + l] * 255;
data[((i * 30 + 1) * 1024 + j * 28 + l * 2) * 3 + 4] = buffer[(i * 4 + j / 8) * 10 + l] * 0;
data[((i * 30 + 1) * 1024 + j * 28 + l * 2) * 3 + 5] = buffer[(i * 4 + j / 8) * 10 + l] * 255;
}
}
}
cudaStatus = hipMemcpy(buffer, act[2], (8*28*28) * 128 * sizeof(float), hipMemcpyDeviceToHost);
for (i = 0; i < 32; i++) {
for (j = 0; j < 32; j++) {
for (k = 0; k < 28; k++) {
for (l = 0; l < 28; l++) {
data[((i * 30 + k + 2) * 1024 + j * 28 + l) * 3] = min(buffer[(i * 32 + j) * 28 * 28 + k * 28 + l], 2.0f) * 127;
data[((i * 30 + k + 2) * 1024 + j * 28 + l) * 3 + 1] = min(buffer[(i * 32 + j) * 28 * 28 + k * 28 + l], 2.0f) * 127;
data[((i * 30 + k + 2) * 1024 + j * 28 + l) * 3 + 2] = min(buffer[(i * 32 + j) * 28 * 28 + k * 28 + l], 2.0f) * 127;
}
}
}
}
for (i = 0; i < 32; i++) {
for (j = 0; j < 32; j++) {
for (l = 0; l < 10; l++) {
data[((i * 30) * 1024 + j * 28 + l * 2) * 3 + 0] = buffer[28 * 28 * 60000 + 450 * 128 * 10 + (i * 4 + j / 8) * 10 + l] * 255;
data[((i * 30) * 1024 + j * 28 + l * 2) * 3 + 1] = buffer[28 * 28 * 60000 + 450 * 128 * 10 + (i * 4 + j / 8) * 10 + l] * 255;
data[((i * 30) * 1024 + j * 28 + l * 2) * 3 + 2] = buffer[28 * 28 * 60000 + 450 * 128 * 10 + (i * 4 + j / 8) * 10 + l] * 0;
data[((i * 30) * 1024 + j * 28 + l * 2) * 3 + 3] = buffer[28 * 28 * 60000 + 450 * 128 * 10 + (i * 4 + j / 8) * 10 + l] * 255;
data[((i * 30) * 1024 + j * 28 + l * 2) * 3 + 4] = buffer[28 * 28 * 60000 + 450 * 128 * 10 + (i * 4 + j / 8) * 10 + l] * 255;
data[((i * 30) * 1024 + j * 28 + l * 2) * 3 + 5] = buffer[28 * 28 * 60000 + 450 * 128 * 10 + (i * 4 + j / 8) * 10 + l] * 0;
}
}
}
	if (cudaStatus != hipSuccess || cudnnStatus != CUDNN_STATUS_SUCCESS) {
return 1;
}
return 0;
}
int cudafin(void) {
	hipError_t cudaStatus;
cudnnDestroy(dnnh);
hipFree(act[0]);
hipFree(para[0]);
free(data);
free(buffer);
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
return 1;
}
return 0;
} | e385d9602bc1d4242450911b59a62e34953cc40a.cu | #include "cal.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda_gl_interop.h>
#include <cudnn.h>
#define Dt 0.000005f
float *act[8] = { NULL };
float *para[6] = { NULL };
float *grad[4] = { NULL };
unsigned char *data = NULL;
float *buffer = NULL;
float *bufferd = NULL;
int n;
cudaGraphicsResource *cuda_vbo_resource;
cudnnHandle_t dnnh;
cudnnActivationDescriptor_t dnnad;
cudnnTensorDescriptor_t dnntd[9];
cudnnFilterDescriptor_t dnnfd[3];
cudnnConvolutionDescriptor_t dnncd[2];
cudnnConvolutionFwdAlgo_t dnnalgo[3];
cudnnPoolingDescriptor_t dnnpd;
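//network described by the descriptors set up below (batch size 128):
//  28x28x1 -> conv 5x5, 8 maps, pad 2 -> ReLU -> 2x2 max-pool -> 14x14x8
//  -> conv 5x5, 16 maps, pad 2 -> ReLU -> 2x2 max-pool -> 7x7x16
//  -> "fully connected" layer expressed as a 7x7 convolution to 10 outputs -> softmax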
int cudainit(HWND hWnd) {
cudaError_t cudaStatus;
cudnnStatus_t cudnnStatus;
int i, j, k, l;
float x;
float alpha, beta;
alpha = 1.0;
beta = 0.0;
FILE *fi;
cudaStatus = cudaSetDevice(0);
cudaStatus = cudaMalloc(act, (28 * 28 + 8 * 28 * 28 + 8 * 28 * 28 + 8 * 14 * 14 + 16 * 14 * 14 + 16 * 14 * 14 + 16 * 7 * 7 + 10) * sizeof(float) * 128);
act[1] = act[0] + 128 * 28 * 28;
act[2] = act[1] + 128 * 8 * 28 * 28;
act[3] = act[2] + 128 * 8 * 28 * 28;
act[4] = act[3] + 128 * 8 * 14 * 14;
act[5] = act[4] + 128 * 16 * 14 * 14;
act[6] = act[5] + 128 * 16 * 14 * 14;
act[7] = act[6] + 128 * 16 * 7 * 7;
cudaStatus = cudaMalloc(para, (8 * 5 * 5 + 8 * 1 * 1 + 16 * 8 * 5 * 5 + 16 * 1 * 1 + 10 * 16 * 7 * 7 + 10) * sizeof(float));
para[1] = para[0] + 8 * 5 * 5;
para[2] = para[1] + 8 * 1 * 1;
para[3] = para[2] + 16 * 8 * 5 * 5;
para[4] = para[3] + 16 * 1 * 1;
para[5] = para[4] + 10 * 16 * 7 * 7;
cudaStatus = cudaMalloc(grad, (8 * 28 * 28 + 8 * 14 * 14 + 16 * 14 * 14 + 16 * 7 * 7) * 128 * sizeof(float));
grad[1] = grad[0] + 128 * 8 * 28 * 28;
grad[2] = grad[1] + 128 * 8 * 14 * 14;
grad[3] = grad[2] + 128 * 16 * 14 * 14;
cudaStatus = cudaMalloc(&bufferd, (28 * 28 + 10) * 60000 * sizeof(float));
data = (unsigned char*)malloc((28 * 28 * 60000 + 60000) * sizeof(unsigned char));
if (!fopen_s(&fi, "D:/download/train-images.idx3-ubyte", "r")) {
fseek(fi, 16, SEEK_SET);
fread(data, 1, 28 * 28 * 60000, fi);
fclose(fi);
}
if (!fopen_s(&fi, "D:/download/train-labels.idx1-ubyte", "r")) {
fseek(fi, 8, SEEK_SET);
fread(data + 28 * 28 * 60000, 1, 60000, fi);
fclose(fi);
}
buffer = (float*)malloc((28 * 28 + 10) * 60000 * sizeof(float));
for (i = 0; i < 28 * 28 * 60000; i++) {
buffer[i] = (float)(data[i])/256.0f;
}
memset(buffer + 28 * 28 * 60000, 0, 60000 * 10 * sizeof(float));
for (i = 0; i < 60000; i++) {
buffer[28 * 28 * 60000 + i * 10 + data[28 * 28 * 60000 + i]] = 1.0f;
}
for (i = 0; i < 32; i++) {
for (j = 0; j < 32; j++) {
for (k = 0; k < 28; k++) {
for (l = 0; l < 28; l++) {
data[((i * 28 + k) * 1024 + j * 28 + l) * 3] = buffer[(i * 32 + j) * 28 * 28 + k * 28 + l] * 256;
data[((i * 28 + k) * 1024 + j * 28 + l) * 3 + 1] = buffer[(i * 32 + j) * 28 * 28 + k * 28 + l] * 256;
data[((i * 28 + k) * 1024 + j * 28 + l) * 3 + 2] = buffer[(i * 32 + j) * 28 * 28 + k * 28 + l] * 256;
}
}
}
}
cudaStatus = cudaMemcpy(bufferd, buffer, (28 * 28 + 10) * 60000 * sizeof(float), cudaMemcpyHostToDevice);
if (!fopen_s(&fi, "D:/files/data/fig", "r")) {
fread(buffer, sizeof(float), (8 * 5 * 5 + 8 + 16 * 8 * 5 * 5 + 16 + 10 * 16 * 7 * 7 + 10), fi);
fclose(fi);
}
else {
for (i = 0; i < 8 * 5 * 5 + 8 + 16 * 8 * 5 * 5 + 16 + 10 * 16 * 7 * 7 + 10; i++) {
buffer[i] = (rand() / 32768.0f - 0.5f)*0.25f;
}
for (i = 0; i < 8 * 5 * 5; i++) {
buffer[i] = (rand() / 32768.0f - 0.5f)*1.0f;
}
}
cudaStatus = cudaMemcpy(para[0], buffer, (8 * 5 * 5 + 8 + 16 * 8 * 5 * 5 + 16 + 10 * 16 * 7 * 7 + 10) * sizeof(float), cudaMemcpyHostToDevice);
cudnnStatus = cudnnCreate(&dnnh);
cudnnStatus = cudnnCreateActivationDescriptor(&dnnad);
cudnnStatus = cudnnSetActivationDescriptor(dnnad, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN, 0.0);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 0);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 1);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 2);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 3);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 4);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 5);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 6);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 7);
cudnnStatus = cudnnCreateTensorDescriptor(dnntd + 8);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[0], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 128, 1, 28, 28);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[1], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 128, 8, 28, 28);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[2], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 128, 8, 14, 14);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[3], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 128, 16, 14, 14);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[4], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 128, 16, 7, 7);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[5], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 128, 10, 1, 1);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[6], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 8, 1, 1);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[7], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 16, 1, 1);
cudnnStatus = cudnnSetTensor4dDescriptor(dnntd[8], CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 10, 1, 1);
cudnnStatus = cudnnCreateFilterDescriptor(dnnfd + 0);
cudnnStatus = cudnnCreateFilterDescriptor(dnnfd + 1);
cudnnStatus = cudnnCreateFilterDescriptor(dnnfd + 2);
cudnnStatus = cudnnSetFilter4dDescriptor(dnnfd[0], CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 8, 1, 5, 5);
cudnnStatus = cudnnSetFilter4dDescriptor(dnnfd[1], CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 16, 8, 5, 5);
cudnnStatus = cudnnSetFilter4dDescriptor(dnnfd[2], CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 10, 16, 7, 7);
cudnnStatus = cudnnCreateConvolutionDescriptor(dnncd + 0);
cudnnStatus = cudnnCreateConvolutionDescriptor(dnncd + 1);
cudnnStatus = cudnnSetConvolution2dDescriptor(dnncd[0], 2, 2, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);
cudnnStatus = cudnnSetConvolution2dDescriptor(dnncd[1], 0, 0, 1, 1, 1, 1, CUDNN_CONVOLUTION, CUDNN_DATA_FLOAT);
cudnnStatus = cudnnCreatePoolingDescriptor(&dnnpd);
cudnnStatus = cudnnSetPooling2dDescriptor(dnnpd, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 2, 2);//AVERAGE_COUNT_INCLUDE_PADDING
cudnnStatus = cudnnGetConvolutionForwardAlgorithm(dnnh, dnntd[0], dnnfd[0], dnncd[0], dnntd[1], CUDNN_CONVOLUTION_FWD_NO_WORKSPACE, 0, dnnalgo + 0);
cudnnStatus = cudnnGetConvolutionForwardAlgorithm(dnnh, dnntd[2], dnnfd[1], dnncd[0], dnntd[3], CUDNN_CONVOLUTION_FWD_NO_WORKSPACE, 0, dnnalgo + 1);
cudnnStatus = cudnnGetConvolutionForwardAlgorithm(dnnh, dnntd[4], dnnfd[2], dnncd[1], dnntd[5], CUDNN_CONVOLUTION_FWD_NO_WORKSPACE, 0, dnnalgo + 2);
n = 0;
return 0;
Error:
	cudaFree(act[0]);
	cudaFree(para[0]);
return 1;
}
int cudacalc(float rate,float decay,float *loss) {
cudaError cudaStatus;
cudnnStatus_t cudnnStatus;
FILE *fi;
float alpha, beta, alpha2;
alpha = 1.0f;
beta = 0.0f;
alpha2 = -1.0f;
*loss = 0.0f;
int i, j, k, l;
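	//training pass over 450 mini-batches of 128 MNIST images; the softmax output minus the one-hot label
	//(computed by the cudnnAddTensor call with alpha2 = -1) is fed straight into the cuDNN backward calls,
	//and the parameter update is folded into their alpha/beta scaling: W <- decay * W + rate * dW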
for (i = n * 10 + 0; i < n * 10 + 450; i++) {
cudaStatus = cudaMemcpy(act[0], bufferd + 28 * 28 * 128 * i, (28 * 28 * 128) * sizeof(float), cudaMemcpyDeviceToDevice);
cudnnStatus = cudnnConvolutionForward(dnnh, &alpha, dnntd[0], act[0], dnnfd[0], para[0], dnncd[0], dnnalgo[0], NULL, 0, &beta, dnntd[1], act[1]);
cudnnStatus = cudnnAddTensor(dnnh, &alpha, dnntd[6], para[1], &alpha, dnntd[1], act[1]);
cudnnStatus = cudnnActivationForward(dnnh, dnnad, &alpha, dnntd[1], act[1], &beta, dnntd[1], act[2]);
cudnnStatus = cudnnPoolingForward(dnnh, dnnpd, &alpha, dnntd[1], act[2], &beta, dnntd[2], act[3]);
cudnnStatus = cudnnConvolutionForward(dnnh, &alpha, dnntd[2], act[3], dnnfd[1], para[2], dnncd[0], dnnalgo[1], NULL, 0, &beta, dnntd[3], act[4]);
cudnnStatus = cudnnAddTensor(dnnh, &alpha, dnntd[7], para[3], &alpha, dnntd[3], act[4]);
cudnnStatus = cudnnActivationForward(dnnh, dnnad, &alpha, dnntd[3], act[4], &beta, dnntd[3], act[5]);
cudnnStatus = cudnnPoolingForward(dnnh, dnnpd, &alpha, dnntd[3], act[5], &beta, dnntd[4], act[6]);
cudnnStatus = cudnnConvolutionForward(dnnh, &alpha, dnntd[4], act[6], dnnfd[2], para[4], dnncd[1], dnnalgo[2], NULL, 0, &beta, dnntd[5], act[7]);
cudnnStatus = cudnnAddTensor(dnnh, &alpha, dnntd[8], para[5], &alpha, dnntd[5], act[7]);
cudnnStatus = cudnnSoftmaxForward(dnnh, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, dnntd[5], act[7], &beta, dnntd[5], act[7]);
cudnnStatus = cudnnAddTensor(dnnh, &alpha2, dnntd[5], bufferd + 28 * 28 * 60000 + i * 128 * 10, &alpha, dnntd[5], act[7]);
cudnnStatus = cudnnConvolutionBackwardBias(dnnh, &rate, dnntd[5], act[7], &decay, dnntd[8], para[5]);
cudnnStatus = cudnnConvolutionBackwardData(dnnh, &alpha, dnnfd[2], para[4], dnntd[5], act[7], dnncd[1], CUDNN_CONVOLUTION_BWD_DATA_ALGO_0, NULL, 0, &beta, dnntd[4], grad[3]);
cudnnStatus = cudnnConvolutionBackwardFilter(dnnh, &rate, dnntd[4], act[6], dnntd[5], act[7], dnncd[1], CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0, NULL, 0, &decay, dnnfd[2], para[4]);
cudnnStatus = cudnnPoolingBackward(dnnh, dnnpd, &alpha, dnntd[4], act[6], dnntd[4], grad[3], dnntd[3], act[5], &beta, dnntd[3], grad[2]);
cudnnStatus = cudnnActivationBackward(dnnh, dnnad, &alpha, dnntd[3], act[5], dnntd[3], grad[2], dnntd[3], act[4], &beta, dnntd[3], grad[2]);
cudnnStatus = cudnnConvolutionBackwardBias(dnnh, &rate, dnntd[3], grad[2], &decay, dnntd[7], para[3]);
cudnnStatus = cudnnConvolutionBackwardData(dnnh, &alpha, dnnfd[1], para[2], dnntd[3], grad[2], dnncd[0], CUDNN_CONVOLUTION_BWD_DATA_ALGO_0, NULL, 0, &beta, dnntd[2], grad[1]);
cudnnStatus = cudnnConvolutionBackwardFilter(dnnh, &rate, dnntd[2], act[3], dnntd[3], grad[2], dnncd[0], CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0, NULL, 0, &decay, dnnfd[1], para[2]);
cudnnStatus = cudnnPoolingBackward(dnnh, dnnpd, &alpha, dnntd[2], act[3], dnntd[2], grad[1], dnntd[1], act[2], &beta, dnntd[1], grad[0]);
cudnnStatus = cudnnActivationBackward(dnnh, dnnad, &alpha, dnntd[1], act[2], dnntd[1], grad[0], dnntd[1], act[1], &beta, dnntd[1], grad[0]);
cudnnStatus = cudnnConvolutionBackwardBias(dnnh, &rate, dnntd[1], grad[0], &decay, dnntd[6], para[1]);
cudnnStatus = cudnnConvolutionBackwardFilter(dnnh, &rate, dnntd[0], act[0], dnntd[1], grad[0], dnncd[0], CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0, NULL, 0, &decay, dnnfd[0], para[0]);
// cudaStatus = cudaMemcpy(buffer, para[0], (8 * 5 * 5 + 8 + 16 * 8 * 5 * 5 + 16 + 10 * 16 * 7 * 7 + 10) * sizeof(float), cudaMemcpyDeviceToHost);
}
n++;
//if (n == 45)
n = 0;
cudaStatus = cudaMemcpy(act[0], bufferd + 28 * 28 * 128 * 450, (28 * 28 * 128) * sizeof(float), cudaMemcpyDeviceToDevice);
cudnnStatus = cudnnConvolutionForward(dnnh, &alpha, dnntd[0], act[0], dnnfd[0], para[0], dnncd[0], dnnalgo[0], NULL, 0, &beta, dnntd[1], act[1]);
cudnnStatus = cudnnAddTensor(dnnh, &alpha, dnntd[6], para[1], &alpha, dnntd[1], act[1]);
cudnnStatus = cudnnActivationForward(dnnh, dnnad, &alpha, dnntd[1], act[1], &beta, dnntd[1], act[2]);
cudnnStatus = cudnnPoolingForward(dnnh, dnnpd, &alpha, dnntd[1], act[2], &beta, dnntd[2], act[3]);
cudnnStatus = cudnnConvolutionForward(dnnh, &alpha, dnntd[2], act[3], dnnfd[1], para[2], dnncd[0], dnnalgo[1], NULL, 0, &beta, dnntd[3], act[4]);
cudnnStatus = cudnnAddTensor(dnnh, &alpha, dnntd[7], para[3], &alpha, dnntd[3], act[4]);
cudnnStatus = cudnnActivationForward(dnnh, dnnad, &alpha, dnntd[3], act[4], &beta, dnntd[3], act[5]);
cudnnStatus = cudnnPoolingForward(dnnh, dnnpd, &alpha, dnntd[3], act[5], &beta, dnntd[4], act[6]);
cudnnStatus = cudnnConvolutionForward(dnnh, &alpha, dnntd[4], act[6], dnnfd[2], para[4], dnncd[1], dnnalgo[2], NULL, 0, &beta, dnntd[5], act[7]);
cudnnStatus = cudnnAddTensor(dnnh, &alpha, dnntd[8], para[5], &alpha, dnntd[5], act[7]);
cudnnStatus = cudnnSoftmaxForward(dnnh, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, dnntd[5], act[7], &beta, dnntd[5], act[7]);
// cudnnStatus = cudnnAddTensor(dnnh, &alpha2, dnntd[5], bufferd + 28 * 28 * 60000 + i * 128 * 10, &alpha, dnntd[5], act[7]);
cudaStatus = cudaMemcpy(buffer, para[0], (8 * 5 * 5 + 8 + 16 * 8 * 5 * 5 + 16 + 10 * 16 * 7 * 7 + 10) * sizeof(float), cudaMemcpyDeviceToHost);
if (!fopen_s(&fi, "D:/files/data/fig", "wb")) {
fwrite(buffer, sizeof(float), (8 * 5 * 5 + 8 + 16 * 8 * 5 * 5 + 16 + 10 * 16 * 7 * 7 + 10), fi);
fclose(fi);
}
cudaStatus = cudaMemcpy(buffer, act[7], (10) * 128 * sizeof(float), cudaMemcpyDeviceToHost);
for (i = 0; i < 128 * 10; i++) {
*loss -= buffer[28 * 28 * 60000 + 450 * 128 * 10 + i] * log10(buffer[i]);
}
for (i = 0; i < 32; i++) {
for (j = 0; j < 32; j++) {
for (l = 0; l < 10; l++) {
data[((i * 30 + 1) * 1024 + j * 28 + l * 2) * 3 + 0] = buffer[(i * 4 + j / 8) * 10 + l] * 255;
data[((i * 30 + 1) * 1024 + j * 28 + l * 2) * 3 + 1] = buffer[(i * 4 + j / 8) * 10 + l] * 0;
data[((i * 30 + 1) * 1024 + j * 28 + l * 2) * 3 + 2] = buffer[(i * 4 + j / 8) * 10 + l] * 255;
data[((i * 30 + 1) * 1024 + j * 28 + l * 2) * 3 + 3] = buffer[(i * 4 + j / 8) * 10 + l] * 255;
data[((i * 30 + 1) * 1024 + j * 28 + l * 2) * 3 + 4] = buffer[(i * 4 + j / 8) * 10 + l] * 0;
data[((i * 30 + 1) * 1024 + j * 28 + l * 2) * 3 + 5] = buffer[(i * 4 + j / 8) * 10 + l] * 255;
}
}
}
cudaStatus = cudaMemcpy(buffer, act[2], (8*28*28) * 128 * sizeof(float), cudaMemcpyDeviceToHost);
for (i = 0; i < 32; i++) {
for (j = 0; j < 32; j++) {
for (k = 0; k < 28; k++) {
for (l = 0; l < 28; l++) {
data[((i * 30 + k + 2) * 1024 + j * 28 + l) * 3] = min(buffer[(i * 32 + j) * 28 * 28 + k * 28 + l], 2.0f) * 127;
data[((i * 30 + k + 2) * 1024 + j * 28 + l) * 3 + 1] = min(buffer[(i * 32 + j) * 28 * 28 + k * 28 + l], 2.0f) * 127;
data[((i * 30 + k + 2) * 1024 + j * 28 + l) * 3 + 2] = min(buffer[(i * 32 + j) * 28 * 28 + k * 28 + l], 2.0f) * 127;
}
}
}
}
for (i = 0; i < 32; i++) {
for (j = 0; j < 32; j++) {
for (l = 0; l < 10; l++) {
data[((i * 30) * 1024 + j * 28 + l * 2) * 3 + 0] = buffer[28 * 28 * 60000 + 450 * 128 * 10 + (i * 4 + j / 8) * 10 + l] * 255;
data[((i * 30) * 1024 + j * 28 + l * 2) * 3 + 1] = buffer[28 * 28 * 60000 + 450 * 128 * 10 + (i * 4 + j / 8) * 10 + l] * 255;
data[((i * 30) * 1024 + j * 28 + l * 2) * 3 + 2] = buffer[28 * 28 * 60000 + 450 * 128 * 10 + (i * 4 + j / 8) * 10 + l] * 0;
data[((i * 30) * 1024 + j * 28 + l * 2) * 3 + 3] = buffer[28 * 28 * 60000 + 450 * 128 * 10 + (i * 4 + j / 8) * 10 + l] * 255;
data[((i * 30) * 1024 + j * 28 + l * 2) * 3 + 4] = buffer[28 * 28 * 60000 + 450 * 128 * 10 + (i * 4 + j / 8) * 10 + l] * 255;
data[((i * 30) * 1024 + j * 28 + l * 2) * 3 + 5] = buffer[28 * 28 * 60000 + 450 * 128 * 10 + (i * 4 + j / 8) * 10 + l] * 0;
}
}
}
if (cudaStatus != cudaSuccess || cudnnStatus != CUDNN_STATUS_SUCCESS) {
return 1;
}
return 0;
}
int cudafin(void) {
cudaError_t cudaStatus;
cudnnDestroy(dnnh);
cudaFree(act[0]);
cudaFree(para[0]);
free(data);
free(buffer);
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
return 1;
}
return 0;
} |
86b3cceed486ff1e2e4e0b2472c9b28542004f81.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
__global__ void process_kernel1(float *input1, float *input2, float *output, int datasize){
int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
int i = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
if (i < datasize){
output[i]=sin(input1[i])+cos(input2[i]);
}
}
__global__ void process_kernel2(float *input, float *output, int datasize){
int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
int i = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
if (i < datasize){
output[i]=log(input[i]);
}
}
__global__ void process_kernel3(float *input, float *output, int datasize){
int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
int i = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
if (i < datasize){
output[i]=sqrt(input[i]);
}
}
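/*
Editor's sketch (not part of the original source): the three kernels above flatten a 3-D
grid and a 3-D block into one linear element index, so the launch configuration must
supply at least `datasize` threads. A hypothetical host-side helper for that check:
*/
static inline unsigned long totalLaunchThreads(dim3 grid, dim3 block) {
// e.g. grid(4,2,2) with block(32,32,1) yields 4*2*2 * 32*32*1 = 16384 threads
return (unsigned long)grid.x * grid.y * grid.z * block.x * block.y * block.z;
}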
int main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
int numElements = 0;
scanf("%d",&numElements);
size_t size = numElements * sizeof(float);
float *h_input1 = (float *)malloc(size);
float *h_input2 = (float *)malloc(size);
float *h_output = (float *)malloc(size);
// Verify that allocations succeeded
if (h_input1 == NULL || h_input2 == NULL || h_output == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
//Trying for random values
/*for (int i = 0; i < numElements; ++i)
{
h_input1[i] = rand()/(float)RAND_MAX;
h_input2[i] = rand()/(float)RAND_MAX;
}*/
//taking inputs
printf("Enter input1 elements: \n");
for (int i = 0; i < numElements; ++i)
scanf("%f", &h_input1[i]);
printf("Enter input2 elements: \n");
for (int i = 0; i < numElements; ++i)
scanf("%f", &h_input2[i]);
float *d_input1 = NULL;
err = hipMalloc((void **)&d_input1, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector input1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_input2 = NULL;
err = hipMalloc((void **)&d_input2, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector input2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_output1 = NULL;
err = hipMalloc((void **)&d_output1, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector output1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output2 = NULL;
err = hipMalloc((void **)&d_output2, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector output2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output3 = NULL;
err = hipMalloc((void **)&d_output3, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector output3 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors input1 and input2 in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_input1, h_input1, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector input1 from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_input2, h_input2, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector input2 from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the processing kernels (process_kernel1 -> 2 -> 3)
dim3 threadsPerBlock1(32,32,1);
dim3 blocksPerGrid1(4,2,2);
hipLaunchKernelGGL(( process_kernel1), dim3(blocksPerGrid1), dim3(threadsPerBlock1), 0, 0, d_input1, d_input2, d_output1, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch process_kernel1 kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
dim3 threadsPerBlock2(8,8,16);
dim3 blocksPerGrid2(2,8,1);
hipLaunchKernelGGL(( process_kernel2), dim3(blocksPerGrid2), dim3(threadsPerBlock2), 0, 0, d_output1, d_output2, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch process_kernel2 kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
dim3 threadsPerBlock3(128,8,1);
dim3 blocksPerGrid3(16,1,1);
hipLaunchKernelGGL(( process_kernel3), dim3(blocksPerGrid3), dim3(threadsPerBlock3), 0, 0, d_output2, d_output3, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch process_kernel3 kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_output, d_output3, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector output from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
printf("%.2f ",h_output[i]);
}
// Free device global memory
err = hipFree(d_input1);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector input1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_input2);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector input2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_output1);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector output1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_output2);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector output2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_output3);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector output3 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_input1);
free(h_input2);
free(h_output);
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
return 0;
} | 86b3cceed486ff1e2e4e0b2472c9b28542004f81.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
__global__ void process_kernel1(float *input1, float *input2, float *output, int datasize){
int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
int i = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
if (i < datasize){
output[i]=sin(input1[i])+cos(input2[i]);
}
}
__global__ void process_kernel2(float *input, float *output, int datasize){
int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
int i = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
if (i < datasize){
output[i]=log(input[i]);
}
}
__global__ void process_kernel3(float *input, float *output, int datasize){
int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
int i = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
if (i < datasize){
output[i]=sqrt(input[i]);
}
}
int main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
int numElements = 0;
scanf("%d",&numElements);
size_t size = numElements * sizeof(float);
float *h_input1 = (float *)malloc(size);
float *h_input2 = (float *)malloc(size);
float *h_output = (float *)malloc(size);
// Verify that allocations succeeded
if (h_input1 == NULL || h_input2 == NULL || h_output == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
//Trying for random values
/*for (int i = 0; i < numElements; ++i)
{
h_input1[i] = rand()/(float)RAND_MAX;
h_input2[i] = rand()/(float)RAND_MAX;
}*/
//taking inputs
printf("Enter input1 elements: \n");
for (int i = 0; i < numElements; ++i)
scanf("%f", &h_input1[i]);
printf("Enter input2 elements: \n");
for (int i = 0; i < numElements; ++i)
scanf("%f", &h_input2[i]);
float *d_input1 = NULL;
err = cudaMalloc((void **)&d_input1, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector input1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_input2 = NULL;
err = cudaMalloc((void **)&d_input2, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector input2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_output1 = NULL;
err = cudaMalloc((void **)&d_output1, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector output1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output2 = NULL;
err = cudaMalloc((void **)&d_output2, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector output2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output3 = NULL;
err = cudaMalloc((void **)&d_output3, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector output3 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors input1 and input2 in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_input1, h_input1, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector input1 from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_input2, h_input2, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector input2 from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the processing kernels (process_kernel1 -> 2 -> 3)
dim3 threadsPerBlock1(32,32,1);
dim3 blocksPerGrid1(4,2,2);
process_kernel1<<<blocksPerGrid1, threadsPerBlock1>>>(d_input1, d_input2, d_output1, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch process_kernel1 kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
dim3 threadsPerBlock2(8,8,16);
dim3 blocksPerGrid2(2,8,1);
process_kernel2<<<blocksPerGrid2, threadsPerBlock2>>>(d_output1, d_output2, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch process_kernel2 kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
dim3 threadsPerBlock3(128,8,1);
dim3 blocksPerGrid3(16,1,1);
process_kernel3<<<blocksPerGrid3, threadsPerBlock3>>>(d_output2, d_output3, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch process_kernel3 kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_output, d_output3, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector output from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
printf("%.2f ",h_output[i]);
}
// Free device global memory
err = cudaFree(d_input1);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector input1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_input2);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector input2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_output1);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector output1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_output2);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector output2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_output3);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector output3 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_input1);
free(h_input2);
free(h_output);
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
return 0;
} |
128c7b514eef389878cb04fc776f46f78f4405b3.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
* Extended for use in CS 374 at Calvin College by Joel C. Adams.
*/
/**
* Vector squaring: C = A * A (element-wise).
*
* This sample is a very basic sample that squares each element of a vector.
* It is adapted from the vector-addition sample illustrating Chapter 2
* of the programming guide, with some additions like error checking.
*/
#include <stdio.h>
#include <omp.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the element-wise square of A into C.
* Both vectors have the same number of elements numElements.
*/
__global__
void vectorSquare(const float *A, float *C, unsigned long numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i]*A[i];
}
}
void checkErr(hipError_t err, const char* msg)
{
if (err != hipSuccess)
{
fprintf(stderr, "%s (error code %d: '%s')!\n", msg, err, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/**
* Host main routine
*/
int main(int argc, char** argv)
{
// timing variables
double startCuda, stopCuda;
double startSeq, stopSeq;
double startHostDev, stopHostDev;
double startComp, stopComp;
double startDevHost, stopDevHost;
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
unsigned long numElements = 50000;
if (argc == 2) {
numElements = strtoul( argv[1] , 0, 10 );
}
size_t size = numElements * sizeof(float);
printf("[Vector squaring of %lu elements]\n", numElements);
// Allocate the host input vector A
float * h_A = (float *)malloc(size);
// Allocate the host output vector C
float * h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
}
// 1a. Allocate the device input vector A
float * d_A = NULL;
err = hipMalloc((void **)&d_A, size);
checkErr(err, "Failed to allocate device vector A");
// 1.b. Allocate the device output vector C
float * d_C = NULL;
err = hipMalloc((void **)&d_C, size);
checkErr(err, "Failed to allocate device vector C");
// 2. Copy the host input vectors A and B in host memory
// to the device input vectors in device memory
startCuda = omp_get_wtime();
startHostDev = omp_get_wtime();
//printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
checkErr(err, "Failed to copy device vector A from host to device");
stopHostDev = omp_get_wtime();
startComp = omp_get_wtime();
// 3. Launch the vectorSquare CUDA kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
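// Editor's note (not part of the original source): the expression above is integer ceiling
// division. For the default 50000 elements with 256 threads per block it gives
// (50000 + 255) / 256 = 196 blocks, i.e. 50176 threads; the `i < numElements` guard in the
// kernel discards the 176 surplus threads.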
//printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorSquare)
, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_C, numElements);
err = hipGetLastError();
checkErr(err, "Failed to launch vectorSquare kernel");
stopComp = omp_get_wtime();
startDevHost = omp_get_wtime();
// 4. Copy the device result vector in device memory
// to the host result vector in host memory.
//printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
checkErr(err, "Failed to copy vector C from device to host");
stopDevHost = omp_get_wtime();
stopCuda = omp_get_wtime();
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i]*h_A[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("CUDA test PASSED\n");
printf("CUDA host-to-dev time: %lf\nCUDA comp time: %lf\nCUDA dev-to-host time: %lf\n", stopHostDev - startHostDev,
stopComp - startComp,
stopDevHost - startDevHost);
printf("CUDA total time: %lf\n", stopCuda-startCuda);
// Free device global memory
err = hipFree(d_A);
checkErr(err, "Failed to free device vector A");
err = hipFree(d_C);
checkErr(err, "Failed to free device vector C");
startSeq = omp_get_wtime();
// repeat the computation sequentially
for (int i = 0; i < numElements; ++i)
{
h_C[i] = h_A[i]*h_A[i];
}
stopSeq = omp_get_wtime();
// verify again
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i]*h_A[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("\nNormal test PASSED\n");
printf("Normal time: %lf\n", stopSeq-startSeq);
// Free host memory
free(h_A);
free(h_C);
// Reset the device and exit
err = hipDeviceReset();
checkErr(err, "Unable to reset device");
printf("Done\n");
return 0;
}
| 128c7b514eef389878cb04fc776f46f78f4405b3.cu | /**
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
* Extended for use in CS 374 at Calvin College by Joel C. Adams.
*/
/**
* Vector squaring: C = A * A (element-wise).
*
* This sample is a very basic sample that squares each element of a vector.
* It is adapted from the vector-addition sample illustrating Chapter 2
* of the programming guide, with some additions like error checking.
*/
#include <stdio.h>
#include <omp.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the element-wise square of A into C.
* Both vectors have the same number of elements numElements.
*/
__global__
void vectorSquare(const float *A, float *C, unsigned long numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i]*A[i];
}
}
void checkErr(cudaError_t err, const char* msg)
{
if (err != cudaSuccess)
{
fprintf(stderr, "%s (error code %d: '%s')!\n", msg, err, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/**
* Host main routine
*/
int main(int argc, char** argv)
{
// timing variables
double startCuda, stopCuda;
double startSeq, stopSeq;
double startHostDev, stopHostDev;
double startComp, stopComp;
double startDevHost, stopDevHost;
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
unsigned long numElements = 50000;
if (argc == 2) {
numElements = strtoul( argv[1] , 0, 10 );
}
size_t size = numElements * sizeof(float);
printf("[Vector squaring of %lu elements]\n", numElements);
// Allocate the host input vector A
float * h_A = (float *)malloc(size);
// Allocate the host output vector C
float * h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
}
// 1a. Allocate the device input vector A
float * d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
checkErr(err, "Failed to allocate device vector A");
// 1.b. Allocate the device output vector C
float * d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
checkErr(err, "Failed to allocate device vector C");
// 2. Copy the host input vectors A and B in host memory
// to the device input vectors in device memory
startCuda = omp_get_wtime();
startHostDev = omp_get_wtime();
//printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
checkErr(err, "Failed to copy device vector A from host to device");
stopHostDev = omp_get_wtime();
startComp = omp_get_wtime();
// 3. Launch the vectorSquare CUDA kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
//printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorSquare
<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_C, numElements);
err = cudaGetLastError();
checkErr(err, "Failed to launch vectorSquare kernel");
stopComp = omp_get_wtime();
startDevHost = omp_get_wtime();
// 4. Copy the device result vector in device memory
// to the host result vector in host memory.
//printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
checkErr(err, "Failed to copy vector C from device to host");
stopDevHost = omp_get_wtime();
stopCuda = omp_get_wtime();
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i]*h_A[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("CUDA test PASSED\n");
printf("CUDA host-to-dev time: %lf\nCUDA comp time: %lf\nCUDA dev-to-host time: %lf\n", stopHostDev - startHostDev,
stopComp - startComp,
stopDevHost - startDevHost);
printf("CUDA total time: %lf\n", stopCuda-startCuda);
// Free device global memory
err = cudaFree(d_A);
checkErr(err, "Failed to free device vector A");
err = cudaFree(d_C);
checkErr(err, "Failed to free device vector C");
startSeq = omp_get_wtime();
// repeat the computation sequentially
for (int i = 0; i < numElements; ++i)
{
h_C[i] = h_A[i]*h_A[i];
}
stopSeq = omp_get_wtime();
// verify again
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i]*h_A[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("\nNormal test PASSED\n");
printf("Normal time: %lf\n", stopSeq-startSeq);
// Free host memory
free(h_A);
free(h_C);
// Reset the device and exit
err = cudaDeviceReset();
checkErr(err, "Unable to reset device");
printf("Done\n");
return 0;
}
|
d37d7181604eb333bd2e9f762bc87d00aa2405f9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_util.h"
#include <hip/hip_runtime.h>
#define MUL24(x, y) ((x) * (y))
#define WARP_SIZE 32
#define ELTWISE_FLAT_THREADS_X 128
#define ELTWISE_THREADS_X 32
#define ELTWISE_THREADS_Y 8
#define ADD_VEC_THREADS_X 64
#define ADD_VEC_THREADS_Y 4
#define NUM_BLOCKS_MAX 65535
#define NUM_SUM_COLS_THREADS_PER_BLOCK 128
#define AGG_SHORT_ROWS_THREADS_X 32
#define AGG_SHORT_ROWS_THREADS_Y 8
#define AGG_SHORT_ROWS_LOOPS_Y 32
#define AWR_NUM_THREADS 256
#define AWR_LOG_NUM_WARPS 3
#define LOG_WARP_SIZE 5
#define AWR_NUM_WARPS AWR_NUM_THREADS / WARP_SIZE
#define LOGREG_GRAD_THREADS_X 32
#define LOGREG_GRAD_THREADS_Y 4
#define DP_BLOCKSIZE 512
// device
template<typename T>
__device__ T shfl_down(T a, int b, int c = WARP_SIZE) {
#if __CUDA_ARCH__ >= 300
return __shfl_down(a, b, c);
#else
return 0;
#endif
}
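/*
Editor's sketch (assumption, not part of the original source): shfl_down is used below to
build intra-warp tree reductions without shared memory. The pattern, for an aggregate
functor agg, is roughly:
float val = ...; // one partial value per lane
for (int d = 1; d < WARP_SIZE; d <<= 1)
val = agg(val, shfl_down(val, d)); // lane i combines with lane i + d
after which lane 0 holds the warp-wide aggregate (sm_30+ only; the pre-Kepler fallback
above returns 0, so those code paths must not rely on the shuffle result).
*/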
__device__
__constant__ float kDevEps = (float) PRECISION_EPS;
// global
class UnaryOp {
public:
class Identity {
public:
__device__ inline float operator()(const float a) const {
return a;
}
};
class Validate {
public:
__device__ inline float operator()(const float a) const {
if (-kDevEps < a && a < kDevEps) return 0;
return a;
}
};
class Sign {
public:
__device__ inline float operator()(const float a) const {
return (a > kDevEps) - (a < -kDevEps);
}
};
class Sqrt {
public:
__device__ inline float operator()(const float a) const {
return sqrtf(a);
}
};
class Log {
public:
__device__ inline float operator()(const float a) const {
return __logf(a);
}
};
class Exp {
public:
__device__ inline float operator()(const float a) const {
return __expf(a);
}
};
class Sigmoid {
public:
__device__ inline float operator()(const float a) const {
return __fdividef(1.0f, 1.0f + __expf(-a));
}
};
class Scalar {
private:
const float scalar;
public:
Scalar(const float _scalar) : scalar(_scalar) {
}
__device__ inline float operator()(const float a) const {
return scalar;
}
};
class AddScalar {
private:
const float scalar;
public:
AddScalar(const float _scalar) : scalar(_scalar) {
}
__device__ inline float operator()(const float a) const {
return a + scalar;
}
};
class MultByScalar {
private:
const float scalar;
public:
MultByScalar(const float _scalar) : scalar(_scalar) {
}
__device__ inline float operator()(const float a) const {
return a * scalar;
}
};
class DivByScalar {
private:
const float scalar;
public:
DivByScalar(const float _scalar) : scalar(_scalar) {
}
__device__ inline float operator()(const float a) const {
return __fdividef(a, scalar);
}
};
class BiggerThanScalar {
private:
const float scalar;
public:
BiggerThanScalar(const float _scalar) : scalar(_scalar) {
}
__device__ inline float operator()(const float a) const {
return a > scalar;
}
};
};
class BinaryOp {
public:
class Second {
public:
__device__ inline float operator()(const float a, const float b) const {
return b;
}
};
class Add {
public:
__device__ inline float operator()(const float a, const float b) const {
return a + b;
}
};
class Subtract {
public:
__device__ inline float operator()(const float a, const float b) const {
return a - b;
}
};
class Multiply {
public:
__device__ inline float operator()(const float a, const float b) const {
return a * b;
}
};
class Divide {
public:
__device__ inline float operator()(const float a, const float b) const {
return __fdividef(a, b);
}
};
class SigmDer {
public:
__device__ inline float operator()(const float a, const float b) const {
return a * b * (1 - b);
}
};
};
class Aggs {
public:
class Sum {
public:
__device__ inline float operator()(const float a, const float b) const {
return a + b;
}
__device__ inline float getBaseValue() {
return 0;
}
};
class Max {
public:
__device__ inline float operator()(const float a, const float b) const {
return a > b ? a : b;
}
__device__ inline float getBaseValue() {
return -2e38;
}
};
};
/* ------- Unary operations ------- */
template<class Op>
__global__ void kEltwiseUnaryOpFlat(const float* a, float* const dest, int numElements, Op op) {
const int idxX = blockIdx.x * ELTWISE_FLAT_THREADS_X + threadIdx.x;
for (int x = idxX; x < numElements; x += gridDim.x * ELTWISE_FLAT_THREADS_X) {
dest[x] = op(a[x]);
}
}
template <class Op>
void _applyUnaryOp(MatGPU &mat, Op op) {
if (mat.empty()) return;
mexAssert(mat.stride_ == 1, "In _applyUnaryOp stride_ should be 1");
int _numElements = (int) (mat.size1_ * mat.size2_);
hipStream_t stream = MatGPU::_defaultStream;
dim3 threads = dim3(ELTWISE_FLAT_THREADS_X);
dim3 blocks = dim3(::min(128, DIVUP(_numElements, ELTWISE_FLAT_THREADS_X)));
hipLaunchKernelGGL(( kEltwiseUnaryOpFlat<Op>), dim3(blocks), dim3(threads), 0, stream, mat.data_, mat.data_, _numElements, op);
mexAssert(hipGetLastError() == hipSuccess, "kEltwiseUnaryOpFlat: kernel execution failed");
}
void cuda_validate(MatGPU &mat) {
_applyUnaryOp(mat, UnaryOp::Validate());
}
void cuda_sign(MatGPU &mat) {
_applyUnaryOp(mat, UnaryOp::Sign());
}
void cuda_sqrt(MatGPU &mat) {
_applyUnaryOp(mat, UnaryOp::Sqrt());
}
void cuda_log(MatGPU &mat) {
_applyUnaryOp(mat, UnaryOp::Log());
}
void cuda_exp(MatGPU &mat) {
_applyUnaryOp(mat, UnaryOp::Exp());
}
void cuda_sigmoid(MatGPU &mat) {
_applyUnaryOp(mat, UnaryOp::Sigmoid());
}
/* ------- Unary operations with scalars ------- */
void cuda_assval(MatGPU &mat, float val) {
_applyUnaryOp(mat, UnaryOp::Scalar(val));
}
void cuda_addval(MatGPU &mat, float val) {
_applyUnaryOp(mat, UnaryOp::AddScalar(val));
}
void cuda_subval(MatGPU &mat, float val) {
_applyUnaryOp(mat, UnaryOp::AddScalar(-val));
}
void cuda_multval(MatGPU &mat, float val) {
_applyUnaryOp(mat, UnaryOp::MultByScalar(val));
}
void cuda_divval(MatGPU &mat, float val) {
_applyUnaryOp(mat, UnaryOp::DivByScalar(val));
}
/* ------- Binary operations ------- */
template<class Op>
__global__ void kEltwiseBinaryOpFlat(const float* a, const float* b, float* const dest, int numElements, Op op) {
const int idxX = blockIdx.x * ELTWISE_FLAT_THREADS_X + threadIdx.x;
for (int x = idxX; x < numElements; x += gridDim.x * ELTWISE_FLAT_THREADS_X) {
dest[x] = op(a[x], b[x]);
}
}
template <class Op>
void _applyBinaryOp(MatGPU& mat, const MatGPU& b, Op op) {
if (mat.empty()) return;
mexAssert(mat.stride_ == 1 && b.stride_ == 1, "In _applyBinaryOp strides should be 1");
mexAssert(mat.order_ == b.order_, "In _applyBinaryOp orders should be the same");
mexAssert(mat.size1_ == b.size1_ && mat.size2_ == b.size2_,
"In _applyBinaryOp the sizes of matrices do not correspond");
int _numElements = (int) (mat.size1_ * mat.size2_);
hipStream_t stream = MatGPU::_defaultStream;
dim3 threads = dim3(ELTWISE_FLAT_THREADS_X);
dim3 blocks = dim3(::min(128, DIVUP(_numElements, ELTWISE_FLAT_THREADS_X)));
hipLaunchKernelGGL(( kEltwiseBinaryOpFlat<Op>), dim3(blocks), dim3(threads), 0, stream, mat.data_, b.data_, mat.data_, _numElements, op);
mexAssert(hipGetLastError() == hipSuccess, "kEltwiseBinaryOpFlat: kernel execution failed");
}
void cuda_addmat(MatGPU &mat, const MatGPU &b) {
_applyBinaryOp(mat, b, BinaryOp::Add());
}
void cuda_submat(MatGPU &mat, const MatGPU &b) {
_applyBinaryOp(mat, b, BinaryOp::Subtract());
}
void cuda_multmat(MatGPU &mat, const MatGPU &b) {
_applyBinaryOp(mat, b, BinaryOp::Multiply());
}
void cuda_divmat(MatGPU &mat, const MatGPU &b) {
_applyBinaryOp(mat, b, BinaryOp::Divide());
}
void cuda_sigmder(MatGPU &mat, const MatGPU &b) {
_applyBinaryOp(mat, b, BinaryOp::SigmDer());
}
/* ------- Conditional operations ------- */
template<class CondOp, class Op>
__global__ void kEltwiseCondOpFlat(const float* a, const float* condmat, bool incase,
float* const dest, int numElements, CondOp condOp, Op op) {
const int idxX = blockIdx.x * ELTWISE_FLAT_THREADS_X + threadIdx.x;
if (incase) {
for (int x = idxX; x < numElements; x += gridDim.x * ELTWISE_FLAT_THREADS_X) {
if (condOp(condmat[x])) {
dest[x] = op(a[x]);
}
}
} else {
for (int x = idxX; x < numElements; x += gridDim.x * ELTWISE_FLAT_THREADS_X) {
if (!condOp(condmat[x])) {
dest[x] = op(a[x]);
}
}
}
}
template <class CondOp, class Op>
void _applyCondOp(MatGPU& mat, const MatGPU& condmat, bool incase, CondOp condOp, Op op) {
if (mat.empty()) return;
mexAssert(mat.stride_ == 1 && condmat.stride_ == 1, "In _applyCondOp strides should be 1");
mexAssert(mat.order_ == condmat.order_, "In _applyCondOp orders should be the same");
mexAssert(mat.size1_ == condmat.size1_ && mat.size2_ == condmat.size2_,
"In _applyCondOp the sizes of matrices do not correspond");
int _numElements = (int) (mat.size1_ * mat.size2_);
hipStream_t stream = MatGPU::_defaultStream;
dim3 threads = dim3(ELTWISE_FLAT_THREADS_X);
dim3 blocks = dim3(::min(128, DIVUP(_numElements, ELTWISE_FLAT_THREADS_X)));
hipLaunchKernelGGL(( kEltwiseCondOpFlat<CondOp, Op>), dim3(blocks), dim3(threads), 0, stream,
mat.data_, condmat.data_, incase, mat.data_, _numElements, condOp, op);
mexAssert(hipGetLastError() == hipSuccess, "kEltwiseCondOpFlat: kernel execution failed");
}
void cuda_condassign(MatGPU& mat, const MatGPU& condmat, bool incase, float threshold, float val) {
_applyCondOp(mat, condmat, incase, UnaryOp::BiggerThanScalar(threshold), UnaryOp::Scalar(val));
}
void cuda_condadd(MatGPU& mat, const MatGPU& condmat, bool incase, float threshold, float val) {
_applyCondOp(mat, condmat, incase, UnaryOp::BiggerThanScalar(threshold), UnaryOp::AddScalar(val));
}
void cuda_condmult(MatGPU& mat, const MatGPU& condmat, bool incase, float threshold, float val) {
_applyCondOp(mat, condmat, incase, UnaryOp::BiggerThanScalar(threshold), UnaryOp::MultByScalar(val));
}
/* ------- Softmax derivatives ------- */
__global__ void kSoftmaxGrad(float* dE_dy_l, float* y_l, float* dE_dx_l, int numCases, int numOut) {
const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const int tidx = ty * numCases + tx;
if (ty < numOut && tx < numCases) {
float v = 0;
for (int j = 0; j < numOut; j++) {
v += dE_dy_l[j * numCases + tx] * ((j == ty) - y_l[j * numCases + tx]);
}
v *= y_l[tidx];
dE_dx_l[tidx] = v;
}
}
void computeSoftmaxGrad(const MatGPU& acts, const MatGPU& actsGrad, MatGPU& target) {
int numCases = (int) acts.size1_;
int numOut = (int) acts.size2_;
mexAssert(acts.stride_ == 1 && actsGrad.stride_ == 1 && target.stride_ == 1, "csg2");
mexAssert(acts.size1_ == actsGrad.size1_ && acts.size2_ == actsGrad.size2_, "csg1");
mexAssert(acts.size1_ == target.size1_ && acts.size2_ == target.size2_, "csg3");
hipStream_t stream = MatGPU::_defaultStream;
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y));
hipLaunchKernelGGL(( kSoftmaxGrad), dim3(blocks), dim3(threads), 0, stream, actsGrad.data_, acts.data_, target.data_, numCases, numOut);
mexAssert(hipGetLastError() == hipSuccess, "computeSoftmaxGrad: kernel execution failed");
}
/* ------- Transposition ------- */
/*
* dest here is assumed to be "not transposed" -- height and width correspond to it.
*/
template<class Op, bool checkBounds>
__global__ void kEltwiseUnaryOpTrans(const float* a, float* const dest,
int height, int width, int strideA, int strideDest, Op op) {
__shared__ float shmem[ELTWISE_THREADS_X][ELTWISE_THREADS_X + 1];
for (int by = ELTWISE_THREADS_X * blockIdx.y; by < height; by += ELTWISE_THREADS_X * gridDim.y) {
for (int bx = ELTWISE_THREADS_X * blockIdx.x; bx < width; bx += ELTWISE_THREADS_X * gridDim.x) {
const int readX = by + threadIdx.x;
const int readY = bx + threadIdx.y;
for (int y = 0; y < ELTWISE_THREADS_X; y+= ELTWISE_THREADS_Y) {
if (!checkBounds || (readX < height && readY + y < width)) {
shmem[threadIdx.x][threadIdx.y + y] = op(a[(readY + y) * strideA + readX]);
}
}
__syncthreads();
const int writeX = bx + threadIdx.x;
const int writeY = by + threadIdx.y;
for (int y = 0; y < ELTWISE_THREADS_X; y+= ELTWISE_THREADS_Y) {
if(!checkBounds || (writeX < width && writeY + y < height)) {
dest[(writeY + y) * strideDest + writeX] = shmem[threadIdx.y + y][threadIdx.x];
}
}
__syncthreads();
}
}
}
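// Editor's note (not part of the original source): the transpose kernel above stages an
// ELTWISE_THREADS_X x ELTWISE_THREADS_X tile in shared memory with each row padded by one
// element (the `+ 1` in the shmem declaration); the padding staggers column addresses
// across shared-memory banks so the column-wise accesses during write-out do not serialize
// on bank conflicts.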
void cuda_trans(const MatGPU &mat, MatGPU &target) {
mexAssert(mat.order_ == target.order_, "In cuda_trans orders should be the same");
mexAssert(mat.size1_ == target.size2_ && mat.size2_ == target.size1_,
"In cuda_trans sizes does not correspond to each other");
int width = (int) target.size1_;
int height = (int) target.size2_;
int mat_stride = (int) mat.size1_;
int target_stride = (int) target.size1_;
if (mat.order_ == true) {
mat_stride = (int) mat.size2_;
target_stride = (int) target.size2_;
}
hipStream_t stream = MatGPU::_defaultStream;
dim3 blocks(::min(NUM_BLOCKS_MAX, DIVUP(width, ELTWISE_THREADS_X)),
::min(NUM_BLOCKS_MAX, DIVUP(height, ELTWISE_THREADS_Y)));
dim3 threads(ELTWISE_THREADS_X, ELTWISE_THREADS_Y);
bool checkBounds = !(width % ELTWISE_THREADS_X == 0 && height % ELTWISE_THREADS_X == 0);
if (checkBounds) {
hipLaunchKernelGGL(( kEltwiseUnaryOpTrans<UnaryOp::Identity, true>), dim3(blocks), dim3(threads), 0, stream,
mat.data_, target.data_, height, width, mat_stride, target_stride, UnaryOp::Identity());
} else {
hipLaunchKernelGGL(( kEltwiseUnaryOpTrans<UnaryOp::Identity, false>), dim3(blocks), dim3(threads), 0, stream,
mat.data_, target.data_, height, width, mat_stride, target_stride, UnaryOp::Identity());
}
mexAssert(hipGetLastError() == hipSuccess, "kEltwiseUnaryOpTrans: kernel execution failed");
}
/* ------- Matrix <-> Vector operations ------- */
/*
* Matrix in ROW-MAJOR order!
*/
template <class Op>
__global__ void kRowVectorOp(const float* mat, const float* vec, float* const tgtMat,
int width, int height, int matStride, int tgtStride, Op op) {
__shared__ float shVec[ADD_VEC_THREADS_X];
const int bx = ADD_VEC_THREADS_X * blockIdx.x;
const int by = ADD_VEC_THREADS_Y * blockIdx.y;
for (int x = bx; x < width; x += gridDim.x * ADD_VEC_THREADS_X) {
__syncthreads();
if (x + threadIdx.x < width && threadIdx.y == 0) {
shVec[threadIdx.x] = vec[x + threadIdx.x];
}
__syncthreads();
if (x + threadIdx.x < width) {
for (int y = by + threadIdx.y; y < height; y += gridDim.y * ADD_VEC_THREADS_Y) {
tgtMat[y * tgtStride + x + threadIdx.x] = op(mat[y * matStride + x + threadIdx.x], shVec[threadIdx.x]);
}
}
}
}
/*
* Matrix in ROW-MAJOR order!
*/
template <class Op>
__global__ void kColVectorOp(float* mat, float* vec, float* tgtMat,
int width, int height, int matStride, int tgtStride, Op op) {
__shared__ float shVec[ADD_VEC_THREADS_Y];
const int by = ADD_VEC_THREADS_Y * blockIdx.y;
const int bx = ADD_VEC_THREADS_X * blockIdx.x;
const int tidx = ADD_VEC_THREADS_X * threadIdx.y + threadIdx.x;
mat += threadIdx.y * matStride;
vec += tidx;
tgtMat += threadIdx.y * tgtStride;
for (int y = by; y < height; y += gridDim.y * ADD_VEC_THREADS_Y) {
__syncthreads();
if (y + tidx < height && tidx < ADD_VEC_THREADS_Y) {
shVec[tidx] = vec[y];
}
__syncthreads();
if (y + threadIdx.y < height) {
for (int x = bx + threadIdx.x; x < width; x += gridDim.x * ADD_VEC_THREADS_X) {
tgtMat[(y) * tgtStride + x] = op(mat[(y) * matStride + x], shVec[threadIdx.y]);
}
}
}
}
template <class Op>
void _applyBinaryV(MatGPU &mat, const MatGPU &vect, const MatGPU &mask, size_t dim, Op op) {
mexAssert(mat.data_ != vect.data_, "av1");
mexAssert(mat.stride_ == 1 && vect.stride_ == 1, "av2");
if (!mask.empty()) {
mexAssert(false, "Mask for _applyBinaryV is not implemented yet");
mexAssert(mask.stride_ == 1, "av3");
mexAssert(mat.size1_ == mask.size1_ && mat.size2_ == mask.size2_,
"In '_applyBinaryV' the size of mask matrix is incorrect");
}
int width = (int) mat.size1_;
int height = (int) mat.size2_;
dim3 threads(ADD_VEC_THREADS_X, ADD_VEC_THREADS_Y);
hipStream_t stream = MatGPU::_defaultStream;
if (dim == 1) {
mexAssert(vect.size1_ == 1 && vect.size2_ == mat.size2_, "In '_applyBinaryV' the sizes don't correspond");
if (mask.empty()) {
dim3 blocks(::min(512, DIVUP(width, ADD_VEC_THREADS_X)), ::min(NUM_BLOCKS_MAX, DIVUP(height, ADD_VEC_THREADS_Y)));
hipLaunchKernelGGL(( kColVectorOp<Op>), dim3(blocks), dim3(threads), 0, stream, mat.data_, vect.data_, mat.data_, width, height, width, width, op);
} else {
mexAssert(false, "not implemented yet");
}
}
else if (dim == 2) {
/* actually not used, but let it be here just in case */
mexAssert(vect.size1_ == mat.size1_ && vect.size2_ == 1, "In '_applyBinaryV' the sizes don't correspond");
if (mask.empty()) {
dim3 blocks(::min(NUM_BLOCKS_MAX, DIVUP(width, ADD_VEC_THREADS_X)), ::min(NUM_BLOCKS_MAX, DIVUP(height, ADD_VEC_THREADS_Y)));
hipLaunchKernelGGL(( kRowVectorOp<Op>), dim3(blocks), dim3(threads), 0, stream, mat.data_, vect.data_, mat.data_, width, height, width, width, op);
} else {
}
} else {
mexAssert(false, "_applyBinaryV the dimension parameter must be either 1 or 2");
}
mexAssert(hipGetLastError() == hipSuccess, "_applyBinaryV: kernel execution failed");
}
void cuda_addvect(MatGPU &mat, const MatGPU &vect, const MatGPU &mask, size_t dim) {
_applyBinaryV(mat, vect, mask, dim, BinaryOp::Add());
}
void cuda_multvect(MatGPU &mat, const MatGPU &vect, const MatGPU &mask, size_t dim) {
_applyBinaryV(mat, vect, mask, dim, BinaryOp::Multiply());
}
/*
* To be used when the rows are <= 64.
*
* TODO: try to reduce reg usage. i think this can be made faster too.
*/
//#define AGG_SHORT_ROWS_LOOPS_X 4
template <class Agg, class UnaryOp, class BinaryOp, int LOOPS_X, int THREADS_X>
__global__ void kAggShortRows(const float* mat, float* matSum, int width, int height, Agg agg, UnaryOp uop, BinaryOp bop) {
const int shmemX = THREADS_X + 1;
__shared__ float shmem[AGG_SHORT_ROWS_THREADS_Y*shmemX];
const int tidx = threadIdx.y * THREADS_X + threadIdx.x;
const int ty = LOOPS_X == 1 ? tidx / width : threadIdx.y; // when loops==1, width is gonna be smaller than block x dim
const int tx = LOOPS_X == 1 ? tidx % width : threadIdx.x;
const int bidx = blockIdx.y * gridDim.x + blockIdx.x;
const int blockRowIdx = bidx * AGG_SHORT_ROWS_LOOPS_Y * AGG_SHORT_ROWS_THREADS_Y;
float* shmemWrite = shmem + MUL24(ty, shmemX) + tx;
matSum += blockRowIdx + tidx;
// shmem[MUL24(threadIdx.y, shmemX) + threadIdx.x] = 0;
mat += width * blockRowIdx + MUL24(ty, width) + tx;
float* shmemWriteZeros = &shmem[MUL24(threadIdx.y,shmemX) + threadIdx.x];
bool doAgg = tidx < AGG_SHORT_ROWS_THREADS_Y ;
if (blockRowIdx < height) {
#pragma unroll
for (int y = 0; y < AGG_SHORT_ROWS_LOOPS_Y*AGG_SHORT_ROWS_THREADS_Y; y += AGG_SHORT_ROWS_THREADS_Y) {
doAgg &= tidx + y + blockRowIdx < height;
const bool heightIdxOK = ty < AGG_SHORT_ROWS_THREADS_Y && ty + y + blockRowIdx < height;
shmemWriteZeros[0] = agg.getBaseValue();
__syncthreads();
#pragma unroll
for(int x = 0; x < LOOPS_X * THREADS_X; x+= THREADS_X) {
// __syncthreads();
if (heightIdxOK && x + tx < width) {
shmemWrite[0] = agg(uop(mat[x]), shmemWrite[0]);
}
}
__syncthreads();
if (doAgg) {
/*
* I tried doing this final sum as a 4-step reduction, with 8 threads
* per warp participating. It was slightly slower.
*/
float accum = agg.getBaseValue();
float* shmemRead = shmem + MUL24(tidx, shmemX);
// this loops too much if the rows are really short :(
#pragma unroll
for (int i = 0; i < THREADS_X; i++) {
accum = agg(accum, shmemRead[0]);
shmemRead++;
}
matSum[0] = bop(matSum[0], accum);
matSum += AGG_SHORT_ROWS_THREADS_Y;
}
__syncthreads();
mat += width * AGG_SHORT_ROWS_THREADS_Y;
}
}
}
template <class Agg, class UnaryOp, class BinaryOp>
__global__ void kAggShortRows2(const float* mat, float* matSum, int width, int height, Agg agg, UnaryOp uop, BinaryOp bop) {
const int shmemX = AGG_SHORT_ROWS_THREADS_X + 1;
__shared__ float shmem[AGG_SHORT_ROWS_THREADS_Y*shmemX];
const int LOOPS_X = DIVUP(width, AGG_SHORT_ROWS_THREADS_X);
const int tidx = threadIdx.y * AGG_SHORT_ROWS_THREADS_X + threadIdx.x;
const int bidx = blockIdx.y * gridDim.x + blockIdx.x;
const int blockRowIdx = bidx * AGG_SHORT_ROWS_LOOPS_Y * AGG_SHORT_ROWS_THREADS_Y;
float* shmemWrite = shmem + MUL24(threadIdx.y, shmemX) + threadIdx.x;
matSum += blockRowIdx + tidx;
// shmem[MUL24(threadIdx.y, shmemX) + threadIdx.x] = 0;
mat += width * blockRowIdx + MUL24(threadIdx.y, width) + threadIdx.x;
bool doAgg = tidx < AGG_SHORT_ROWS_THREADS_Y;
if(blockRowIdx < height) {
for (int y = 0; y < AGG_SHORT_ROWS_LOOPS_Y*AGG_SHORT_ROWS_THREADS_Y; y += AGG_SHORT_ROWS_THREADS_Y) {
doAgg &= tidx + y + blockRowIdx < height;
const bool heightIdxOK = threadIdx.y + y + blockRowIdx < height;
float accum = agg.getBaseValue();
shmemWrite[0] = agg.getBaseValue();
for(int x = 0; x < LOOPS_X * AGG_SHORT_ROWS_THREADS_X; x+= AGG_SHORT_ROWS_THREADS_X) {
// __syncthreads();
if (heightIdxOK && x + threadIdx.x < width) {
shmemWrite[0] = agg(uop(mat[x]), shmemWrite[0]);
}
}
__syncthreads();
if (doAgg) {
float* shmemRead = shmem + MUL24(tidx, shmemX);
#pragma unroll
for (int i = 0; i < AGG_SHORT_ROWS_THREADS_X; i++) {
accum = agg(accum, shmemRead[0]);
shmemRead++;
}
matSum[0] = bop(matSum[0], accum);
matSum += AGG_SHORT_ROWS_THREADS_Y;
}
__syncthreads();
mat += width * AGG_SHORT_ROWS_THREADS_Y;
}
}
}
/*
* Implements multiscan idea from http://www.moderngpu.com
* Not really useful for pure reductions but neat nonetheless.
*/
template<class Agg, class UnaryOp, class BinaryOp>
__global__ void kAggRows_wholerow_nosync(const float* mat, float* matSum, int width, int height,
Agg agg, UnaryOp uop, BinaryOp bop) {
const int tidx = threadIdx.x;
const int warpIdx = tidx / WARP_SIZE;
const int lane = tidx % WARP_SIZE;
__shared__ float accum[(WARP_SIZE + 1) * AWR_NUM_WARPS];
__shared__ float finalAccum[AWR_NUM_WARPS];
float* myAccum = &accum[warpIdx * (WARP_SIZE + 1) + lane];
float* myFinalAccum = &finalAccum[tidx];
//volatile float* vMyAccum = &accum[warpIdx * (WARP_SIZE + 1) + lane];
matSum += blockIdx.y;
mat += width * blockIdx.y;
float rAccum = agg.getBaseValue(); // cache in register, a bit faster than shmem
#pragma unroll 32
for (int x = tidx; x < width; x += AWR_NUM_THREADS) {
rAccum = agg(rAccum, uop(mat[x]));
}
myAccum[0] = rAccum;
// Each warp does a reduction that doesn't require synchronization
#pragma unroll
for (int i = 0; i < LOG_WARP_SIZE; i++) {
const int d = 1 << i;
myAccum[0] = agg(myAccum[0], shfl_down(myAccum[0], d));
}
__syncthreads();
// The warps write their results
if (tidx < AWR_NUM_WARPS) {
//volatile float* vMyFinalAccum = &finalAccum[tidx];
myFinalAccum[0] = accum[tidx * (WARP_SIZE + 1)];
#pragma unroll
for (int i = 0; i < AWR_LOG_NUM_WARPS; i++) {
const int d = 1 << i;
myFinalAccum[0] = agg(myFinalAccum[0], shfl_down(myFinalAccum[0], d));
}
if (tidx == 0) {
matSum[0] = bop(matSum[0], myFinalAccum[0]);
matSum += gridDim.y;
}
}
}
/*
* This one gets coalesced reads but computes only a partial sum which
* must either be summed again (recursively) or summed on the host.
*/
template<class Agg, class UnaryOp, class BinaryOp, int blockSize>
__global__ void kAggRows(const float* mat, float* matSum, int width, int height, int sumWidth, Agg agg, UnaryOp uop, BinaryOp bop) {
const int idxX = blockIdx.x * blockSize*2 + threadIdx.x;
__shared__ float accum[blockSize*2];
matSum += blockIdx.y * sumWidth + blockIdx.x;
/*
* Here it's important to make sure that all threads in a block call __syncthreads,
* so I have even the redundant threads (for which idxX >= width) enter this loop
* just so that they may call __syncthreads at the appropriate times.
*/
mat += width * blockIdx.y + idxX;
accum[threadIdx.x] = agg.getBaseValue();
accum[threadIdx.x + blockSize] = agg.getBaseValue();
for (int idxY = blockIdx.y; idxY < height; idxY += gridDim.y) {
if (idxX < width) {
accum[threadIdx.x] = uop(mat[0]);
if(idxX + blockSize < width)
accum[threadIdx.x + blockSize] = uop(mat[blockSize]);
}
if (blockSize >= 512) {
__syncthreads();
if (threadIdx.x < 512)
accum[threadIdx.x] = agg(accum[threadIdx.x], accum[threadIdx.x + 512]);
}
if (blockSize >= 256) {
__syncthreads();
if (threadIdx.x < 256)
accum[threadIdx.x] = agg(accum[threadIdx.x],accum[threadIdx.x + 256]);
}
if (blockSize >= 128) {
__syncthreads();
if (threadIdx.x < 128)
accum[threadIdx.x] = agg(accum[threadIdx.x],accum[threadIdx.x + 128]);
}
if (blockSize >= 64) {
__syncthreads();
if (threadIdx.x < 64)
accum[threadIdx.x] = agg(accum[threadIdx.x],accum[threadIdx.x + 64]);
}
__syncthreads();
volatile float* myAccum = &accum[threadIdx.x];
if (threadIdx.x < 32) { // executed only by first warp
myAccum[0] = agg(myAccum[0], myAccum[32]);
myAccum[0] = agg(myAccum[0], myAccum[16]);
myAccum[0] = agg(myAccum[0], myAccum[8]);
myAccum[0] = agg(myAccum[0], myAccum[4]);
myAccum[0] = agg(myAccum[0], myAccum[2]);
myAccum[0] = agg(myAccum[0], myAccum[1]);
}
if (threadIdx.x == 0) {
matSum[0] = bop(matSum[0], myAccum[0]);
matSum += gridDim.y * sumWidth;
}
__syncthreads();
mat += width * gridDim.y;
}
}
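// Editor's note (not part of the original source): kAggRows is the classic blocked
// shared-memory tree reduction -- every thread loads up to two elements, the block halves
// the active range (512 -> 256 -> ... -> 64) with __syncthreads() between steps, and the
// final 64 -> 1 stage runs within a single warp through a volatile pointer, so no further
// block-wide synchronization is needed there.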
/*
* Bad when there are few columns.
*/
template <class Agg, class UnaryOp, class BinaryOp>
__global__ void kDumbAggCols(hipTextureObject_t mat, float* const vec, int width, int height, Agg agg, UnaryOp uop, BinaryOp bop) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < width) {
float mx = agg.getBaseValue();
for (int j = 0; j < height; j++) {
mx = agg(uop(tex1Dfetch<float>(mat, width * j + idx)), mx);
}
vec[idx] = bop(vec[idx], mx);
}
}
/*
* Better with few columns because it only computes a partial sum.
*/
template <class Agg, class UnaryOp>
__global__ void kAggCols(hipTextureObject_t mat, float* const vec, int width, int height, int sumLength, Agg agg, UnaryOp op) {
const int idxX = blockIdx.x * blockDim.x + threadIdx.x;
const int idxY = blockIdx.y * sumLength;
if (idxX < width) {
float mx = agg.getBaseValue();
for (int j = idxY; j < min(height,idxY + sumLength); j++) {
mx = agg(op(tex1Dfetch<float>(mat, j * width + idxX)), mx);
}
vec[blockIdx.y * width + idxX] = mx;
}
}
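// Editor's note (not part of the original source): kAggCols reduces only `sumLength` rows
// per block, producing DIVUP(height, sumLength) rows of partial aggregates in a temporary
// matrix; _aggregate() below then finishes the reduction with a kDumbAggCols pass over
// that temporary, giving a two-stage column reduction for very tall, narrow inputs.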
/*
* TODO: this is a mess, fix it. it works pretty fast but it's too ugly.
* TODO: this function is _really_ bad for very long aggregations of few columns.
*/
template<class Agg, class UOp, class BOp>
void _aggregate(MatGPU &mat, MatGPU& target, Agg agg, UOp uop, BOp bop, int axis) {
mexAssert(axis == 0 || axis == 1, "ag1");
mexAssert(mat.stride_ == 1 && target.stride_ == 1, "ag2");
mexAssert(mat.data_ != target.data_, "ag3");
mexAssert(!mat.empty(), "ag4");
int width = (int) mat.size1_;
int height = (int) mat.size2_;
hipStream_t stream = MatGPU::_defaultStream;
if (axis == 0 ) { //sum along size2_
mexAssert(target.size1_ == mat.size1_ && target.size2_ == 1, "ag5");
if ((height <= 2048 || width >= 4096)) {
int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK);
mexAssert(numBlocks * NUM_SUM_COLS_THREADS_PER_BLOCK >= width, "ag6");
mexAssert(numBlocks < NUM_BLOCKS_MAX, "ag7");
hipLaunchKernelGGL(( kDumbAggCols<Agg, UOp, BOp>), dim3(numBlocks),dim3(NUM_SUM_COLS_THREADS_PER_BLOCK), 0, stream, mat.getTextureObject(), target.data_, width, height, agg, uop, bop);
mexAssert(hipGetLastError() == hipSuccess, "kDumbAggCols: kernel execution failed");
} else { // Specialize the case when we have very long columns and few of them
const int sumLength = 128;
MatGPU tmp(width, DIVUP(height, sumLength));
int numBlocksX = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK);
int numBlocksY = DIVUP(height, sumLength);
dim3 blocks(numBlocksX, numBlocksY);
dim3 threads(NUM_SUM_COLS_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kAggCols<Agg, UOp>), dim3(blocks),dim3(threads), 0, stream, mat.getTextureObject(), tmp.data_, width, height, sumLength, agg, uop);
mexAssert(hipGetLastError() == hipSuccess, "kAggCols: kernel execution failed");
int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kDumbAggCols<Agg, UOp, BOp>), dim3(numBlocks),dim3(NUM_SUM_COLS_THREADS_PER_BLOCK), 0, stream, tmp.getTextureObject(), target.data_, width, height, agg, uop, bop);
mexAssert(hipGetLastError() == hipSuccess, "kDumbAggCols: kernel execution failed");
}
} else { // sum along size1_
mexAssert(target.size1_ == 1 && target.size2_ == mat.size2_, "ag8");
if (width > 1) {
if (height >= 16384) { // linear aggregation
int numBlocksX = 1;
int numBlocksY = DIVUP(height, AGG_SHORT_ROWS_THREADS_Y*AGG_SHORT_ROWS_LOOPS_Y);
int numThreadsX = width <= 4 ? 4 : width <= 8 ? 8 : width <= 12 ? 12 : width <= 16 ? 16 : AGG_SHORT_ROWS_THREADS_X;
int numThreadsY = AGG_SHORT_ROWS_THREADS_Y;
while (numBlocksY > NUM_BLOCKS_MAX) {
numBlocksY = DIVUP(numBlocksY, 2);
numBlocksX *= 2;
}
dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
if(width <= 16) {
if(width <= 4) {
hipLaunchKernelGGL(( kAggShortRows<Agg, UOp, BOp, 1, 4>), dim3(grid), dim3(threads), 0, stream, mat.data_, target.data_, width, height, agg, uop, bop);
} else if(width <= 8) {
hipLaunchKernelGGL(( kAggShortRows<Agg, UOp, BOp, 1, 8>), dim3(grid), dim3(threads), 0, stream, mat.data_, target.data_, width, height, agg, uop, bop);
} else if(width <= 12) {
hipLaunchKernelGGL(( kAggShortRows<Agg, UOp, BOp, 1, 12>), dim3(grid), dim3(threads), 0, stream, mat.data_, target.data_, width, height, agg, uop, bop);
} else {
hipLaunchKernelGGL(( kAggShortRows<Agg, UOp, BOp, 1, 16>), dim3(grid), dim3(threads), 0, stream, mat.data_, target.data_, width, height, agg, uop, bop);
}
} else if(width <= 32) {
hipLaunchKernelGGL(( kAggShortRows<Agg, UOp, BOp, 2, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, stream, mat.data_, target.data_, width, height, agg, uop, bop);
} else if(width <= 48){
hipLaunchKernelGGL(( kAggShortRows<Agg, UOp, BOp, 3, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, stream, mat.data_, target.data_, width, height, agg, uop, bop);
} else if(width <= 64){
hipLaunchKernelGGL(( kAggShortRows<Agg, UOp, BOp, 4, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, stream, mat.data_, target.data_, width, height, agg, uop, bop);
} else {
hipLaunchKernelGGL(( kAggShortRows2<Agg, UOp, BOp>), dim3(grid), dim3(threads), 0, stream, mat.data_, target.data_, width, height, agg, uop, bop);
}
} else {
if (width >= 512) {
// NOTE: this is the only case which I bothered to try to optimize for Kepler
dim3 threads(AWR_NUM_THREADS);
dim3 blocks(1, height);
hipLaunchKernelGGL(( kAggRows_wholerow_nosync), dim3(blocks), dim3(threads), 0, stream, mat.data_, target.data_, width, height, agg, uop, bop);
} else {
int numThreadsX = width <= 64 ? 32 : (width <= 128 ? 64 : (width <= 256 ? 128 : (width <= 512 ? 256 : 512)));
int numThreadsY = 1;
int numBlocksX = DIVUP(width, 2*numThreadsX);
int numBlocksY = ::min(height, NUM_BLOCKS_MAX);
dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
mexAssert(numBlocksX <= NUM_BLOCKS_MAX, "ag9");
mexAssert(numBlocksY <= NUM_BLOCKS_MAX, "ag10");
if(width <= 64) {
hipLaunchKernelGGL(( kAggRows<Agg, UOp, BOp, 32>), dim3(grid), dim3(threads), 0, stream, mat.data_, target.data_, width, height, (int) target.size1_, agg, uop, bop);
} else if(width <= 128) {
hipLaunchKernelGGL(( kAggRows<Agg, UOp, BOp, 64>), dim3(grid), dim3(threads), 0, stream, mat.data_, target.data_, width, height, (int) target.size1_, agg, uop, bop);
} else if(width <= 256) {
hipLaunchKernelGGL(( kAggRows<Agg, UOp, BOp, 128>), dim3(grid), dim3(threads), 0, stream, mat.data_, target.data_, width, height, (int) target.size1_, agg, uop, bop);
} else if(width <= 512) {
hipLaunchKernelGGL(( kAggRows<Agg, UOp, BOp, 256>), dim3(grid), dim3(threads), 0, stream, mat.data_, target.data_, width, height, (int) target.size1_, agg, uop, bop);
} else {
hipLaunchKernelGGL(( kAggRows<Agg, UOp, BOp, 512>), dim3(grid), dim3(threads), 0, stream, mat.data_, target.data_, width, height, (int) target.size1_, agg, uop, bop);
}
mexAssert(hipGetLastError() == hipSuccess, "agg rows: kernel execution failed");
}
}
} else {
mexAssert(false, "fake aggregation, use assignment instead");
//target.applyBinary(NVMatrixBinaryOps::CompositeSecond<UOp, BOp>(uop, bop), *this, target, stream);
}
}
}
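/*
 * The thin wrappers below translate the user-facing `dim` argument into the
 * internal `axis` convention of _aggregate: dim == 1 aggregates along size1_
 * (axis == 1, producing a 1 x size2_ result), dim == 2 aggregates along
 * size2_ (axis == 0, producing a size1_ x 1 result).
 */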
void cuda_sumvect(MatGPU &mat, MatGPU &vect, size_t dim) {
int axis = 2 - (int) dim;
_aggregate(mat, vect, Aggs::Sum(), UnaryOp::Identity(), BinaryOp::Second(), axis);
}
void cuda_maxvect(MatGPU &mat, MatGPU &vect, size_t dim) {
int axis = 2 - (int) dim;
_aggregate(mat, vect, Aggs::Max(), UnaryOp::Identity(), BinaryOp::Second(), axis);
}
/* ------- Sergey Demyanov ------- */
/* ------- Image jittering ------- */
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread>
__global__ void kTransform(float* imgs, float* targets, int imgSizeX, int imgSizeY, int outputsX, int outputsY,
int numFilters, int numImages, float *shift_mat, float *scale_mat, float *mirror_mat, float *angle_mat, float defval) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = DIVUP(numFilters, B_Y*filtersPerThread);
const int outputIdxX = blockIdx.x / numImgBlocks;
const int outputIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int myFilterIdx = (blockFilterIdx + threadIdx.y*filtersPerThread);
if (myFilterIdx >= numFilters) {
return;
}
const int outputIdx = outputIdxY * outputsX + outputIdxX;
const int numOutputs = outputsX * outputsY;
const int imgPixels = imgSizeX * imgSizeY;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += myFilterIdx * imgPixels * numImages + imgIdx;
targets += (myFilterIdx * numOutputs + outputIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
const float m1 = (float) imgSizeX / 2 - 0.5;
const float m2 = (float) imgSizeY / 2 - 0.5;
const float n1 = (float) outputsX / 2 - 0.5;
const float n2 = (float) outputsY / 2 - 0.5;
// #pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
const int curImgIdx = imgIdx + i * B_X;
if (curImgIdx < numImages) {
const float angcos = (float) cos(angle_mat[curImgIdx]);
const float angsin = (float) sin(angle_mat[curImgIdx]);
const float xi1 = (outputIdxX - n1) * scale_mat[curImgIdx]; // scale[0];
const float xi2 = (outputIdxY - n2) * scale_mat[curImgIdx + numImages]; //scale[1];
float x1 = xi1 * angcos - xi2 * angsin + m1 + shift_mat[curImgIdx]; //shift[0];
float x2 = xi1 * angsin + xi2 * angcos + m2 + shift_mat[curImgIdx + numImages]; //shift[1];
if (mirror_mat[curImgIdx] > 0.5) x1 = imgSizeX - 1 - x1;
if (mirror_mat[curImgIdx + numImages] > 0.5) x2 = imgSizeY - 1 - x2;
const int xf1 = (int) x1;
const int xf2 = (int) x2;
if (0 <= xf1 && xf1 + 1 < imgSizeX &&
0 <= xf2 && xf2 + 1 < imgSizeY) {
const int imgPx11 = xf2 * imgSizeX + xf1;
const int imgPx21 = xf2 * imgSizeX + xf1 + 1;
const int imgPx12 = (xf2 + 1) * imgSizeX + xf1;
const int imgPx22 = (xf2 + 1) * imgSizeX + xf1 + 1;
for (int f = 0; f < filtersPerThread; f++) {
const int imgInd11 = (f * imgPixels + imgPx11) * numImages + i * B_X;
const int imgInd21 = (f * imgPixels + imgPx21) * numImages + i * B_X;
const int imgInd12 = (f * imgPixels + imgPx12) * numImages + i * B_X;
const int imgInd22 = (f * imgPixels + imgPx22) * numImages + i * B_X;
const float vl = (x1 - xf1) * imgs[imgInd21] + (xf1 + 1 - x1) * imgs[imgInd11];
const float vh = (x1 - xf1) * imgs[imgInd22] + (xf1 + 1 - x1) * imgs[imgInd12];
prod[f][i] = (x2 - xf2) * vh + (xf2 + 1 - x2) * vl;
}
} else {
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = defval;
}
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[f * numOutputs * numImages + i * B_X] = prod[f][i];
}
}
}
}
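/*
 * kTransform above performs inverse mapping: each output pixel is rotated,
 * scaled and shifted back into source-image coordinates, optionally mirrored,
 * and then bilinearly interpolated from its four neighbours (vl and vh are
 * the two horizontal lerps, blended vertically). Coordinates that land
 * outside the image are filled with defval.
 */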
void _transformActs(const MatGPU &images, MatGPU &target,
size_t imgSize1, size_t imgSize2,
size_t targSize1, size_t targSize2,
const MatGPU &shift_mat, const MatGPU &scale_mat,
const MatGPU &mirror_mat, const MatGPU &angle_mat, float defval) {
int imgSizeX = (int) imgSize1;
int imgSizeY = (int) imgSize2;
int imgPixels = imgSizeX * imgSizeY;
int outputsX = (int) targSize1;
int outputsY = (int) targSize2;
int targPixels = outputsX * outputsY;
int numImages = images.size1_;
mexAssert(images.size2_ % imgPixels == 0, "ta2");
int numFilters = images.size2_ / imgPixels;
mexAssert(target.size1_ == numImages, "ta1");
mexAssert(target.size2_ == targPixels * numFilters, "ta3");
hipStream_t stream = MatGPU::_defaultStream;
int filtersPerThread = 1;
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numFilters, 4 * filtersPerThread) * outputsY);
if (imgsPerThread == 4) {
hipLaunchKernelGGL(( kTransform<4, 32, 4, 1>), dim3(blocks), dim3(threads), 0, stream,
images.data_, target.data_, imgSizeX, imgSizeY, outputsX, outputsY, numFilters, numImages,
shift_mat.data_, scale_mat.data_, mirror_mat.data_, angle_mat.data_, defval);
} else if (imgsPerThread == 2) {
hipLaunchKernelGGL(( kTransform<4, 32, 2, 1>), dim3(blocks), dim3(threads), 0, stream,
images.data_, target.data_, imgSizeX, imgSizeY, outputsX, outputsY, numFilters, numImages,
shift_mat.data_, scale_mat.data_, mirror_mat.data_, angle_mat.data_, defval);
} else {
hipLaunchKernelGGL(( kTransform<4, 32, 1, 1>), dim3(blocks), dim3(threads), 0, stream,
images.data_, target.data_, imgSizeX, imgSizeY, outputsX, outputsY, numFilters, numImages,
shift_mat.data_, scale_mat.data_, mirror_mat.data_, angle_mat.data_, defval);
}
mexAssert(hipGetLastError() == hipSuccess, "_transformActs: kernel execution failed");
}
/* ------- Total matrix aggregation ------- */
/*
* Sergey Demyanov
* This kernel is faster than the one in the code of Alex Krizhevsky, so I'll leave it here
*/
__global__
void _totalSum(const float *a, float* const b, size_t n) {
__shared__ float sdata[DP_BLOCKSIZE];
size_t tid = threadIdx.x;
sdata[tid] = 0;
size_t gridSize = blockDim.x * gridDim.x;
size_t i = blockDim.x * blockIdx.x + tid;
if (i >= n) return;
while (i < n) {
sdata[tid] += a[i];
i += gridSize;
}
__syncthreads();
// do reduction in shared mem
size_t ns = blockDim.x; // number of elements in the shared array
if (ns > n - blockDim.x * blockIdx.x ) {
ns = n - blockDim.x * blockIdx.x;
}
size_t s = blockDim.x; // sum stride
while (s >= ns) s >>= 1;
while (s > 32) {
if (tid < s && tid + s < ns) sdata[tid] += sdata[tid + s];
__syncthreads();
s >>= 1;
}
// for s <= WARP_SIZE no synchronization is needed
if (tid < 32) {
if (tid + 32 < ns) sdata[tid] += sdata[tid + 32];
if (tid + 16 < ns) sdata[tid] += sdata[tid + 16];
if (tid + 8 < ns) sdata[tid] += sdata[tid + 8];
if (tid + 4 < ns) sdata[tid] += sdata[tid + 4];
if (tid + 2 < ns) sdata[tid] += sdata[tid + 2];
if (tid + 1 < ns) sdata[tid] += sdata[tid + 1];
}
// write result for this block to global mem
if (tid == 0) b[blockIdx.x] = sdata[0];
}
float cuda_sum(const MatGPU &mat) {
mexAssert(!mat.empty(), "In cuda_sum mat is empty");
mexAssert(mat.stride_ == 1, "In cuda_sum stride_ should be 1");
hipStream_t stream = MatGPU::_defaultStream;
size_t numElements = mat.size1_ * mat.size2_;
size_t blocks_number = MIN(DIVUP(numElements, ELTWISE_FLAT_THREADS_X), ELTWISE_FLAT_THREADS_X);
//MatGPU::_sum_buf1.resize(ELTWISE_FLAT_THREADS_X, 1);
//MatGPU::_sum_buf2.resize(1, 1);
MatGPU partsums, totalsum;
MatGPU::swapWithBuffer(partsums, ELTWISE_FLAT_THREADS_X);
partsums.resize(ELTWISE_FLAT_THREADS_X, 1);
MatGPU::swapWithBuffer(totalsum, 1);
totalsum.resize(1, 1);
hipLaunchKernelGGL(( _totalSum), dim3(blocks_number), dim3(ELTWISE_FLAT_THREADS_X), 0, stream,
mat.data_, partsums.data_, numElements);
hipLaunchKernelGGL(( _totalSum), dim3(1), dim3(ELTWISE_FLAT_THREADS_X), 0, stream,
partsums.data_, totalsum.data_, blocks_number);
MatCPU cpusum(1, 1);
DeviceToHost(totalsum, cpusum);
MatGPU::swapWithBuffer(partsums, ELTWISE_FLAT_THREADS_X);
MatGPU::swapWithBuffer(totalsum, 1);
return cpusum(0, 0);
}
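/*
 * Minimal usage sketch for the reduction above (illustrative only; the
 * variable names are placeholders and assume a MatGPU already filled on the
 * device):
 *
 * MatGPU a(rows, cols); // device matrix, populated elsewhere
 * float total = cuda_sum(a); // returns the sum of all elements on the host
 *
 * The first _totalSum launch writes one partial sum per block (at most
 * ELTWISE_FLAT_THREADS_X of them), the second launch reduces those partials
 * to a single value, which is then copied back with DeviceToHost.
 */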
| d37d7181604eb333bd2e9f762bc87d00aa2405f9.cu | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_util.h"
#include <cuda.h>
#define MUL24(x, y) ((x) * (y))
#define WARP_SIZE 32
#define ELTWISE_FLAT_THREADS_X 128
#define ELTWISE_THREADS_X 32
#define ELTWISE_THREADS_Y 8
#define ADD_VEC_THREADS_X 64
#define ADD_VEC_THREADS_Y 4
#define NUM_BLOCKS_MAX 65535
#define NUM_SUM_COLS_THREADS_PER_BLOCK 128
#define AGG_SHORT_ROWS_THREADS_X 32
#define AGG_SHORT_ROWS_THREADS_Y 8
#define AGG_SHORT_ROWS_LOOPS_Y 32
#define AWR_NUM_THREADS 256
#define AWR_LOG_NUM_WARPS 3
#define LOG_WARP_SIZE 5
#define AWR_NUM_WARPS AWR_NUM_THREADS / WARP_SIZE
#define LOGREG_GRAD_THREADS_X 32
#define LOGREG_GRAD_THREADS_Y 4
#define DP_BLOCKSIZE 512
// device
template<typename T>
__device__ T shfl_down(T a, int b, int c = WARP_SIZE) {
#if __CUDA_ARCH__ >= 300
return __shfl_down(a, b, c);
#else
return 0;
#endif
}
__device__
__constant__ float kDevEps = (float) PRECISION_EPS;
// global
class UnaryOp {
public:
class Identity {
public:
__device__ inline float operator()(const float a) const {
return a;
}
};
class Validate {
public:
__device__ inline float operator()(const float a) const {
if (-kDevEps < a && a < kDevEps) return 0;
return a;
}
};
class Sign {
public:
__device__ inline float operator()(const float a) const {
return (a > kDevEps) - (a < -kDevEps);
}
};
class Sqrt {
public:
__device__ inline float operator()(const float a) const {
return sqrtf(a);
}
};
class Log {
public:
__device__ inline float operator()(const float a) const {
return __logf(a);
}
};
class Exp {
public:
__device__ inline float operator()(const float a) const {
return __expf(a);
}
};
class Sigmoid {
public:
__device__ inline float operator()(const float a) const {
return __fdividef(1.0f, 1.0f + __expf(-a));
}
};
class Scalar {
private:
const float scalar;
public:
Scalar(const float _scalar) : scalar(_scalar) {
}
__device__ inline float operator()(const float a) const {
return scalar;
}
};
class AddScalar {
private:
const float scalar;
public:
AddScalar(const float _scalar) : scalar(_scalar) {
}
__device__ inline float operator()(const float a) const {
return a + scalar;
}
};
class MultByScalar {
private:
const float scalar;
public:
MultByScalar(const float _scalar) : scalar(_scalar) {
}
__device__ inline float operator()(const float a) const {
return a * scalar;
}
};
class DivByScalar {
private:
const float scalar;
public:
DivByScalar(const float _scalar) : scalar(_scalar) {
}
__device__ inline float operator()(const float a) const {
return __fdividef(a, scalar);
}
};
class BiggerThanScalar {
private:
const float scalar;
public:
BiggerThanScalar(const float _scalar) : scalar(_scalar) {
}
__device__ inline float operator()(const float a) const {
return a > scalar;
}
};
};
class BinaryOp {
public:
class Second {
public:
__device__ inline float operator()(const float a, const float b) const {
return b;
}
};
class Add {
public:
__device__ inline float operator()(const float a, const float b) const {
return a + b;
}
};
class Subtract {
public:
__device__ inline float operator()(const float a, const float b) const {
return a - b;
}
};
class Multiply {
public:
__device__ inline float operator()(const float a, const float b) const {
return a * b;
}
};
class Divide {
public:
__device__ inline float operator()(const float a, const float b) const {
return __fdividef(a, b);
}
};
class SigmDer {
public:
__device__ inline float operator()(const float a, const float b) const {
return a * b * (1 - b);
}
};
};
class Aggs {
public:
class Sum {
public:
__device__ inline float operator()(const float a, const float b) const {
return a + b;
}
__device__ inline float getBaseValue() {
return 0;
}
};
class Max {
public:
__device__ inline float operator()(const float a, const float b) const {
return a > b ? a : b;
}
__device__ inline float getBaseValue() {
return -2e38;
}
};
};
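/*
 * A hypothetical new element-wise operation (not part of the original file)
 * would follow the same functor pattern, e.g. a clip-at-zero op nested inside
 * UnaryOp:
 *
 * class ReLU {
 * public:
 * __device__ inline float operator()(const float a) const {
 * return a > 0.0f ? a : 0.0f;
 * }
 * };
 *
 * and would then be dispatched through _applyUnaryOp(mat, UnaryOp::ReLU()).
 */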
/* ------- Unary operations ------- */
template<class Op>
__global__ void kEltwiseUnaryOpFlat(const float* a, float* const dest, int numElements, Op op) {
const int idxX = blockIdx.x * ELTWISE_FLAT_THREADS_X + threadIdx.x;
for (int x = idxX; x < numElements; x += gridDim.x * ELTWISE_FLAT_THREADS_X) {
dest[x] = op(a[x]);
}
}
template <class Op>
void _applyUnaryOp(MatGPU &mat, Op op) {
if (mat.empty()) return;
mexAssert(mat.stride_ == 1, "In _applyUnaryOp stride_ should be 1");
int _numElements = (int) (mat.size1_ * mat.size2_);
cudaStream_t stream = MatGPU::_defaultStream;
dim3 threads = dim3(ELTWISE_FLAT_THREADS_X);
dim3 blocks = dim3(std::min(128, DIVUP(_numElements, ELTWISE_FLAT_THREADS_X)));
kEltwiseUnaryOpFlat<Op><<<blocks, threads, 0, stream>>>(mat.data_, mat.data_, _numElements, op);
mexAssert(cudaGetLastError() == cudaSuccess, "kEltwiseUnaryOpFlat: kernel execution failed");
}
void cuda_validate(MatGPU &mat) {
_applyUnaryOp(mat, UnaryOp::Validate());
}
void cuda_sign(MatGPU &mat) {
_applyUnaryOp(mat, UnaryOp::Sign());
}
void cuda_sqrt(MatGPU &mat) {
_applyUnaryOp(mat, UnaryOp::Sqrt());
}
void cuda_log(MatGPU &mat) {
_applyUnaryOp(mat, UnaryOp::Log());
}
void cuda_exp(MatGPU &mat) {
_applyUnaryOp(mat, UnaryOp::Exp());
}
void cuda_sigmoid(MatGPU &mat) {
_applyUnaryOp(mat, UnaryOp::Sigmoid());
}
/* ------- Unary operations with scalars ------- */
void cuda_assval(MatGPU &mat, float val) {
_applyUnaryOp(mat, UnaryOp::Scalar(val));
}
void cuda_addval(MatGPU &mat, float val) {
_applyUnaryOp(mat, UnaryOp::AddScalar(val));
}
void cuda_subval(MatGPU &mat, float val) {
_applyUnaryOp(mat, UnaryOp::AddScalar(-val));
}
void cuda_multval(MatGPU &mat, float val) {
_applyUnaryOp(mat, UnaryOp::MultByScalar(val));
}
void cuda_divval(MatGPU &mat, float val) {
_applyUnaryOp(mat, UnaryOp::DivByScalar(val));
}
/* ------- Binary operations ------- */
template<class Op>
__global__ void kEltwiseBinaryOpFlat(const float* a, const float* b, float* const dest, int numElements, Op op) {
const int idxX = blockIdx.x * ELTWISE_FLAT_THREADS_X + threadIdx.x;
for (int x = idxX; x < numElements; x += gridDim.x * ELTWISE_FLAT_THREADS_X) {
dest[x] = op(a[x], b[x]);
}
}
template <class Op>
void _applyBinaryOp(MatGPU& mat, const MatGPU& b, Op op) {
if (mat.empty()) return;
mexAssert(mat.stride_ == 1 && b.stride_ == 1, "In _applyBinaryOp strides should be 1");
mexAssert(mat.order_ == b.order_, "In _applyBinaryOp orders should be the same");
mexAssert(mat.size1_ == b.size1_ && mat.size2_ == b.size2_,
"In _applyBinaryOp the sizes of matrices do not correspond");
int _numElements = (int) (mat.size1_ * mat.size2_);
cudaStream_t stream = MatGPU::_defaultStream;
dim3 threads = dim3(ELTWISE_FLAT_THREADS_X);
dim3 blocks = dim3(std::min(128, DIVUP(_numElements, ELTWISE_FLAT_THREADS_X)));
kEltwiseBinaryOpFlat<Op><<<blocks, threads, 0, stream>>>(mat.data_, b.data_, mat.data_, _numElements, op);
mexAssert(cudaGetLastError() == cudaSuccess, "kEltwiseBinaryOpFlat: kernel execution failed");
}
void cuda_addmat(MatGPU &mat, const MatGPU &b) {
_applyBinaryOp(mat, b, BinaryOp::Add());
}
void cuda_submat(MatGPU &mat, const MatGPU &b) {
_applyBinaryOp(mat, b, BinaryOp::Subtract());
}
void cuda_multmat(MatGPU &mat, const MatGPU &b) {
_applyBinaryOp(mat, b, BinaryOp::Multiply());
}
void cuda_divmat(MatGPU &mat, const MatGPU &b) {
_applyBinaryOp(mat, b, BinaryOp::Divide());
}
void cuda_sigmder(MatGPU &mat, const MatGPU &b) {
_applyBinaryOp(mat, b, BinaryOp::SigmDer());
}
/* ------- Conditional operations ------- */
template<class CondOp, class Op>
__global__ void kEltwiseCondOpFlat(const float* a, const float* condmat, bool incase,
float* const dest, int numElements, CondOp condOp, Op op) {
const int idxX = blockIdx.x * ELTWISE_FLAT_THREADS_X + threadIdx.x;
if (incase) {
for (int x = idxX; x < numElements; x += gridDim.x * ELTWISE_FLAT_THREADS_X) {
if (condOp(condmat[x])) {
dest[x] = op(a[x]);
}
}
} else {
for (int x = idxX; x < numElements; x += gridDim.x * ELTWISE_FLAT_THREADS_X) {
if (!condOp(condmat[x])) {
dest[x] = op(a[x]);
}
}
}
}
template <class CondOp, class Op>
void _applyCondOp(MatGPU& mat, const MatGPU& condmat, bool incase, CondOp condOp, Op op) {
if (mat.empty()) return;
mexAssert(mat.stride_ == 1 && condmat.stride_ == 1, "In _applyCondOp strides should be 1");
mexAssert(mat.order_ == condmat.order_, "In _applyCondOp orders should be the same");
mexAssert(mat.size1_ == condmat.size1_ && mat.size2_ == condmat.size2_,
"In _applyCondOp the sizes of matrices do not correspond");
int _numElements = (int) (mat.size1_ * mat.size2_);
cudaStream_t stream = MatGPU::_defaultStream;
dim3 threads = dim3(ELTWISE_FLAT_THREADS_X);
dim3 blocks = dim3(std::min(128, DIVUP(_numElements, ELTWISE_FLAT_THREADS_X)));
kEltwiseCondOpFlat<CondOp, Op><<<blocks, threads, 0, stream>>>
(mat.data_, condmat.data_, incase, mat.data_, _numElements, condOp, op);
mexAssert(cudaGetLastError() == cudaSuccess, "kEltwiseCondOpFlat: kernel execution failed");
}
void cuda_condassign(MatGPU& mat, const MatGPU& condmat, bool incase, float threshold, float val) {
_applyCondOp(mat, condmat, incase, UnaryOp::BiggerThanScalar(threshold), UnaryOp::Scalar(val));
}
void cuda_condadd(MatGPU& mat, const MatGPU& condmat, bool incase, float threshold, float val) {
_applyCondOp(mat, condmat, incase, UnaryOp::BiggerThanScalar(threshold), UnaryOp::AddScalar(val));
}
void cuda_condmult(MatGPU& mat, const MatGPU& condmat, bool incase, float threshold, float val) {
_applyCondOp(mat, condmat, incase, UnaryOp::BiggerThanScalar(threshold), UnaryOp::MultByScalar(val));
}
/* ------- Softmax derivatives ------- */
__global__ void kSoftmaxGrad(float* dE_dy_l, float* y_l, float* dE_dx_l, int numCases, int numOut) {
const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const int tidx = ty * numCases + tx;
if (ty < numOut && tx < numCases) {
float v = 0;
for (int j = 0; j < numOut; j++) {
v += dE_dy_l[j * numCases + tx] * ((j == ty) - y_l[j * numCases + tx]);
}
v *= y_l[tidx];
dE_dx_l[tidx] = v;
}
}
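/*
 * For output unit ty and sample tx, the loop above evaluates
 * dE/dx_ty = y_ty * sum_j dE/dy_j * (delta_{j,ty} - y_j),
 * i.e. the softmax Jacobian applied to the incoming gradient.
 */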
void computeSoftmaxGrad(const MatGPU& acts, const MatGPU& actsGrad, MatGPU& target) {
int numCases = (int) acts.size1_;
int numOut = (int) acts.size2_;
mexAssert(acts.stride_ == 1 && actsGrad.stride_ == 1 && target.stride_ == 1, "csg2");
mexAssert(acts.size1_ == actsGrad.size1_ && acts.size2_ == actsGrad.size2_, "csg1");
mexAssert(acts.size1_ == target.size1_ && acts.size2_ == target.size2_, "csg3");
cudaStream_t stream = MatGPU::_defaultStream;
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y));
kSoftmaxGrad<<<blocks, threads, 0, stream>>>(actsGrad.data_, acts.data_, target.data_, numCases, numOut);
mexAssert(cudaGetLastError() == cudaSuccess, "computeSoftmaxGrad: kernel execution failed");
}
/* ------- Transposition ------- */
/*
* dest here is assumed to be "not transposed" -- height and width correspond to it.
*/
template<class Op, bool checkBounds>
__global__ void kEltwiseUnaryOpTrans(const float* a, float* const dest,
int height, int width, int strideA, int strideDest, Op op) {
__shared__ float shmem[ELTWISE_THREADS_X][ELTWISE_THREADS_X + 1];
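// The +1 padding on the second shared-memory dimension staggers rows across
// banks so the transposed (column-wise) accesses below avoid bank conflicts.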
for (int by = ELTWISE_THREADS_X * blockIdx.y; by < height; by += ELTWISE_THREADS_X * gridDim.y) {
for (int bx = ELTWISE_THREADS_X * blockIdx.x; bx < width; bx += ELTWISE_THREADS_X * gridDim.x) {
const int readX = by + threadIdx.x;
const int readY = bx + threadIdx.y;
for (int y = 0; y < ELTWISE_THREADS_X; y+= ELTWISE_THREADS_Y) {
if (!checkBounds || (readX < height && readY + y < width)) {
shmem[threadIdx.x][threadIdx.y + y] = op(a[(readY + y) * strideA + readX]);
}
}
__syncthreads();
const int writeX = bx + threadIdx.x;
const int writeY = by + threadIdx.y;
for (int y = 0; y < ELTWISE_THREADS_X; y+= ELTWISE_THREADS_Y) {
if(!checkBounds || (writeX < width && writeY + y < height)) {
dest[(writeY + y) * strideDest + writeX] = shmem[threadIdx.y + y][threadIdx.x];
}
}
__syncthreads();
}
}
}
void cuda_trans(const MatGPU &mat, MatGPU &target) {
mexAssert(mat.order_ == target.order_, "In cuda_trans orders should be the same");
mexAssert(mat.size1_ == target.size2_ && mat.size2_ == target.size1_,
"In cuda_trans sizes does not correspond to each other");
int width = (int) target.size1_;
int height = (int) target.size2_;
int mat_stride = (int) mat.size1_;
int target_stride = (int) target.size1_;
if (mat.order_ == true) {
mat_stride = (int) mat.size2_;
target_stride = (int) target.size2_;
}
cudaStream_t stream = MatGPU::_defaultStream;
dim3 blocks(std::min(NUM_BLOCKS_MAX, DIVUP(width, ELTWISE_THREADS_X)),
std::min(NUM_BLOCKS_MAX, DIVUP(height, ELTWISE_THREADS_Y)));
dim3 threads(ELTWISE_THREADS_X, ELTWISE_THREADS_Y);
bool checkBounds = !(width % ELTWISE_THREADS_X == 0 && height % ELTWISE_THREADS_X == 0);
if (checkBounds) {
kEltwiseUnaryOpTrans<UnaryOp::Identity, true><<<blocks, threads, 0, stream>>>
(mat.data_, target.data_, height, width, mat_stride, target_stride, UnaryOp::Identity());
} else {
kEltwiseUnaryOpTrans<UnaryOp::Identity, false><<<blocks, threads, 0, stream>>>
(mat.data_, target.data_, height, width, mat_stride, target_stride, UnaryOp::Identity());
}
mexAssert(cudaGetLastError() == cudaSuccess, "kEltwiseUnaryOpTrans: kernel execution failed");
}
/* ------- Matrix <-> Vector operations ------- */
/*
* Matrix in ROW-MAJOR order!
*/
template <class Op>
__global__ void kRowVectorOp(const float* mat, const float* vec, float* const tgtMat,
int width, int height, int matStride, int tgtStride, Op op) {
__shared__ float shVec[ADD_VEC_THREADS_X];
const int bx = ADD_VEC_THREADS_X * blockIdx.x;
const int by = ADD_VEC_THREADS_Y * blockIdx.y;
for (int x = bx; x < width; x += gridDim.x * ADD_VEC_THREADS_X) {
__syncthreads();
if (x + threadIdx.x < width && threadIdx.y == 0) {
shVec[threadIdx.x] = vec[x + threadIdx.x];
}
__syncthreads();
if (x + threadIdx.x < width) {
for (int y = by + threadIdx.y; y < height; y += gridDim.y * ADD_VEC_THREADS_Y) {
tgtMat[y * tgtStride + x + threadIdx.x] = op(mat[y * matStride + x + threadIdx.x], shVec[threadIdx.x]);
}
}
}
}
/*
* Matrix in ROW-MAJOR order!
*/
template <class Op>
__global__ void kColVectorOp(float* mat, float* vec, float* tgtMat,
int width, int height, int matStride, int tgtStride, Op op) {
__shared__ float shVec[ADD_VEC_THREADS_Y];
const int by = ADD_VEC_THREADS_Y * blockIdx.y;
const int bx = ADD_VEC_THREADS_X * blockIdx.x;
const int tidx = ADD_VEC_THREADS_X * threadIdx.y + threadIdx.x;
mat += threadIdx.y * matStride;
vec += tidx;
tgtMat += threadIdx.y * tgtStride;
for (int y = by; y < height; y += gridDim.y * ADD_VEC_THREADS_Y) {
__syncthreads();
if (y + tidx < height && tidx < ADD_VEC_THREADS_Y) {
shVec[tidx] = vec[y];
}
__syncthreads();
if (y + threadIdx.y < height) {
for (int x = bx + threadIdx.x; x < width; x += gridDim.x * ADD_VEC_THREADS_X) {
tgtMat[(y) * tgtStride + x] = op(mat[(y) * matStride + x], shVec[threadIdx.y]);
}
}
}
}
template <class Op>
void _applyBinaryV(MatGPU &mat, const MatGPU &vect, const MatGPU &mask, size_t dim, Op op) {
mexAssert(mat.data_ != vect.data_, "av1");
mexAssert(mat.stride_ == 1 && vect.stride_ == 1, "av2");
if (!mask.empty()) {
mexAssert(false, "Mask for _applyBinaryV is not implemented yet");
mexAssert(mask.stride_ == 1, "av3");
mexAssert(mat.size1_ == mask.size1_ && mat.size2_ == mask.size2_,
"In '_applyBinaryV' the size of mask matrix is incorrect");
}
int width = (int) mat.size1_;
int height = (int) mat.size2_;
dim3 threads(ADD_VEC_THREADS_X, ADD_VEC_THREADS_Y);
cudaStream_t stream = MatGPU::_defaultStream;
if (dim == 1) {
mexAssert(vect.size1_ == 1 && vect.size2_ == mat.size2_, "In '_applyBinaryV' the sizes don't correspond");
if (mask.empty()) {
dim3 blocks(std::min(512, DIVUP(width, ADD_VEC_THREADS_X)), std::min(NUM_BLOCKS_MAX, DIVUP(height, ADD_VEC_THREADS_Y)));
kColVectorOp<Op><<<blocks, threads, 0, stream>>>(mat.data_, vect.data_, mat.data_, width, height, width, width, op);
} else {
mexAssert(false, "not implemented yet");
}
}
else if (dim == 2) {
/* actually not used, but let it be here just in case */
mexAssert(vect.size1_ == mat.size1_ && vect.size2_ == 1, "In '_applyBinaryV' the sizes don't correspond");
if (mask.empty()) {
dim3 blocks(std::min(NUM_BLOCKS_MAX, DIVUP(width, ADD_VEC_THREADS_X)), std::min(NUM_BLOCKS_MAX, DIVUP(height, ADD_VEC_THREADS_Y)));
kRowVectorOp<Op><<<blocks, threads, 0, stream>>>(mat.data_, vect.data_, mat.data_, width, height, width, width, op);
} else {
}
} else {
mexAssert(false, "_applyBinaryV the dimension parameter must be either 1 or 2");
}
mexAssert(cudaGetLastError() == cudaSuccess, "_applyBinaryV: kernel execution failed");
}
void cuda_addvect(MatGPU &mat, const MatGPU &vect, const MatGPU &mask, size_t dim) {
_applyBinaryV(mat, vect, mask, dim, BinaryOp::Add());
}
void cuda_multvect(MatGPU &mat, const MatGPU &vect, const MatGPU &mask, size_t dim) {
_applyBinaryV(mat, vect, mask, dim, BinaryOp::Multiply());
}
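/*
 * Note on _applyBinaryV above: dim == 1 expects a vector with size1_ == 1 and
 * size2_ equal to the matrix's size2_, dim == 2 expects size1_ == mat.size1_
 * and size2_ == 1, and in both cases a non-empty mask is asserted out as not
 * implemented yet.
 */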
/*
* To be used when the rows are <= 64.
*
* TODO: try to reduce reg usage. i think this can be made faster too.
*/
//#define AGG_SHORT_ROWS_LOOPS_X 4
template <class Agg, class UnaryOp, class BinaryOp, int LOOPS_X, int THREADS_X>
__global__ void kAggShortRows(const float* mat, float* matSum, int width, int height, Agg agg, UnaryOp uop, BinaryOp bop) {
const int shmemX = THREADS_X + 1;
__shared__ float shmem[AGG_SHORT_ROWS_THREADS_Y*shmemX];
const int tidx = threadIdx.y * THREADS_X + threadIdx.x;
const int ty = LOOPS_X == 1 ? tidx / width : threadIdx.y; // when loops==1, width is gonna be smaller than block x dim
const int tx = LOOPS_X == 1 ? tidx % width : threadIdx.x;
const int bidx = blockIdx.y * gridDim.x + blockIdx.x;
const int blockRowIdx = bidx * AGG_SHORT_ROWS_LOOPS_Y * AGG_SHORT_ROWS_THREADS_Y;
float* shmemWrite = shmem + MUL24(ty, shmemX) + tx;
matSum += blockRowIdx + tidx;
// shmem[MUL24(threadIdx.y, shmemX) + threadIdx.x] = 0;
mat += width * blockRowIdx + MUL24(ty, width) + tx;
float* shmemWriteZeros = &shmem[MUL24(threadIdx.y,shmemX) + threadIdx.x];
bool doAgg = tidx < AGG_SHORT_ROWS_THREADS_Y ;
if (blockRowIdx < height) {
#pragma unroll
for (int y = 0; y < AGG_SHORT_ROWS_LOOPS_Y*AGG_SHORT_ROWS_THREADS_Y; y += AGG_SHORT_ROWS_THREADS_Y) {
doAgg &= tidx + y + blockRowIdx < height;
const bool heightIdxOK = ty < AGG_SHORT_ROWS_THREADS_Y && ty + y + blockRowIdx < height;
shmemWriteZeros[0] = agg.getBaseValue();
__syncthreads();
#pragma unroll
for(int x = 0; x < LOOPS_X * THREADS_X; x+= THREADS_X) {
// __syncthreads();
if (heightIdxOK && x + tx < width) {
shmemWrite[0] = agg(uop(mat[x]), shmemWrite[0]);
}
}
__syncthreads();
if (doAgg) {
/*
* I tried doing this final sum as a 4-step reduction, with 8 threads
* per warp participating. It was slightly slower.
*/
float accum = agg.getBaseValue();
float* shmemRead = shmem + MUL24(tidx, shmemX);
// this loops too much if the rows are really short :(
#pragma unroll
for (int i = 0; i < THREADS_X; i++) {
accum = agg(accum, shmemRead[0]);
shmemRead++;
}
matSum[0] = bop(matSum[0], accum);
matSum += AGG_SHORT_ROWS_THREADS_Y;
}
__syncthreads();
mat += width * AGG_SHORT_ROWS_THREADS_Y;
}
}
}
template <class Agg, class UnaryOp, class BinaryOp>
__global__ void kAggShortRows2(const float* mat, float* matSum, int width, int height, Agg agg, UnaryOp uop, BinaryOp bop) {
const int shmemX = AGG_SHORT_ROWS_THREADS_X + 1;
__shared__ float shmem[AGG_SHORT_ROWS_THREADS_Y*shmemX];
const int LOOPS_X = DIVUP(width, AGG_SHORT_ROWS_THREADS_X);
const int tidx = threadIdx.y * AGG_SHORT_ROWS_THREADS_X + threadIdx.x;
const int bidx = blockIdx.y * gridDim.x + blockIdx.x;
const int blockRowIdx = bidx * AGG_SHORT_ROWS_LOOPS_Y * AGG_SHORT_ROWS_THREADS_Y;
float* shmemWrite = shmem + MUL24(threadIdx.y, shmemX) + threadIdx.x;
matSum += blockRowIdx + tidx;
// shmem[MUL24(threadIdx.y, shmemX) + threadIdx.x] = 0;
mat += width * blockRowIdx + MUL24(threadIdx.y, width) + threadIdx.x;
bool doAgg = tidx < AGG_SHORT_ROWS_THREADS_Y;
if(blockRowIdx < height) {
for (int y = 0; y < AGG_SHORT_ROWS_LOOPS_Y*AGG_SHORT_ROWS_THREADS_Y; y += AGG_SHORT_ROWS_THREADS_Y) {
doAgg &= tidx + y + blockRowIdx < height;
const bool heightIdxOK = threadIdx.y + y + blockRowIdx < height;
float accum = agg.getBaseValue();
shmemWrite[0] = agg.getBaseValue();
for(int x = 0; x < LOOPS_X * AGG_SHORT_ROWS_THREADS_X; x+= AGG_SHORT_ROWS_THREADS_X) {
// __syncthreads();
if (heightIdxOK && x + threadIdx.x < width) {
shmemWrite[0] = agg(uop(mat[x]), shmemWrite[0]);
}
}
__syncthreads();
if (doAgg) {
float* shmemRead = shmem + MUL24(tidx, shmemX);
#pragma unroll
for (int i = 0; i < AGG_SHORT_ROWS_THREADS_X; i++) {
accum = agg(accum, shmemRead[0]);
shmemRead++;
}
matSum[0] = bop(matSum[0], accum);
matSum += AGG_SHORT_ROWS_THREADS_Y;
}
__syncthreads();
mat += width * AGG_SHORT_ROWS_THREADS_Y;
}
}
}
/*
* Implements multiscan idea from http://www.moderngpu.com
* Not really useful for pure reductions but neat nonetheless.
*/
template<class Agg, class UnaryOp, class BinaryOp>
__global__ void kAggRows_wholerow_nosync(const float* mat, float* matSum, int width, int height,
Agg agg, UnaryOp uop, BinaryOp bop) {
const int tidx = threadIdx.x;
const int warpIdx = tidx / WARP_SIZE;
const int lane = tidx % WARP_SIZE;
__shared__ float accum[(WARP_SIZE + 1) * AWR_NUM_WARPS];
__shared__ float finalAccum[AWR_NUM_WARPS];
float* myAccum = &accum[warpIdx * (WARP_SIZE + 1) + lane];
float* myFinalAccum = &finalAccum[tidx];
//volatile float* vMyAccum = &accum[warpIdx * (WARP_SIZE + 1) + lane];
matSum += blockIdx.y;
mat += width * blockIdx.y;
float rAccum = agg.getBaseValue(); // cache in register, a bit faster than shmem
#pragma unroll 32
for (int x = tidx; x < width; x += AWR_NUM_THREADS) {
rAccum = agg(rAccum, uop(mat[x]));
}
myAccum[0] = rAccum;
// Each warp does a reduction that doesn't require synchronization
#pragma unroll
for (int i = 0; i < LOG_WARP_SIZE; i++) {
const int d = 1 << i;
myAccum[0] = agg(myAccum[0], shfl_down(myAccum[0], d));
}
__syncthreads();
// The warps write their results
if (tidx < AWR_NUM_WARPS) {
//volatile float* vMyFinalAccum = &finalAccum[tidx];
myFinalAccum[0] = accum[tidx * (WARP_SIZE + 1)];
#pragma unroll
for (int i = 0; i < AWR_LOG_NUM_WARPS; i++) {
const int d = 1 << i;
myFinalAccum[0] = agg(myFinalAccum[0], shfl_down(myFinalAccum[0], d));
}
if (tidx == 0) {
matSum[0] = bop(matSum[0], myFinalAccum[0]);
matSum += gridDim.y;
}
}
}
/*
* This one gets coalesced reads but computes only a partial sum which
* must either be summed again (recursively) or summed on the host.
*/
template<class Agg, class UnaryOp, class BinaryOp, int blockSize>
__global__ void kAggRows(const float* mat, float* matSum, int width, int height, int sumWidth, Agg agg, UnaryOp uop, BinaryOp bop) {
const int idxX = blockIdx.x * blockSize*2 + threadIdx.x;
__shared__ float accum[blockSize*2];
matSum += blockIdx.y * sumWidth + blockIdx.x;
/*
* Here it's important to make sure that all threads in a block call __syncthreads,
* so I have even the redundant threads (for which idxX >= width) enter this loop
* just so that they may call __syncthreads at the appropriate times.
*/
mat += width * blockIdx.y + idxX;
accum[threadIdx.x] = agg.getBaseValue();
accum[threadIdx.x + blockSize] = agg.getBaseValue();
for (int idxY = blockIdx.y; idxY < height; idxY += gridDim.y) {
if (idxX < width) {
accum[threadIdx.x] = uop(mat[0]);
if(idxX + blockSize < width)
accum[threadIdx.x + blockSize] = uop(mat[blockSize]);
}
if (blockSize >= 512) {
__syncthreads();
if (threadIdx.x < 512)
accum[threadIdx.x] = agg(accum[threadIdx.x], accum[threadIdx.x + 512]);
}
if (blockSize >= 256) {
__syncthreads();
if (threadIdx.x < 256)
accum[threadIdx.x] = agg(accum[threadIdx.x],accum[threadIdx.x + 256]);
}
if (blockSize >= 128) {
__syncthreads();
if (threadIdx.x < 128)
accum[threadIdx.x] = agg(accum[threadIdx.x],accum[threadIdx.x + 128]);
}
if (blockSize >= 64) {
__syncthreads();
if (threadIdx.x < 64)
accum[threadIdx.x] = agg(accum[threadIdx.x],accum[threadIdx.x + 64]);
}
__syncthreads();
volatile float* myAccum = &accum[threadIdx.x];
if (threadIdx.x < 32) { // executed only by first warp
myAccum[0] = agg(myAccum[0], myAccum[32]);
myAccum[0] = agg(myAccum[0], myAccum[16]);
myAccum[0] = agg(myAccum[0], myAccum[8]);
myAccum[0] = agg(myAccum[0], myAccum[4]);
myAccum[0] = agg(myAccum[0], myAccum[2]);
myAccum[0] = agg(myAccum[0], myAccum[1]);
}
if (threadIdx.x == 0) {
matSum[0] = bop(matSum[0], myAccum[0]);
matSum += gridDim.y * sumWidth;
}
__syncthreads();
mat += width * gridDim.y;
}
}
/*
* Bad when there are few columns.
*/
template <class Agg, class UnaryOp, class BinaryOp>
__global__ void kDumbAggCols(cudaTextureObject_t mat, float* const vec, int width, int height, Agg agg, UnaryOp uop, BinaryOp bop) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < width) {
float mx = agg.getBaseValue();
for (int j = 0; j < height; j++) {
mx = agg(uop(tex1Dfetch<float>(mat, width * j + idx)), mx);
}
vec[idx] = bop(vec[idx], mx);
}
}
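/*
 * kDumbAggCols is "bad when there are few columns" because each thread walks
 * an entire column serially, so a narrow matrix keeps only a handful of
 * threads busy. kAggCols below splits every column into sumLength-row chunks
 * across blockIdx.y to recover parallelism, at the cost of emitting partial
 * sums that still need a second pass.
 */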
/*
* Better with few columns because it only computes a partial sum.
*/
template <class Agg, class UnaryOp>
__global__ void kAggCols(cudaTextureObject_t mat, float* const vec, int width, int height, int sumLength, Agg agg, UnaryOp op) {
const int idxX = blockIdx.x * blockDim.x + threadIdx.x;
const int idxY = blockIdx.y * sumLength;
if (idxX < width) {
float mx = agg.getBaseValue();
for (int j = idxY; j < min(height,idxY + sumLength); j++) {
mx = agg(op(tex1Dfetch<float>(mat, j * width + idxX)), mx);
}
vec[blockIdx.y * width + idxX] = mx;
}
}
/*
* TODO: this is a mess, fix it. it works pretty fast but it's too ugly.
* TODO: this function is _really_ bad for very long aggregations of few columns.
*/
template<class Agg, class UOp, class BOp>
void _aggregate(MatGPU &mat, MatGPU& target, Agg agg, UOp uop, BOp bop, int axis) {
mexAssert(axis == 0 || axis == 1, "ag1");
mexAssert(mat.stride_ == 1 && target.stride_ == 1, "ag2");
mexAssert(mat.data_ != target.data_, "ag3");
mexAssert(!mat.empty(), "ag4");
int width = (int) mat.size1_;
int height = (int) mat.size2_;
cudaStream_t stream = MatGPU::_defaultStream;
if (axis == 0 ) { //sum along size2_
mexAssert(target.size1_ == mat.size1_ && target.size2_ == 1, "ag5");
if ((height <= 2048 || width >= 4096)) {
int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK);
mexAssert(numBlocks * NUM_SUM_COLS_THREADS_PER_BLOCK >= width, "ag6");
mexAssert(numBlocks < NUM_BLOCKS_MAX, "ag7");
kDumbAggCols<Agg, UOp, BOp><<<numBlocks,NUM_SUM_COLS_THREADS_PER_BLOCK, 0, stream>>>(mat.getTextureObject(), target.data_, width, height, agg, uop, bop);
mexAssert(cudaGetLastError() == cudaSuccess, "kDumbAggCols: kernel execution failed");
} else { // Specialize the case when we have very long columns and few of them
const int sumLength = 128;
MatGPU tmp(width, DIVUP(height, sumLength));
int numBlocksX = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK);
int numBlocksY = DIVUP(height, sumLength);
dim3 blocks(numBlocksX, numBlocksY);
dim3 threads(NUM_SUM_COLS_THREADS_PER_BLOCK);
kAggCols<Agg, UOp><<<blocks,threads, 0, stream>>>(mat.getTextureObject(), tmp.data_, width, height, sumLength, agg, uop);
mexAssert(cudaGetLastError() == cudaSuccess, "kAggCols: kernel execution failed");
int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK);
kDumbAggCols<Agg, UOp, BOp><<<numBlocks,NUM_SUM_COLS_THREADS_PER_BLOCK, 0, stream>>>(tmp.getTextureObject(), target.data_, width, height, agg, uop, bop);
mexAssert(cudaGetLastError() == cudaSuccess, "kDumbAggCols: kernel execution failed");
}
} else { // sum along size1_
mexAssert(target.size1_ == 1 && target.size2_ == mat.size2_, "ag8");
if (width > 1) {
if (height >= 16384) { // linear aggregation
int numBlocksX = 1;
int numBlocksY = DIVUP(height, AGG_SHORT_ROWS_THREADS_Y*AGG_SHORT_ROWS_LOOPS_Y);
int numThreadsX = width <= 4 ? 4 : width <= 8 ? 8 : width <= 12 ? 12 : width <= 16 ? 16 : AGG_SHORT_ROWS_THREADS_X;
int numThreadsY = AGG_SHORT_ROWS_THREADS_Y;
while (numBlocksY > NUM_BLOCKS_MAX) {
numBlocksY = DIVUP(numBlocksY, 2);
numBlocksX *= 2;
}
dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
if(width <= 16) {
if(width <= 4) {
kAggShortRows<Agg, UOp, BOp, 1, 4><<<grid, threads, 0, stream>>>(mat.data_, target.data_, width, height, agg, uop, bop);
} else if(width <= 8) {
kAggShortRows<Agg, UOp, BOp, 1, 8><<<grid, threads, 0, stream>>>(mat.data_, target.data_, width, height, agg, uop, bop);
} else if(width <= 12) {
kAggShortRows<Agg, UOp, BOp, 1, 12><<<grid, threads, 0, stream>>>(mat.data_, target.data_, width, height, agg, uop, bop);
} else {
kAggShortRows<Agg, UOp, BOp, 1, 16><<<grid, threads, 0, stream>>>(mat.data_, target.data_, width, height, agg, uop, bop);
}
} else if(width <= 32) {
kAggShortRows<Agg, UOp, BOp, 2, AGG_SHORT_ROWS_THREADS_X><<<grid, threads, 0, stream>>>(mat.data_, target.data_, width, height, agg, uop, bop);
} else if(width <= 48){
kAggShortRows<Agg, UOp, BOp, 3, AGG_SHORT_ROWS_THREADS_X><<<grid, threads, 0, stream>>>(mat.data_, target.data_, width, height, agg, uop, bop);
} else if(width <= 64){
kAggShortRows<Agg, UOp, BOp, 4, AGG_SHORT_ROWS_THREADS_X><<<grid, threads, 0, stream>>>(mat.data_, target.data_, width, height, agg, uop, bop);
} else {
kAggShortRows2<Agg, UOp, BOp><<<grid, threads, 0, stream>>>(mat.data_, target.data_, width, height, agg, uop, bop);
}
} else {
if (width >= 512) {
// NOTE: this is the only case which I bothered to try to optimize for Kepler
dim3 threads(AWR_NUM_THREADS);
dim3 blocks(1, height);
kAggRows_wholerow_nosync<<<blocks, threads, 0, stream>>>(mat.data_, target.data_, width, height, agg, uop, bop);
} else {
int numThreadsX = width <= 64 ? 32 : (width <= 128 ? 64 : (width <= 256 ? 128 : (width <= 512 ? 256 : 512)));
int numThreadsY = 1;
int numBlocksX = DIVUP(width, 2*numThreadsX);
int numBlocksY = std::min(height, NUM_BLOCKS_MAX);
dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
mexAssert(numBlocksX <= NUM_BLOCKS_MAX, "ag9");
mexAssert(numBlocksY <= NUM_BLOCKS_MAX, "ag10");
if(width <= 64) {
kAggRows<Agg, UOp, BOp, 32><<<grid, threads, 0, stream>>>(mat.data_, target.data_, width, height, (int) target.size1_, agg, uop, bop);
} else if(width <= 128) {
kAggRows<Agg, UOp, BOp, 64><<<grid, threads, 0, stream>>>(mat.data_, target.data_, width, height, (int) target.size1_, agg, uop, bop);
} else if(width <= 256) {
kAggRows<Agg, UOp, BOp, 128><<<grid, threads, 0, stream>>>(mat.data_, target.data_, width, height, (int) target.size1_, agg, uop, bop);
} else if(width <= 512) {
kAggRows<Agg, UOp, BOp, 256><<<grid, threads, 0, stream>>>(mat.data_, target.data_, width, height, (int) target.size1_, agg, uop, bop);
} else {
kAggRows<Agg, UOp, BOp, 512><<<grid, threads, 0, stream>>>(mat.data_, target.data_, width, height, (int) target.size1_, agg, uop, bop);
}
mexAssert(cudaGetLastError() == cudaSuccess, "agg rows: kernel execution failed");
}
}
} else {
mexAssert(false, "fake aggregation, use assignment instead");
//target.applyBinary(NVMatrixBinaryOps::CompositeSecond<UOp, BOp>(uop, bop), *this, target, stream);
}
}
}
void cuda_sumvect(MatGPU &mat, MatGPU &vect, size_t dim) {
int axis = 2 - (int) dim;
_aggregate(mat, vect, Aggs::Sum(), UnaryOp::Identity(), BinaryOp::Second(), axis);
}
void cuda_maxvect(MatGPU &mat, MatGPU &vect, size_t dim) {
int axis = 2 - (int) dim;
_aggregate(mat, vect, Aggs::Max(), UnaryOp::Identity(), BinaryOp::Second(), axis);
}
/* ------- Sergey Demyanov ------- */
/* ------- Image jittering ------- */
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread>
__global__ void kTransform(float* imgs, float* targets, int imgSizeX, int imgSizeY, int outputsX, int outputsY,
int numFilters, int numImages, float *shift_mat, float *scale_mat, float *mirror_mat, float *angle_mat, float defval) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = DIVUP(numFilters, B_Y*filtersPerThread);
const int outputIdxX = blockIdx.x / numImgBlocks;
const int outputIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int myFilterIdx = (blockFilterIdx + threadIdx.y*filtersPerThread);
if (myFilterIdx >= numFilters) {
return;
}
const int outputIdx = outputIdxY * outputsX + outputIdxX;
const int numOutputs = outputsX * outputsY;
const int imgPixels = imgSizeX * imgSizeY;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += myFilterIdx * imgPixels * numImages + imgIdx;
targets += (myFilterIdx * numOutputs + outputIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
const float m1 = (float) imgSizeX / 2 - 0.5;
const float m2 = (float) imgSizeY / 2 - 0.5;
const float n1 = (float) outputsX / 2 - 0.5;
const float n2 = (float) outputsY / 2 - 0.5;
// #pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
const int curImgIdx = imgIdx + i * B_X;
if (curImgIdx < numImages) {
const float angcos = (float) cos(angle_mat[curImgIdx]);
const float angsin = (float) sin(angle_mat[curImgIdx]);
const float xi1 = (outputIdxX - n1) * scale_mat[curImgIdx]; // scale[0];
const float xi2 = (outputIdxY - n2) * scale_mat[curImgIdx + numImages]; //scale[1];
float x1 = xi1 * angcos - xi2 * angsin + m1 + shift_mat[curImgIdx]; //shift[0];
float x2 = xi1 * angsin + xi2 * angcos + m2 + shift_mat[curImgIdx + numImages]; //shift[1];
if (mirror_mat[curImgIdx] > 0.5) x1 = imgSizeX - 1 - x1;
if (mirror_mat[curImgIdx + numImages] > 0.5) x2 = imgSizeY - 1 - x2;
const int xf1 = (int) x1;
const int xf2 = (int) x2;
if (0 <= xf1 && xf1 + 1 < imgSizeX &&
0 <= xf2 && xf2 + 1 < imgSizeY) {
const int imgPx11 = xf2 * imgSizeX + xf1;
const int imgPx21 = xf2 * imgSizeX + xf1 + 1;
const int imgPx12 = (xf2 + 1) * imgSizeX + xf1;
const int imgPx22 = (xf2 + 1) * imgSizeX + xf1 + 1;
for (int f = 0; f < filtersPerThread; f++) {
const int imgInd11 = (f * imgPixels + imgPx11) * numImages + i * B_X;
const int imgInd21 = (f * imgPixels + imgPx21) * numImages + i * B_X;
const int imgInd12 = (f * imgPixels + imgPx12) * numImages + i * B_X;
const int imgInd22 = (f * imgPixels + imgPx22) * numImages + i * B_X;
const float vl = (x1 - xf1) * imgs[imgInd21] + (xf1 + 1 - x1) * imgs[imgInd11];
const float vh = (x1 - xf1) * imgs[imgInd22] + (xf1 + 1 - x1) * imgs[imgInd12];
prod[f][i] = (x2 - xf2) * vh + (xf2 + 1 - x2) * vl;
}
} else {
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = defval;
}
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[f * numOutputs * numImages + i * B_X] = prod[f][i];
}
}
}
}
void _transformActs(const MatGPU &images, MatGPU &target,
size_t imgSize1, size_t imgSize2,
size_t targSize1, size_t targSize2,
const MatGPU &shift_mat, const MatGPU &scale_mat,
const MatGPU &mirror_mat, const MatGPU &angle_mat, float defval) {
int imgSizeX = (int) imgSize1;
int imgSizeY = (int) imgSize2;
int imgPixels = imgSizeX * imgSizeY;
int outputsX = (int) targSize1;
int outputsY = (int) targSize2;
int targPixels = outputsX * outputsY;
int numImages = images.size1_;
mexAssert(images.size2_ % imgPixels == 0, "ta2");
int numFilters = images.size2_ / imgPixels;
mexAssert(target.size1_ == numImages, "ta1");
mexAssert(target.size2_ == targPixels * numFilters, "ta3");
cudaStream_t stream = MatGPU::_defaultStream;
int filtersPerThread = 1;
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numFilters, 4 * filtersPerThread) * outputsY);
if (imgsPerThread == 4) {
kTransform<4, 32, 4, 1><<<blocks, threads, 0, stream>>>
(images.data_, target.data_, imgSizeX, imgSizeY, outputsX, outputsY, numFilters, numImages,
shift_mat.data_, scale_mat.data_, mirror_mat.data_, angle_mat.data_, defval);
} else if (imgsPerThread == 2) {
kTransform<4, 32, 2, 1><<<blocks, threads, 0, stream>>>
(images.data_, target.data_, imgSizeX, imgSizeY, outputsX, outputsY, numFilters, numImages,
shift_mat.data_, scale_mat.data_, mirror_mat.data_, angle_mat.data_, defval);
} else {
kTransform<4, 32, 1, 1><<<blocks, threads, 0, stream>>>
(images.data_, target.data_, imgSizeX, imgSizeY, outputsX, outputsY, numFilters, numImages,
shift_mat.data_, scale_mat.data_, mirror_mat.data_, angle_mat.data_, defval);
}
mexAssert(cudaGetLastError() == cudaSuccess, "_transformActs: kernel execution failed");
}
/* ------- Total matrix aggregation ------- */
/*
* Sergey Demyanov
* This kernel is faster than the one in the code of Alex Krizhevsky, so I'll leave it here
*/
__global__
void _totalSum(const float *a, float* const b, size_t n) {
__shared__ float sdata[DP_BLOCKSIZE];
size_t tid = threadIdx.x;
sdata[tid] = 0;
size_t gridSize = blockDim.x * gridDim.x;
size_t i = blockDim.x * blockIdx.x + tid;
if (i >= n) return;
while (i < n) {
sdata[tid] += a[i];
i += gridSize;
}
__syncthreads();
// do reduction in shared mem
size_t ns = blockDim.x; // number of elements in the shared array
if (ns > n - blockDim.x * blockIdx.x ) {
ns = n - blockDim.x * blockIdx.x;
}
size_t s = blockDim.x; // sum stride
while (s >= ns) s >>= 1;
while (s > 32) {
if (tid < s && tid + s < ns) sdata[tid] += sdata[tid + s];
__syncthreads();
s >>= 1;
}
// for s <= WARP_SIZE no synchronization is needed
if (tid < 32) {
if (tid + 32 < ns) sdata[tid] += sdata[tid + 32];
if (tid + 16 < ns) sdata[tid] += sdata[tid + 16];
if (tid + 8 < ns) sdata[tid] += sdata[tid + 8];
if (tid + 4 < ns) sdata[tid] += sdata[tid + 4];
if (tid + 2 < ns) sdata[tid] += sdata[tid + 2];
if (tid + 1 < ns) sdata[tid] += sdata[tid + 1];
}
// write result for this block to global mem
if (tid == 0) b[blockIdx.x] = sdata[0];
}
float cuda_sum(const MatGPU &mat) {
mexAssert(!mat.empty(), "In cuda_sum mat is empty");
mexAssert(mat.stride_ == 1, "In cuda_sum stride_ should be 1");
cudaStream_t stream = MatGPU::_defaultStream;
size_t numElements = mat.size1_ * mat.size2_;
size_t blocks_number = MIN(DIVUP(numElements, ELTWISE_FLAT_THREADS_X), ELTWISE_FLAT_THREADS_X);
//MatGPU::_sum_buf1.resize(ELTWISE_FLAT_THREADS_X, 1);
//MatGPU::_sum_buf2.resize(1, 1);
MatGPU partsums, totalsum;
MatGPU::swapWithBuffer(partsums, ELTWISE_FLAT_THREADS_X);
partsums.resize(ELTWISE_FLAT_THREADS_X, 1);
MatGPU::swapWithBuffer(totalsum, 1);
totalsum.resize(1, 1);
_totalSum<<<blocks_number, ELTWISE_FLAT_THREADS_X, 0, stream>>>
(mat.data_, partsums.data_, numElements);
_totalSum<<<1, ELTWISE_FLAT_THREADS_X, 0, stream>>>
(partsums.data_, totalsum.data_, blocks_number);
MatCPU cpusum(1, 1);
DeviceToHost(totalsum, cpusum);
MatGPU::swapWithBuffer(partsums, ELTWISE_FLAT_THREADS_X);
MatGPU::swapWithBuffer(totalsum, 1);
return cpusum(0, 0);
}
|
sort_by_key.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
template<typename Iterator1, typename Iterator2, typename Compare, typename Iterator3>
__global__
void sort_by_key_kernel(Iterator1 keys_first, Iterator1 keys_last, Iterator2 values_first, Compare comp, Iterator3 is_supported)
{
#if (__CUDA_ARCH__ >= 200)
*is_supported = true;
thrust::sort_by_key(thrust::seq, keys_first, keys_last, values_first, comp);
#else
*is_supported = false;
#endif
}
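// The __CUDA_ARCH__ guard above reflects that calling Thrust's sequential
// sort from device code requires compute capability 2.0 (sm_20) or newer;
// the is_supported flag lets the host-side test skip verification otherwise.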
template<typename T>
struct TestSortByKeyDeviceSeq
{
void operator()(const size_t n)
{
thrust::host_vector<T> h_keys = unittest::random_integers<T>(n);
thrust::device_vector<T> d_keys = h_keys;
thrust::host_vector<T> h_values = h_keys;
thrust::device_vector<T> d_values = d_keys;
thrust::device_vector<bool> is_supported(1);
hipLaunchKernelGGL(( sort_by_key_kernel), dim3(1),dim3(1), 0, 0, d_keys.begin(), d_keys.end(), d_values.begin(), thrust::less<T>(), is_supported.begin());
if(is_supported[0])
{
thrust::sort_by_key(h_keys.begin(), h_keys.end(), h_values.begin(), thrust::less<T>());
ASSERT_EQUAL(h_keys, d_keys);
ASSERT_EQUAL(h_values, d_values);
}
}
};
VariableUnitTest<
TestSortByKeyDeviceSeq,
unittest::type_list<unittest::int8_t,unittest::int32_t>
> TestSortByKeyDeviceSeqInstance;
void TestSortByKeyCudaStreams()
{
thrust::device_vector<int> keys(10);
thrust::device_vector<int> vals(10);
keys[0] = 9; vals[0] = 9;
keys[1] = 3; vals[1] = 3;
keys[2] = 2; vals[2] = 2;
keys[3] = 0; vals[3] = 0;
keys[4] = 4; vals[4] = 4;
keys[5] = 7; vals[5] = 7;
keys[6] = 8; vals[6] = 8;
keys[7] = 1; vals[7] = 1;
keys[8] = 5; vals[8] = 5;
keys[9] = 6; vals[9] = 6;
hipStream_t s;
hipStreamCreate(&s);
thrust::sort_by_key(thrust::hip::par(s),
keys.begin(), keys.end(),
vals.begin());
hipStreamSynchronize(s);
ASSERT_EQUAL(true, thrust::is_sorted(keys.begin(), keys.end()));
ASSERT_EQUAL(true, thrust::is_sorted(vals.begin(), vals.end()));
hipStreamDestroy(s);
}
DECLARE_UNITTEST(TestSortByKeyCudaStreams);
| sort_by_key.cu | #include <unittest/unittest.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
template<typename Iterator1, typename Iterator2, typename Compare, typename Iterator3>
__global__
void sort_by_key_kernel(Iterator1 keys_first, Iterator1 keys_last, Iterator2 values_first, Compare comp, Iterator3 is_supported)
{
#if (__CUDA_ARCH__ >= 200)
*is_supported = true;
thrust::sort_by_key(thrust::seq, keys_first, keys_last, values_first, comp);
#else
*is_supported = false;
#endif
}
template<typename T>
struct TestSortByKeyDeviceSeq
{
void operator()(const size_t n)
{
thrust::host_vector<T> h_keys = unittest::random_integers<T>(n);
thrust::device_vector<T> d_keys = h_keys;
thrust::host_vector<T> h_values = h_keys;
thrust::device_vector<T> d_values = d_keys;
thrust::device_vector<bool> is_supported(1);
sort_by_key_kernel<<<1,1>>>(d_keys.begin(), d_keys.end(), d_values.begin(), thrust::less<T>(), is_supported.begin());
if(is_supported[0])
{
thrust::sort_by_key(h_keys.begin(), h_keys.end(), h_values.begin(), thrust::less<T>());
ASSERT_EQUAL(h_keys, d_keys);
ASSERT_EQUAL(h_values, d_values);
}
}
};
VariableUnitTest<
TestSortByKeyDeviceSeq,
unittest::type_list<unittest::int8_t,unittest::int32_t>
> TestSortByKeyDeviceSeqInstance;
void TestSortByKeyCudaStreams()
{
thrust::device_vector<int> keys(10);
thrust::device_vector<int> vals(10);
keys[0] = 9; vals[0] = 9;
keys[1] = 3; vals[1] = 3;
keys[2] = 2; vals[2] = 2;
keys[3] = 0; vals[3] = 0;
keys[4] = 4; vals[4] = 4;
keys[5] = 7; vals[5] = 7;
keys[6] = 8; vals[6] = 8;
keys[7] = 1; vals[7] = 1;
keys[8] = 5; vals[8] = 5;
keys[9] = 6; vals[9] = 6;
cudaStream_t s;
cudaStreamCreate(&s);
thrust::sort_by_key(thrust::cuda::par(s),
keys.begin(), keys.end(),
vals.begin());
cudaStreamSynchronize(s);
ASSERT_EQUAL(true, thrust::is_sorted(keys.begin(), keys.end()));
ASSERT_EQUAL(true, thrust::is_sorted(vals.begin(), vals.end()));
cudaStreamDestroy(s);
}
DECLARE_UNITTEST(TestSortByKeyCudaStreams);
|
029e1d0167c020a33dccdc6631ed7ae65a109547.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <chrono>
#include <omp.h>
using namespace std;
static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number)
{
if (err != hipSuccess)
{
fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg, file_name, line_number, hipGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
__global__ void get_histogram(unsigned char* input, float* histogram, int width, int height, int step) {
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int index = yIndex * step + xIndex;
if (xIndex < width && yIndex < height) {
atomicAdd(&histogram[input[index]], 1);
}
}
__global__ void normalize_histogram(float* histogram, float* histogram_normalized, int width, int height) {
unsigned int nxy = threadIdx.x + threadIdx.y * blockDim.x;
if (nxy < 256 && blockIdx.x == 0 && blockIdx.y == 0) {
for (int i = 0; i < nxy; i++) {
histogram_normalized[nxy] += histogram[i];
}
histogram_normalized[nxy] = histogram_normalized[nxy] * 255 / (width * height);
}
}
__global__ void apply_histogram_image(unsigned char* input, unsigned char* output, float* histogram_normalized, int width, int height, int step) {
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int index = yIndex * step + xIndex;
if (xIndex < width && yIndex < height) {
output[index] = histogram_normalized[input[index]];
}
}
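/*
 * CPU reference path: the three OpenMP stages below mirror the GPU kernels
 * above: (1) accumulate a 256-bin intensity histogram, (2) accumulate it into
 * a distribution function scaled to [0, 255], (3) remap every pixel through
 * that lookup table.
 */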
void normalize_cpu(const cv::Mat& input, cv::Mat& output) {
float histogram[256] = {};
float histogram_normalized[256] = {};
int i, j, k, l, m, n;
int size = input.rows * input.cols;
// Get number of processors
int nProcessors = omp_get_max_threads();
std::cout << "CPU processors available: " << nProcessors << std::endl;
// Set number of processors to use with OpenMP
omp_set_num_threads(6);
#pragma omp parallel for private(i, j) shared(input, histogram)
for (i = 0; i < input.rows; i++) {
for (j = 0; j < input.cols; j++) {
// different threads can hit the same intensity bin, so make the increment atomic
#pragma omp atomic
histogram[(int)input.at<uchar>(i, j)]++;
}
}
#pragma omp parallel for private(k, l) shared(size, histogram, histogram_normalized)
for (k = 0; k < 256; k++) {
for (l = 0; l < k; l++) {
histogram_normalized[k] += histogram[l];
}
histogram_normalized[k] = histogram_normalized[k] * 255 / size;
}
#pragma omp parallel for private(m, n) shared(input, output, histogram_normalized)
for (m = 0; m < output.rows; m++) {
for (n = 0; n < output.cols; n++) {
output.at<uchar>(m, n) = histogram_normalized[(int)input.at<uchar>(m, n)];
}
}
}
void normalize_gpu(const cv::Mat& input, cv::Mat& output) {
// Set up device
int dev = 0;
hipDeviceProp_t deviceProp;
SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev), "Error device prop");
printf("Using Device %d: %s\n", dev, deviceProp.name);
SAFE_CALL(hipSetDevice(dev), "Error setting device");
cout << "Input image step: " << input.step << " rows: " << input.rows << " cols: " << input.cols << endl;
// Calculate total number of bytes of input and output image
// Step = cols * number of colors
size_t inputBytes = input.step * input.rows;
size_t outputBytes = output.step * output.rows;
unsigned char *d_input, *d_output;
float * d_histogram = {};
float * d_histogram_normalized = {};
// Allocate device memory
SAFE_CALL(hipMalloc<unsigned char>(&d_input, inputBytes), "CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_output, outputBytes), "CUDA Malloc Failed");
SAFE_CALL(hipMalloc(&d_histogram, 256 * sizeof(float)), "CUDA Malloc Failed");
SAFE_CALL(hipMalloc(&d_histogram_normalized, 256 * sizeof(float)), "CUDA Malloc Failed");
// hipMalloc does not zero device memory, so clear the histogram buffers before the kernels accumulate into them
SAFE_CALL(hipMemset(d_histogram, 0, 256 * sizeof(float)), "CUDA Memset Failed");
SAFE_CALL(hipMemset(d_histogram_normalized, 0, 256 * sizeof(float)), "CUDA Memset Failed");
// Copy data from OpenCV input image to device memory
SAFE_CALL(hipMemcpy(d_input, input.ptr(), inputBytes, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
SAFE_CALL(hipMemcpy(d_output, output.ptr(), outputBytes, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
// Specify a reasonable block size
const dim3 block(32, 32);
// Calculate grid size to cover the whole image
const dim3 grid((input.cols + block.x - 1) / block.x, (input.rows + block.y - 1) / block.y);
// const dim3 grid((input.cols) / block.x, (input.rows) / block.y);
// Launch the color conversion kernel
auto start_cpu = chrono::high_resolution_clock::now();
	hipLaunchKernelGGL(( get_histogram), dim3(grid), dim3(block), 0, 0, d_input, d_histogram, input.cols, input.rows, static_cast<int>(input.step));
	hipLaunchKernelGGL(( normalize_histogram), dim3(grid), dim3(block), 0, 0, d_histogram, d_histogram_normalized, input.cols, input.rows);
	hipLaunchKernelGGL(( apply_histogram_image), dim3(grid), dim3(block), 0, 0, d_input, d_output, d_histogram_normalized, input.cols, input.rows, static_cast<int>(input.step));
auto end_cpu = chrono::high_resolution_clock::now();
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("GPU elapsed %f ms\n", duration_ms.count());
// Synchronize to check for any kernel launch errors
SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed");
	// Copy back data from destination device memory to OpenCV output image
	SAFE_CALL(hipMemcpy(output.ptr(), d_output, outputBytes, hipMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
// Free the device memory
SAFE_CALL(hipFree(d_input), "CUDA Free Failed");
SAFE_CALL(hipFree(d_output), "CUDA Free Failed");
}
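// Driver: loads the image (path from argv[1] or a default), converts it to grayscale, runs the
// GPU and CPU equalizers on the same input and displays all three images for visual comparison.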
int main(int argc, char *argv[])
{
string imagePath;
if (argc < 2)
imagePath = "Images/woman3.jpg";
else
imagePath = argv[1];
cout << imagePath << endl;
// Read input image from the disk
cv::Mat input = cv::imread(imagePath, CV_LOAD_IMAGE_COLOR);
if (input.empty())
{
cout << "Image Not Found!" << std::endl;
cin.get();
return -1;
}
	//Convert the input image to grayscale
	cv::Mat grayscale_input;
	cvtColor(input, grayscale_input, cv::COLOR_BGR2GRAY);
	//Create output images
cv::Mat output_gpu(grayscale_input.rows, grayscale_input.cols, grayscale_input.type());
cv::Mat output_cpu(grayscale_input.rows, grayscale_input.cols, grayscale_input.type());
// GPU
normalize_gpu(grayscale_input, output_gpu);
// CPU
auto start_cpu = chrono::high_resolution_clock::now();
normalize_cpu(grayscale_input, output_cpu);
auto end_cpu = chrono::high_resolution_clock::now();
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("CPU elapsed %f ms\n", duration_ms.count());
//Allow the windows to resize
namedWindow("Input", cv::WINDOW_NORMAL);
namedWindow("Output_GPU", cv::WINDOW_NORMAL);
namedWindow("Output_CPU", cv::WINDOW_NORMAL);
cv::resizeWindow("Input", 800, 600);
cv::resizeWindow("Output_GPU", 800, 600);
cv::resizeWindow("Output_CPU", 800, 600);
// output = input_bw.clone();
imshow("Input", grayscale_input);
imshow("Output_GPU", output_gpu);
imshow("Output_CPU", output_cpu);
//Wait for key press
cv::waitKey();
return 0;
}
| 029e1d0167c020a33dccdc6631ed7ae65a109547.cu | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <chrono>
#include <omp.h>
using namespace std;
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number)
{
if (err != cudaSuccess)
{
fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg, file_name, line_number, cudaGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
__global__ void get_histogram(unsigned char* input, float* histogram, int width, int height, int step) {
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int index = yIndex * step + xIndex;
if (xIndex < width && yIndex < height) {
atomicAdd(&histogram[input[index]], 1);
}
}
__global__ void normalize_histogram(float* histogram, float* histogram_normalized, int width, int height) {
unsigned int nxy = threadIdx.x + threadIdx.y * blockDim.x;
if (nxy < 256 && blockIdx.x == 0 && blockIdx.y == 0) {
for (int i = 0; i < nxy; i++) {
histogram_normalized[nxy] += histogram[i];
}
histogram_normalized[nxy] = histogram_normalized[nxy] * 255 / (width * height);
}
}
__global__ void apply_histogram_image(unsigned char* input, unsigned char* output, float* histogram_normalized, int width, int height, int step) {
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int index = yIndex * step + xIndex;
if (xIndex < width && yIndex < height) {
output[index] = histogram_normalized[input[index]];
}
}
void normalize_cpu(const cv::Mat& input, cv::Mat& output) {
float histogram[256] = {};
float histogram_normalized[256] = {};
int i, j, k, l, m, n;
int size = input.rows * input.cols;
// Get number of processors
int nProcessors = omp_get_max_threads();
std::cout << "CPU processors available: " << nProcessors << std::endl;
// Set number of processors to use with OpenMP
omp_set_num_threads(6);
#pragma omp parallel for private(i, j) shared(input, histogram)
	for (i = 0; i < input.rows; i++) {
		for (j = 0; j < input.cols; j++) {
			// Different rows can hit the same bin, so the increment must be atomic
#pragma omp atomic
			histogram[(int)input.at<uchar>(i, j)]++;
		}
	}
#pragma omp parallel for private(k, l) shared(size, histogram, histogram_normalized)
for (k = 0; k < 256; k++) {
for (l = 0; l < k; l++) {
histogram_normalized[k] += histogram[l];
}
histogram_normalized[k] = histogram_normalized[k] * 255 / size;
}
#pragma omp parallel for private(m, n) shared(input, output, histogram_normalized)
for (m = 0; m < output.rows; m++) {
for (n = 0; n < output.cols; n++) {
output.at<uchar>(m, n) = histogram_normalized[(int)input.at<uchar>(m, n)];
}
}
}
void normalize_gpu(const cv::Mat& input, cv::Mat& output) {
// Set up device
int dev = 0;
cudaDeviceProp deviceProp;
SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev), "Error device prop");
printf("Using Device %d: %s\n", dev, deviceProp.name);
SAFE_CALL(cudaSetDevice(dev), "Error setting device");
cout << "Input image step: " << input.step << " rows: " << input.rows << " cols: " << input.cols << endl;
// Calculate total number of bytes of input and output image
// Step = cols * number of colors
size_t inputBytes = input.step * input.rows;
size_t outputBytes = output.step * output.rows;
unsigned char *d_input, *d_output;
float * d_histogram = {};
float * d_histogram_normalized = {};
// Allocate device memory
SAFE_CALL(cudaMalloc<unsigned char>(&d_input, inputBytes), "CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_output, outputBytes), "CUDA Malloc Failed");
	SAFE_CALL(cudaMalloc(&d_histogram, 256 * sizeof(float)), "CUDA Malloc Failed");
	SAFE_CALL(cudaMalloc(&d_histogram_normalized, 256 * sizeof(float)), "CUDA Malloc Failed");
	// cudaMalloc does not zero memory; clear both histograms before the kernels accumulate into them
	SAFE_CALL(cudaMemset(d_histogram, 0, 256 * sizeof(float)), "CUDA Memset Failed");
	SAFE_CALL(cudaMemset(d_histogram_normalized, 0, 256 * sizeof(float)), "CUDA Memset Failed");
// Copy data from OpenCV input image to device memory
SAFE_CALL(cudaMemcpy(d_input, input.ptr(), inputBytes, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMemcpy(d_output, output.ptr(), outputBytes, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
// Specify a reasonable block size
const dim3 block(32, 32);
// Calculate grid size to cover the whole image
const dim3 grid((input.cols + block.x - 1) / block.x, (input.rows + block.y - 1) / block.y);
// const dim3 grid((input.cols) / block.x, (input.rows) / block.y);
// Launch the color conversion kernel
auto start_cpu = chrono::high_resolution_clock::now();
	get_histogram<<<grid, block>>>(d_input, d_histogram, input.cols, input.rows, static_cast<int>(input.step));
	normalize_histogram<<<grid, block>>>(d_histogram, d_histogram_normalized, input.cols, input.rows);
	apply_histogram_image<<<grid, block>>>(d_input, d_output, d_histogram_normalized, input.cols, input.rows, static_cast<int>(input.step));
auto end_cpu = chrono::high_resolution_clock::now();
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("GPU elapsed %f ms\n", duration_ms.count());
// Synchronize to check for any kernel launch errors
SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed");
	// Copy back data from destination device memory to OpenCV output image
	SAFE_CALL(cudaMemcpy(output.ptr(), d_output, outputBytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
// Free the device memory
SAFE_CALL(cudaFree(d_input), "CUDA Free Failed");
SAFE_CALL(cudaFree(d_output), "CUDA Free Failed");
}
int main(int argc, char *argv[])
{
string imagePath;
if (argc < 2)
imagePath = "Images/woman3.jpg";
else
imagePath = argv[1];
cout << imagePath << endl;
// Read input image from the disk
cv::Mat input = cv::imread(imagePath, CV_LOAD_IMAGE_COLOR);
if (input.empty())
{
cout << "Image Not Found!" << std::endl;
cin.get();
return -1;
}
	//Convert the input image to grayscale
	cv::Mat grayscale_input;
	cvtColor(input, grayscale_input, cv::COLOR_BGR2GRAY);
	//Create output images
cv::Mat output_gpu(grayscale_input.rows, grayscale_input.cols, grayscale_input.type());
cv::Mat output_cpu(grayscale_input.rows, grayscale_input.cols, grayscale_input.type());
// GPU
normalize_gpu(grayscale_input, output_gpu);
// CPU
auto start_cpu = chrono::high_resolution_clock::now();
normalize_cpu(grayscale_input, output_cpu);
auto end_cpu = chrono::high_resolution_clock::now();
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("CPU elapsed %f ms\n", duration_ms.count());
//Allow the windows to resize
namedWindow("Input", cv::WINDOW_NORMAL);
namedWindow("Output_GPU", cv::WINDOW_NORMAL);
namedWindow("Output_CPU", cv::WINDOW_NORMAL);
cv::resizeWindow("Input", 800, 600);
cv::resizeWindow("Output_GPU", 800, 600);
cv::resizeWindow("Output_CPU", 800, 600);
// output = input_bw.clone();
imshow("Input", grayscale_input);
imshow("Output_GPU", output_gpu);
imshow("Output_CPU", output_cpu);
//Wait for key press
cv::waitKey();
return 0;
}
|
73fa02332f2cbd1bf2488fcd6eb0cf9116bd78f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"glcm.h"
/* Calculating GLCM */
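// Quantizes the batched input images into Ng gray levels on the device (Preprocessing_image_GLCM)
// and accumulates one Ng x Ng co-occurrence matrix per angle and per image; the device GLCM
// buffer is returned to the caller, which is why it is not freed here.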
int *Calculate_GLCM_rl(const int *image, int *size, int *stride, int *angles, int *Range, int MASK_VALUE, int bin_width, int Ng, int NA, int batch_size)
{
//START_TIMER(time)
int nbytes_image = sizeof(int) * size[0] * size[1] * batch_size;
//int nbytes_mask = sizeof(int) * size[0] * size[1] * batch_size;
int nbytes_glcm = sizeof(int) * Ng * Ng * NA * batch_size;
//int *glcm = (int*)malloc(nbytes_glcm);
int *dev_image = NULL;
int *dev_angles = NULL;
int *dev_size = NULL;
int *dev_stride = NULL;
int *dev_glcm = NULL;
hipMalloc((void**)&dev_image, nbytes_image);
//HANDLE_ERROR(hipMalloc((void**)&dev_mask, nbytes_mask));
hipMalloc((void**)&dev_glcm, nbytes_glcm);
dim3 grids_Pn(Ng/8, Ng/8, batch_size);
dim3 threads_Pn(64, NA);
hipLaunchKernelGGL(( initialize), dim3(grids_Pn), dim3(threads_Pn), 0, 0, dev_glcm);
hipMalloc((void**)&dev_size, sizeof(int) * 2);
hipMalloc((void **)&dev_angles, sizeof(int) * 8);
hipMalloc((void**)&dev_stride, sizeof(int) * 2);
//ANDLE_ERROR(hipMemcpy((void*)dev_image, (void*)image, nbytes_image, hipMemcpyHostToDevice));
//HANDLE_ERROR(hipMemcpy((void*)dev_mask, (void*)mask, nbytes_mask, hipMemcpyHostToDevice));
//HANDLE_ERROR(hipMemcpy((void*)dev_glcm, (void*)glcm, nbytes_glcm, hipMemcpyHostToDevice));
hipMemcpy(dev_size, size, sizeof(int) * 2, hipMemcpyHostToDevice);
hipMemcpy(dev_stride, stride, sizeof(int) * 2, hipMemcpyHostToDevice);
hipMemcpy(dev_angles, angles, sizeof(int) * 8, hipMemcpyHostToDevice);
//printf("copying: ");
//PRINT_TIME(time)
//printf("\n");
dim3 grids(16, 16, batch_size);
dim3 threads(size[0]/16, size[1]/16);
//START_TIMER(time)
//START_TIMER(time)
hipLaunchKernelGGL(( Preprocessing_image_GLCM), dim3(grids), dim3(threads), 0, 0, dev_image, image, Range[0], Range[1], bin_width, MASK_VALUE);
//initialize<<<grids1, threads1>>>(dev_glcm);
hipDeviceSynchronize();
hipLaunchKernelGGL(( calculate_glcm_kernel_rl), dim3(grids), dim3(threads), 0, 0, dev_image, dev_glcm, dev_size, dev_stride, dev_angles, Ng, NA);
//STOP_TIMER(time)
//printf("GLCM \n");
//PRINT_TIME(time)
//hipDeviceSynchronize();
//HANDLE_ERROR(hipMemcpy(glcm, dev_glcm, nbytes_glcm, hipMemcpyDeviceToHost));
//printf("GLCM calculated \n");
hipFree(dev_image);
//HANDLE_ERROR(hipFree(dev_mask));
//HANDLE_ERROR(hipFree(dev_glcm));
hipFree(dev_angles);
hipFree(dev_stride);
hipFree(dev_size);
return dev_glcm;
//return dev_glcm;
}
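// Releases any property buffers left over from a previous call, reallocates and zero-initializes
// them on the device, then fills them with the GLCM_sum / GLCM_Pn / GLCM_Property1 /
// GLCM_Property2 kernels (per-angle sums, normalized matrices, marginals, means, variances,
// entropies and maxima).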
void Calculate_GLCM_Property(PROPERTY_glcm *Property_glcm, float Epsilon, int Ng, int NA, int batch_size)
{
/*
PROPERTY_glcm *Property_glcm = (PROPERTY_glcm*)malloc(sizeof(PROPERTY_glcm));
Property_glcm->P = glcm;
Property_glcm->s = (float*)malloc(sizeof(float) * NA);
Property_glcm->Pn = (float*)malloc(sizeof(float) * Ng * Ng * NA);
Property_glcm->Px = (float*)malloc(sizeof(float) * Ng * NA);
Property_glcm->Py = (float*)malloc(sizeof(float) * Ng * NA);
Property_glcm->ux = (float*)malloc(sizeof(float) * NA);
Property_glcm->uy = (float*)malloc(sizeof(float) * NA);
Property_glcm->Dx = (float*)malloc(sizeof(float) * NA);
Property_glcm->Dy = (float*)malloc(sizeof(float) * NA);
Property_glcm->Pxay = (float*)malloc(sizeof(float) * Ng * NA * 2);
Property_glcm->Pxsy = (float*)malloc(sizeof(float) * Ng * NA);
Property_glcm->HX = (float*)malloc(sizeof(float) * NA);
Property_glcm->HY = (float*)malloc(sizeof(float) * NA);
Property_glcm->HXY = (float*)malloc(sizeof(float) * NA);
Property_glcm->HXY1 = (float*)malloc(sizeof(float) * NA);
Property_glcm->HXY2 = (float*)malloc(sizeof(float) * NA);
*/
//START_TIMER(time)
int nbytes_glcm = sizeof(int) * Ng * Ng * NA * batch_size;
//int *P_matrix;
//hipMalloc((void**)&P_matrix, nbytes_glcm);
//hipMemcpy((void*)P_matrix, (void*)glcm, nbytes_glcm, hipMemcpyHostToDevice);
//PROPERTY_glcm Property_glcm;
//hipMalloc((void**)&Property_glcm, sizeof(PROPERTY_glcm));
//hipMalloc((void**)&(Property_glcm.P), sizeof(int) * NA * Ng * Ng * batch_size);
//Property_glcm->P = glcm;
if (Property_glcm->Pn != NULL)
{
delete Property_glcm->Pn;
Property_glcm->Pn = NULL;
//printf("Property_glcm->Pn != NULL! \n");
}
if (Property_glcm->s != NULL)
{
delete Property_glcm->s;
Property_glcm->s = NULL;
//printf("Property_glcm->s != NULL! \n");
}
if (Property_glcm->Px != NULL)
{
delete Property_glcm->Px;
Property_glcm->Px = NULL;
//printf("Property_glcm->Px != NULL! \n");
}
if (Property_glcm->Py != NULL)
{
delete Property_glcm->Py;
Property_glcm->Py = NULL;
//printf("Property_glcm->Py != NULL! \n");
}
if (Property_glcm->ux != NULL)
{
		delete Property_glcm->ux;
Property_glcm->ux = NULL;
//printf("Property_glcm->ux != NULL! \n");
}
if (Property_glcm->uy != NULL)
{
delete Property_glcm->uy;
Property_glcm->uy = NULL;
//printf("Property_glcm->uy != NULL! \n");
}
if (Property_glcm->Dx != NULL)
{
delete Property_glcm->Dx;
Property_glcm->Dx = NULL;
//printf("Property_glcm->Dx != NULL! \n");
}
if (Property_glcm->Dy != NULL)
{
delete Property_glcm->Dy;
Property_glcm->Dy = NULL;
//printf("Property_glcm->Dy != NULL! \n");
}
if (Property_glcm->Pxay != NULL)
{
delete Property_glcm->Pxay;
Property_glcm->Pxay = NULL;
//printf("Property_glcm->Pxay != NULL! \n");
}
if (Property_glcm->Pxsy != NULL)
{
delete Property_glcm->Pxsy;
Property_glcm->Pxsy = NULL;
//printf("Property_glcm->Pxsy != NULL! \n");
}
if (Property_glcm->HX != NULL)
{
delete Property_glcm->HX;
Property_glcm->HX = NULL;
//printf("Property_glcm->HX != NULL! \n");
}
if (Property_glcm->HY != NULL)
{
delete Property_glcm->HY;
Property_glcm->HY = NULL;
//printf("Property_glcm->HY != NULL! \n");
}
if (Property_glcm->HXY != NULL)
{
delete Property_glcm->HXY;
Property_glcm->HXY = NULL;
//printf("Property_glcm->HXY != NULL! \n");
}
if (Property_glcm->HXY1 != NULL)
{
delete Property_glcm->HXY1;
Property_glcm->HXY1 = NULL;
//printf("Property_glcm->HXY1 != NULL! \n");
}
if (Property_glcm->HXY2 != NULL)
{
delete Property_glcm->HXY2;
Property_glcm->HXY2 = NULL;
//printf("Property_glcm->HXY2 != NULL! \n");
}
if (Property_glcm->maxp != NULL)
{
delete Property_glcm->maxp;
Property_glcm->maxp = NULL;
//printf("Property_glcm->maxp != NULL! \n");
}
hipDeviceSynchronize();
hipMalloc((void**)&(Property_glcm->s), sizeof(float) * NA * batch_size);
dim3 grids_s(1, 1, batch_size);
dim3 threads_s(1, NA);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_s), dim3(threads_s), 0, 0, Property_glcm->s);
hipMalloc((void**)&Property_glcm->Pn, sizeof(float) * Ng * Ng * NA * batch_size);
dim3 grids_Pn(Ng/8, Ng/8, batch_size/4);
dim3 threads_Pn(16, 16);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_Pn), dim3(threads_Pn), 0, 0, Property_glcm->Pn);
hipMalloc((void**)&Property_glcm->Px, sizeof(float) * Ng * NA * batch_size);
dim3 grids_Px(Ng, 1, batch_size);
dim3 threads_Px(1, NA);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_Px), dim3(threads_Px), 0, 0, Property_glcm->Px);
hipMalloc((void**)&Property_glcm->Py, sizeof(float) * Ng * NA * batch_size);
dim3 grids_Py(Ng, 1, batch_size);
dim3 threads_Py(1, NA);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_Py), dim3(threads_Py), 0, 0, Property_glcm->Py);
hipMalloc((void**)&Property_glcm->Pxay, sizeof(float) * Ng * NA * 2 * batch_size);
dim3 grids_Pxay(Ng, 2, batch_size);
dim3 threads_Pxay(1, NA);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_Pxay), dim3(threads_Pxay), 0, 0, Property_glcm->Pxay);
hipMalloc((void**)&Property_glcm->Pxsy, sizeof(float) * Ng * NA * batch_size);
dim3 grids_Pxsy(Ng, 1, batch_size);
dim3 threads_Pxsy(1, NA);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_Pxsy), dim3(threads_Pxsy), 0, 0, Property_glcm->Pxsy);
hipMalloc((void**)&Property_glcm->ux, sizeof(float) * NA * batch_size);
dim3 grids_ux(1, 1, batch_size);
dim3 threads_ux(1, NA);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_ux), dim3(threads_ux), 0, 0, Property_glcm->ux);
hipMalloc((void**)&Property_glcm->uy, sizeof(float) * NA * batch_size);
dim3 grids_uy(1, 1, batch_size);
dim3 threads_uy(1, NA);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_uy), dim3(threads_uy), 0, 0, Property_glcm->uy);
hipMalloc((void**)&Property_glcm->Dx, sizeof(float) * NA * batch_size);
dim3 grids_Dx(1, 1, batch_size);
dim3 threads_Dx(1, NA);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_Dx), dim3(threads_Dx), 0, 0, Property_glcm->Dx);
hipMalloc((void**)&Property_glcm->Dy, sizeof(float) * NA * batch_size);
dim3 grids_Dy(1, 1, batch_size);
dim3 threads_Dy(1, NA);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_Dy), dim3(threads_Dy), 0, 0, Property_glcm->Dy);
hipMalloc((void**)&Property_glcm->HX, sizeof(float) * NA * batch_size);
dim3 grids_HX(1, 1, batch_size);
dim3 threads_HX(1, NA);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_HX), dim3(threads_HX), 0, 0, Property_glcm->HX);
hipMalloc((void**)&Property_glcm->HY, sizeof(float) * NA * batch_size);
dim3 grids_HY(1, 1, batch_size);
dim3 threads_HY(2, 2);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_HY), dim3(threads_HY), 0, 0, Property_glcm->HY);
hipMalloc((void**)&Property_glcm->HXY, sizeof(float) * NA * batch_size);
dim3 grids_HXY(1, 1, batch_size);
dim3 threads_HXY(2, 2);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_HXY), dim3(threads_HXY), 0, 0, Property_glcm->HXY);
hipMalloc((void**)&Property_glcm->HXY1, sizeof(float) *NA * batch_size);
dim3 grids_HXY1(1, 1, batch_size);
dim3 threads_HXY1(1, NA);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_s), dim3(threads_s), 0, 0, Property_glcm->HXY1);
hipMalloc((void**)&Property_glcm->HXY2, sizeof(float) * NA * batch_size);
dim3 grids_HXY2(1, 1, batch_size);
dim3 threads_HXY2(1, NA);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_HXY2), dim3(threads_HXY2), 0, 0, Property_glcm->HXY2);
hipMalloc((void**)&Property_glcm->maxp, sizeof(int) * NA * batch_size);
dim3 grids_maxp(1, 1, batch_size);
dim3 threads_maxp(1, NA);
hipLaunchKernelGGL(( initialize), dim3(grids_maxp), dim3(threads_maxp), 0, 0, Property_glcm->maxp);
hipMalloc((void**)&Property_glcm->DA, sizeof(float) * NA * batch_size);
dim3 grids_DA(1, 1, batch_size);
dim3 threads_DA(1, NA);
hipLaunchKernelGGL(( initialize_tex), dim3(grids_DA), dim3(threads_DA), 0, 0, Property_glcm->DA);
hipDeviceSynchronize();
//printf("Property_glcm initialized! \n");
dim3 grids(Ng/8, Ng/8, batch_size);
dim3 threads(64, NA);
//printf("getting property_glcm CUDA \n");
//Calculate_GLCM_Property_glcm_kernel<<<grids, threads>>>(Property_glcm, P_matrix, NA, Ng, Epsilon);
//Property_glcm->epsilon = Epsilon;
//Property_glcm.P = glcm;
//printf("P %d \n", Property_glcm.P[10000]);
//printf("Get Property_glcm P! \n");
hipLaunchKernelGGL(( GLCM_sum), dim3(grids), dim3(threads), 0, 0, Property_glcm->P, Property_glcm->s, Ng, NA);
hipLaunchKernelGGL(( GLCM_Pn), dim3(grids), dim3(threads), 0, 0, Property_glcm->P, Property_glcm->Pn, Property_glcm->s, Ng, NA, Epsilon);
hipLaunchKernelGGL(( GLCM_Property1), dim3(grids), dim3(threads), 0, 0,
Property_glcm->Pn,
Property_glcm->Px,
Property_glcm->Py,
Property_glcm->ux,
Property_glcm->uy,
Property_glcm->Pxay,
Property_glcm->Pxsy,
Ng,
NA,
Epsilon);
hipLaunchKernelGGL(( GLCM_Property2), dim3(grids), dim3(threads), 0, 0, Property_glcm->P,
Property_glcm->s,
Property_glcm->Pn,
Property_glcm->Px,
Property_glcm->Py,
Property_glcm->ux,
Property_glcm->uy,
Property_glcm->Dx,
Property_glcm->Dy,
Property_glcm->Pxay,
Property_glcm->Pxsy,
Property_glcm->HX,
Property_glcm->HY,
Property_glcm->HXY,
Property_glcm->HXY1,
Property_glcm->HXY2,
Property_glcm->maxp,
Property_glcm->DA,
Ng,
NA,
Epsilon);
/*
GLCM_Px<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->Px, Ng, NA);
//printf("Get Property_glcm Px! \n");
hipDeviceSynchronize();
GLCM_Py<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->Py, Ng, NA);
//printf("Get Property_glcm Py! \n");
hipDeviceSynchronize();
GLCM_ux<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->ux, Ng, NA);
//printf("Get Property_glcm ux! \n");
hipDeviceSynchronize();
GLCM_uy<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->uy, Ng, NA);
//printf("Get Property_glcm uy! \n");
hipDeviceSynchronize();
GLCM_Dx<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->ux, Property_glcm->Dx, Ng, NA);
//printf("Get Property_glcm Dx! \n");
hipDeviceSynchronize();
GLCM_Dy<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->uy, Property_glcm->Dy, Ng, NA);
//printf("Get Property_glcm Dy! \n");
hipDeviceSynchronize();
//printf("Get Property_glcm Dx! \n");
GLCM_Pxay<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->Pxay, Ng, NA);
//printf("Get Property_glcm Pxay! \n");
hipDeviceSynchronize();
GLCM_Pxsy<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->Pxsy, Ng, NA);
//printf("Get Property_glcm Pxsy! \n");
hipDeviceSynchronize();
//printf("Get Property_glcm Pxay! \n");
GLCM_HX<<<grids, threads>>>(Property_glcm->Px, Property_glcm->HX, Epsilon, Ng, NA);
//printf("Get Property_glcm HX! \n");
hipDeviceSynchronize();
GLCM_HY<<<grids, threads>>>(Property_glcm->Py, Property_glcm->HY, Epsilon, Ng, NA);
//printf("Get Property_glcm HY! \n");
hipDeviceSynchronize();
GLCM_HXY<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->HXY, Epsilon, Ng, NA);
//printf("Get Property_glcm HXY \n");
hipDeviceSynchronize();
GLCM_HXY1<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->Px, Property_glcm->Py, Property_glcm->HXY1, Epsilon, Ng, NA);
//printf("Get Property_glcm HXY1! \n");
hipDeviceSynchronize();
GLCM_HXY2<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->Px, Property_glcm->Py, Property_glcm->HXY2, Epsilon, Ng, NA);
//printf("Get Property_glcm HXY2! \n");
hipDeviceSynchronize();
//printf("Get Property_glcm H! \n");
GLCM_maxp<<<grids, threads>>>(Property_glcm->P, Property_glcm->maxp, Ng, NA);
//printf("Get Property_glcm maxp! \n");
hipDeviceSynchronize();
GLCM_DA<<<grids, threads>>>(Property_glcm->DA, Property_glcm->Pxsy, Ng, NA);
*/
//printf("Get Property_glcm maxp! \n");
//printf("ux: %d, %d, %d, %d \n", Property_glcm.P[0], Property_glcm.P[1], Property_glcm.P[2], Property_glcm.P[3]);
//HANDLE_ERROR(hipDeviceSynchronize());
//STOP_TIMER(time)
//printf("getting Property_glcm: ");
//PRINT_TIME(time)
//printf("\n");
//printf("CUDA property_glcm finished \n");
//hipFree(glcm);
//return Property_glcm;
//HANDLE_ERROR(hipDeviceSynchronize());
}
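// Zeroes the 23-feature output buffer and launches glcm_features, which accumulates all GLCM
// texture features per image, averaged over the NA angles; the commented-out printfs below show
// the intended feature order.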
void Calculate_GLCM_Texture_rl(PROPERTY_glcm *Property_glcm, float *texture_glcm, float Epsilon, int Ng, int NA, int batch_size)
{
//START_TIMER(time)
float *Texture_glcm = (float*)malloc(sizeof(float) * 23 * batch_size);
//float *texture_glcm = NULL;
dim3 grids1(1, 1, batch_size);
dim3 threads1(23, 1);
//hipMalloc((void**)&texture_glcm, sizeof(float) * NA * batch_size);
hipLaunchKernelGGL(( initialize_tex), dim3(grids1), dim3(threads1), 0, 0, texture_glcm);
hipDeviceSynchronize();
//printf("Texture_glcm initialized! \n");
dim3 grids(Ng/8, Ng/8, batch_size);
dim3 threads(64, NA);
hipLaunchKernelGGL(( glcm_features), dim3(grids), dim3(threads), 0, 0, texture_glcm,Property_glcm->s,Property_glcm->Pn,Property_glcm->ux,Property_glcm->uy,
Property_glcm->Dx, Property_glcm->Dy,Property_glcm->Pxsy,Property_glcm->Pxay,Property_glcm->HX,
Property_glcm->HY,Property_glcm->HXY,Property_glcm->HXY1,Property_glcm->HXY2,Property_glcm->maxp,
Property_glcm->DA,batch_size,Ng,NA,Epsilon);
hipDeviceSynchronize();
//STOP_TIMER(time)
/*
printf("getting Texture_glcm: \n");
printf("f1_AutoCorrelation: %f \n", Texture_glcm[0 * batch_size + 5]/4);
printf("f2_JointAverage: %f \n", Texture_glcm[1 * batch_size + 5]/4);
printf("f3_CLusterProminence: %f \n", Texture_glcm[2 * batch_size + 5]/4);
printf("f4_ClusterShade: %f \n", Texture_glcm[3 * batch_size + 5]/4);
printf("f5_ClusterTendency: %f \n", Texture_glcm[4 * batch_size + 5]/4);
printf("f6_Contrast: %f \n", Texture_glcm[5 * batch_size + 5]/4);
printf("f7_Correlation: %f \n", Texture_glcm[6 * batch_size + 5]/4);
printf("f8_DifferenceAverage: %f \n", Texture_glcm[7 * batch_size + 5]/4);
printf("f9_DifferenceEntropy: %f \n", Texture_glcm[8 * batch_size + 5]/4);
printf("f10_DifferenceVariance: %f \n", Texture_glcm[9 * batch_size + 5]/4);
printf("f11_JointEnergy: %f \n", Texture_glcm[10 * batch_size + 5]/4);
printf("f12_JointEntropy: %f \n", Texture_glcm[11 * batch_size + 5]/4);
printf("f13_IMC1: %f \n", Texture_glcm[12 * batch_size + 5]/4);
printf("f14_IMC2: %f \n", Texture_glcm[13 * batch_size + 5]/4);
printf("f15_IDM: %f \n", Texture_glcm[14 * batch_size + 5]/4);
printf("f17_IDMN: %f \n", Texture_glcm[15 * batch_size + 5]/4);
printf("f18_ID: %f \n", Texture_glcm[16 * batch_size + 5]/4);
printf("f19_IDN: %f \n", Texture_glcm[17 * batch_size +5]/4);
printf("f20_InverseVariance: %f \n", Texture_glcm[18 * batch_size + 5]/4);
printf("f21_MaximumProbability: %f \n", Texture_glcm[19 * batch_size + 5]/4);
printf("f22_SumAverage: %f \n", Texture_glcm[20 * batch_size + 5]/4);
printf("f23_SumEntropy: %f \n", Texture_glcm[21 * batch_size + 5]/4);
printf("f24_SumSquares: %f \n", Texture_glcm[22 * batch_size + 5]/4);
//PRINT_TIME(time)
printf("\n");
printf("CUDA Texture_glcm finished \n");
*/
//delete Property_glcm;
free(Texture_glcm);
}
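// Fused single-pass property kernel. It does not appear to be launched by the host code in this
// file, which uses the split GLCM_sum / GLCM_Pn / GLCM_Property1 / GLCM_Property2 kernels
// instead, presumably because the sums read here are still being accumulated by other threads
// of the same launch.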
__global__ void GLCM_Property(int *P,
float *s,
float *Pn,
float *Px,
float *Py,
float *ux,
float *uy,
float *Dx,
float *Dy,
float *Pxay,
float *Pxsy,
float *HX,
float *HY,
float *HXY,
float *HXY1,
float *HXY2,
int *maxp,
float *DA,
int Ng,
int NA,
float epsilon
)
{
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ia, ipix, img_ith, ix, iy;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ia = ipix % NA;
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
atomicAdd(&s[img_ith * NA + ia], float(P[ip]));
//hipDeviceSynchronize();
atomicExch(&Pn[ip], float(P[ip])/(s[img_ith * NA + ia] + epsilon));
//hipDeviceSynchronize();
atomicAdd(&Px[img_ith * Ng * NA + ix * NA + ia], Pn[ip]);
atomicAdd(&Py[img_ith * Ng * NA + iy * NA + ia], Pn[ip]);
atomicAdd(&Pxay[img_ith * 2 * Ng * NA + ix * NA + iy * NA + ia], Pn[ip]);
atomicAdd(&Pxsy[img_ith * Ng * NA + abs(ix - iy) * NA + ia], Pn[ip]);
atomicAdd(&ux[img_ith * NA + ia], Pn[ip] * (ix + 1));
atomicAdd(&uy[img_ith * NA + ia], Pn[ip] * (iy + 1));
//hipDeviceSynchronize();
atomicAdd(&Dx[img_ith * NA + ia], powf(ix + 1 - ux[img_ith * NA + ia], 2) * Pn[ip]);
atomicAdd(&Dy[img_ith * NA + ia], powf(iy + 1 - uy[img_ith * NA + ia], 2) * Pn[ip]);
//hipDeviceSynchronize();
atomicAdd(&HX[img_ith * NA + ia], float(iy==0) * (-Px[img_ith * Ng * NA + ix * NA + ia] * log2f(Px[img_ith * Ng * NA + ix * NA + ia] + epsilon)));
atomicAdd(&HY[img_ith * NA + ia], float(ix==0) * (-Py[img_ith * NA * Ng + iy * NA + ia] * log2f(Py[img_ith * NA * Ng + iy * NA + ia] + epsilon)));
atomicAdd(&HXY[img_ith * NA + ia], -Pn[ip] * log2f(Pn[ip] + epsilon));
atomicAdd(&HXY1[img_ith * NA + ia], -Pn[ip] * log2f(Px[img_ith * NA * Ng + ix * NA + ia] * Py[img_ith * NA * Ng + iy * NA + ia] + epsilon));
atomicAdd(&HXY2[img_ith * NA + ia],
-Px[img_ith * NA * Ng + ix * NA + ia]
* Py[img_ith * NA * Ng + iy * NA + ia]
* log2f(Px[img_ith * NA * Ng + ix * NA + ia] * Py[img_ith * NA * Ng + iy * NA + ia] + epsilon));
atomicMax(&maxp[img_ith * NA + ia], P[ip]);
atomicAdd(&DA[img_ith * NA + ia], float(ix) * Pxsy[img_ith * NA * Ng + ix * NA + ia] * float(iy == 0));
}
__global__ void GLCM_Property2(int *P,
float *s,
float *Pn,
float *Px,
float *Py,
float *ux,
float *uy,
float *Dx,
float *Dy,
float *Pxay,
float *Pxsy,
float *HX,
float *HY,
float *HXY,
float *HXY1,
float *HXY2,
int *maxp,
float *DA,
int Ng,
int NA,
float epsilon
)
{
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ia, ipix, img_ith, ix, iy;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ia = ipix % NA;
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
atomicAdd(&Dx[img_ith * NA + ia], powf(ix + 1 - ux[img_ith * NA + ia], 2) * Pn[ip]);
atomicAdd(&Dy[img_ith * NA + ia], powf(iy + 1 - uy[img_ith * NA + ia], 2) * Pn[ip]);
//hipDeviceSynchronize();
atomicAdd(&HX[img_ith * NA + ia], float(iy==0) * (-Px[img_ith * Ng * NA + ix * NA + ia] * log2f(Px[img_ith * Ng * NA + ix * NA + ia] + epsilon)));
atomicAdd(&HY[img_ith * NA + ia], float(ix==0) * (-Py[img_ith * NA * Ng + iy * NA + ia] * log2f(Py[img_ith * NA * Ng + iy * NA + ia] + epsilon)));
atomicAdd(&HXY[img_ith * NA + ia], -Pn[ip] * log2f(Pn[ip] + epsilon));
atomicAdd(&HXY1[img_ith * NA + ia], -Pn[ip] * log2f(Px[img_ith * NA * Ng + ix * NA + ia] * Py[img_ith * NA * Ng + iy * NA + ia] + epsilon));
atomicAdd(&HXY2[img_ith * NA + ia],
-Px[img_ith * NA * Ng + ix * NA + ia]
* Py[img_ith * NA * Ng + iy * NA + ia]
* log2f(Px[img_ith * NA * Ng + ix * NA + ia] * Py[img_ith * NA * Ng + iy * NA + ia] + epsilon));
atomicMax(&maxp[img_ith * NA + ia], P[ip]);
atomicAdd(&DA[img_ith * NA + ia], float(ix) * Pxsy[img_ith * NA * Ng + ix * NA + ia] * float(iy == 0));
}
__global__ void GLCM_Property1(
float *Pn,
float *Px,
float *Py,
float *ux,
float *uy,
float *Pxay,
float *Pxsy,
int Ng,
int NA,
float epsilon
)
{
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ia, ipix, img_ith, ix, iy;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ia = ipix % NA;
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
atomicAdd(&Px[img_ith * Ng * NA + ix * NA + ia], Pn[ip]);
atomicAdd(&Py[img_ith * Ng * NA + iy * NA + ia], Pn[ip]);
atomicAdd(&Pxay[img_ith * 2 * Ng * NA + ix * NA + iy * NA + ia], Pn[ip]);
atomicAdd(&Pxsy[img_ith * Ng * NA + abs(ix - iy) * NA + ia], Pn[ip]);
atomicAdd(&ux[img_ith * NA + ia], Pn[ip] * (ix + 1));
atomicAdd(&uy[img_ith * NA + ia], Pn[ip] * (iy + 1));
//hipDeviceSynchronize();
}
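// GLCM accumulation proper: one thread per pixel. For each angle the neighbor at offset
// (angles[2a], angles[2a+1]) is checked against the image bounds, and the flat index
// img*Ng*Ng*NA + (level_i - 1)*Ng*NA + (level_j - 1)*NA + a is incremented atomically;
// pixels left at a negative value by the preprocessing step (masked / out of range)
// contribute nothing.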
__global__ void calculate_glcm_kernel_rl(int *image, int *glcm, int *dev_size, int *dev_stride, int *dev_angles, int dev_ng, int dev_na)
{
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int j, glcm_idx, a, iz, iy, ix;
int img_ith, ipix;
img_ith = ip / (dev_size[0] * dev_size[1]);
	ipix = ip % (dev_size[0] * dev_size[1]);
ix = ipix / dev_stride[0];
iy = ipix % dev_stride[0];
for (a = 0; a < dev_na; a++)
{
if (ix + dev_angles[a * 2] >= 0 && ix + dev_angles[a * 2] < dev_size[0] &&
iy + dev_angles[a * 2 + 1] >= 0 && iy + dev_angles[a * 2 + 1] < dev_size[1])
{
j = ip + dev_angles[a * 2] * dev_stride[0] + dev_angles[a * 2 + 1] * dev_stride[1];
glcm_idx = int(image[ip] > -1) * int(image[j] > -1) * (a + (image[j]-1) * dev_na + (image[ip]-1) * dev_na * dev_ng + img_ith * dev_ng * dev_ng * dev_na);
if (glcm_idx>0){
atomicAdd(&glcm[glcm_idx], 1 * int(image[ip] > -1) * int(image[j] > -1));}
}
}
}
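// GLCM_sum accumulates the total count per (image, angle); GLCM_Pn then divides each entry by
// that sum (plus epsilon) to obtain the normalized co-occurrence matrix Pn used everywhere below.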
__global__ void GLCM_sum(int *P, float *s, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ia = ipix % NA;
atomicAdd(&s[img_ith * NA + ia], float(P[ip]));
}
__global__ void GLCM_Pn(int *P, float *Pn, float *sum, int Ng, int NA, float epsilon){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ia = ipix % NA;
atomicExch(&Pn[ip], float(P[ip])/(sum[img_ith * NA + ia] + epsilon));
}
__global__ void GLCM_Px(float *P, float *Px, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&Px[img_ith * Ng * NA + ix * NA + ia], P[ip]);
}
__global__ void GLCM_Py(float *P, float *Py, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&Py[img_ith * Ng * NA + iy * NA + ia], P[ip]);
}
__global__ void GLCM_ux(float *P, float *ux, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
ia = ipix % NA;
atomicAdd(&ux[img_ith * NA + ia], P[ip] * (ix + 1));
}
__global__ void GLCM_uy(float *P, float *uy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip= blocks * blockDim.x * blockDim.y + threads;
int iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&uy[img_ith * NA + ia], P[ip] * (iy + 1));
}
__global__ void GLCM_Dx(float *P, float *ux, float *Dx, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
ia = ipix % NA;
atomicAdd(&Dx[img_ith * NA + ia], powf(ix + 1 - ux[img_ith * NA + ia], 2) * P[ip]);
//atomicExch(&Dx[img_ith * NA + ia], sqrtf(Dx[img_ith * NA + ia]));
}
__global__ void GLCM_Dy(float *P, float *uy, float *Dy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&Dy[img_ith * NA + ia], powf(iy + 1 - uy[img_ith * NA + ia], 2) * P[ip]);
//atomicExch(&Dy[img_ith * NA + ia], sqrtf(Dy[img_ith * NA + ia]));
}
__global__ void GLCM_Pxay(float *P, float *Pxay, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, img_ith, ipix;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&Pxay[img_ith * 2 * Ng * NA + ix * NA + iy * NA + ia], P[ip]);
}
__global__ void GLCM_Pxsy(float *P, float *Pxsy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&Pxsy[img_ith * Ng * NA + abs(ix - iy) * NA + ia], P[ip]);
}
__global__ void GLCM_HX(float *Px, float *HX, float epsilon, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
//for(i = 0; i< Ng; i++)
//{HX[0] -= Px[ipix] * log2f(Px[ipix] + epsilon);}
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&HX[img_ith * NA + ia], float(iy==0) * (-Px[img_ith * Ng * NA + ix * NA + ia] * log2f(Px[img_ith * Ng * NA + ix * NA + ia] + epsilon)));
//atomicExch(&HX[0], sum);
}
__global__ void GLCM_HY(float *Py, float *HY, float epsilon, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
//for(i = 0; i< Ng; i++)
//{HX[0] -= Px[ipix] * log2f(Px[ipix] + epsilon);}
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&HY[img_ith * NA + ia], float(ix==0) * (-Py[img_ith * NA * Ng + iy * NA + ia] * log2f(Py[img_ith * NA * Ng + iy * NA + ia] + epsilon)));
//atomicExch(&HX[0], sum);
}
__global__ void GLCM_HXY(float *P, float *HXY, float epsilon, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ia = ipix % NA;
atomicAdd(&HXY[img_ith * NA + ia], -P[ip] * log2f(P[ip] + epsilon));
//atomicExch(&HX[0], sum);
}
__global__ void GLCM_HXY1(float *P, float *Px, float *Py, float *HXY1, float epsilon, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&HXY1[img_ith * NA + ia], -P[ip] * log2f(Px[img_ith * NA * Ng + ix * NA + ia] * Py[img_ith * NA * Ng + iy * NA + ia] + epsilon));
//atomicExch(&HXY1[0], sum);
}
__global__ void GLCM_HXY2(float *P, float *Px, float *Py, float *HXY2, float epsilon, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&HXY2[img_ith * NA + ia],
-Px[img_ith * NA * Ng + ix * NA + ia]
* Py[img_ith * NA * Ng + iy * NA + ia]
* log2f(Px[img_ith * NA * Ng + ix * NA + ia] * Py[img_ith * NA * Ng + iy * NA + ia] + epsilon));
//atomicExch(&HXY2[0], sum);
}
__global__ void GLCM_maxp(int *P, int *maxp, int Ng, int NA){
//float dst[4];
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicMax(&maxp[img_ith * NA + ia], P[ip]);
}
__global__ void GLCM_DA(float *DA, float *Pxsy, int Ng, int NA) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&DA[img_ith * NA + ia], float(ix) * Pxsy[img_ith * NA * Ng + ix * NA + ia] * float(iy == 0));
//*rst /= NA;
}
/* FEATURE EXTRACTION */
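// glcm_features computes all 23 texture features in a single pass over Pn. The result buffer is
// laid out feature-major, rst[f * batch_size + img], and every contribution is divided by NA so
// the final value is the average over the angles. The sum-average and sum-entropy terms use the
// halved indices ix_2122 / iy_2122 to index the Pxay marginal.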
__global__ void glcm_features(float *rst,
float *s,
float *Pn,
float *ux,
float *uy,
float *Dx,
float *Dy,
float *Pxsy,
float *Pxay,
float *HX,
float *HY,
float *HXY,
float *HXY1,
float *HXY2,
int *maxp,
float *DA,
int batch_size,
int Ng,
int NA,
float epsilon){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
int ix_2122 = ipix / NA / (Ng / 2);
int iy_2122 = ipix / NA % (Ng / 2);
atomicAdd(&rst[0 * batch_size + img_ith], Pn[ip] * (ix + 1) * (iy + 1) / NA);
atomicAdd(&rst[1 * batch_size + img_ith], Pn[ip] * float(ix + 1) / NA);
atomicAdd(&rst[2 * batch_size + img_ith], powf((float(ix + 1) + float(iy + 1) - ux[img_ith * NA + ia] - uy[img_ith * NA + ia]), 4) * Pn[ip] / NA);
atomicAdd(&rst[3 * batch_size + img_ith], powf((float(ix + 1) + float(iy + 1) - ux[img_ith * NA + ia] - uy[img_ith * NA + ia]), 3) * Pn[ip] / NA);
atomicAdd(&rst[4 * batch_size + img_ith], powf((float(ix + 1) + float(iy + 1) - ux[img_ith * NA + ia] - uy[img_ith * NA + ia]), 2) * Pn[ip] / NA);
atomicAdd(&rst[5 * batch_size + img_ith], powf((ix - iy), 2) * Pn[ip] / NA);
atomicAdd(&rst[6 * batch_size + img_ith], Pn[ip] * (ix + 1 - ux[img_ith * NA + ia]) * (iy + 1 - uy[img_ith * NA + ia]) /(sqrtf(Dx[img_ith * NA + ia] * Dy[img_ith * NA + ia]) + epsilon) /NA);
atomicAdd(&rst[7 * batch_size + img_ith], float(ix) * Pxsy[img_ith * NA * Ng + ix * NA + ia] * float(iy == 0) / NA);
atomicAdd(&rst[8 * batch_size + img_ith], -Pxsy[img_ith * NA * Ng + ix * NA + ia] * log2f(Pxsy[img_ith * NA * Ng + ix * NA + ia] + epsilon) * float(iy == 0) / NA);
atomicAdd(&rst[9 * batch_size + img_ith], powf(float(ix) - DA[img_ith * NA + ia], 2) * Pxsy[img_ith * Ng * NA + ix * NA + ia] * float(iy == 0) / NA);
atomicAdd(&rst[10 * batch_size + img_ith], powf(Pn[ip], 2) / NA);
atomicAdd(&rst[11 * batch_size + img_ith], -Pn[ip] * log2f(Pn[ip] + epsilon) / NA);
atomicAdd(&rst[12 * batch_size + img_ith], float(ix == 0) * float(iy == 0) * (HXY[img_ith * NA + ia] - HXY1[img_ith * NA + ia]) / max(HX[img_ith * NA + ia], HY[img_ith * NA + ia]) / NA);
atomicAdd(&rst[13 * batch_size + img_ith], float(ix == 0) * float(iy == 0) * sqrtf(abs(1 - powf(M_E, -2 * (HXY2[img_ith * NA + ia] - HXY[img_ith * NA + ia])))) / NA);
atomicAdd(&rst[14 * batch_size + img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / (1 + powf(ix, 2)) * float(iy == 0) / NA);
atomicAdd(&rst[15 * batch_size + img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / (1 + (powf(ix, 2)/powf(Ng, 2))) * float(iy == 0) / NA);
atomicAdd(&rst[16 * batch_size + img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / float(1 + ix) * float(iy == 0) / NA);
atomicAdd(&rst[17 * batch_size + img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / (1 + float(ix) / Ng) * float(iy == 0) / NA);
atomicAdd(&rst[18 * batch_size + img_ith], ix == 0? 0: float(iy == 0) * Pxsy[img_ith * NA * Ng + ix * NA + ia] / powf(ix, 2) / NA );
if (ix == 0 and iy == 0)
{atomicAdd(&rst[19 * batch_size + img_ith], float(maxp[img_ith * NA + ia]) / (s[img_ith * NA + ia] + epsilon) / NA);}
atomicAdd(&rst[20 * batch_size + img_ith], Pxay[img_ith * NA * Ng * 2 + ix_2122 * NA + ia] * (ix_2122 + 2) * float(iy_2122 == 0) / NA);
atomicAdd(&rst[21 * batch_size + img_ith], -Pxay[img_ith * Ng * NA * 2 + ix_2122 * NA + ia] * log2f(Pxay[img_ith * Ng * NA * 2 + ix_2122 * NA + ia] + epsilon) * float(iy_2122 == 0) / NA);
atomicAdd(&rst[22 * batch_size + img_ith], Pn[ip] * powf(float(ix + 1 - ux[img_ith * NA + ia]), 2) / NA);
}
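// The f1_* .. f24_* kernels below are standalone per-feature variants of glcm_features; each one
// accumulates a single feature into rst[img_ith] using the same thread-to-(image, level, level,
// angle) decomposition.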
/* Auto Correlation */
__global__ void f1_AutoCorrelation(float *rst, float *P, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], P[ip] * (ix + 1) * (iy + 1));
//*rst /= NA;
}
/* Joint Average */
__global__ void f2_JointAverage(float *rst, float *P, int Ng, int NA) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], P[ip] * float(ix + 1));
//*rst /= NA;
}
/* Cluster Prominence */
__global__ void f3_ClusterProminence(float *rst, float *P, float *ux, float *uy, int Ng, int NA) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], powf((float(ix + 1) + float(iy + 1) - ux[img_ith * NA + ia] - uy[img_ith * NA + ia]), 4) * P[ip]);
//*rst /= NA;
}
/* ClusterShade */
__global__ void f4_ClusterShade(float *rst, float *P, float *ux, float *uy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], powf((float(ix + 1) + float(iy + 1) - ux[img_ith * NA + ia] - uy[img_ith * NA + ia]), 3) * P[ip]);
//*rst /= NA;
}
/* Cluster Tendency */
__global__ void f5_ClusterTendency(float *rst, float *P, float *ux, float *uy, int Ng, int NA) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], powf((float(ix + 1) + float(iy + 1) - ux[img_ith * NA + ia] - uy[img_ith * NA + ia]), 2) * P[ip]);
//*rst /= NA;
}
/* Contrast */
__global__ void f6_Contrast(float *rst, float *P, int Ng, int NA) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], powf((ix - iy), 2) * P[ip]);
//*rst /= NA;
}
/* Correlation */
__global__ void f7_Correlation(float *rst, float *P, float *ux, float *uy, float *Dx, float *Dy, int Ng, int NA, float epsilon) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], P[ip] * (ix + 1 - ux[img_ith * NA + ia]) * (iy + 1 - uy[img_ith * NA + ia]) /(sqrtf(Dx[img_ith * NA + ia] * Dy[img_ith * NA + ia]) + epsilon));
//*rst /= NA;
}
/* Difference Average */
__global__ void f8_DifferenceAverage(float *rst, float *DA, float *Pxsy, int Ng, int NA) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], float(ix) * Pxsy[img_ith * NA * Ng + ix * NA + ia] * float(iy == 0));
atomicAdd(&DA[img_ith * NA + ia], float(ix) * Pxsy[img_ith * NA * Ng + ix * NA + ia] * float(iy == 0));
//*rst /= NA;
}
/* Difference Entropy */
__global__ void f9_DifferenceEntropy(float *rst, float *Pxsy, float epsilon, int Ng, int NA) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], -Pxsy[img_ith * NA * Ng + ix * NA + ia] * log2f(Pxsy[img_ith * NA * Ng + ix * NA + ia] + epsilon) * float(iy == 0));
//*rst /= NA;
}
/* Difference Variance */
__global__ void f10_DifferenceVariance(float *rst, float *Pxsy, float *DA, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
//atomicAdd(&DA[ia], float(ix) * Pxsy[ix * NA + ia] * float(iy == 0));
atomicAdd(&rst[img_ith], powf(float(ix) - DA[img_ith * NA + ia], 2) * Pxsy[img_ith * Ng * NA + ix * NA + ia] * float(iy == 0));
//*rst /= NA;
}
/* Joint Energy */
__global__ void f11_JointEnergy(float *rst, float *P, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], powf(P[ip], 2));
//*rst /= NA;
}
/* Joint Entropy */
__global__ void f12_JointEntropy(float *rst, float *P, float epsilon, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], -P[ip] * log2f(P[ip] + epsilon));
//*rst /= NA;
}
/* Information Measures of Correlation */
__global__ void f13_IMC1(float *rst, float *HXY, float *HXY1, float *HX, float *HY, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], float(ix == 0) * float(iy == 0) * (HXY[img_ith * NA + ia] - HXY1[img_ith * NA + ia]) / max(HX[img_ith * NA + ia], HY[img_ith * NA + ia]) );
//*rst /= NA;
}
/* IMC2 */
__global__ void f14_IMC2(float *rst, float *HXY, float *HXY2, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], float(ix == 0) * float(iy == 0) * sqrtf(abs(1 - powf(M_E, -2 * (HXY2[img_ith * NA + ia] - HXY[img_ith * NA + ia])))));
//*rst /= NA;
}
/* Inverse Difference Moment*/
__global__ void f15_IDM(float *rst, float *Pxsy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / (1 + powf(ix, 2)) * float(iy == 0));
//*rst /= 4;
}
/* Inverse Difference Moment Normalized*/
__global__ void f17_IDMN(float *rst, float *Pxsy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / (1 + (powf(ix, 2)/powf(Ng, 2))) * float(iy == 0));
//*rst /= NA;
}
/* Inverse Difference*/
__global__ void f18_ID(float *rst, float *Pxsy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / float(1 + ix) * float(iy == 0));
//*rst /= NA;
}
/* Inverse Difference Normalized*/
__global__ void f19_IDN(float *rst, float *Pxsy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / (1 + float(ix) / Ng) * float(iy == 0));
//*rst /= NA;
}
/* Inverse Variance*/
__global__ void f20_InverseVariance(float *rst, float *Pxsy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], ix == 0? 0: float(iy == 0) * Pxsy[img_ith * NA * Ng + ix * NA + ia] / powf(ix, 2) );
//*rst /= NA;
}
/* Maximum Probability*/
__global__ void f21_MaximumProbability(float *rst, float *sum, int *maxp, int Ng, int NA, float epsilon){
//float dst[4];
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
if (ix == 0 and iy == 0)
{atomicAdd(&rst[img_ith], float(maxp[img_ith * NA + ia]) / (sum[img_ith * NA + ia] + epsilon));}
//printf("maxp: %f\n", maxp[0]);
//atomicExch(&rst[img_ith], 0);
}
/* Sum Average*/
__global__ void f22_SumAverage(float *rst, float *Pxay, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / (Ng / 2);
iy = ipix / NA % (Ng / 2);
ia = ipix % NA;
atomicAdd(&rst[img_ith], Pxay[img_ith * NA * Ng * 2 + ix * NA + ia] * (ix + 2) * float(iy == 0));
//*rst /= NA;
}
/* Sum Entropy */
__global__ void f23_SumEntropy(float *rst, float *Pxay, float epsilon, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / (Ng / 2);
iy = ipix / NA % (Ng / 2);
ia = ipix % NA;
atomicAdd(&rst[img_ith], -Pxay[img_ith * Ng * NA * 2 + ix * NA + ia] * log2f(Pxay[img_ith * Ng * NA * 2 + ix * NA + ia] + epsilon) * float(iy == 0));
//*rst /= NA;
}
/*Sum of Squares*/
__global__ void f24_SumSquares(float *rst, float *P, float *ux, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], P[ip] * powf(float(ix + 1 - ux[img_ith * NA + ia]), 2));
//*rst /= NA;
}
| 73fa02332f2cbd1bf2488fcd6eb0cf9116bd78f2.cu |
#include"glcm.h"
/* Calculating GLCM */
int *Calculate_GLCM_rl(const int *image, int *size, int *stride, int *angles, int *Range, int MASK_VALUE, int bin_width, int Ng, int NA, int batch_size)
{
//START_TIMER(time)
int nbytes_image = sizeof(int) * size[0] * size[1] * batch_size;
//int nbytes_mask = sizeof(int) * size[0] * size[1] * batch_size;
int nbytes_glcm = sizeof(int) * Ng * Ng * NA * batch_size;
//int *glcm = (int*)malloc(nbytes_glcm);
int *dev_image = NULL;
int *dev_angles = NULL;
int *dev_size = NULL;
int *dev_stride = NULL;
int *dev_glcm = NULL;
cudaMalloc((void**)&dev_image, nbytes_image);
//HANDLE_ERROR(cudaMalloc((void**)&dev_mask, nbytes_mask));
cudaMalloc((void**)&dev_glcm, nbytes_glcm);
dim3 grids_Pn(Ng/8, Ng/8, batch_size);
dim3 threads_Pn(64, NA);
initialize<<<grids_Pn, threads_Pn>>>(dev_glcm);
cudaMalloc((void**)&dev_size, sizeof(int) * 2);
cudaMalloc((void **)&dev_angles, sizeof(int) * 8);
cudaMalloc((void**)&dev_stride, sizeof(int) * 2);
//ANDLE_ERROR(cudaMemcpy((void*)dev_image, (void*)image, nbytes_image, cudaMemcpyHostToDevice));
//HANDLE_ERROR(cudaMemcpy((void*)dev_mask, (void*)mask, nbytes_mask, cudaMemcpyHostToDevice));
//HANDLE_ERROR(cudaMemcpy((void*)dev_glcm, (void*)glcm, nbytes_glcm, cudaMemcpyHostToDevice));
cudaMemcpy(dev_size, size, sizeof(int) * 2, cudaMemcpyHostToDevice);
cudaMemcpy(dev_stride, stride, sizeof(int) * 2, cudaMemcpyHostToDevice);
cudaMemcpy(dev_angles, angles, sizeof(int) * 8, cudaMemcpyHostToDevice);
//printf("copying: ");
//PRINT_TIME(time)
//printf("\n");
dim3 grids(16, 16, batch_size);
dim3 threads(size[0]/16, size[1]/16);
//START_TIMER(time)
//START_TIMER(time)
Preprocessing_image_GLCM<<<grids, threads>>>(dev_image, image, Range[0], Range[1], bin_width, MASK_VALUE);
//initialize<<<grids1, threads1>>>(dev_glcm);
cudaDeviceSynchronize();
calculate_glcm_kernel_rl<<<grids, threads>>>(dev_image, dev_glcm, dev_size, dev_stride, dev_angles, Ng, NA);
//STOP_TIMER(time)
//printf("GLCM \n");
//PRINT_TIME(time)
//cudaDeviceSynchronize();
//HANDLE_ERROR(cudaMemcpy(glcm, dev_glcm, nbytes_glcm, cudaMemcpyDeviceToHost));
//printf("GLCM calculated \n");
cudaFree(dev_image);
//HANDLE_ERROR(cudaFree(dev_mask));
//HANDLE_ERROR(cudaFree(dev_glcm));
cudaFree(dev_angles);
cudaFree(dev_stride);
cudaFree(dev_size);
return dev_glcm;
//return dev_glcm;
}
void Calculate_GLCM_Property(PROPERTY_glcm *Property_glcm, float Epsilon, int Ng, int NA, int batch_size)
{
/*
PROPERTY_glcm *Property_glcm = (PROPERTY_glcm*)malloc(sizeof(PROPERTY_glcm));
Property_glcm->P = glcm;
Property_glcm->s = (float*)malloc(sizeof(float) * NA);
Property_glcm->Pn = (float*)malloc(sizeof(float) * Ng * Ng * NA);
Property_glcm->Px = (float*)malloc(sizeof(float) * Ng * NA);
Property_glcm->Py = (float*)malloc(sizeof(float) * Ng * NA);
Property_glcm->ux = (float*)malloc(sizeof(float) * NA);
Property_glcm->uy = (float*)malloc(sizeof(float) * NA);
Property_glcm->Dx = (float*)malloc(sizeof(float) * NA);
Property_glcm->Dy = (float*)malloc(sizeof(float) * NA);
Property_glcm->Pxay = (float*)malloc(sizeof(float) * Ng * NA * 2);
Property_glcm->Pxsy = (float*)malloc(sizeof(float) * Ng * NA);
Property_glcm->HX = (float*)malloc(sizeof(float) * NA);
Property_glcm->HY = (float*)malloc(sizeof(float) * NA);
Property_glcm->HXY = (float*)malloc(sizeof(float) * NA);
Property_glcm->HXY1 = (float*)malloc(sizeof(float) * NA);
Property_glcm->HXY2 = (float*)malloc(sizeof(float) * NA);
*/
//START_TIMER(time)
int nbytes_glcm = sizeof(int) * Ng * Ng * NA * batch_size;
//int *P_matrix;
//cudaMalloc((void**)&P_matrix, nbytes_glcm);
//cudaMemcpy((void*)P_matrix, (void*)glcm, nbytes_glcm, cudaMemcpyHostToDevice);
//PROPERTY_glcm Property_glcm;
//cudaMalloc((void**)&Property_glcm, sizeof(PROPERTY_glcm));
//cudaMalloc((void**)&(Property_glcm.P), sizeof(int) * NA * Ng * Ng * batch_size);
//Property_glcm->P = glcm;
if (Property_glcm->Pn != NULL)
{
delete Property_glcm->Pn;
Property_glcm->Pn = NULL;
//printf("Property_glcm->Pn != NULL! \n");
}
if (Property_glcm->s != NULL)
{
delete Property_glcm->s;
Property_glcm->s = NULL;
//printf("Property_glcm->s != NULL! \n");
}
if (Property_glcm->Px != NULL)
{
delete Property_glcm->Px;
Property_glcm->Px = NULL;
//printf("Property_glcm->Px != NULL! \n");
}
if (Property_glcm->Py != NULL)
{
delete Property_glcm->Py;
Property_glcm->Py = NULL;
//printf("Property_glcm->Py != NULL! \n");
}
if (Property_glcm->ux != NULL)
{
delete Property_glcm->ux;
Property_glcm->ux = NULL;
//printf("Property_glcm->ux != NULL! \n");
}
if (Property_glcm->uy != NULL)
{
delete Property_glcm->uy;
Property_glcm->uy = NULL;
//printf("Property_glcm->uy != NULL! \n");
}
if (Property_glcm->Dx != NULL)
{
delete Property_glcm->Dx;
Property_glcm->Dx = NULL;
//printf("Property_glcm->Dx != NULL! \n");
}
if (Property_glcm->Dy != NULL)
{
delete Property_glcm->Dy;
Property_glcm->Dy = NULL;
//printf("Property_glcm->Dy != NULL! \n");
}
if (Property_glcm->Pxay != NULL)
{
delete Property_glcm->Pxay;
Property_glcm->Pxay = NULL;
//printf("Property_glcm->Pxay != NULL! \n");
}
if (Property_glcm->Pxsy != NULL)
{
delete Property_glcm->Pxsy;
Property_glcm->Pxsy = NULL;
//printf("Property_glcm->Pxsy != NULL! \n");
}
if (Property_glcm->HX != NULL)
{
delete Property_glcm->HX;
Property_glcm->HX = NULL;
//printf("Property_glcm->HX != NULL! \n");
}
if (Property_glcm->HY != NULL)
{
delete Property_glcm->HY;
Property_glcm->HY = NULL;
//printf("Property_glcm->HY != NULL! \n");
}
if (Property_glcm->HXY != NULL)
{
delete Property_glcm->HXY;
Property_glcm->HXY = NULL;
//printf("Property_glcm->HXY != NULL! \n");
}
if (Property_glcm->HXY1 != NULL)
{
delete Property_glcm->HXY1;
Property_glcm->HXY1 = NULL;
//printf("Property_glcm->HXY1 != NULL! \n");
}
if (Property_glcm->HXY2 != NULL)
{
delete Property_glcm->HXY2;
Property_glcm->HXY2 = NULL;
//printf("Property_glcm->HXY2 != NULL! \n");
}
if (Property_glcm->maxp != NULL)
{
delete Property_glcm->maxp;
Property_glcm->maxp = NULL;
//printf("Property_glcm->maxp != NULL! \n");
}
cudaDeviceSynchronize();
cudaMalloc((void**)&(Property_glcm->s), sizeof(float) * NA * batch_size);
dim3 grids_s(1, 1, batch_size);
dim3 threads_s(1, NA);
initialize_tex<<<grids_s, threads_s>>>(Property_glcm->s);
cudaMalloc((void**)&Property_glcm->Pn, sizeof(float) * Ng * Ng * NA * batch_size);
dim3 grids_Pn(Ng/8, Ng/8, batch_size/4);
dim3 threads_Pn(16, 16);
initialize_tex<<<grids_Pn, threads_Pn>>>(Property_glcm->Pn);
cudaMalloc((void**)&Property_glcm->Px, sizeof(float) * Ng * NA * batch_size);
dim3 grids_Px(Ng, 1, batch_size);
dim3 threads_Px(1, NA);
initialize_tex<<<grids_Px, threads_Px>>>(Property_glcm->Px);
cudaMalloc((void**)&Property_glcm->Py, sizeof(float) * Ng * NA * batch_size);
dim3 grids_Py(Ng, 1, batch_size);
dim3 threads_Py(1, NA);
initialize_tex<<<grids_Py, threads_Py>>>(Property_glcm->Py);
cudaMalloc((void**)&Property_glcm->Pxay, sizeof(float) * Ng * NA * 2 * batch_size);
dim3 grids_Pxay(Ng, 2, batch_size);
dim3 threads_Pxay(1, NA);
initialize_tex<<<grids_Pxay, threads_Pxay>>>(Property_glcm->Pxay);
cudaMalloc((void**)&Property_glcm->Pxsy, sizeof(float) * Ng * NA * batch_size);
dim3 grids_Pxsy(Ng, 1, batch_size);
dim3 threads_Pxsy(1, NA);
initialize_tex<<<grids_Pxsy, threads_Pxsy>>>(Property_glcm->Pxsy);
cudaMalloc((void**)&Property_glcm->ux, sizeof(float) * NA * batch_size);
dim3 grids_ux(1, 1, batch_size);
dim3 threads_ux(1, NA);
initialize_tex<<<grids_ux, threads_ux>>>(Property_glcm->ux);
cudaMalloc((void**)&Property_glcm->uy, sizeof(float) * NA * batch_size);
dim3 grids_uy(1, 1, batch_size);
dim3 threads_uy(1, NA);
initialize_tex<<<grids_uy, threads_uy>>>(Property_glcm->uy);
cudaMalloc((void**)&Property_glcm->Dx, sizeof(float) * NA * batch_size);
dim3 grids_Dx(1, 1, batch_size);
dim3 threads_Dx(1, NA);
initialize_tex<<<grids_Dx, threads_Dx>>>(Property_glcm->Dx);
cudaMalloc((void**)&Property_glcm->Dy, sizeof(float) * NA * batch_size);
dim3 grids_Dy(1, 1, batch_size);
dim3 threads_Dy(1, NA);
initialize_tex<<<grids_Dy, threads_Dy>>>(Property_glcm->Dy);
cudaMalloc((void**)&Property_glcm->HX, sizeof(float) * NA * batch_size);
dim3 grids_HX(1, 1, batch_size);
dim3 threads_HX(1, NA);
initialize_tex<<<grids_HX, threads_HX>>>(Property_glcm->HX);
cudaMalloc((void**)&Property_glcm->HY, sizeof(float) * NA * batch_size);
dim3 grids_HY(1, 1, batch_size);
dim3 threads_HY(1, NA);
initialize_tex<<<grids_HY, threads_HY>>>(Property_glcm->HY);
cudaMalloc((void**)&Property_glcm->HXY, sizeof(float) * NA * batch_size);
dim3 grids_HXY(1, 1, batch_size);
dim3 threads_HXY(1, NA);
initialize_tex<<<grids_HXY, threads_HXY>>>(Property_glcm->HXY);
cudaMalloc((void**)&Property_glcm->HXY1, sizeof(float) *NA * batch_size);
dim3 grids_HXY1(1, 1, batch_size);
dim3 threads_HXY1(1, NA);
initialize_tex<<<grids_HXY1, threads_HXY1>>>(Property_glcm->HXY1);
cudaMalloc((void**)&Property_glcm->HXY2, sizeof(float) * NA * batch_size);
dim3 grids_HXY2(1, 1, batch_size);
dim3 threads_HXY2(1, NA);
initialize_tex<<<grids_HXY2, threads_HXY2>>>(Property_glcm->HXY2);
cudaMalloc((void**)&Property_glcm->maxp, sizeof(int) * NA * batch_size);
dim3 grids_maxp(1, 1, batch_size);
dim3 threads_maxp(1, NA);
initialize<<<grids_maxp, threads_maxp>>>(Property_glcm->maxp);
cudaMalloc((void**)&Property_glcm->DA, sizeof(float) * NA * batch_size);
dim3 grids_DA(1, 1, batch_size);
dim3 threads_DA(1, NA);
initialize_tex<<<grids_DA, threads_DA>>>(Property_glcm->DA);
cudaDeviceSynchronize();
//printf("Property_glcm initialized! \n");
dim3 grids(Ng/8, Ng/8, batch_size);
dim3 threads(64, NA);
//printf("getting property_glcm CUDA \n");
//Calculate_GLCM_Property_glcm_kernel<<<grids, threads>>>(Property_glcm, P_matrix, NA, Ng, Epsilon);
//Property_glcm->epsilon = Epsilon;
//Property_glcm.P = glcm;
//printf("P %d \n", Property_glcm.P[10000]);
//printf("Get Property_glcm P! \n");
GLCM_sum<<<grids, threads>>>(Property_glcm->P, Property_glcm->s, Ng, NA);
GLCM_Pn<<<grids, threads>>>(Property_glcm->P, Property_glcm->Pn, Property_glcm->s, Ng, NA, Epsilon);
GLCM_Property1<<<grids, threads>>>(
Property_glcm->Pn,
Property_glcm->Px,
Property_glcm->Py,
Property_glcm->ux,
Property_glcm->uy,
Property_glcm->Pxay,
Property_glcm->Pxsy,
Ng,
NA,
Epsilon);
GLCM_Property2<<<grids, threads>>>(Property_glcm->P,
Property_glcm->s,
Property_glcm->Pn,
Property_glcm->Px,
Property_glcm->Py,
Property_glcm->ux,
Property_glcm->uy,
Property_glcm->Dx,
Property_glcm->Dy,
Property_glcm->Pxay,
Property_glcm->Pxsy,
Property_glcm->HX,
Property_glcm->HY,
Property_glcm->HXY,
Property_glcm->HXY1,
Property_glcm->HXY2,
Property_glcm->maxp,
Property_glcm->DA,
Ng,
NA,
Epsilon);
/*
GLCM_Px<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->Px, Ng, NA);
//printf("Get Property_glcm Px! \n");
cudaDeviceSynchronize();
GLCM_Py<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->Py, Ng, NA);
//printf("Get Property_glcm Py! \n");
cudaDeviceSynchronize();
GLCM_ux<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->ux, Ng, NA);
//printf("Get Property_glcm ux! \n");
cudaDeviceSynchronize();
GLCM_uy<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->uy, Ng, NA);
//printf("Get Property_glcm uy! \n");
cudaDeviceSynchronize();
GLCM_Dx<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->ux, Property_glcm->Dx, Ng, NA);
//printf("Get Property_glcm Dx! \n");
cudaDeviceSynchronize();
GLCM_Dy<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->uy, Property_glcm->Dy, Ng, NA);
//printf("Get Property_glcm Dy! \n");
cudaDeviceSynchronize();
//printf("Get Property_glcm Dx! \n");
GLCM_Pxay<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->Pxay, Ng, NA);
//printf("Get Property_glcm Pxay! \n");
cudaDeviceSynchronize();
GLCM_Pxsy<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->Pxsy, Ng, NA);
//printf("Get Property_glcm Pxsy! \n");
cudaDeviceSynchronize();
//printf("Get Property_glcm Pxay! \n");
GLCM_HX<<<grids, threads>>>(Property_glcm->Px, Property_glcm->HX, Epsilon, Ng, NA);
//printf("Get Property_glcm HX! \n");
cudaDeviceSynchronize();
GLCM_HY<<<grids, threads>>>(Property_glcm->Py, Property_glcm->HY, Epsilon, Ng, NA);
//printf("Get Property_glcm HY! \n");
cudaDeviceSynchronize();
GLCM_HXY<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->HXY, Epsilon, Ng, NA);
//printf("Get Property_glcm HXY \n");
cudaDeviceSynchronize();
GLCM_HXY1<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->Px, Property_glcm->Py, Property_glcm->HXY1, Epsilon, Ng, NA);
//printf("Get Property_glcm HXY1! \n");
cudaDeviceSynchronize();
GLCM_HXY2<<<grids, threads>>>(Property_glcm->Pn, Property_glcm->Px, Property_glcm->Py, Property_glcm->HXY2, Epsilon, Ng, NA);
//printf("Get Property_glcm HXY2! \n");
cudaDeviceSynchronize();
//printf("Get Property_glcm H! \n");
GLCM_maxp<<<grids, threads>>>(Property_glcm->P, Property_glcm->maxp, Ng, NA);
//printf("Get Property_glcm maxp! \n");
cudaDeviceSynchronize();
GLCM_DA<<<grids, threads>>>(Property_glcm->DA, Property_glcm->Pxsy, Ng, NA);
*/
//printf("Get Property_glcm maxp! \n");
//printf("ux: %d, %d, %d, %d \n", Property_glcm.P[0], Property_glcm.P[1], Property_glcm.P[2], Property_glcm.P[3]);
//HANDLE_ERROR(cudaDeviceSynchronize());
//STOP_TIMER(time)
//printf("getting Property_glcm: ");
//PRINT_TIME(time)
//printf("\n");
//printf("CUDA property_glcm finished \n");
//cudaFree(glcm);
//return Property_glcm;
//HANDLE_ERROR(cudaDeviceSynchronize());
}
void Calculate_GLCM_Texture_rl(PROPERTY_glcm *Property_glcm, float *texture_glcm, float Epsilon, int Ng, int NA, int batch_size)
{
//START_TIMER(time)
float *Texture_glcm = (float*)malloc(sizeof(float) * 23 * batch_size);
//float *texture_glcm = NULL;
dim3 grids1(1, 1, batch_size);
dim3 threads1(23, 1);
//cudaMalloc((void**)&texture_glcm, sizeof(float) * NA * batch_size);
initialize_tex<<<grids1, threads1>>>(texture_glcm);
cudaDeviceSynchronize();
//printf("Texture_glcm initialized! \n");
dim3 grids(Ng/8, Ng/8, batch_size);
dim3 threads(64, NA);
glcm_features<<<grids, threads>>>(texture_glcm,Property_glcm->s,Property_glcm->Pn,Property_glcm->ux,Property_glcm->uy,
Property_glcm->Dx, Property_glcm->Dy,Property_glcm->Pxsy,Property_glcm->Pxay,Property_glcm->HX,
Property_glcm->HY,Property_glcm->HXY,Property_glcm->HXY1,Property_glcm->HXY2,Property_glcm->maxp,
Property_glcm->DA,batch_size,Ng,NA,Epsilon);
cudaDeviceSynchronize();
//STOP_TIMER(time)
/*
printf("getting Texture_glcm: \n");
printf("f1_AutoCorrelation: %f \n", Texture_glcm[0 * batch_size + 5]/4);
printf("f2_JointAverage: %f \n", Texture_glcm[1 * batch_size + 5]/4);
printf("f3_CLusterProminence: %f \n", Texture_glcm[2 * batch_size + 5]/4);
printf("f4_ClusterShade: %f \n", Texture_glcm[3 * batch_size + 5]/4);
printf("f5_ClusterTendency: %f \n", Texture_glcm[4 * batch_size + 5]/4);
printf("f6_Contrast: %f \n", Texture_glcm[5 * batch_size + 5]/4);
printf("f7_Correlation: %f \n", Texture_glcm[6 * batch_size + 5]/4);
printf("f8_DifferenceAverage: %f \n", Texture_glcm[7 * batch_size + 5]/4);
printf("f9_DifferenceEntropy: %f \n", Texture_glcm[8 * batch_size + 5]/4);
printf("f10_DifferenceVariance: %f \n", Texture_glcm[9 * batch_size + 5]/4);
printf("f11_JointEnergy: %f \n", Texture_glcm[10 * batch_size + 5]/4);
printf("f12_JointEntropy: %f \n", Texture_glcm[11 * batch_size + 5]/4);
printf("f13_IMC1: %f \n", Texture_glcm[12 * batch_size + 5]/4);
printf("f14_IMC2: %f \n", Texture_glcm[13 * batch_size + 5]/4);
printf("f15_IDM: %f \n", Texture_glcm[14 * batch_size + 5]/4);
printf("f17_IDMN: %f \n", Texture_glcm[15 * batch_size + 5]/4);
printf("f18_ID: %f \n", Texture_glcm[16 * batch_size + 5]/4);
printf("f19_IDN: %f \n", Texture_glcm[17 * batch_size +5]/4);
printf("f20_InverseVariance: %f \n", Texture_glcm[18 * batch_size + 5]/4);
printf("f21_MaximumProbability: %f \n", Texture_glcm[19 * batch_size + 5]/4);
printf("f22_SumAverage: %f \n", Texture_glcm[20 * batch_size + 5]/4);
printf("f23_SumEntropy: %f \n", Texture_glcm[21 * batch_size + 5]/4);
printf("f24_SumSquares: %f \n", Texture_glcm[22 * batch_size + 5]/4);
//PRINT_TIME(time)
printf("\n");
printf("CUDA Texture_glcm finished \n");
*/
//delete Property_glcm;
free(Texture_glcm);
}
__global__ void GLCM_Property(int *P,
float *s,
float *Pn,
float *Px,
float *Py,
float *ux,
float *uy,
float *Dx,
float *Dy,
float *Pxay,
float *Pxsy,
float *HX,
float *HY,
float *HXY,
float *HXY1,
float *HXY2,
int *maxp,
float *DA,
int Ng,
int NA,
float epsilon
)
{
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ia, ipix, img_ith, ix, iy;
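// Decompose the flat thread index ip into (image, gray level ix, gray level iy, angle ia)
// of the per-image Ng x Ng x NA co-occurrence matrix; all statistics below are accumulated atomically.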
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ia = ipix % NA;
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
atomicAdd(&s[img_ith * NA + ia], float(P[ip]));
//cudaDeviceSynchronize();
atomicExch(&Pn[ip], float(P[ip])/(s[img_ith * NA + ia] + epsilon));
//cudaDeviceSynchronize();
atomicAdd(&Px[img_ith * Ng * NA + ix * NA + ia], Pn[ip]);
atomicAdd(&Py[img_ith * Ng * NA + iy * NA + ia], Pn[ip]);
atomicAdd(&Pxay[img_ith * 2 * Ng * NA + ix * NA + iy * NA + ia], Pn[ip]);
atomicAdd(&Pxsy[img_ith * Ng * NA + abs(ix - iy) * NA + ia], Pn[ip]);
atomicAdd(&ux[img_ith * NA + ia], Pn[ip] * (ix + 1));
atomicAdd(&uy[img_ith * NA + ia], Pn[ip] * (iy + 1));
//cudaDeviceSynchronize();
atomicAdd(&Dx[img_ith * NA + ia], powf(ix + 1 - ux[img_ith * NA + ia], 2) * Pn[ip]);
atomicAdd(&Dy[img_ith * NA + ia], powf(iy + 1 - uy[img_ith * NA + ia], 2) * Pn[ip]);
//cudaDeviceSynchronize();
atomicAdd(&HX[img_ith * NA + ia], float(iy==0) * (-Px[img_ith * Ng * NA + ix * NA + ia] * log2f(Px[img_ith * Ng * NA + ix * NA + ia] + epsilon)));
atomicAdd(&HY[img_ith * NA + ia], float(ix==0) * (-Py[img_ith * NA * Ng + iy * NA + ia] * log2f(Py[img_ith * NA * Ng + iy * NA + ia] + epsilon)));
atomicAdd(&HXY[img_ith * NA + ia], -Pn[ip] * log2f(Pn[ip] + epsilon));
atomicAdd(&HXY1[img_ith * NA + ia], -Pn[ip] * log2f(Px[img_ith * NA * Ng + ix * NA + ia] * Py[img_ith * NA * Ng + iy * NA + ia] + epsilon));
atomicAdd(&HXY2[img_ith * NA + ia],
-Px[img_ith * NA * Ng + ix * NA + ia]
* Py[img_ith * NA * Ng + iy * NA + ia]
* log2f(Px[img_ith * NA * Ng + ix * NA + ia] * Py[img_ith * NA * Ng + iy * NA + ia] + epsilon));
atomicMax(&maxp[img_ith * NA + ia], P[ip]);
atomicAdd(&DA[img_ith * NA + ia], float(ix) * Pxsy[img_ith * NA * Ng + ix * NA + ia] * float(iy == 0));
}
__global__ void GLCM_Property2(int *P,
float *s,
float *Pn,
float *Px,
float *Py,
float *ux,
float *uy,
float *Dx,
float *Dy,
float *Pxay,
float *Pxsy,
float *HX,
float *HY,
float *HXY,
float *HXY1,
float *HXY2,
int *maxp,
float *DA,
int Ng,
int NA,
float epsilon
)
{
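// Second pass: uses the marginals and means produced by GLCM_Property1 (Px, Py, ux, uy, Pxsy)
// to accumulate variances (Dx, Dy), entropies (HX, HY, HXY, HXY1, HXY2), the maximum count and DA.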
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ia, ipix, img_ith, ix, iy;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ia = ipix % NA;
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
atomicAdd(&Dx[img_ith * NA + ia], powf(ix + 1 - ux[img_ith * NA + ia], 2) * Pn[ip]);
atomicAdd(&Dy[img_ith * NA + ia], powf(iy + 1 - uy[img_ith * NA + ia], 2) * Pn[ip]);
//cudaDeviceSynchronize();
atomicAdd(&HX[img_ith * NA + ia], float(iy==0) * (-Px[img_ith * Ng * NA + ix * NA + ia] * log2f(Px[img_ith * Ng * NA + ix * NA + ia] + epsilon)));
atomicAdd(&HY[img_ith * NA + ia], float(ix==0) * (-Py[img_ith * NA * Ng + iy * NA + ia] * log2f(Py[img_ith * NA * Ng + iy * NA + ia] + epsilon)));
atomicAdd(&HXY[img_ith * NA + ia], -Pn[ip] * log2f(Pn[ip] + epsilon));
atomicAdd(&HXY1[img_ith * NA + ia], -Pn[ip] * log2f(Px[img_ith * NA * Ng + ix * NA + ia] * Py[img_ith * NA * Ng + iy * NA + ia] + epsilon));
atomicAdd(&HXY2[img_ith * NA + ia],
-Px[img_ith * NA * Ng + ix * NA + ia]
* Py[img_ith * NA * Ng + iy * NA + ia]
* log2f(Px[img_ith * NA * Ng + ix * NA + ia] * Py[img_ith * NA * Ng + iy * NA + ia] + epsilon));
atomicMax(&maxp[img_ith * NA + ia], P[ip]);
atomicAdd(&DA[img_ith * NA + ia], float(ix) * Pxsy[img_ith * NA * Ng + ix * NA + ia] * float(iy == 0));
}
__global__ void GLCM_Property1(
float *Pn,
float *Px,
float *Py,
float *ux,
float *uy,
float *Pxay,
float *Pxsy,
int Ng,
int NA,
float epsilon
)
{
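// First pass over the normalized GLCM: accumulate the marginals Px and Py, the sum/difference
// distributions Pxay and Pxsy, and the means ux and uy.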
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ia, ipix, img_ith, ix, iy;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ia = ipix % NA;
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
atomicAdd(&Px[img_ith * Ng * NA + ix * NA + ia], Pn[ip]);
atomicAdd(&Py[img_ith * Ng * NA + iy * NA + ia], Pn[ip]);
atomicAdd(&Pxay[img_ith * 2 * Ng * NA + ix * NA + iy * NA + ia], Pn[ip]);
atomicAdd(&Pxsy[img_ith * Ng * NA + abs(ix - iy) * NA + ia], Pn[ip]);
atomicAdd(&ux[img_ith * NA + ia], Pn[ip] * (ix + 1));
atomicAdd(&uy[img_ith * NA + ia], Pn[ip] * (iy + 1));
//cudaDeviceSynchronize();
}
__global__ void calculate_glcm_kernel_rl(int *image, int *glcm, int *dev_size, int *dev_stride, int *dev_angles, int dev_ng, int dev_na)
{
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int j, glcm_idx, a, iz, iy, ix;
int img_ith, ipix;
img_ith = ip / (dev_size[0] * dev_size[1]);
ipix = ip % (dev_size[0] * dev_size[1]);
ix = ipix / dev_stride[0];
iy = ipix % dev_stride[0];
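// For every angle, if the neighbour at offset dev_angles[a] lies inside the image, bump the
// co-occurrence bin for (gray level at ip, gray level at the neighbour, angle a); the
// int(image > -1) factors zero out contributions from masked (negative) pixels.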
for (a = 0; a < dev_na; a++)
{
if (ix + dev_angles[a * 2] >= 0 && ix + dev_angles[a * 2] < dev_size[0] &&
iy + dev_angles[a * 2 + 1] >= 0 && iy + dev_angles[a * 2 + 1] < dev_size[1])
{
j = ip + dev_angles[a * 2] * dev_stride[0] + dev_angles[a * 2 + 1] * dev_stride[1];
glcm_idx = int(image[ip] > -1) * int(image[j] > -1) * (a + (image[j]-1) * dev_na + (image[ip]-1) * dev_na * dev_ng + img_ith * dev_ng * dev_ng * dev_na);
if (glcm_idx>0){
atomicAdd(&glcm[glcm_idx], 1 * int(image[ip] > -1) * int(image[j] > -1));}
}
}
}
__global__ void GLCM_sum(int *P, float *s, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ia = ipix % NA;
atomicAdd(&s[img_ith * NA + ia], float(P[ip]));
}
__global__ void GLCM_Pn(int *P, float *Pn, float *sum, int Ng, int NA, float epsilon){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ia = ipix % NA;
atomicExch(&Pn[ip], float(P[ip])/(sum[img_ith * NA + ia] + epsilon));
}
__global__ void GLCM_Px(float *P, float *Px, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&Px[img_ith * Ng * NA + ix * NA + ia], P[ip]);
}
__global__ void GLCM_Py(float *P, float *Py, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&Py[img_ith * Ng * NA + iy * NA + ia], P[ip]);
}
__global__ void GLCM_ux(float *P, float *ux, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
ia = ipix % NA;
atomicAdd(&ux[img_ith * NA + ia], P[ip] * (ix + 1));
}
__global__ void GLCM_uy(float *P, float *uy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip= blocks * blockDim.x * blockDim.y + threads;
int iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&uy[img_ith * NA + ia], P[ip] * (iy + 1));
}
__global__ void GLCM_Dx(float *P, float *ux, float *Dx, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
ia = ipix % NA;
atomicAdd(&Dx[img_ith * NA + ia], powf(ix + 1 - ux[img_ith * NA + ia], 2) * P[ip]);
//atomicExch(&Dx[img_ith * NA + ia], sqrtf(Dx[img_ith * NA + ia]));
}
__global__ void GLCM_Dy(float *P, float *uy, float *Dy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&Dy[img_ith * NA + ia], powf(iy + 1 - uy[img_ith * NA + ia], 2) * P[ip]);
//atomicExch(&Dy[img_ith * NA + ia], sqrtf(Dy[img_ith * NA + ia]));
}
__global__ void GLCM_Pxay(float *P, float *Pxay, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, img_ith, ipix;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&Pxay[img_ith * 2 * Ng * NA + ix * NA + iy * NA + ia], P[ip]);
}
__global__ void GLCM_Pxsy(float *P, float *Pxsy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&Pxsy[img_ith * Ng * NA + abs(ix - iy) * NA + ia], P[ip]);
}
__global__ void GLCM_HX(float *Px, float *HX, float epsilon, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
//for(i = 0; i< Ng; i++)
//{HX[0] -= Px[ipix] * log2f(Px[ipix] + epsilon);}
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&HX[img_ith * NA + ia], float(iy==0) * (-Px[img_ith * Ng * NA + ix * NA + ia] * log2f(Px[img_ith * Ng * NA + ix * NA + ia] + epsilon)));
//atomicExch(&HX[0], sum);
}
__global__ void GLCM_HY(float *Py, float *HY, float epsilon, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
//for(i = 0; i< Ng; i++)
//{HX[0] -= Px[ipix] * log2f(Px[ipix] + epsilon);}
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&HY[img_ith * NA + ia], float(ix==0) * (-Py[img_ith * NA * Ng + iy * NA + ia] * log2f(Py[img_ith * NA * Ng + iy * NA + ia] + epsilon)));
//atomicExch(&HX[0], sum);
}
__global__ void GLCM_HXY(float *P, float *HXY, float epsilon, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ia = ipix % NA;
atomicAdd(&HXY[img_ith * NA + ia], -P[ip] * log2f(P[ip] + epsilon));
//atomicExch(&HX[0], sum);
}
__global__ void GLCM_HXY1(float *P, float *Px, float *Py, float *HXY1, float epsilon, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&HXY1[img_ith * NA + ia], -P[ip] * log2f(Px[img_ith * NA * Ng + ix * NA + ia] * Py[img_ith * NA * Ng + iy * NA + ia] + epsilon));
//atomicExch(&HXY1[0], sum);
}
__global__ void GLCM_HXY2(float *P, float *Px, float *Py, float *HXY2, float epsilon, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&HXY2[img_ith * NA + ia],
-Px[img_ith * NA * Ng + ix * NA + ia]
* Py[img_ith * NA * Ng + iy * NA + ia]
* log2f(Px[img_ith * NA * Ng + ix * NA + ia] * Py[img_ith * NA * Ng + iy * NA + ia] + epsilon));
//atomicExch(&HXY2[0], sum);
}
__global__ void GLCM_maxp(int *P, int *maxp, int Ng, int NA){
//float dst[4];
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicMax(&maxp[img_ith * NA + ia], P[ip]);
}
__global__ void GLCM_DA(float *DA, float *Pxsy, int Ng, int NA) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&DA[img_ith * NA + ia], float(ix) * Pxsy[img_ith * NA * Ng + ix * NA + ia] * float(iy == 0));
//*rst /= NA;
}
/* FEATURE EXTRACTION*/
__global__ void glcm_features(float *rst,
float *s,
float *Pn,
float *ux,
float *uy,
float *Dx,
float *Dy,
float *Pxsy,
float *Pxay,
float *HX,
float *HY,
float *HXY,
float *HXY1,
float *HXY2,
int *maxp,
float *DA,
int batch_size,
int Ng,
int NA,
float epsilon){
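// rst is laid out feature-major: rst[f * batch_size + img_ith] holds texture feature f of one image.
// Each thread adds its GLCM cell's contribution to all 23 features; the trailing / NA averages over the angles.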
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
int ix_2122 = ipix / NA / (Ng / 2);
int iy_2122 = ipix / NA % (Ng / 2);
atomicAdd(&rst[0 * batch_size + img_ith], Pn[ip] * (ix + 1) * (iy + 1) / NA);
atomicAdd(&rst[1 * batch_size + img_ith], Pn[ip] * float(ix + 1) / NA);
atomicAdd(&rst[2 * batch_size + img_ith], powf((float(ix + 1) + float(iy + 1) - ux[img_ith * NA + ia] - uy[img_ith * NA + ia]), 4) * Pn[ip] / NA);
atomicAdd(&rst[3 * batch_size + img_ith], powf((float(ix + 1) + float(iy + 1) - ux[img_ith * NA + ia] - uy[img_ith * NA + ia]), 3) * Pn[ip] / NA);
atomicAdd(&rst[4 * batch_size + img_ith], powf((float(ix + 1) + float(iy + 1) - ux[img_ith * NA + ia] - uy[img_ith * NA + ia]), 2) * Pn[ip] / NA);
atomicAdd(&rst[5 * batch_size + img_ith], powf((ix - iy), 2) * Pn[ip] / NA);
atomicAdd(&rst[6 * batch_size + img_ith], Pn[ip] * (ix + 1 - ux[img_ith * NA + ia]) * (iy + 1 - uy[img_ith * NA + ia]) /(sqrtf(Dx[img_ith * NA + ia] * Dy[img_ith * NA + ia]) + epsilon) /NA);
atomicAdd(&rst[7 * batch_size + img_ith], float(ix) * Pxsy[img_ith * NA * Ng + ix * NA + ia] * float(iy == 0) / NA);
atomicAdd(&rst[8 * batch_size + img_ith], -Pxsy[img_ith * NA * Ng + ix * NA + ia] * log2f(Pxsy[img_ith * NA * Ng + ix * NA + ia] + epsilon) * float(iy == 0) / NA);
atomicAdd(&rst[9 * batch_size + img_ith], powf(float(ix) - DA[img_ith * NA + ia], 2) * Pxsy[img_ith * Ng * NA + ix * NA + ia] * float(iy == 0) / NA);
atomicAdd(&rst[10 * batch_size + img_ith], powf(Pn[ip], 2) / NA);
atomicAdd(&rst[11 * batch_size + img_ith], -Pn[ip] * log2f(Pn[ip] + epsilon) / NA);
atomicAdd(&rst[12 * batch_size + img_ith], float(ix == 0) * float(iy == 0) * (HXY[img_ith * NA + ia] - HXY1[img_ith * NA + ia]) / max(HX[img_ith * NA + ia], HY[img_ith * NA + ia]) / NA);
atomicAdd(&rst[13 * batch_size + img_ith], float(ix == 0) * float(iy == 0) * sqrtf(abs(1 - powf(M_E, -2 * (HXY2[img_ith * NA + ia] - HXY[img_ith * NA + ia])))) / NA);
atomicAdd(&rst[14 * batch_size + img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / (1 + powf(ix, 2)) * float(iy == 0) / NA);
atomicAdd(&rst[15 * batch_size + img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / (1 + (powf(ix, 2)/powf(Ng, 2))) * float(iy == 0) / NA);
atomicAdd(&rst[16 * batch_size + img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / float(1 + ix) * float(iy == 0) / NA);
atomicAdd(&rst[17 * batch_size + img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / (1 + float(ix) / Ng) * float(iy == 0) / NA);
atomicAdd(&rst[18 * batch_size + img_ith], ix == 0? 0: float(iy == 0) * Pxsy[img_ith * NA * Ng + ix * NA + ia] / powf(ix, 2) / NA );
if (ix == 0 and iy == 0)
{atomicAdd(&rst[19 * batch_size + img_ith], float(maxp[img_ith * NA + ia]) / (s[img_ith * NA + ia] + epsilon) / NA);}
atomicAdd(&rst[20 * batch_size + img_ith], Pxay[img_ith * NA * Ng * 2 + ix_2122 * NA + ia] * (ix_2122 + 2) * float(iy_2122 == 0) / NA);
atomicAdd(&rst[21 * batch_size + img_ith], -Pxay[img_ith * Ng * NA * 2 + ix_2122 * NA + ia] * log2f(Pxay[img_ith * Ng * NA * 2 + ix_2122 * NA + ia] + epsilon) * float(iy_2122 == 0) / NA);
atomicAdd(&rst[22 * batch_size + img_ith], Pn[ip] * powf(float(ix + 1 - ux[img_ith * NA + ia]), 2) / NA);
}
/* Auto Correlation */
__global__ void f1_AutoCorrelation(float *rst, float *P, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], P[ip] * (ix + 1) * (iy + 1));
//*rst /= NA;
}
/* Joint Average */
__global__ void f2_JointAverage(float *rst, float *P, int Ng, int NA) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], P[ip] * float(ix + 1));
//*rst /= NA;
}
/* Cluster Prominence */
__global__ void f3_ClusterProminence(float *rst, float *P, float *ux, float *uy, int Ng, int NA) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], powf((float(ix + 1) + float(iy + 1) - ux[img_ith * NA + ia] - uy[img_ith * NA + ia]), 4) * P[ip]);
//*rst /= NA;
}
/* ClusterShade */
__global__ void f4_ClusterShade(float *rst, float *P, float *ux, float *uy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], powf((float(ix + 1) + float(iy + 1) - ux[img_ith * NA + ia] - uy[img_ith * NA + ia]), 3) * P[ip]);
//*rst /= NA;
}
/* Cluster Tendency */
__global__ void f5_ClusterTendency(float *rst, float *P, float *ux, float *uy, int Ng, int NA) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], powf((float(ix + 1) + float(iy + 1) - ux[img_ith * NA + ia] - uy[img_ith * NA + ia]), 2) * P[ip]);
//*rst /= NA;
}
/* Contrast */
__global__ void f6_Contrast(float *rst, float *P, int Ng, int NA) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], powf((ix - iy), 2) * P[ip]);
//*rst /= NA;
}
/* Correlation */
__global__ void f7_Correlation(float *rst, float *P, float *ux, float *uy, float *Dx, float *Dy, int Ng, int NA, float epsilon) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], P[ip] * (ix + 1 - ux[img_ith * NA + ia]) * (iy + 1 - uy[img_ith * NA + ia]) /(sqrtf(Dx[img_ith * NA + ia] * Dy[img_ith * NA + ia]) + epsilon));
//*rst /= NA;
}
/* Difference Average */
__global__ void f8_DifferenceAverage(float *rst, float *DA, float *Pxsy, int Ng, int NA) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], float(ix) * Pxsy[img_ith * NA * Ng + ix * NA + ia] * float(iy == 0));
atomicAdd(&DA[img_ith * NA + ia], float(ix) * Pxsy[img_ith * NA * Ng + ix * NA + ia] * float(iy == 0));
//*rst /= NA;
}
/* Difference Entropy */
__global__ void f9_DifferenceEntropy(float *rst, float *Pxsy, float epsilon, int Ng, int NA) {
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], -Pxsy[img_ith * NA * Ng + ix * NA + ia] * log2f(Pxsy[img_ith * NA * Ng + ix * NA + ia] + epsilon) * float(iy == 0));
//*rst /= NA;
}
/* Difference Variance */
__global__ void f10_DifferenceVariance(float *rst, float *Pxsy, float *DA, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
//atomicAdd(&DA[ia], float(ix) * Pxsy[ix * NA + ia] * float(iy == 0));
atomicAdd(&rst[img_ith], powf(float(ix) - DA[img_ith * NA + ia], 2) * Pxsy[img_ith * Ng * NA + ix * NA + ia] * float(iy == 0));
//*rst /= NA;
}
/* Joint Energy */
__global__ void f11_JointEnergy(float *rst, float *P, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], powf(P[ip], 2));
//*rst /= NA;
}
/* Joint Entropy */
__global__ void f12_JointEntropy(float *rst, float *P, float epsilon, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], -P[ip] * log2f(P[ip] + epsilon));
//*rst /= NA;
}
/* Information Measures of Correlation */
__global__ void f13_IMC1(float *rst, float *HXY, float *HXY1, float *HX, float *HY, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], float(ix == 0) * float(iy == 0) * (HXY[img_ith * NA + ia] - HXY1[img_ith * NA + ia]) / max(HX[img_ith * NA + ia], HY[img_ith * NA + ia]) );
//*rst /= NA;
}
/* IMC2 */
__global__ void f14_IMC2(float *rst, float *HXY, float *HXY2, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], float(ix == 0) * float(iy == 0) * sqrtf(abs(1 - powf(M_E, -2 * (HXY2[img_ith * NA + ia] - HXY[img_ith * NA + ia])))));
//*rst /= NA;
}
/* Inverse Difference Moment*/
__global__ void f15_IDM(float *rst, float *Pxsy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / (1 + powf(ix, 2)) * float(iy == 0));
//*rst /= 4;
}
/* Inverse Difference Moment Normalized*/
__global__ void f17_IDMN(float *rst, float *Pxsy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / (1 + (powf(ix, 2)/powf(Ng, 2))) * float(iy == 0));
//*rst /= NA;
}
/* Inverse Difference*/
__global__ void f18_ID(float *rst, float *Pxsy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / float(1 + ix) * float(iy == 0));
//*rst /= NA;
}
/* Inverse Difference Normalized*/
__global__ void f19_IDN(float *rst, float *Pxsy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], Pxsy[img_ith * NA * Ng + ix * NA + ia] / (1 + float(ix) / Ng) * float(iy == 0));
//*rst /= NA;
}
/* Inverse Variance*/
__global__ void f20_InverseVariance(float *rst, float *Pxsy, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], ix == 0? 0: float(iy == 0) * Pxsy[img_ith * NA * Ng + ix * NA + ia] / powf(ix, 2) );
//*rst /= NA;
}
/* Maximum Probability*/
__global__ void f21_MaximumProbability(float *rst, float *sum, int *maxp, int Ng, int NA, float epsilon){
//float dst[4];
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
if (ix == 0 and iy == 0)
{atomicAdd(&rst[img_ith], float(maxp[img_ith * NA + ia]) / (sum[img_ith * NA + ia] + epsilon));}
//printf("maxp: %f\n", maxp[0]);
//atomicExch(&rst[img_ith], 0);
}
/* Sum Average*/
__global__ void f22_SumAverage(float *rst, float *Pxay, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / (Ng / 2);
iy = ipix / NA % (Ng / 2);
ia = ipix % NA;
atomicAdd(&rst[img_ith], Pxay[img_ith * NA * Ng * 2 + ix * NA + ia] * (ix + 2) * float(iy == 0));
//*rst /= NA;
}
/* Sum Entropy */
__global__ void f23_SumEntropy(float *rst, float *Pxay, float epsilon, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / (Ng / 2);
iy = ipix / NA % (Ng / 2);
ia = ipix % NA;
atomicAdd(&rst[img_ith], -Pxay[img_ith * Ng * NA * 2 + ix * NA + ia] * log2f(Pxay[img_ith * Ng * NA * 2 + ix * NA + ia] + epsilon) * float(iy == 0));
//*rst /= NA;
}
/*Sum of Squares*/
__global__ void f24_SumSquares(float *rst, float *P, float *ux, int Ng, int NA){
int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
int threads = blockDim.x * threadIdx.y + threadIdx.x;
int ip = blocks * blockDim.x * blockDim.y + threads;
int ix, iy, ia, ipix, img_ith;
img_ith = ip / (Ng * Ng * NA);
ipix = ip % (Ng * Ng * NA);
ix = ipix / NA / Ng;
iy = ipix / NA % Ng;
ia = ipix % NA;
atomicAdd(&rst[img_ith], P[ip] * powf(float(ix + 1 - ux[img_ith * NA + ia]), 2));
//*rst /= NA;
}
|
baea5c12fe87499afcb793bf9db0e10f6dbe5493.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/asin_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void AsinGradientCUDAKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
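// Gradient of asin: dX = dY * d/dx asin(x) = dY / sqrt(1 - x^2)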
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) * rsqrtf(1.0f - __ldg(X + i) * __ldg(X + i));
#else
dX[i] = dY[i] * rsqrtf(1.0f - X[i] * X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool AsinGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& dY_dims,
const std::vector<int>& /* X_dims */,
const T* dY,
const T* X,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( AsinGradientCUDAKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, dY, X, dX);
return true;
}
REGISTER_CUDA_OPERATOR(
Asin,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AsinFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
AsinGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AsinGradientFunctor<CUDAContext>>);
} // namespace caffe2
| baea5c12fe87499afcb793bf9db0e10f6dbe5493.cu | #include "caffe2/operators/asin_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void AsinGradientCUDAKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
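// Gradient of asin: dX = dY * d/dx asin(x) = dY / sqrt(1 - x^2)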
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) * rsqrtf(1.0f - __ldg(X + i) * __ldg(X + i));
#else
dX[i] = dY[i] * rsqrtf(1.0f - X[i] * X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool AsinGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& dY_dims,
const std::vector<int>& /* X_dims */,
const T* dY,
const T* X,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies<int>());
AsinGradientCUDAKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
return true;
}
REGISTER_CUDA_OPERATOR(
Asin,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AsinFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
AsinGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AsinGradientFunctor<CUDAContext>>);
} // namespace caffe2
|
f55f8ad3b8d8f23e2fd055ec1d5ad8ce39aeba0f.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <unistd.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
extern "C" {
#include "utils.c"
}
#define WARP_SIZE 16
#define DEBUG false
/* ---------------- [[CUDA KERNELS]] ---------------- */
__global__ void updateWeightsCUDA(float *weights, float *changes, float *delta_outputs, float *inputs, int n_inputs, int n_outputs) {
int width = n_outputs;
int height = n_inputs;
GlobalDim gd = getGlobalDim(blockDim, blockIdx, threadIdx);
if ((gd.x < width) && (gd.y < height)) {
int idx = width * gd.y + gd.x;
float change = delta_outputs[gd.x] * inputs[gd.y];
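// Momentum-style update: blend the current gradient term (0.5 * change) with the previous
// change stored in changes[idx], then remember the new change for the next step.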
weights[idx] += 0.5 * change + 0.5 * changes[idx];
changes[idx] = change;
}
}
__global__ void mapStepCUDA(float *inputs, float *matrix, float *buffer, int width, int height) {
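// Map step of the layer update: buffer[y][x] = inputs[y] * matrix[y][x]; the column-wise sums
// (computed afterwards by reduceStepCUDA) give the weighted input of each destination neuron.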
GlobalDim gd = getGlobalDim(blockDim, blockIdx, threadIdx);
if ((gd.x < width) && (gd.y < height)) {
int idx = width * gd.y + gd.x;
buffer[idx] = inputs[gd.y] * matrix[idx];
}
}
__global__ void reduceStepCUDA(float *input, float *output, int width, int height) {
__shared__ float sharedMemory[WARP_SIZE * WARP_SIZE];
// STEP 1: exclude all threads that do not depend from problem
GlobalDim gd = getGlobalDim(blockDim, blockIdx, threadIdx);
if ((gd.x < width) && (gd.y < height)) {
// STEP 2: Move to shared memory
int gridId = gd.y * width + gd.x;
int blockId = threadIdx.y * blockDim.x + threadIdx.x;
sharedMemory[blockId] = input[gridId];
__syncthreads();
int n = (int)ceil((float)blockDim.y/2);
while(n >= 1) {
if (threadIdx.y < n) {
if ((gd.y + n) < height) {
int firstIndex = blockId;
int secondIndex = blockDim.x * (threadIdx.y + n) + threadIdx.x;
sharedMemory[firstIndex] += sharedMemory[secondIndex];
}
}
__syncthreads();
if (n == 1) {
break;
} else {
n = (int)ceil((float)n/2);
}
}
__syncthreads();
// STEP 3: Write back results
if (threadIdx.y == 1) {
output[blockIdx.y * width + gd.x] = sharedMemory[threadIdx.x];
}
}
}
/* ---------------- [[LAUNCH FUNCTIONS]] ---------------- */
void setWeightsForLayers(float *weights, float *changes, float *delta_outputs, float *inputs, int n_inputs, int n_outputs) {
// to device memory
int grid_size = n_inputs * n_outputs;
float *weights_d = _copyHostDevice(weights, grid_size);
float *changes_d = _copyHostDevice(changes, grid_size);
float *delta_outputs_d = _copyHostDevice(delta_outputs, n_outputs);
float *inputs_d = _copyHostDevice(inputs, n_inputs);
// Define block structure
dim3 block(WARP_SIZE, WARP_SIZE);
dim3 grid = getGridBasedOnBlockSize(n_outputs, n_inputs, WARP_SIZE);
// RUN RUN RUN!
hipLaunchKernelGGL(( updateWeightsCUDA), dim3(grid), dim3(block), 0, 0, weights_d, changes_d, delta_outputs_d, inputs_d, n_inputs, n_outputs);
// Copy back weights and momenutm
weights = _copyDeviceHost(weights_d, grid_size, weights);
changes = _copyDeviceHost(changes_d, grid_size, changes);
}
void update_layer(float *src_layer, float *dst_layer, int src_n, int dst_n, float *weights) {
dim3 block(WARP_SIZE, WARP_SIZE);
float *src_layer_d, *weights_d, *buffer_d;
int total = src_n * dst_n;
// Allocate input in global memory
src_layer_d = _copyHostDevice(src_layer, src_n);
weights_d = _copyHostDevice(weights, total);
hipMalloc((void**)&buffer_d, sizeof(float) * total);
// Create block dimensions and run parallel update layer
int gridX = (int)ceil((float)dst_n/WARP_SIZE);
int gridY = (int)ceil((float)src_n/WARP_SIZE);
dim3 grid(gridX, gridY);
// RUN RUN RUN!
if (DEBUG) {
printf("\n***** Updating layer *****\n");
printf("\nFrom\n");
drawMatrix(src_layer, src_n, 1);
printf("\nTo\n");
drawMatrix(weights, dst_n, src_n);
}
hipLaunchKernelGGL(( mapStepCUDA), dim3(grid), dim3(block), 0, 0, src_layer_d, weights_d, buffer_d, dst_n, src_n);
// Set the current target to the input
float *currentTarget = buffer_d;
int currentHeight = src_n;
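// Tree reduction: each pass sums blocks of rows column-wise, shrinking the height from src_n
// towards 1; the surviving row holds the summed input * weight products per destination neuron.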
while (currentHeight > 1) {
// Calculate grid size
int gridX = (int)ceil((float)dst_n/WARP_SIZE);
int gridY = (int)ceil((float)currentHeight/WARP_SIZE);
dim3 grid(gridX, gridY);
// Allocate new buffer
float *buffer_d;
hipMalloc((void**)&buffer_d, sizeof(float) * (dst_n * gridY));
// RUN RUN RUN!
hipLaunchKernelGGL(( reduceStepCUDA), dim3(grid), dim3(block), 0, 0, currentTarget, buffer_d, dst_n, currentHeight);
// Free old memory and keep track of the new one
hipFree(currentTarget);
currentHeight = grid.y;
currentTarget = buffer_d;
}
dst_layer =_copyDeviceHost(currentTarget, dst_n, dst_layer);
for (int i=0; i < dst_n; i++) {
dst_layer[i] = tanh(dst_layer[i]);
}
if (DEBUG) {
printf("\nResult is\n");
drawMatrix(dst_layer, dst_n, 1);
printf("\n***** ENDED UPDATING LAYER *****\n");
_sleep(1);
}
}
| f55f8ad3b8d8f23e2fd055ec1d5ad8ce39aeba0f.cu | #include <math.h>
#include <unistd.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
extern "C" {
#include "utils.c"
}
#define WARP_SIZE 16
#define DEBUG false
/* ---------------- [[CUDA KERNELS]] ---------------- */
__global__ void updateWeightsCUDA(float *weights, float *changes, float *delta_outputs, float *inputs, int n_inputs, int n_outputs) {
int width = n_outputs;
int height = n_inputs;
GlobalDim gd = getGlobalDim(blockDim, blockIdx, threadIdx);
if ((gd.x < width) && (gd.y < height)) {
int idx = width * gd.y + gd.x;
float change = delta_outputs[gd.x] * inputs[gd.y];
weights[idx] += 0.5 * change + 0.5 * changes[idx];
changes[idx] = change;
}
}
__global__ void mapStepCUDA(float *inputs, float *matrix, float *buffer, int width, int height) {
GlobalDim gd = getGlobalDim(blockDim, blockIdx, threadIdx);
if ((gd.x < width) && (gd.y < height)) {
int idx = width * gd.y + gd.x;
buffer[idx] = inputs[gd.y] * matrix[idx];
}
}
__global__ void reduceStepCUDA(float *input, float *output, int width, int height) {
__shared__ float sharedMemory[WARP_SIZE * WARP_SIZE];
// STEP 1: exclude all threads that do not depend from problem
GlobalDim gd = getGlobalDim(blockDim, blockIdx, threadIdx);
if ((gd.x < width) && (gd.y < height)) {
// STEP 2: Move to shared memory
int gridId = gd.y * width + gd.x;
int blockId = threadIdx.y * blockDim.x + threadIdx.x;
sharedMemory[blockId] = input[gridId];
__syncthreads();
int n = (int)ceil((float)blockDim.y/2);
while(n >= 1) {
if (threadIdx.y < n) {
if ((gd.y + n) < height) {
int firstIndex = blockId;
int secondIndex = blockDim.x * (threadIdx.y + n) + threadIdx.x;
sharedMemory[firstIndex] += sharedMemory[secondIndex];
}
}
__syncthreads();
if (n == 1) {
break;
} else {
n = (int)ceil((float)n/2);
}
}
__syncthreads();
// STEP 3: Write back results
if (threadIdx.y == 1) {
output[blockIdx.y * width + gd.x] = sharedMemory[threadIdx.x];
}
}
}
/* ---------------- [[LAUNCH FUNCTIONS]] ---------------- */
void setWeightsForLayers(float *weights, float *changes, float *delta_outputs, float *inputs, int n_inputs, int n_outputs) {
  // Copy to device memory
int grid_size = n_inputs * n_outputs;
float *weights_d = _copyHostDevice(weights, grid_size);
float *changes_d = _copyHostDevice(changes, grid_size);
float *delta_outputs_d = _copyHostDevice(delta_outputs, n_outputs);
float *inputs_d = _copyHostDevice(inputs, n_inputs);
// Define block structure
dim3 block(WARP_SIZE, WARP_SIZE);
dim3 grid = getGridBasedOnBlockSize(n_outputs, n_inputs, WARP_SIZE);
// RUN RUN RUN!
updateWeightsCUDA<<<grid, block>>>(weights_d, changes_d, delta_outputs_d, inputs_d, n_inputs, n_outputs);
  // Copy back weights and momentum
weights = _copyDeviceHost(weights_d, grid_size, weights);
changes = _copyDeviceHost(changes_d, grid_size, changes);
}
void update_layer(float *src_layer, float *dst_layer, int src_n, int dst_n, float *weights) {
dim3 block(WARP_SIZE, WARP_SIZE);
float *src_layer_d, *weights_d, *buffer_d;
int total = src_n * dst_n;
// Allocate input in global memory
src_layer_d = _copyHostDevice(src_layer, src_n);
weights_d = _copyHostDevice(weights, total);
cudaMalloc((void**)&buffer_d, sizeof(float) * total);
// Create block dimensions and run parallel update layer
int gridX = (int)ceil((float)dst_n/WARP_SIZE);
int gridY = (int)ceil((float)src_n/WARP_SIZE);
dim3 grid(gridX, gridY);
// RUN RUN RUN!
if (DEBUG) {
printf("\n***** Updating layer *****\n");
printf("\nFrom\n");
drawMatrix(src_layer, src_n, 1);
printf("\nTo\n");
drawMatrix(weights, dst_n, src_n);
}
mapStepCUDA<<<grid, block>>>(src_layer_d, weights_d, buffer_d, dst_n, src_n);
// Set the current target to the input
float *currentTarget = buffer_d;
int currentHeight = src_n;
while (currentHeight > 1) {
// Calculate grid size
int gridX = (int)ceil((float)dst_n/WARP_SIZE);
int gridY = (int)ceil((float)currentHeight/WARP_SIZE);
dim3 grid(gridX, gridY);
// Allocate new buffer
float *buffer_d;
cudaMalloc((void**)&buffer_d, sizeof(float) * (dst_n * gridY));
// RUN RUN RUN!
reduceStepCUDA<<<grid, block>>>(currentTarget, buffer_d, dst_n, currentHeight);
// Free old memory and keep track of the new one
cudaFree(currentTarget);
currentHeight = grid.y;
currentTarget = buffer_d;
}
dst_layer =_copyDeviceHost(currentTarget, dst_n, dst_layer);
for (int i=0; i < dst_n; i++) {
dst_layer[i] = tanh(dst_layer[i]);
}
if (DEBUG) {
printf("\nResult is\n");
drawMatrix(dst_layer, dst_n, 1);
printf("\n***** ENDED UPDATING LAYER *****\n");
_sleep(1);
}
}
|
95d2ad5c9dd96d61ac2afefe44c39d1984185c39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include <float.h>
#include "cudakernel/reformat/reformat.h"
#include "cudakernel/common/common.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/macro.h"
#include "hip/hip_fp16.h"
using namespace PPLCUDA;
using namespace ppl::nn;
using namespace ppl::common;
#define DIM 32
#define LEASTCHANNEL 16
template <typename T, CVTFormatMode mode>
__global__ void cuda_kernel_cvtformat(
T* input,
T* output,
ReFormatParam param)
{
}
#define cvtNCTONHWC8(type) \
template<> \
__global__ void cuda_kernel_cvtformat<type, NDARRAY_NHWC8>( \
type* input, \
type* output, \
ReFormatParam param) \
{ \
__shared__ type share_val[DIM][DIM + 1]; \
\
int64_t num = blockIdx.z; \
for (int n = num; n < param.n_outer; n+= blockDim.x) { \
int64_t idx_w = blockIdx.x * blockDim.x + threadIdx.x; \
int64_t idx_h = blockIdx.y * blockDim.y + threadIdx.y; \
\
if (idx_w < param.n_inner && idx_h < param.src_pad) { \
int64_t offset = n * param.src_pad * param.n_inner + idx_h * param.n_inner + idx_w; \
share_val[threadIdx.y][threadIdx.x] = input[offset]; \
} else { \
share_val[threadIdx.y][threadIdx.x] = (type)0; \
} \
__syncthreads(); \
\
idx_w = blockIdx.y * blockDim.y + threadIdx.x; \
idx_h = blockIdx.x * blockDim.x + threadIdx.y; \
\
if (idx_w < param.dst_pad && idx_h < param.n_inner) { \
int64_t offset = n * param.dst_pad * param.n_inner + idx_h * param.dst_pad + idx_w; \
output[offset] = share_val[threadIdx.x][threadIdx.y]; \
} \
} \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtNCTONHWC8(half)
#endif
cvtNCTONHWC8(float)
cvtNCTONHWC8(char)
cvtNCTONHWC8(double)
#define cvtNHWC8TONC(type) \
template<> \
__global__ void cuda_kernel_cvtformat<type, NHWC8_NDARRAY>( \
type* input, \
type* output, \
ReFormatParam param) \
{ \
__shared__ type share_val[DIM][DIM + 1]; \
\
int64_t num = blockIdx.z; \
for (int n = num; n < param.n_outer; n += blockDim.x) { \
int64_t idx_w = blockIdx.x * blockDim.x + threadIdx.x; \
int64_t idx_h = blockIdx.y * blockDim.y + threadIdx.y; \
\
if (idx_w < param.src_pad && idx_h < param.n_inner) { \
int64_t offset = n * param.src_pad * param.n_inner + idx_h * param.src_pad + idx_w; \
share_val[threadIdx.y][threadIdx.x] = input[offset]; \
} else { \
share_val[threadIdx.y][threadIdx.x] = (type)0; \
} \
__syncthreads(); \
\
idx_w = blockIdx.y * blockDim.y + threadIdx.x; \
idx_h = blockIdx.x * blockDim.x + threadIdx.y; \
\
if (idx_w < param.n_inner && idx_h < param.dst_pad) { \
int64_t offset = n * param.dst_pad * param.n_inner + idx_h * param.n_inner + idx_w; \
output[offset] = share_val[threadIdx.x][threadIdx.y]; \
} \
} \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtNHWC8TONC(half)
#endif
cvtNHWC8TONC(float)
cvtNHWC8TONC(char)
cvtNHWC8TONC(double)
#define cvtN4CXTONC(type) \
template <> \
__global__ void cuda_kernel_cvtformat<type, N4CX_NDARRAY>( \
type * input, \
type * output, \
ReFormatParam param) \
{ \
const uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x; \
if (tid >= param.n_inner) \
return; \
const uint64_t inner_idx = tid; \
const uint64_t num_inner = blockIdx.z; \
const uint64_t c4_idx = blockIdx.y; \
_Pragma("unroll 4") for (int c_in_c4_idx = 0; c_in_c4_idx < 4; c_in_c4_idx++) \
{ \
const uint64_t c_idx = c4_idx * 4 + c_in_c4_idx; \
const uint64_t size = param.n_inner; \
const uint64_t padChannels = gridDim.y * 4; \
const uint64_t numChannels = param.channel; \
if (c_idx < numChannels) { \
const uint64_t offset = num_inner * padChannels * size + (c4_idx * size + inner_idx) * 4 + c_in_c4_idx; \
const uint64_t outOffset = num_inner * numChannels * size + c_idx * size + inner_idx; \
output[outOffset] = input[offset]; \
} \
} \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtN4CXTONC(half)
#endif
cvtN4CXTONC(float)
cvtN4CXTONC(char)
cvtN4CXTONC(double)
#define cvtNCTON4CX(type) \
template <> \
__global__ void cuda_kernel_cvtformat<type, NDARRAY_N4CX>( \
type * input, \
type * output, \
ReFormatParam param) \
{ \
const uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x; \
if (tid >= param.n_inner) \
return; \
const uint64_t inner_idx = tid; \
const uint64_t num_inner = blockIdx.z; \
const uint64_t c4_idx = blockIdx.y; \
_Pragma("unroll 4") for (int c_in_c4_idx = 0; c_in_c4_idx < 4; c_in_c4_idx++) \
{ \
const uint64_t c_idx = c4_idx * 4 + c_in_c4_idx; \
const uint64_t size = param.n_inner; \
const uint64_t padChannels = gridDim.y * 4; \
const uint64_t numChannels = param.channel; \
if (c_idx < numChannels) { \
const uint64_t offset = num_inner * padChannels * size + (c4_idx * size + inner_idx) * 4 + c_in_c4_idx; \
const uint64_t inOffset = num_inner * numChannels * size + c_idx * size + inner_idx; \
output[offset] = input[inOffset]; \
} \
} \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtNCTON4CX(half)
#endif
cvtNCTON4CX(float)
cvtNCTON4CX(char)
cvtNCTON4CX(double)
template <typename T, CVTFormatMode mode>
__global__ void cuda_kernel_small_channel_cvtformat(
T* input,
int num_elems,
DivModFast inner_fast,
DivModFast src_pad_fast,
DivModFast dst_pad_fast,
T* output,
ReFormatParam param)
{
}
/*
// #define cvtSMCHANNELNCTONHWC8(type) \
// template<> \
// __global__ void cuda_kernel_small_channel_cvtformat<type, NDARRAY_NHWC8>( \
// type* input, \
// int64_t num_elems, \
// type* output, \
// ReFormatParam param) \
// { \
// int64_t tid = blockIdx.x * blockDim.x + threadIdx.x; \
// if (tid >= num_elems) return; \
// int c_idx = tid % param.dst_pad; \
// int inner_idx = (tid / param.dst_pad) % param.n_inner; \
// int outer_idx = tid / (param.dst_pad * param.n_inner); \
// int64_t offset = outer_idx * param.src_pad * param.n_inner + c_idx * param.n_inner + inner_idx; \
// output[tid] = c_idx > param.channel ? input[offset] : (type)0; \
// }
*/
#define cvtSMCHANNELNCTONHWC8(type) \
template<> \
__global__ void cuda_kernel_small_channel_cvtformat<type, NDARRAY_NHWC8>( \
type* input, \
int num_elems, \
DivModFast inner_fast, \
DivModFast src_pad_fast, \
DivModFast dst_pad_fast, \
type* output, \
ReFormatParam param) \
{ \
int tid = blockIdx.x * blockDim.x + threadIdx.x; \
if (tid >= num_elems) return; \
int inner_idx = 0, num_inner = 0, c_idx = 0; \
dst_pad_fast.divmod(tid, num_inner, c_idx); \
inner_idx = inner_fast.mod(num_inner); \
int outer_idx = inner_fast.div(num_inner); \
int offset = outer_idx * param.src_pad * param.n_inner + c_idx * param.n_inner + inner_idx; \
output[tid] = c_idx < param.src_pad ? input[offset] : (type)0; \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtSMCHANNELNCTONHWC8(half)
#endif
cvtSMCHANNELNCTONHWC8(float)
cvtSMCHANNELNCTONHWC8(char)
cvtSMCHANNELNCTONHWC8(double)
#define cvtSMCHANNELNHWC8TONC(type) \
template<> \
__global__ void cuda_kernel_small_channel_cvtformat<type, NHWC8_NDARRAY>( \
type* input, \
int num_elems, \
DivModFast inner_fast, \
DivModFast src_pad_fast, \
DivModFast dst_pad_fast, \
type* output, \
ReFormatParam param) \
{ \
int tid = blockIdx.x * blockDim.x + threadIdx.x; \
if (tid >= num_elems) return; \
int inner_idx = 0, num_inner = 0, c_idx = 0; \
inner_fast.divmod(tid, num_inner, inner_idx); \
c_idx = dst_pad_fast.mod(num_inner); \
int outer_idx = tid / (param.dst_pad * param.n_inner); \
int offset = outer_idx * param.src_pad * param.n_inner + c_idx + inner_idx * param.src_pad; \
output[tid] = input[offset]; \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtSMCHANNELNHWC8TONC(half)
#endif
cvtSMCHANNELNHWC8TONC(float)
cvtSMCHANNELNHWC8TONC(char)
cvtSMCHANNELNHWC8TONC(double)
#define cvtSMCHANNELN4CXTONC(type) \
template <> \
__global__ void cuda_kernel_small_channel_cvtformat<type, N4CX_NDARRAY>( \
type * input, \
int num_elems, \
DivModFast inner_fast, \
DivModFast src_pad_fast, \
DivModFast dst_pad_fast, \
type* output, \
ReFormatParam param) \
{ \
const int tid = blockIdx.x * blockDim.x + threadIdx.x; \
if (tid >= num_elems) \
return; \
int inner_idx, num_inner, c_idx; \
inner_fast.divmod(tid, num_inner, inner_idx); \
src_pad_fast.divmod(num_inner, num_inner, c_idx); \
const int c4_idx = c_idx / 4; \
const int c_in_c4_idx = c_idx % 4; \
const uint64_t size = param.n_inner; \
const uint64_t padChannels = param.src_pad; \
const uint64_t numChannels = param.channel; \
const uint64_t offset = num_inner * padChannels * size + (c4_idx * size + inner_idx) * 4 + c_in_c4_idx; \
const uint64_t outOffset = num_inner * numChannels * size + c_idx * size + inner_idx; \
output[outOffset] = input[offset]; \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtSMCHANNELN4CXTONC(half)
#endif
cvtSMCHANNELN4CXTONC(float)
cvtSMCHANNELN4CXTONC(char)
cvtSMCHANNELN4CXTONC(double)
#define cvtSMCHANNELNCTON4CX(type) \
template <> \
__global__ void cuda_kernel_small_channel_cvtformat<type, NDARRAY_N4CX>( \
type * input, \
int num_elems, \
DivModFast inner_fast, \
DivModFast src_pad_fast, \
DivModFast dst_pad_fast, \
type* output, \
ReFormatParam param) \
{ \
const int tid = blockIdx.x * blockDim.x + threadIdx.x; \
if (tid >= num_elems) \
return; \
int inner_idx, num_inner, c_idx; \
inner_fast.divmod(tid, num_inner, inner_idx); \
src_pad_fast.divmod(num_inner, num_inner, c_idx); \
const int c4_idx = c_idx / 4; \
const int c_in_c4_idx = c_idx % 4; \
const uint64_t size = param.n_inner; \
const uint64_t padChannels = param.dst_pad; \
const uint64_t numChannels = param.channel; \
const uint64_t offset = num_inner * padChannels * size + (c4_idx * size + inner_idx) * 4 + c_in_c4_idx; \
const uint64_t inOffset = num_inner * numChannels * size + c_idx * size + inner_idx; \
output[offset] = input[inOffset]; \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtSMCHANNELNCTON4CX(half)
#endif
cvtSMCHANNELNCTON4CX(float)
cvtSMCHANNELNCTON4CX(char)
cvtSMCHANNELNCTON4CX(double)
#define MAX_DIM 65533
template<CVTFormatMode mode>
void GenDimParam(
ReFormatParam param,
dim3& dimBlock,
dim3& dimGrid)
{
dimGrid.z = param.n_outer >= MAX_DIM ? MAX_DIM : param.n_outer;
if (mode == NHWC8_NDARRAY) {
dimBlock.x = DIM;
dimBlock.y = DIM;
dimGrid.x = DivUp(param.src_pad, DIM);
dimGrid.y = DivUp(param.n_inner, DIM);
} else if (mode == NDARRAY_NHWC8) {
dimBlock.x = DIM;
dimBlock.y = DIM;
dimGrid.x = DivUp(param.n_inner, DIM);
dimGrid.y = DivUp(param.dst_pad, DIM);
} else if (mode == N4CX_NDARRAY) {
dimBlock.x = DIM;
dimBlock.y = 1;
dimGrid.x = DivUp(param.n_inner, DIM);
dimGrid.y = param.src_pad / 4;
} else if (mode == NDARRAY_N4CX) {
dimBlock.x = DIM;
dimBlock.y = 1;
dimGrid.x = DivUp(param.n_inner, DIM);
dimGrid.y = param.dst_pad / 4;
} else {
}
}
#define RFNHWC8 \
case NDARRAY_NHWC8: \
RUN(NDARRAY_NHWC8); \
case NHWC8_NDARRAY: \
RUN(NHWC8_NDARRAY);
#define RFN4CX \
case NDARRAY_N4CX: \
RUN(NDARRAY_N4CX); \
case N4CX_NDARRAY: \
RUN(N4CX_NDARRAY);
void PPLCUDANormalCVTFormat(hipStream_t stream, const void *input, void *output, ReFormatParam param)
{
#define RUN(mode) \
do { \
dim3 dimBlock(32, 1, 1); \
dim3 dimGrid(32, 1, 1); \
GenDimParam<mode>(param, dimBlock, dimGrid); \
switch (GetSizeOfDataType(param.out_type)) { \
case 1: \
hipLaunchKernelGGL(( cuda_kernel_cvtformat<char, mode>), dim3(dimGrid), dim3(dimBlock), 0, stream, \
(char *)input, (char *)output, param); \
break; \
case 2: \
hipLaunchKernelGGL(( cuda_kernel_cvtformat<half, mode>), dim3(dimGrid), dim3(dimBlock), 0, stream, \
(half *)input, (half *)output, param); \
break; \
case 4: \
hipLaunchKernelGGL(( cuda_kernel_cvtformat<float, mode>), dim3(dimGrid), dim3(dimBlock), 0, stream, \
(float *)input, (float *)output, param); \
break; \
case 8: \
hipLaunchKernelGGL(( cuda_kernel_cvtformat<double, mode>), dim3(dimGrid), dim3(dimBlock), 0, stream, \
(double *)input, (double *)output, param); \
break; \
default: \
break; \
} \
return; \
} while (0)
switch (GetCVTFormatMode(param)) {
RFNHWC8
RFN4CX
default:
return;
}
#undef RUN
}
void PPLCUDASmallChannelCVTFormat(hipStream_t stream, const void *input, void *output, ReFormatParam param)
{
#define RUN(mode) \
do { \
dim3 dimBlock(256, 1, 1); \
int num_elems = param.out_elems; \
dim3 dimGrid(DivUp(num_elems, 256), 1, 1); \
DivModFast inner_fast(param.n_inner); \
DivModFast src_pad_fast(param.src_pad); \
DivModFast dst_pad_fast(param.dst_pad); \
switch (GetSizeOfDataType(param.out_type)) { \
case 1: \
hipLaunchKernelGGL(( cuda_kernel_small_channel_cvtformat<char, mode>), dim3(dimGrid), dim3(dimBlock), 0, stream, \
(char *)input, num_elems, inner_fast, src_pad_fast, dst_pad_fast, \
(char *)output, param); \
break; \
case 2: \
hipLaunchKernelGGL(( cuda_kernel_small_channel_cvtformat<half, mode>), dim3(dimGrid), dim3(dimBlock), 0, stream, \
(half *)input, num_elems, inner_fast, src_pad_fast, dst_pad_fast, \
(half *)output, param); \
break; \
case 4: \
hipLaunchKernelGGL(( cuda_kernel_small_channel_cvtformat<float, mode>), dim3(dimGrid), dim3(dimBlock), 0, stream, \
(float *)input, num_elems, inner_fast, src_pad_fast, dst_pad_fast, \
(float *)output, param); \
break; \
case 8: \
hipLaunchKernelGGL(( cuda_kernel_small_channel_cvtformat<double, mode>), dim3(dimGrid), dim3(dimBlock), 0, stream, \
(double *)input, num_elems, inner_fast, src_pad_fast, dst_pad_fast, \
(double *)output, param); \
break; \
default: \
break; \
} \
return; \
} while (0)
switch (GetCVTFormatMode(param)) {
RFNHWC8
RFN4CX
default:
return;
}
#undef RUN
}
void PPLCUDACVTFormat(
hipStream_t stream,
const void* input,
void* output,
ReFormatParam param)
{
if (param.channel < LEASTCHANNEL) {
if (param.out_type == DATATYPE_INT8) {
auto host_in = new int8_t[param.in_elems];
auto host_out = new int8_t[param.out_elems];
hipMemcpy(host_in, input, param.in_elems, hipMemcpyDefault);
PPLCUDASmallChannelCVTFormat(stream, input, output, param);
hipMemcpy(host_out, output, param.out_elems, hipMemcpyDefault);
delete[] host_in;
delete[] host_out;
} else if (param.out_type == DATATYPE_FLOAT32) {
auto host_in = new float[param.in_elems];
auto host_out = new float[param.out_elems];
hipMemcpy(host_in, input, 4 * param.in_elems, hipMemcpyDefault);
PPLCUDASmallChannelCVTFormat(stream, input, output, param);
hipMemcpy(host_out, output, 4 * param.out_elems, hipMemcpyDefault);
delete[] host_in;
delete[] host_out;
} else {
PPLCUDASmallChannelCVTFormat(stream, input, output, param);
}
} else
{
PPLCUDANormalCVTFormat(stream, input, output, param);
}
}
CVTFormatMode GetCVTFormatMode(ReFormatParam param)
{
if (param.in_format == DATAFORMAT_NDARRAY) {
switch (param.out_format) {
case DATAFORMAT_NHWC8:
return NDARRAY_NHWC8;
case DATAFORMAT_N4CX:
return NDARRAY_N4CX;
default:
return CVTFormatUnknown;
}
} else if (param.in_format == DATAFORMAT_N4CX) {
switch (param.out_format) {
case DATAFORMAT_NDARRAY:
return N4CX_NDARRAY;
default:
return CVTFormatUnknown;
}
} else if (param.in_format == DATAFORMAT_NHWC8) {
switch (param.out_format) {
case DATAFORMAT_NDARRAY:
return NHWC8_NDARRAY;
default:
return CVTFormatUnknown;
}
} else {
return CVTFormatUnknown;
}
}
CVTTypeMode GetCVTTypeMode(ReFormatParam param)
{
if (param.in_type == DATATYPE_FLOAT32) {
switch (param.out_type) {
case DATATYPE_FLOAT16:
return FLOAT32_FLOAT16;
case DATATYPE_INT8:
return FLOAT32_INT8;
case DATATYPE_INT4B:
return FLOAT32_INT4B;
default:
return CVTTypeUnknown;
}
}
if (param.in_type == DATATYPE_FLOAT16) {
switch (param.out_type) {
case DATATYPE_FLOAT32:
return FLOAT16_FLOAT32;
case DATATYPE_INT8:
return FLOAT16_INT8;
case DATATYPE_INT4B:
return FLOAT16_INT4B;
default:
return CVTTypeUnknown;
}
}
if (param.in_type == DATATYPE_INT8) {
switch (param.out_type) {
case DATATYPE_FLOAT16:
return INT8_FLOAT16;
case DATATYPE_FLOAT32:
return INT8_FLOAT32;
case DATATYPE_INT4B:
return INT8_INT4B;
case DATATYPE_INT8:
return INT8_INT8;
default:
return CVTTypeUnknown;
}
}
if (param.in_type == DATATYPE_INT4B) {
switch (param.out_type) {
case DATATYPE_FLOAT16:
return INT4B_FLOAT16;
case DATATYPE_FLOAT32:
return INT4B_FLOAT32;
case DATATYPE_INT8:
return INT4B_INT8;
case DATATYPE_INT4B:
return INT4B_INT4B;
default:
return CVTTypeUnknown;
}
}
if (param.in_type == DATATYPE_INT32) {
switch (param.out_type) {
case DATATYPE_INT64:
return INT32_INT64;
default:
return CVTTypeUnknown;
}
}
if (param.in_type == DATATYPE_INT64) {
switch (param.out_type) {
case DATATYPE_INT32:
return INT64_INT32;
default:
return CVTTypeUnknown;
}
}
return CVTTypeUnknown;
}
bool IsFloatEqual(const std::vector<float>& a, const std::vector<float>& b) {
if (a.size() != b.size()) {
return false;
}
for (uint32_t i = 0; i < a.size(); i++) {
        if (fabs(a[i] - b[i]) > FLT_EPSILON) {
return false;
}
}
return true;
}
bool EqualQuant(const ppl::nn::cuda::CudaTensorQuant& quant_a, const ppl::nn::cuda::CudaTensorQuant& quant_b) {
return quant_a.bit_width == quant_b.bit_width &&
IsFloatEqual(quant_a.scale, quant_b.scale) &&
IsFloatEqual(quant_a.zero_point, quant_b.zero_point);
}
ppl::common::RetCode SetReLayoutParam(
ReFormatParam *param,
const TensorShape& input,
const TensorShape& output)
{
param->n_outer = input.GetDim(0);
param->channel = input.GetDimCount() > 1 ? input.GetDim(1) : 1;
param->n_inner = input.GetDimCount() > 2 ? input.GetElementsFromDimensionIncludingPadding(2) : 1;
param->in_format = input.GetDataFormat();
param->out_format = output.GetDataFormat();
param->in_type = input.GetDataType();
param->out_type = output.GetDataType();
param->mix_type = (param->in_type != param->out_type);
param->mix_format = (param->in_format != param->out_format);
param->src_pad = Align(param->channel, AlignDataFormat(param->in_format));
param->dst_pad = Align(param->channel, AlignDataFormat(param->out_format));
param->out_elems = output.GetElementsIncludingPadding();
param->in_elems = input.GetElementsIncludingPadding();
return RC_SUCCESS;
}
ppl::common::RetCode SetReLayoutParam(
ReFormatParam *param,
const TensorShape& input,
const ppl::nn::cuda::CudaTensorQuant& input_quant,
const TensorShape& output,
const ppl::nn::cuda::CudaTensorQuant& output_quant)
{
SetReLayoutParam(param, input, output);
param->i_step = input_quant.scale[0];
param->i_zero_point = input_quant.zero_point[0];
param->o_step = output_quant.scale[0];
param->o_zero_point = output_quant.zero_point[0];
if (param->in_type == param->out_type) {
param->mix_type = !EqualQuant(input_quant, output_quant);
}
return RC_SUCCESS;
}
void PPLCUDADataConvert(
hipStream_t stream,
const void* input,
void* output,
void* tempBuf,
ReFormatParam& param)
{
if (param.in_format != param.out_format && param.in_type != param.out_type) {
PPLCUDACVTTypePerTensor(stream, input, tempBuf, param);
PPLCUDACVTFormat(stream, tempBuf, output, param);
return;
} else if (param.in_format != param.out_format) {
PPLCUDACVTFormat(stream, input, output, param);
return;
} else if (param.in_type != param.out_type) {
PPLCUDACVTTypePerTensor(stream, input, output, param);
return;
} else {
return;
}
}
| 95d2ad5c9dd96d61ac2afefe44c39d1984185c39.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include <float.h>
#include "cudakernel/reformat/reformat.h"
#include "cudakernel/common/common.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/macro.h"
#include "cuda_fp16.h"
using namespace PPLCUDA;
using namespace ppl::nn;
using namespace ppl::common;
#define DIM 32
#define LEASTCHANNEL 16
template <typename T, CVTFormatMode mode>
__global__ void cuda_kernel_cvtformat(
T* input,
T* output,
ReFormatParam param)
{
}
#define cvtNCTONHWC8(type) \
template<> \
__global__ void cuda_kernel_cvtformat<type, NDARRAY_NHWC8>( \
type* input, \
type* output, \
ReFormatParam param) \
{ \
__shared__ type share_val[DIM][DIM + 1]; \
\
int64_t num = blockIdx.z; \
for (int n = num; n < param.n_outer; n+= blockDim.x) { \
int64_t idx_w = blockIdx.x * blockDim.x + threadIdx.x; \
int64_t idx_h = blockIdx.y * blockDim.y + threadIdx.y; \
\
if (idx_w < param.n_inner && idx_h < param.src_pad) { \
int64_t offset = n * param.src_pad * param.n_inner + idx_h * param.n_inner + idx_w; \
share_val[threadIdx.y][threadIdx.x] = input[offset]; \
} else { \
share_val[threadIdx.y][threadIdx.x] = (type)0; \
} \
__syncthreads(); \
\
idx_w = blockIdx.y * blockDim.y + threadIdx.x; \
idx_h = blockIdx.x * blockDim.x + threadIdx.y; \
\
if (idx_w < param.dst_pad && idx_h < param.n_inner) { \
int64_t offset = n * param.dst_pad * param.n_inner + idx_h * param.dst_pad + idx_w; \
output[offset] = share_val[threadIdx.x][threadIdx.y]; \
} \
} \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtNCTONHWC8(half)
#endif
cvtNCTONHWC8(float)
cvtNCTONHWC8(char)
cvtNCTONHWC8(double)
#define cvtNHWC8TONC(type) \
template<> \
__global__ void cuda_kernel_cvtformat<type, NHWC8_NDARRAY>( \
type* input, \
type* output, \
ReFormatParam param) \
{ \
__shared__ type share_val[DIM][DIM + 1]; \
\
int64_t num = blockIdx.z; \
for (int n = num; n < param.n_outer; n += blockDim.x) { \
int64_t idx_w = blockIdx.x * blockDim.x + threadIdx.x; \
int64_t idx_h = blockIdx.y * blockDim.y + threadIdx.y; \
\
if (idx_w < param.src_pad && idx_h < param.n_inner) { \
int64_t offset = n * param.src_pad * param.n_inner + idx_h * param.src_pad + idx_w; \
share_val[threadIdx.y][threadIdx.x] = input[offset]; \
} else { \
share_val[threadIdx.y][threadIdx.x] = (type)0; \
} \
__syncthreads(); \
\
idx_w = blockIdx.y * blockDim.y + threadIdx.x; \
idx_h = blockIdx.x * blockDim.x + threadIdx.y; \
\
if (idx_w < param.n_inner && idx_h < param.dst_pad) { \
int64_t offset = n * param.dst_pad * param.n_inner + idx_h * param.n_inner + idx_w; \
output[offset] = share_val[threadIdx.x][threadIdx.y]; \
} \
} \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtNHWC8TONC(half)
#endif
cvtNHWC8TONC(float)
cvtNHWC8TONC(char)
cvtNHWC8TONC(double)
#define cvtN4CXTONC(type) \
template <> \
__global__ void cuda_kernel_cvtformat<type, N4CX_NDARRAY>( \
type * input, \
type * output, \
ReFormatParam param) \
{ \
const uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x; \
if (tid >= param.n_inner) \
return; \
const uint64_t inner_idx = tid; \
const uint64_t num_inner = blockIdx.z; \
const uint64_t c4_idx = blockIdx.y; \
_Pragma("unroll 4") for (int c_in_c4_idx = 0; c_in_c4_idx < 4; c_in_c4_idx++) \
{ \
const uint64_t c_idx = c4_idx * 4 + c_in_c4_idx; \
const uint64_t size = param.n_inner; \
const uint64_t padChannels = gridDim.y * 4; \
const uint64_t numChannels = param.channel; \
if (c_idx < numChannels) { \
const uint64_t offset = num_inner * padChannels * size + (c4_idx * size + inner_idx) * 4 + c_in_c4_idx; \
const uint64_t outOffset = num_inner * numChannels * size + c_idx * size + inner_idx; \
output[outOffset] = input[offset]; \
} \
} \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtN4CXTONC(half)
#endif
cvtN4CXTONC(float)
cvtN4CXTONC(char)
cvtN4CXTONC(double)
#define cvtNCTON4CX(type) \
template <> \
__global__ void cuda_kernel_cvtformat<type, NDARRAY_N4CX>( \
type * input, \
type * output, \
ReFormatParam param) \
{ \
const uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x; \
if (tid >= param.n_inner) \
return; \
const uint64_t inner_idx = tid; \
const uint64_t num_inner = blockIdx.z; \
const uint64_t c4_idx = blockIdx.y; \
_Pragma("unroll 4") for (int c_in_c4_idx = 0; c_in_c4_idx < 4; c_in_c4_idx++) \
{ \
const uint64_t c_idx = c4_idx * 4 + c_in_c4_idx; \
const uint64_t size = param.n_inner; \
const uint64_t padChannels = gridDim.y * 4; \
const uint64_t numChannels = param.channel; \
if (c_idx < numChannels) { \
const uint64_t offset = num_inner * padChannels * size + (c4_idx * size + inner_idx) * 4 + c_in_c4_idx; \
const uint64_t inOffset = num_inner * numChannels * size + c_idx * size + inner_idx; \
output[offset] = input[inOffset]; \
} \
} \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtNCTON4CX(half)
#endif
cvtNCTON4CX(float)
cvtNCTON4CX(char)
cvtNCTON4CX(double)
template <typename T, CVTFormatMode mode>
__global__ void cuda_kernel_small_channel_cvtformat(
T* input,
int num_elems,
DivModFast inner_fast,
DivModFast src_pad_fast,
DivModFast dst_pad_fast,
T* output,
ReFormatParam param)
{
}
/*
// #define cvtSMCHANNELNCTONHWC8(type) \
// template<> \
// __global__ void cuda_kernel_small_channel_cvtformat<type, NDARRAY_NHWC8>( \
// type* input, \
// int64_t num_elems, \
// type* output, \
// ReFormatParam param) \
// { \
// int64_t tid = blockIdx.x * blockDim.x + threadIdx.x; \
// if (tid >= num_elems) return; \
// int c_idx = tid % param.dst_pad; \
// int inner_idx = (tid / param.dst_pad) % param.n_inner; \
// int outer_idx = tid / (param.dst_pad * param.n_inner); \
// int64_t offset = outer_idx * param.src_pad * param.n_inner + c_idx * param.n_inner + inner_idx; \
// output[tid] = c_idx > param.channel ? input[offset] : (type)0; \
// }
*/
#define cvtSMCHANNELNCTONHWC8(type) \
template<> \
__global__ void cuda_kernel_small_channel_cvtformat<type, NDARRAY_NHWC8>( \
type* input, \
int num_elems, \
DivModFast inner_fast, \
DivModFast src_pad_fast, \
DivModFast dst_pad_fast, \
type* output, \
ReFormatParam param) \
{ \
int tid = blockIdx.x * blockDim.x + threadIdx.x; \
if (tid >= num_elems) return; \
int inner_idx = 0, num_inner = 0, c_idx = 0; \
dst_pad_fast.divmod(tid, num_inner, c_idx); \
inner_idx = inner_fast.mod(num_inner); \
int outer_idx = inner_fast.div(num_inner); \
int offset = outer_idx * param.src_pad * param.n_inner + c_idx * param.n_inner + inner_idx; \
output[tid] = c_idx < param.src_pad ? input[offset] : (type)0; \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtSMCHANNELNCTONHWC8(half)
#endif
cvtSMCHANNELNCTONHWC8(float)
cvtSMCHANNELNCTONHWC8(char)
cvtSMCHANNELNCTONHWC8(double)
#define cvtSMCHANNELNHWC8TONC(type) \
template<> \
__global__ void cuda_kernel_small_channel_cvtformat<type, NHWC8_NDARRAY>( \
type* input, \
int num_elems, \
DivModFast inner_fast, \
DivModFast src_pad_fast, \
DivModFast dst_pad_fast, \
type* output, \
ReFormatParam param) \
{ \
int tid = blockIdx.x * blockDim.x + threadIdx.x; \
if (tid >= num_elems) return; \
int inner_idx = 0, num_inner = 0, c_idx = 0; \
inner_fast.divmod(tid, num_inner, inner_idx); \
c_idx = dst_pad_fast.mod(num_inner); \
int outer_idx = tid / (param.dst_pad * param.n_inner); \
int offset = outer_idx * param.src_pad * param.n_inner + c_idx + inner_idx * param.src_pad; \
output[tid] = input[offset]; \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtSMCHANNELNHWC8TONC(half)
#endif
cvtSMCHANNELNHWC8TONC(float)
cvtSMCHANNELNHWC8TONC(char)
cvtSMCHANNELNHWC8TONC(double)
#define cvtSMCHANNELN4CXTONC(type) \
template <> \
__global__ void cuda_kernel_small_channel_cvtformat<type, N4CX_NDARRAY>( \
type * input, \
int num_elems, \
DivModFast inner_fast, \
DivModFast src_pad_fast, \
DivModFast dst_pad_fast, \
type* output, \
ReFormatParam param) \
{ \
const int tid = blockIdx.x * blockDim.x + threadIdx.x; \
if (tid >= num_elems) \
return; \
int inner_idx, num_inner, c_idx; \
inner_fast.divmod(tid, num_inner, inner_idx); \
src_pad_fast.divmod(num_inner, num_inner, c_idx); \
const int c4_idx = c_idx / 4; \
const int c_in_c4_idx = c_idx % 4; \
const uint64_t size = param.n_inner; \
const uint64_t padChannels = param.src_pad; \
const uint64_t numChannels = param.channel; \
const uint64_t offset = num_inner * padChannels * size + (c4_idx * size + inner_idx) * 4 + c_in_c4_idx; \
const uint64_t outOffset = num_inner * numChannels * size + c_idx * size + inner_idx; \
output[outOffset] = input[offset]; \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtSMCHANNELN4CXTONC(half)
#endif
cvtSMCHANNELN4CXTONC(float)
cvtSMCHANNELN4CXTONC(char)
cvtSMCHANNELN4CXTONC(double)
#define cvtSMCHANNELNCTON4CX(type) \
template <> \
__global__ void cuda_kernel_small_channel_cvtformat<type, NDARRAY_N4CX>( \
type * input, \
int num_elems, \
DivModFast inner_fast, \
DivModFast src_pad_fast, \
DivModFast dst_pad_fast, \
type* output, \
ReFormatParam param) \
{ \
const int tid = blockIdx.x * blockDim.x + threadIdx.x; \
if (tid >= num_elems) \
return; \
int inner_idx, num_inner, c_idx; \
inner_fast.divmod(tid, num_inner, inner_idx); \
src_pad_fast.divmod(num_inner, num_inner, c_idx); \
const int c4_idx = c_idx / 4; \
const int c_in_c4_idx = c_idx % 4; \
const uint64_t size = param.n_inner; \
const uint64_t padChannels = param.dst_pad; \
const uint64_t numChannels = param.channel; \
const uint64_t offset = num_inner * padChannels * size + (c4_idx * size + inner_idx) * 4 + c_in_c4_idx; \
const uint64_t inOffset = num_inner * numChannels * size + c_idx * size + inner_idx; \
output[offset] = input[inOffset]; \
}
#if __CUDACC_VER_MAJOR__ >= 9
cvtSMCHANNELNCTON4CX(half)
#endif
cvtSMCHANNELNCTON4CX(float)
cvtSMCHANNELNCTON4CX(char)
cvtSMCHANNELNCTON4CX(double)
#define MAX_DIM 65533
template<CVTFormatMode mode>
void GenDimParam(
ReFormatParam param,
dim3& dimBlock,
dim3& dimGrid)
{
dimGrid.z = param.n_outer >= MAX_DIM ? MAX_DIM : param.n_outer;
if (mode == NHWC8_NDARRAY) {
dimBlock.x = DIM;
dimBlock.y = DIM;
dimGrid.x = DivUp(param.src_pad, DIM);
dimGrid.y = DivUp(param.n_inner, DIM);
} else if (mode == NDARRAY_NHWC8) {
dimBlock.x = DIM;
dimBlock.y = DIM;
dimGrid.x = DivUp(param.n_inner, DIM);
dimGrid.y = DivUp(param.dst_pad, DIM);
} else if (mode == N4CX_NDARRAY) {
dimBlock.x = DIM;
dimBlock.y = 1;
dimGrid.x = DivUp(param.n_inner, DIM);
dimGrid.y = param.src_pad / 4;
} else if (mode == NDARRAY_N4CX) {
dimBlock.x = DIM;
dimBlock.y = 1;
dimGrid.x = DivUp(param.n_inner, DIM);
dimGrid.y = param.dst_pad / 4;
} else {
}
}
#define RFNHWC8 \
case NDARRAY_NHWC8: \
RUN(NDARRAY_NHWC8); \
case NHWC8_NDARRAY: \
RUN(NHWC8_NDARRAY);
#define RFN4CX \
case NDARRAY_N4CX: \
RUN(NDARRAY_N4CX); \
case N4CX_NDARRAY: \
RUN(N4CX_NDARRAY);
void PPLCUDANormalCVTFormat(cudaStream_t stream, const void *input, void *output, ReFormatParam param)
{
#define RUN(mode) \
do { \
dim3 dimBlock(32, 1, 1); \
dim3 dimGrid(32, 1, 1); \
GenDimParam<mode>(param, dimBlock, dimGrid); \
switch (GetSizeOfDataType(param.out_type)) { \
case 1: \
cuda_kernel_cvtformat<char, mode><<<dimGrid, dimBlock, 0, stream>>>( \
(char *)input, (char *)output, param); \
break; \
case 2: \
cuda_kernel_cvtformat<half, mode><<<dimGrid, dimBlock, 0, stream>>>( \
(half *)input, (half *)output, param); \
break; \
case 4: \
cuda_kernel_cvtformat<float, mode><<<dimGrid, dimBlock, 0, stream>>>( \
(float *)input, (float *)output, param); \
break; \
case 8: \
cuda_kernel_cvtformat<double, mode><<<dimGrid, dimBlock, 0, stream>>>(\
(double *)input, (double *)output, param); \
break; \
default: \
break; \
} \
return; \
} while (0)
switch (GetCVTFormatMode(param)) {
RFNHWC8
RFN4CX
default:
return;
}
#undef RUN
}
void PPLCUDASmallChannelCVTFormat(cudaStream_t stream, const void *input, void *output, ReFormatParam param)
{
#define RUN(mode) \
do { \
dim3 dimBlock(256, 1, 1); \
int num_elems = param.out_elems; \
dim3 dimGrid(DivUp(num_elems, 256), 1, 1); \
DivModFast inner_fast(param.n_inner); \
DivModFast src_pad_fast(param.src_pad); \
DivModFast dst_pad_fast(param.dst_pad); \
switch (GetSizeOfDataType(param.out_type)) { \
case 1: \
cuda_kernel_small_channel_cvtformat<char, mode><<<dimGrid, dimBlock, 0, stream>>>( \
(char *)input, num_elems, inner_fast, src_pad_fast, dst_pad_fast, \
(char *)output, param); \
break; \
case 2: \
cuda_kernel_small_channel_cvtformat<half, mode><<<dimGrid, dimBlock, 0, stream>>>( \
(half *)input, num_elems, inner_fast, src_pad_fast, dst_pad_fast, \
(half *)output, param); \
break; \
case 4: \
cuda_kernel_small_channel_cvtformat<float, mode><<<dimGrid, dimBlock, 0, stream>>>( \
(float *)input, num_elems, inner_fast, src_pad_fast, dst_pad_fast, \
(float *)output, param); \
break; \
case 8: \
cuda_kernel_small_channel_cvtformat<double, mode><<<dimGrid, dimBlock, 0, stream>>>( \
(double *)input, num_elems, inner_fast, src_pad_fast, dst_pad_fast, \
(double *)output, param); \
break; \
default: \
break; \
} \
return; \
} while (0)
switch (GetCVTFormatMode(param)) {
RFNHWC8
RFN4CX
default:
return;
}
#undef RUN
}
void PPLCUDACVTFormat(
cudaStream_t stream,
const void* input,
void* output,
ReFormatParam param)
{
if (param.channel < LEASTCHANNEL) {
if (param.out_type == DATATYPE_INT8) {
auto host_in = new int8_t[param.in_elems];
auto host_out = new int8_t[param.out_elems];
cudaMemcpy(host_in, input, param.in_elems, cudaMemcpyDefault);
PPLCUDASmallChannelCVTFormat(stream, input, output, param);
cudaMemcpy(host_out, output, param.out_elems, cudaMemcpyDefault);
delete[] host_in;
delete[] host_out;
} else if (param.out_type == DATATYPE_FLOAT32) {
auto host_in = new float[param.in_elems];
auto host_out = new float[param.out_elems];
cudaMemcpy(host_in, input, 4 * param.in_elems, cudaMemcpyDefault);
PPLCUDASmallChannelCVTFormat(stream, input, output, param);
cudaMemcpy(host_out, output, 4 * param.out_elems, cudaMemcpyDefault);
delete[] host_in;
delete[] host_out;
} else {
PPLCUDASmallChannelCVTFormat(stream, input, output, param);
}
} else
{
PPLCUDANormalCVTFormat(stream, input, output, param);
}
}
CVTFormatMode GetCVTFormatMode(ReFormatParam param)
{
if (param.in_format == DATAFORMAT_NDARRAY) {
switch (param.out_format) {
case DATAFORMAT_NHWC8:
return NDARRAY_NHWC8;
case DATAFORMAT_N4CX:
return NDARRAY_N4CX;
default:
return CVTFormatUnknown;
}
} else if (param.in_format == DATAFORMAT_N4CX) {
switch (param.out_format) {
case DATAFORMAT_NDARRAY:
return N4CX_NDARRAY;
default:
return CVTFormatUnknown;
}
} else if (param.in_format == DATAFORMAT_NHWC8) {
switch (param.out_format) {
case DATAFORMAT_NDARRAY:
return NHWC8_NDARRAY;
default:
return CVTFormatUnknown;
}
} else {
return CVTFormatUnknown;
}
}
CVTTypeMode GetCVTTypeMode(ReFormatParam param)
{
if (param.in_type == DATATYPE_FLOAT32) {
switch (param.out_type) {
case DATATYPE_FLOAT16:
return FLOAT32_FLOAT16;
case DATATYPE_INT8:
return FLOAT32_INT8;
case DATATYPE_INT4B:
return FLOAT32_INT4B;
default:
return CVTTypeUnknown;
}
}
if (param.in_type == DATATYPE_FLOAT16) {
switch (param.out_type) {
case DATATYPE_FLOAT32:
return FLOAT16_FLOAT32;
case DATATYPE_INT8:
return FLOAT16_INT8;
case DATATYPE_INT4B:
return FLOAT16_INT4B;
default:
return CVTTypeUnknown;
}
}
if (param.in_type == DATATYPE_INT8) {
switch (param.out_type) {
case DATATYPE_FLOAT16:
return INT8_FLOAT16;
case DATATYPE_FLOAT32:
return INT8_FLOAT32;
case DATATYPE_INT4B:
return INT8_INT4B;
case DATATYPE_INT8:
return INT8_INT8;
default:
return CVTTypeUnknown;
}
}
if (param.in_type == DATATYPE_INT4B) {
switch (param.out_type) {
case DATATYPE_FLOAT16:
return INT4B_FLOAT16;
case DATATYPE_FLOAT32:
return INT4B_FLOAT32;
case DATATYPE_INT8:
return INT4B_INT8;
case DATATYPE_INT4B:
return INT4B_INT4B;
default:
return CVTTypeUnknown;
}
}
if (param.in_type == DATATYPE_INT32) {
switch (param.out_type) {
case DATATYPE_INT64:
return INT32_INT64;
default:
return CVTTypeUnknown;
}
}
if (param.in_type == DATATYPE_INT64) {
switch (param.out_type) {
case DATATYPE_INT32:
return INT64_INT32;
default:
return CVTTypeUnknown;
}
}
return CVTTypeUnknown;
}
bool IsFloatEqual(const std::vector<float>& a, const std::vector<float>& b) {
if (a.size() != b.size()) {
return false;
}
for (uint32_t i = 0; i < a.size(); i++) {
        if (fabs(a[i] - b[i]) > FLT_EPSILON) {
return false;
}
}
return true;
}
bool EqualQuant(const ppl::nn::cuda::CudaTensorQuant& quant_a, const ppl::nn::cuda::CudaTensorQuant& quant_b) {
return quant_a.bit_width == quant_b.bit_width &&
IsFloatEqual(quant_a.scale, quant_b.scale) &&
IsFloatEqual(quant_a.zero_point, quant_b.zero_point);
}
ppl::common::RetCode SetReLayoutParam(
ReFormatParam *param,
const TensorShape& input,
const TensorShape& output)
{
param->n_outer = input.GetDim(0);
param->channel = input.GetDimCount() > 1 ? input.GetDim(1) : 1;
param->n_inner = input.GetDimCount() > 2 ? input.GetElementsFromDimensionIncludingPadding(2) : 1;
param->in_format = input.GetDataFormat();
param->out_format = output.GetDataFormat();
param->in_type = input.GetDataType();
param->out_type = output.GetDataType();
param->mix_type = (param->in_type != param->out_type);
param->mix_format = (param->in_format != param->out_format);
param->src_pad = Align(param->channel, AlignDataFormat(param->in_format));
param->dst_pad = Align(param->channel, AlignDataFormat(param->out_format));
param->out_elems = output.GetElementsIncludingPadding();
param->in_elems = input.GetElementsIncludingPadding();
return RC_SUCCESS;
}
ppl::common::RetCode SetReLayoutParam(
ReFormatParam *param,
const TensorShape& input,
const ppl::nn::cuda::CudaTensorQuant& input_quant,
const TensorShape& output,
const ppl::nn::cuda::CudaTensorQuant& output_quant)
{
SetReLayoutParam(param, input, output);
param->i_step = input_quant.scale[0];
param->i_zero_point = input_quant.zero_point[0];
param->o_step = output_quant.scale[0];
param->o_zero_point = output_quant.zero_point[0];
if (param->in_type == param->out_type) {
param->mix_type = !EqualQuant(input_quant, output_quant);
}
return RC_SUCCESS;
}
void PPLCUDADataConvert(
cudaStream_t stream,
const void* input,
void* output,
void* tempBuf,
ReFormatParam& param)
{
if (param.in_format != param.out_format && param.in_type != param.out_type) {
PPLCUDACVTTypePerTensor(stream, input, tempBuf, param);
PPLCUDACVTFormat(stream, tempBuf, output, param);
return;
} else if (param.in_format != param.out_format) {
PPLCUDACVTFormat(stream, input, output, param);
return;
} else if (param.in_type != param.out_type) {
PPLCUDACVTTypePerTensor(stream, input, output, param);
return;
} else {
return;
}
}
|
57ffc9aeadfd1f9dad832b56f245b522c7b5e45a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**************************************************************************************************************************************************
| HBFT algorithm without Dynamic Programming in CUDA
| Author : Dinali Rosemin Dabarera
| University of Peradeniya (EFac 2016) All Rights Reserved
|*************************************************************************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "helpers.cuh"
/*
* Edge structure.
*/
struct Edge {
int from ;
int to;
};
/*
* CPU function to check level array.
*/
int isLevelFilled(int * level, int * vertices) {
int i;
for(i=0; i<*vertices; i++) {
if(level[i]==-1) {
return 1;
}
}
return 0;
}
/*
* GPU Kernel to update the level array of each vertex.
*/
__global__ void BreadthFirstSearch( struct Edge * adjacencyList, int * vertices, int * level, int * edges ) {
int tid = (blockDim.x * blockIdx.x ) + threadIdx.x;
if(tid<*edges) {
struct Edge element = adjacencyList[tid];
if (level[element.from]>=0 and level[element.to]==-1) {
level[element.to] = level[element.from]+1;
}
}
}
/*
* Main Program starts here.
*/
int main(int arg,char** args) {
hipEvent_t start,stop;
float elapsedtime;
hipEventCreate(&start);
hipEventRecord(start,0);
/*
* Select the GPU card: For Dynamic programing: GPU over 3.5 architecture
*/
int device =0;
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, device);
hipSetDevice(device);
fprintf(stderr,"Device name: %s\n", prop.name);
int i;
int v1,v2;
int count=1;
int finalLevel;
/*
* Host variables
*/
int * Hvertices=(int *) malloc(sizeof(int));
int * Hedges=(int *) malloc(sizeof(int));
int * HstartArrayCount = (int *) malloc(sizeof(int));
/*
* Read data from Graph file
*/
FILE* fileNew = fopen(args[1], "r");
fscanf(fileNew, "%d ",&finalLevel);
fscanf(fileNew, "%d %d %d",Hvertices, Hvertices, Hedges);
int * Hlevel= (int *)malloc(sizeof(int)*(*Hvertices));
struct Edge * HedgeList =(struct Edge * )malloc(sizeof(struct Edge)*(*Hedges));
for (i = 0; i < *Hvertices; ++i) {
Hlevel[i] = -1;
}
int val;
for (i = 0; i < *Hedges; ++i) {
fscanf(fileNew, "%d %d %d",&v1, &v2, &val);
// Adding edge v1 --> v2
HedgeList[i].from = v1;
HedgeList[i].to = v2;
}
/*
* Read data from Input vertex file
*/
FILE * vectorFile= fopen(args[2],"r");
fscanf(vectorFile,"%d",HstartArrayCount);
int tempVal;
for(i=0; i<*HstartArrayCount; i++) {
fscanf(vectorFile,"%d",&tempVal);
Hlevel[tempVal]=0;
}
/*
* Device variables
*/
int * Dvertices;
int * Dedges;
int * DstartArrayCount ;
int * Dlevel;
struct Edge * DedgeList ;
/*
* Allocate memory on Device
*/
checkCuda(hipMalloc((void **)&Dvertices,sizeof(int)));
checkCuda(hipMalloc((void **)&Dedges,sizeof(int)));
checkCuda(hipMalloc((void **)&DstartArrayCount,sizeof(int)));
checkCuda(hipMalloc((void **)&Dlevel,sizeof(int)*(*Hvertices)));
checkCuda(hipMalloc((void **)&DedgeList,sizeof(struct Edge)* (*Hedges)));
/*
* Copy data from Host to Device
*/
checkCuda(hipMemcpyAsync(Dvertices,Hvertices,sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpyAsync(Dedges,Hedges,sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpyAsync(DstartArrayCount,HstartArrayCount,sizeof(int),hipMemcpyHostToDevice));
checkCuda(hipMemcpyAsync(Dlevel,Hlevel,sizeof(int)*(*Hvertices),hipMemcpyHostToDevice));
checkCuda(hipMemcpyAsync(DedgeList,HedgeList,sizeof(struct Edge)*(*Hedges),hipMemcpyHostToDevice));
/*
     * Iterative Kernel call
*/
while(isLevelFilled(Hlevel,Hvertices)) {
hipLaunchKernelGGL(( BreadthFirstSearch), dim3(ceil(*Hedges/256.0)),dim3(256), 0, 0, DedgeList,Dvertices,Dlevel,Dedges);
hipDeviceSynchronize();
checkCudaError();
count ++;
checkCuda(hipMemcpy(Hlevel,Dlevel,sizeof(int)*(*Hvertices),hipMemcpyDeviceToHost));
}
/*
* Copy Memory back from Device to Host
*/
checkCuda(hipMemcpy(Hlevel,Dlevel,sizeof(int)*(*Hvertices),hipMemcpyDeviceToHost));
/*
* Free memory on the Device
*/
hipFree(Dvertices);
hipFree(Dedges);
hipFree(DstartArrayCount );
hipFree(Dlevel);
hipFree(DedgeList);
/*
* Stop the Clock
*/
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedtime,start,stop);
printf("%d, %d, %.8f \n",*Hvertices, *Hedges,elapsedtime/(float)1000);
/*
* Print vertices according to the level order
*/
printf("\nLevel and Parent Arrays -\n");
for (i = 0; i < *Hvertices; ++i) {
printf("Level of Vertex %d is %d\n",
i, Hlevel[i]);
}
printf("vertices in level order when traversing :\n");
int b;
    for(b=0;b<=count;b++){
        for (i = 0; i < *Hvertices; ++i) {
            if(Hlevel[i]==b){
                printf("%d ,", i);
            }
        }
        printf(" | ");
    }
/*
* Free Host memory
*/
free(Hvertices);
free(Hedges);
free(HstartArrayCount );
free(Hlevel);
free(HedgeList);
return 0;
}
| 57ffc9aeadfd1f9dad832b56f245b522c7b5e45a.cu | /**************************************************************************************************************************************************
| HBFT algorithm without Dynamic Programming in CUDA
| Author : Dinali Rosemin Dabarera
| University of Peradeniya (EFac 2016) All Rights Reserved
|*************************************************************************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "helpers.cuh"
/*
* Edge structure.
*/
struct Edge {
int from ;
int to;
};
/*
* CPU function to check level array.
*/
int isLevelFilled(int * level, int * vertices) {
int i;
for(i=0; i<*vertices; i++) {
if(level[i]==-1) {
return 1;
}
}
return 0;
}
/*
* GPU Kernel to update the level array of each vertex.
*/
__global__ void BreadthFirstSearch( struct Edge * adjacencyList, int * vertices, int * level, int * edges ) {
int tid = (blockDim.x * blockIdx.x ) + threadIdx.x;
if(tid<*edges) {
struct Edge element = adjacencyList[tid];
if (level[element.from]>=0 and level[element.to]==-1) {
level[element.to] = level[element.from]+1;
}
}
}
/*
* Main Program starts here.
*/
int main(int arg,char** args) {
cudaEvent_t start,stop;
float elapsedtime;
cudaEventCreate(&start);
cudaEventRecord(start,0);
/*
* Select the GPU card: For Dynamic programing: GPU over 3.5 architecture
*/
int device =0;
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, device);
cudaSetDevice(device);
fprintf(stderr,"Device name: %s\n", prop.name);
int i;
int v1,v2;
int count=1;
int finalLevel;
/*
* Host variables
*/
int * Hvertices=(int *) malloc(sizeof(int));
int * Hedges=(int *) malloc(sizeof(int));
int * HstartArrayCount = (int *) malloc(sizeof(int));
/*
* Read data from Graph file
*/
FILE* fileNew = fopen(args[1], "r");
fscanf(fileNew, "%d ",&finalLevel);
fscanf(fileNew, "%d %d %d",Hvertices, Hvertices, Hedges);
int * Hlevel= (int *)malloc(sizeof(int)*(*Hvertices));
struct Edge * HedgeList =(struct Edge * )malloc(sizeof(struct Edge)*(*Hedges));
for (i = 0; i < *Hvertices; ++i) {
Hlevel[i] = -1;
}
int val;
for (i = 0; i < *Hedges; ++i) {
fscanf(fileNew, "%d %d %d",&v1, &v2, &val);
// Adding edge v1 --> v2
HedgeList[i].from = v1;
HedgeList[i].to = v2;
}
/*
* Read data from Input vertex file
*/
FILE * vectorFile= fopen(args[2],"r");
fscanf(vectorFile,"%d",HstartArrayCount);
int tempVal;
for(i=0; i<*HstartArrayCount; i++) {
fscanf(vectorFile,"%d",&tempVal);
Hlevel[tempVal]=0;
}
/*
* Device variables
*/
int * Dvertices;
int * Dedges;
int * DstartArrayCount ;
int * Dlevel;
struct Edge * DedgeList ;
/*
* Allocate memory on Device
*/
checkCuda(cudaMalloc((void **)&Dvertices,sizeof(int)));
checkCuda(cudaMalloc((void **)&Dedges,sizeof(int)));
checkCuda(cudaMalloc((void **)&DstartArrayCount,sizeof(int)));
checkCuda(cudaMalloc((void **)&Dlevel,sizeof(int)*(*Hvertices)));
checkCuda(cudaMalloc((void **)&DedgeList,sizeof(struct Edge)* (*Hedges)));
/*
* Copy data from Host to Device
*/
checkCuda(cudaMemcpyAsync(Dvertices,Hvertices,sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpyAsync(Dedges,Hedges,sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpyAsync(DstartArrayCount,HstartArrayCount,sizeof(int),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpyAsync(Dlevel,Hlevel,sizeof(int)*(*Hvertices),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpyAsync(DedgeList,HedgeList,sizeof(struct Edge)*(*Hedges),cudaMemcpyHostToDevice));
/*
     * Iterative Kernel call
*/
while(isLevelFilled(Hlevel,Hvertices)) {
BreadthFirstSearch<<<ceil(*Hedges/256.0),256>>>(DedgeList,Dvertices,Dlevel,Dedges);
cudaDeviceSynchronize();
checkCudaError();
count ++;
checkCuda(cudaMemcpy(Hlevel,Dlevel,sizeof(int)*(*Hvertices),cudaMemcpyDeviceToHost));
}
/*
* Copy Memory back from Device to Host
*/
checkCuda(cudaMemcpy(Hlevel,Dlevel,sizeof(int)*(*Hvertices),cudaMemcpyDeviceToHost));
/*
* Free memory on the Device
*/
cudaFree(Dvertices);
cudaFree(Dedges);
cudaFree(DstartArrayCount );
cudaFree(Dlevel);
cudaFree(DedgeList);
/*
* Stop the Clock
*/
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedtime,start,stop);
printf("%d, %d, %.8f \n",*Hvertices, *Hedges,elapsedtime/(float)1000);
/*
* Print vertices according to the level order
*/
printf("\nLevel and Parent Arrays -\n");
for (i = 0; i < *Hvertices; ++i) {
printf("Level of Vertex %d is %d\n",
i, Hlevel[i]);
}
printf("vertices in level order when traversing :\n");
int b;
    for(b=0;b<=count;b++){
        for (i = 0; i < *Hvertices; ++i) {
            if(Hlevel[i]==b){
                printf("%d ,", i);
            }
        }
        printf(" | ");
    }
/*
* Free Host memory
*/
free(Hvertices);
free(Hedges);
free(HstartArrayCount );
free(Hlevel);
free(HedgeList);
return 0;
}
|
d607dffd119353f06f6cfc9fc70ad5687641d265.hip | // !!! This is a file automatically generated by hipify!!!
/*
* ReconstructionTest_gpu.cu
*
* Created on: Jul 11, 2014
* Author: tombr
*/
#include "ReconstructionTest.h"
#include <tbblas/deeplearn/conv_dbn.hpp>
#include <tbblas/rearrange.hpp>
#include <tbblas/dot.hpp>
#include <tbblas/math.hpp>
#include <tbblas/util.hpp>
#include <tbblas/new_context.hpp>
#include <tbblas/change_stream.hpp>
#include <boost/thread/thread.hpp>
#include <omp.h>
#include <iostream>
namespace gml {
namespace dbn {
ReconstructionTestChecker::ReconstructionTestChecker() {
ReconstructionTest test;
test.initializeClass();
CHECK_MEMORY_LAYOUT2(Model, test);
CHECK_MEMORY_LAYOUT2(Dataset, test);
CHECK_MEMORY_LAYOUT2(Type, test);
CHECK_MEMORY_LAYOUT2(MaxLayer, test);
CHECK_MEMORY_LAYOUT2(GpuCount, test);
CHECK_MEMORY_LAYOUT2(FilterBatchLength, test);
CHECK_MEMORY_LAYOUT2(Reconstructions, test);
CHECK_MEMORY_LAYOUT2(ReconstructionError, test);
}
void ReconstructionTest::update(IProgressMonitor* monitor) const {
using namespace tbblas;
using namespace tbblas::deeplearn;
typedef dbn_t::value_t value_t;
const unsigned dimCount = dbn_t::dimCount;
typedef tensor<value_t, dimCount, true> tensor_t;
typedef tensor_t::dim_t dim_t;
v_host_tensor_t& dataset = *getDataset();
boost::shared_ptr<v_host_tensor_t> reconstructions(new v_host_tensor_t(dataset.size()));
omp_set_num_threads(getGpuCount());
value_t totalError = 0;
#pragma omp parallel
{
size_t tid = omp_get_thread_num();
hipSetDevice(tid);
new_context context;
hipStream_t copyStream;
hipStreamCreate(©Stream);
tbblas::deeplearn::conv_dbn<value_t, dimCount> dbn(*getModel());
for (size_t i = 0; i < getModel()->crbms().size() && i < getFilterBatchLength().size(); ++i)
dbn.set_batch_length(i, getFilterBatchLength()[i]);
tensor_t v1, v2;
value_t error = 0;
tensor_t vtemp;
if (dataset.size() > tid)
vtemp = *dataset[tid];
for (size_t i = tid; i < dataset.size(); i += getGpuCount()) {
v1 = vtemp;
tbblas::synchronize();
if (i + getGpuCount() < dataset.size()) {
change_stream context(copyStream);
vtemp = *dataset[i + getGpuCount()];
}
dbn.cvisibles() = v1;
dbn.normalize_visibles();
dbn.infer_hiddens(getMaxLayer());
dbn.infer_visibles(getMaxLayer());
dbn.diversify_visibles();
hipStreamSynchronize(copyStream);
v2 = dbn.cvisibles();
tbblas::synchronize();
switch (getType()) {
case TestType::Reconstruct:
{
change_stream context(copyStream);
reconstructions->at(i) = boost::make_shared<host_tensor_t>(v2);
}
break;
case TestType::CalculateMSE:
error += dot(v1 - v2, v1 - v2) / v1.count();
break;
case TestType::CalculateRMSE:
error += sqrt(dot(v1 - v2, v1 - v2) / v1.count());
break;
case TestType::CalculateRRMSE:
error += sqrt(dot(v1 - v2, v1 - v2) / v1.count()) / (sum(v1) / v1.count());
break;
}
#pragma omp master
if (monitor)
monitor->reportProgress((double)(i+1) / (double)dataset.size() * 100.0);
}
hipStreamSynchronize(copyStream);
hipStreamDestroy(copyStream);
#pragma omp critical
totalError += error;
#pragma omp barrier
}
switch (getType()) {
case TestType::Reconstruct:
newState->setReconstructions(reconstructions);
break;
case TestType::CalculateMSE:
case TestType::CalculateRMSE:
case TestType::CalculateRRMSE:
newState->setReconstructionError(totalError / dataset.size());
break;
}
}
}
}
| d607dffd119353f06f6cfc9fc70ad5687641d265.cu | /*
* ReconstructionTest_gpu.cu
*
* Created on: Jul 11, 2014
* Author: tombr
*/
#include "ReconstructionTest.h"
#include <tbblas/deeplearn/conv_dbn.hpp>
#include <tbblas/rearrange.hpp>
#include <tbblas/dot.hpp>
#include <tbblas/math.hpp>
#include <tbblas/util.hpp>
#include <tbblas/new_context.hpp>
#include <tbblas/change_stream.hpp>
#include <boost/thread/thread.hpp>
#include <omp.h>
#include <iostream>
namespace gml {
namespace dbn {
ReconstructionTestChecker::ReconstructionTestChecker() {
ReconstructionTest test;
test.initializeClass();
CHECK_MEMORY_LAYOUT2(Model, test);
CHECK_MEMORY_LAYOUT2(Dataset, test);
CHECK_MEMORY_LAYOUT2(Type, test);
CHECK_MEMORY_LAYOUT2(MaxLayer, test);
CHECK_MEMORY_LAYOUT2(GpuCount, test);
CHECK_MEMORY_LAYOUT2(FilterBatchLength, test);
CHECK_MEMORY_LAYOUT2(Reconstructions, test);
CHECK_MEMORY_LAYOUT2(ReconstructionError, test);
}
void ReconstructionTest::update(IProgressMonitor* monitor) const {
using namespace tbblas;
using namespace tbblas::deeplearn;
typedef dbn_t::value_t value_t;
const unsigned dimCount = dbn_t::dimCount;
typedef tensor<value_t, dimCount, true> tensor_t;
typedef tensor_t::dim_t dim_t;
v_host_tensor_t& dataset = *getDataset();
boost::shared_ptr<v_host_tensor_t> reconstructions(new v_host_tensor_t(dataset.size()));
omp_set_num_threads(getGpuCount());
value_t totalError = 0;
#pragma omp parallel
{
size_t tid = omp_get_thread_num();
cudaSetDevice(tid);
new_context context;
cudaStream_t copyStream;
cudaStreamCreate(©Stream);
tbblas::deeplearn::conv_dbn<value_t, dimCount> dbn(*getModel());
for (size_t i = 0; i < getModel()->crbms().size() && i < getFilterBatchLength().size(); ++i)
dbn.set_batch_length(i, getFilterBatchLength()[i]);
tensor_t v1, v2;
value_t error = 0;
tensor_t vtemp;
if (dataset.size() > tid)
vtemp = *dataset[tid];
for (size_t i = tid; i < dataset.size(); i += getGpuCount()) {
v1 = vtemp;
tbblas::synchronize();
if (i + getGpuCount() < dataset.size()) {
change_stream context(copyStream);
vtemp = *dataset[i + getGpuCount()];
}
dbn.cvisibles() = v1;
dbn.normalize_visibles();
dbn.infer_hiddens(getMaxLayer());
dbn.infer_visibles(getMaxLayer());
dbn.diversify_visibles();
cudaStreamSynchronize(copyStream);
v2 = dbn.cvisibles();
tbblas::synchronize();
switch (getType()) {
case TestType::Reconstruct:
{
change_stream context(copyStream);
reconstructions->at(i) = boost::make_shared<host_tensor_t>(v2);
}
break;
case TestType::CalculateMSE:
error += dot(v1 - v2, v1 - v2) / v1.count();
break;
case TestType::CalculateRMSE:
error += sqrt(dot(v1 - v2, v1 - v2) / v1.count());
break;
case TestType::CalculateRRMSE:
error += sqrt(dot(v1 - v2, v1 - v2) / v1.count()) / (sum(v1) / v1.count());
break;
}
#pragma omp master
if (monitor)
monitor->reportProgress((double)(i+1) / (double)dataset.size() * 100.0);
}
cudaStreamSynchronize(copyStream);
cudaStreamDestroy(copyStream);
#pragma omp critical
totalError += error;
#pragma omp barrier
}
switch (getType()) {
case TestType::Reconstruct:
newState->setReconstructions(reconstructions);
break;
case TestType::CalculateMSE:
case TestType::CalculateRMSE:
case TestType::CalculateRRMSE:
newState->setReconstructionError(totalError / dataset.size());
break;
}
}
}
}
|
cc951c41268c161f847789637ef7a7903faac298.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include "common/inc/helper_functions.h"
#include "common/inc/helper_cuda.h"
typedef float2 Complex;
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
namespace caffe {
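// The kernels below shuffle data between the FFT library's R2C half-spectrum
// layout and a centered ("fftshifted") layout addressed through precomputed
// index masks, and Forward_gpu uses them to update the frequency-domain
// filters hf from the stored sample spectra (samplesf). The naming (hf,
// samplesf, reg_window, sample_weight) suggests a correlation-filter style
// update; this summary is inferred from the code itself, not from external docs.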
void fft2_second(hipfftHandle forward_plan, float* d_in, float2* d_freq)
{
hipfftExecR2C(forward_plan, d_in, d_freq);
}
void ifft2_second(hipfftHandle inverse_plan, float2* d_freq, float* d_out)
{
hipfftExecC2R(inverse_plan, d_freq, d_out);
}
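// ifftshift_second / fftshift_second remap between the centered spectrum used
// by the layer blobs and the half-spectrum order expected by the FFT plans.
// L_mask stores signed source indices: a negative entry means the value is
// taken from the mirrored bin, so its imaginary part is negated (complex
// conjugate), which is how the R2C conjugate symmetry is reconstructed.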
template <typename Dtype>
__global__ void ifftshift_second(const int n, int num_per_channel, Dtype* L_mask, Dtype* input_real, Dtype* input_imag, float2* output, int row_num, int col_num,int num_per_channel1) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel1;
int current_index=index%num_per_channel1;
if(L_mask[current_index]>0)
{int ori_index=L_mask[current_index]-1+channel_index*num_per_channel1;
output[index].x=input_real[ori_index];
output[index].y=input_imag[ori_index];
}
else
{ int ori_index=-L_mask[current_index]-1+channel_index*num_per_channel1;
output[index].x=input_real[ori_index];
output[index].y=-input_imag[ori_index];
}
}
}
template <typename Dtype>
__global__ void fftshift_second(const int n, int num_per_channel1, Dtype* L_mask, float2* input, Dtype* output_real, Dtype* output_imag) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel1;
int current_index=index%num_per_channel1;
if(L_mask[current_index]>-0.5)
{
int ori_index=L_mask[current_index]+channel_index*num_per_channel1;
output_real[index]=input[ori_index].x;
output_imag[index]=input[ori_index].y;
// output_real[index]=ori_index;
// output_imag[index]=ori_index;
}
else
{
int ori_index=-L_mask[current_index]+channel_index*num_per_channel1;
output_real[index]=input[ori_index].x;
output_imag[index]=-input[ori_index].y; // take the complex conjugate
//output_real[index]=ori_index;
//output_imag[index]=-ori_index; // take the complex conjugate
}
}
}
__global__ void scale_out_real_second(const int n, float* input, float scale_factor) {
CUDA_KERNEL_LOOP(index, n) {
input[index]=input[index]/scale_factor;
}
}
template <typename Dtype>
__global__ void add_mask_second(const int n, int num_per_channel, Dtype* mask, float* input, float * output) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel;
int current_index=index%num_per_channel;
output[index]=input[index]*mask[current_index];
}
}
template <typename Dtype>
__global__ void add_reg_mask(const int n, int num_per_channel, Dtype* mask, Dtype* input, Dtype * output) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel;
int current_index=index%num_per_channel;
output[index]=input[index]*mask[current_index];
}
}
void fft2(hipfftHandle forward_plan, float* d_in, float2* d_freq)
{
hipfftExecR2C(forward_plan, d_in, d_freq);
}
void ifft2(hipfftHandle inverse_plan, float2* d_freq, float* d_out)
{
hipfftExecC2R(inverse_plan, d_freq, d_out);
}
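// fft2/ifft2 are identical to the *_second wrappers above. Note that the C2R
// inverse transform is unnormalized, which is why scale_out_real divides the
// result by row_num*col_num after every ifft2 call.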
__global__ void copy_memory_to_blob(const int n, float2* mem1, float* tmp1, float* tmp2) {
CUDA_KERNEL_LOOP(index, n) {
}
}
__global__ void copy_memory_from_blob(const int n, float2* mem1, float* tmp1, float* tmp2) {
CUDA_KERNEL_LOOP(index, n) {
}
}
template <typename Dtype>
__global__ void set_zeros(const int n, Dtype* in_out) {
CUDA_KERNEL_LOOP(index, n) {
in_out[index]=0;
}
}
__global__ void scale_out_real(const int n, float* input, float scale_factor) {
CUDA_KERNEL_LOOP(index, n) {
input[index]=input[index]/scale_factor;
}
}
template <typename Dtype>
__global__ void add_mask(const int n, int num_per_channel, Dtype* mask, float* input, float * output) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel;
int current_index=index%num_per_channel;
output[index]=input[index]*mask[current_index];
}
}
template <typename Dtype>
__global__ void ifftshift(const int n, int num_per_channel, Dtype* L_mask, Dtype* input_real, Dtype* input_imag, float2* output, int row_num, int col_num,int num_per_channel1) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel1;
int current_index=index%num_per_channel1;
if(L_mask[current_index]>0)
{int ori_index=L_mask[current_index]-1+channel_index*num_per_channel1;
output[index].x=input_real[ori_index];
output[index].y=input_imag[ori_index];
}
else
{ int ori_index=-L_mask[current_index]-1+channel_index*num_per_channel1;
output[index].x=input_real[ori_index];
output[index].y=-input_imag[ori_index];
}
}
}
template <typename Dtype>
__global__ void fftshift(const int n, int num_per_channel1, Dtype* L_mask, float2* input, Dtype* output_real, Dtype* output_imag) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel1;
int current_index=index%num_per_channel1;
if(L_mask[current_index]>-0.5)
{
int ori_index=L_mask[current_index]+channel_index*num_per_channel1;
output_real[index]=input[ori_index].x;
output_imag[index]=input[ori_index].y;
// output_real[index]=ori_index;
// output_imag[index]=ori_index;
}
else
{
int ori_index=-L_mask[current_index]+channel_index*num_per_channel1;
output_real[index]=input[ori_index].x;
output_imag[index]=-input[ori_index].y; // take the complex conjugate
//output_real[index]=ori_index;
//output_imag[index]=-ori_index; // take the complex conjugate
}
}
}
template <typename Dtype>
__global__ void obtain_output(const int n,int number_per_sample1, int number_per_sample2,Dtype* L_mask, Dtype* real1, Dtype* real2, Dtype* real3, Dtype* real4, Dtype* real5,Dtype* imag1,Dtype* imag2, Dtype* imag3,Dtype* imag4,Dtype* imag5,Dtype* y_real, Dtype* y_imag) {
CUDA_KERNEL_LOOP(index, n) {
// first determine which sample the current index belongs to
int sample_index1=index/number_per_sample1;
int index1=index%number_per_sample1;
int index2=number_per_sample2*sample_index1+L_mask[index1]-1;
if(L_mask[index1]==0)
{
y_real[index]=real1[index]+real2[index]+real4[index]+real5[index];
y_imag[index]=imag1[index]+imag2[index]+imag4[index]+imag5[index];
}
else
{
y_real[index]=real1[index]+real2[index]+real3[index2]+real4[index]+real5[index];
y_imag[index]=imag1[index]+imag2[index]+imag3[index2]+imag4[index]+imag5[index];
}
}
}
template <typename Dtype>
__global__ void obtain_freq(const int n, float2* input, Dtype* output) {
CUDA_KERNEL_LOOP(index, n) {
output[index]=input[index].x;
output[index+n]=input[index].y;
}
}
template <typename Dtype>
__global__ void pad_filter(const int n,Dtype* pad_mask, int pad_h, int pad_w, int num_per_channel1, int num_per_channel2, int filter_h, int filter_w, int height, int width, int padded_height, int padded_width, Dtype* h_real_in, Dtype* h_imag_in , Dtype* h_real_out, Dtype* h_imag_out) {
CUDA_KERNEL_LOOP(index, n) {
// first determine the current height and width
int current_index=index%num_per_channel1;
int channel_index=index/num_per_channel1;
int index_ori=pad_mask[current_index]+channel_index*num_per_channel2;
h_real_out[index]=h_real_in[0];
h_imag_out[index]=h_imag_in[0];
}
}
template <typename Dtype>
__global__ void get_col(const int n, Dtype* col_mask, Dtype* h_real_in, Dtype* h_imag_in, Dtype* h_real_col, Dtype* h_imag_col) {
CUDA_KERNEL_LOOP(index, n) {
int index_ori=col_mask[index];
h_real_col[index]=h_real_in[index_ori];
h_imag_col[index]=h_imag_in[index_ori];
}
}
template <typename Dtype>
__global__ void get_freq(const int n, float2* freq, Dtype* top_data_real, Dtype* top_data_imag) {
CUDA_KERNEL_LOOP(index, n) {
top_data_real[index]=freq[index].x;
top_data_imag[index]=freq[index].y;
}
}
template <typename Dtype>
__global__ void set_freq(const int n, float2* freq, Dtype* input_data) {
CUDA_KERNEL_LOOP(index, n) {
freq[index].x=input_data[index];
freq[index].y=input_data[index+n];
}
}
template <typename Dtype>
__global__ void laplace_add(const int n, Dtype* input1, Dtype* input2, Dtype* output1, Dtype* output2,Dtype factor) {
CUDA_KERNEL_LOOP(index, n) {
output1[index]=output1[index]+factor*input1[index];
output2[index]=output2[index]+factor*input2[index];
}
}
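// my_weight_sample_kernel: for every frequency bin of one sample it sums, over
// all channels, hf .* conj(samplesf) (real: wr*sr+wi*si, imag: wi*sr-wr*si),
// i.e. the channel-reduced response of the filter against that sample.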
template <typename Dtype>
__global__ void my_weight_sample_kernel(const int n, Dtype* sample_real, Dtype* sample_imag,
Dtype* weight_real, Dtype* weight_imag, Dtype* weighted_sample_real,Dtype* weighted_sample_imag, int number_per_sample,int number_per_channel) {
CUDA_KERNEL_LOOP(index, n) {
int channel_num=number_per_sample/number_per_channel;
int sample_index=index/number_per_channel;
int position_index=index%number_per_channel;
for(int i=0;i<channel_num;i++)
{int hf_base_position=position_index+i*number_per_channel;
//weighted_sample_real[0]= weighted_sample_real[0]+weight_real[0]*sample_real[0]+weight_imag[0]*sample_imag[0];
// weighted_sample_real[1]=hf_base_position;
// weighted_sample_real[0]=sample_real[0];
// printf("the index is %d\n",index);
weighted_sample_real[index]= weighted_sample_real[index]+weight_real[hf_base_position]*sample_real[hf_base_position+number_per_sample*sample_index]+weight_imag[hf_base_position]*sample_imag[hf_base_position+number_per_sample*sample_index];
weighted_sample_imag[index]= weighted_sample_imag[index]-weight_real[hf_base_position]*sample_imag[hf_base_position+number_per_sample*sample_index]+weight_imag[hf_base_position]*sample_real[hf_base_position+number_per_sample*sample_index];
}
}
}
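// weight_sample_kernel_second: multiplies the accumulated response (sh) back
// with samplesf (no conjugation here) and sums over all samples, each weighted
// by sample_weight[i], producing the per-channel terms KK_real/KK_imag.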
template <typename Dtype>
__global__ void weight_sample_kernel_second(const int n, Dtype* sample_real, Dtype* sample_imag,
Dtype* weighted_sample_real, Dtype* weighted_sample_imag, Dtype* KK_real,Dtype* KK_imag,Dtype* sample_weight, int number_per_sample,int number_per_channel, int sample_num) {
CUDA_KERNEL_LOOP(index, n) {
int position_index=index%number_per_channel;
for(int i=0; i<sample_num;i++)
{
int weighted_sample_index=position_index+i*number_per_channel;
int index1=index+i*number_per_sample;
KK_real[index]=KK_real[index]+sample_weight[i]*(weighted_sample_real[weighted_sample_index]*sample_real[index1]-weighted_sample_imag[weighted_sample_index]*sample_imag[index1]);
KK_imag[index]=KK_imag[index]+sample_weight[i]*(weighted_sample_real[weighted_sample_index]*sample_imag[index1]+weighted_sample_imag[weighted_sample_index]*sample_real[index1]);
}
}
}
template <typename Dtype>
__global__ void fuse_result(const int n, Dtype* input,Dtype* output, int channels, int num_per_channel2,int number_per_sample1 ) {
CUDA_KERNEL_LOOP(index, n) {
// combine the responses of the 10 fragments: the first nine are weighted by 9, the last one is subtracted
for(int frag_id=0;frag_id<10;frag_id++)
{ int position_index=index+number_per_sample1*frag_id;
if(frag_id<9)
{
output[index]=output[index]+9*input[position_index];
}
else
{
output[index]=output[index]-input[position_index];
}
}
}
}
template <typename Dtype>
__global__ void add_different_layers(const int n,int num_per_channel1, int num_per_channel2, Dtype* L_mask, Dtype* real,Dtype* imag, Dtype* sh_real, Dtype* sh_imag) {
CUDA_KERNEL_LOOP(index, n) {
// map the current index to its channel and within-channel position
int channel_index=index/num_per_channel1;
int index1=index%num_per_channel1;
int index2=num_per_channel2*channel_index+L_mask[index1]-1;
if(L_mask[index1]==0)
{
sh_real[index]=sh_real[index];
sh_imag[index]=sh_imag[index];
}
else
{
sh_real[index]=sh_real[index]+real[index2];
sh_imag[index]=sh_imag[index]+imag[index2];
}
}
}
template <typename Dtype>
__global__ void crop_sample(const int n,int num_per_channel1, int num_per_channel2, Dtype* L_mask1, Dtype* sh_real, Dtype* sh_imag, Dtype* output_real, Dtype* output_imag) {
CUDA_KERNEL_LOOP(index, n) {
// map the current index to its channel and within-channel position
int position_index=index%num_per_channel1;
int channel_index=index/num_per_channel1;
int index1=(L_mask1[position_index]-1)+num_per_channel2*channel_index;
output_real[index]=sh_real[index1];
output_imag[index]=sh_imag[index1];
}
}
template <typename Dtype>
__global__ void compupte_H_transpose(const int n,Dtype* H, Dtype* H_transpose, int num_per_channel_real, int height, int width) {
CUDA_KERNEL_LOOP(index, n) {
int channel_id=index/num_per_channel_real;
int index1=index%num_per_channel_real;
int height_id=index1/width;
int width_id=index1%width;
int new_index=width_id*width+height_id+channel_id*num_per_channel_real;
H_transpose[new_index]=H[index];
}
}
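// compute_AW / compute_ATAW_positive / compute_ATAW_negative apply a sparse
// difference operator A and its transpose per channel: Ap lists index pairs
// whose filter values are subtracted (AW = A*W), and the positive/negative
// index tables scatter those differences back with +/- signs (A^T * A * W).
// This looks like a spatial smoothness regularizer on the masked filter.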
template <typename Dtype>
__global__ void compute_AW(const int n, Dtype* H_transpose, Dtype* Ap, Dtype* AW, int A_height, int A_width, int num_per_channel_real) {
CUDA_KERNEL_LOOP(index, n) {
int channel_id=index/A_height;
int current_index=index%A_height;
int index1=Ap[A_width*current_index]; int index2=Ap[A_width*current_index+1];
AW[index]=H_transpose[index1+channel_id*num_per_channel_real]-H_transpose[index2+channel_id*num_per_channel_real];
}
}
template <typename Dtype>
__global__ void compute_ATAW_positive(const int n, Dtype* ATAW_positive_index, Dtype* ATAW, Dtype* AW, int AW_length_per_channel, int index_height, int index_width, int num_per_channel_real) {
CUDA_KERNEL_LOOP(index, n) {
int channel_id=index/index_height;
int current_index=index%index_height;
int H_index=ATAW_positive_index[current_index*index_width]+channel_id*num_per_channel_real;
int AW_base_index=AW_length_per_channel*channel_id;
for (int i=1;i<index_width;i++)
{
int AW_index=ATAW_positive_index[current_index*index_width+i];
if(AW_index==-1)
break;
AW_index=AW_index+AW_base_index;
ATAW[H_index]=ATAW[H_index]+AW[AW_index];
}
}
}
template <typename Dtype>
__global__ void compute_ATAW_negative(const int n, Dtype* ATAW_negative_index, Dtype* ATAW, Dtype* AW, int AW_length_per_channel, int index_height, int index_width, int num_per_channel_real) {
CUDA_KERNEL_LOOP(index, n) {
int channel_id=index/index_height;
int current_index=index%index_height;
int H_index=ATAW_negative_index[current_index*index_width]+channel_id*num_per_channel_real;
int AW_base_index=AW_length_per_channel*channel_id;
for (int i=1;i<index_width;i++)
{
int AW_index=ATAW_negative_index[current_index*index_width+i];
if(AW_index==-1)
break;
AW_index=AW_index+AW_base_index;
ATAW[H_index]=ATAW[H_index]-AW[AW_index];
}
}
}
template <typename Dtype>
__global__ void get_middle_line(const int n, Dtype* weight_sample, Dtype* middle_line, int height, int width) {
CUDA_KERNEL_LOOP(index, n) {
}
}
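// Forward_gpu outline (inferred from the code below):
// 1) per blob: ifftshift+ifft the current hf, apply binary_mask /
// binary_mask_adaptive in the spatial domain, fft+fftshift back, and
// accumulate hf .* conj(samplesf) over channels (my_weight_sample_kernel);
// 2) sum these responses over all blobs into sh_real/sh_imag, remapping
// blob 2, which lives at a different spatial resolution, via L_index;
// 3) multiply sh back with samplesf per blob and per sample (KK terms);
// 4) build the spatial-regularization term ATAW_MC from the masked filters;
// 5) combine KK, mu*ATAW_MC and the reg_window term, transform back to the
// frequency domain and write the new hf into this->blobs_.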
template <typename Dtype>
void WtfLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* data=Layer<Dtype>::feature_num[0]->mutable_cpu_data();
int feature_num=data[0];
int count1; int count2; int count3; int number_per_sample; float scale_factor; int col_num; int row_num; int num_per_channel1;
int num_per_channel2;
Dtype* sample_weight=Layer<Dtype>::sample_weight[0]->mutable_gpu_data();
Dtype* sample_weight_cpu=Layer<Dtype>::sample_weight[0]->mutable_cpu_data();
int sample_num=Layer<Dtype>::sample_weight[0]->width();
Dtype* index_cpu=Layer<Dtype>::index[0]->mutable_cpu_data();
Dtype* index_cpu1=Layer<Dtype>::index1[0]->mutable_cpu_data();
int index[feature_num];
int index1[feature_num];
for(int i=0;i<feature_num;i++)
{
index[i]=index_cpu[i];
index1[i]=index_cpu1[i];
}
Dtype* ifftshift_mask;Dtype* fftshift_mask; Dtype* weighted_sample_real;Dtype* weighted_sample_imag;
Dtype* sample_real; Dtype* sample_imag;
Dtype* KK_real;Dtype* KK_imag;
Dtype* tmp_real1;Dtype* tmp_imag1;
Dtype* hf_real;
Dtype* hf_imag; Dtype* laplace_real; Dtype* laplace_imag; Dtype* mask;
// First pass over the feature blobs: rebuild the masked hf in the spatial
// domain and accumulate hf .* conj(samplesf) per channel.
for(int blob_id=0;blob_id<feature_num; blob_id++)
{
// printf("the value of blob_id is %d\n\n",blob_id);
if(blob_id!=2)
{
ifftshift_mask=Layer<Dtype>::ifftshift_mask[0]->mutable_gpu_data();
fftshift_mask=Layer<Dtype>::fftshift_mask[0]->mutable_gpu_data();
weighted_sample_real=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->mutable_gpu_data();
weighted_sample_imag=Layer<Dtype>::first_layer_weighted_sample_imag[blob_id]->mutable_gpu_data();
sample_real=Layer<Dtype>::first_layer_samplef_real[blob_id]->mutable_gpu_data();
sample_imag=Layer<Dtype>::first_layer_samplef_imag[blob_id]->mutable_gpu_data();
KK_real=Layer<Dtype>::KK_real[blob_id]->mutable_gpu_data();
KK_imag=Layer<Dtype>::KK_imag[blob_id]->mutable_gpu_data();
tmp_real1=Layer<Dtype>::first_layer_tmp_real1[blob_id]->mutable_gpu_data();
tmp_imag1=Layer<Dtype>::first_layer_tmp_imag1[blob_id]->mutable_gpu_data();
hf_real=Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data();
hf_imag=Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data();
laplace_real=Layer<Dtype>::laplace_real[blob_id]->mutable_gpu_data();
laplace_imag=Layer<Dtype>::laplace_imag[blob_id]->mutable_gpu_data();
col_num=Layer<Dtype>::first_layer_hf_real[blob_id]->height(); row_num=Layer<Dtype>::first_layer_hf_real[blob_id]->height(); num_per_channel1=row_num*(col_num/2+1);
num_per_channel2=row_num*col_num;
count1=this->blobs_[blob_id]->channels()*row_num*(col_num/2+1);//fftshift
count2=this->blobs_[blob_id]->channels()*row_num*col_num;
count3=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->count();
number_per_sample=this->blobs_[blob_id]->channels()*(col_num/2+1)*row_num;
hipLaunchKernelGGL(( ifftshift), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1, num_per_channel1, ifftshift_mask, Layer<Dtype>::matlab_hf_real[blob_id]->mutable_gpu_data() , Layer<Dtype>::matlab_hf_imag[blob_id]->mutable_gpu_data(), this->d_freq2,row_num, col_num,num_per_channel1);
ifft2(this->inverse_plan[blob_id],this->d_freq2,this->d_in2);
scale_factor=col_num*row_num;
hipLaunchKernelGGL(( scale_out_real), dim3(CAFFE_GET_BLOCKS(count2)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count2,this->d_in2,scale_factor);
// this->d_in2 now holds the spatial-domain filter
// top[0]->Reshape(1,Layer<Dtype>::KK_real[blob_id]->channels(),77,77);
// reset laplace_real and laplace_imag to zero
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1, laplace_real);
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1, laplace_imag);
// top[0]->Reshape(2,Layer<Dtype>::first_layer_tmp_real1[blob_id]->channels(),Layer<Dtype>::first_layer_tmp_real1[blob_id]->height(),Layer<Dtype>::first_layer_tmp_real1[blob_id]->width());
// printf("the size is %d %d\n",Layer<Dtype>::patch_mask[0]->height(),Layer<Dtype>::patch_mask[0]->width());
mask=Layer<Dtype>::binary_mask[0]->mutable_gpu_data();
hipLaunchKernelGGL(( add_mask), dim3(CAFFE_GET_BLOCKS(count2)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count2, num_per_channel2,mask, this->d_in2, this->d_in_tmp2);
// keep a copy of the masked spatial-domain filter in H_masked
caffe_copy(Layer<Dtype>::H_masked[blob_id]->count(),(Dtype*)this->d_in_tmp2,Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data());
fft2(this->forward_plan[blob_id],this->d_in_tmp2,this->d_freq2);
// shift the spectrum back to the centered hf layout used together with samplesf
hipLaunchKernelGGL(( fftshift), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1,num_per_channel1,fftshift_mask,this->d_freq2,Layer<Dtype>::hf_tmp_real[blob_id]->mutable_gpu_data(),
Layer<Dtype>::hf_tmp_imag[blob_id]->mutable_gpu_data());
mask=Layer<Dtype>::binary_mask_adaptive[0]->mutable_gpu_data();
hipLaunchKernelGGL(( add_mask), dim3(CAFFE_GET_BLOCKS(count2)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count2, num_per_channel2,mask, this->d_in2, this->d_in_tmp2);
fft2(this->forward_plan[blob_id],this->d_in_tmp2,this->d_freq2);
// shift the spectrum back to the centered hf layout used together with samplesf
hipLaunchKernelGGL(( fftshift), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1,num_per_channel1,fftshift_mask,this->d_freq2,Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data(),
Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data());
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(count3)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count3,weighted_sample_real);
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(count3)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count3,weighted_sample_imag);
hipLaunchKernelGGL(( my_weight_sample_kernel), dim3(CAFFE_GET_BLOCKS(count3)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count3, sample_real, sample_imag,hf_real, hf_imag, weighted_sample_real,weighted_sample_imag,number_per_sample, num_per_channel1);
}
else
{
ifftshift_mask=Layer<Dtype>::ifftshift_mask[1]->mutable_gpu_data();
fftshift_mask=Layer<Dtype>::fftshift_mask[1]->mutable_gpu_data();
weighted_sample_real=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->mutable_gpu_data();
weighted_sample_imag=Layer<Dtype>::first_layer_weighted_sample_imag[blob_id]->mutable_gpu_data();
sample_real=Layer<Dtype>::first_layer_samplef_real[blob_id]->mutable_gpu_data();
sample_imag=Layer<Dtype>::first_layer_samplef_imag[blob_id]->mutable_gpu_data();
KK_real=Layer<Dtype>::KK_real[blob_id]->mutable_gpu_data();
KK_imag=Layer<Dtype>::KK_imag[blob_id]->mutable_gpu_data();
tmp_real1=Layer<Dtype>::first_layer_tmp_real1[blob_id]->mutable_gpu_data();
tmp_imag1=Layer<Dtype>::first_layer_tmp_imag1[blob_id]->mutable_gpu_data();
hf_real=Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data();
hf_imag=Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data();
laplace_real=Layer<Dtype>::laplace_real[blob_id]->mutable_gpu_data();
laplace_imag=Layer<Dtype>::laplace_imag[blob_id]->mutable_gpu_data();
col_num=Layer<Dtype>::first_layer_hf_real[blob_id]->height(); row_num=Layer<Dtype>::first_layer_hf_real[blob_id]->height(); num_per_channel1=row_num*(col_num/2+1);
num_per_channel2=row_num*col_num;
count1=this->blobs_[blob_id]->channels()*row_num*(col_num/2+1);//fftshift
count2=this->blobs_[blob_id]->channels()*row_num*col_num;
count3=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->count();
number_per_sample=this->blobs_[blob_id]->channels()*(col_num/2+1)*row_num;
hipLaunchKernelGGL(( ifftshift), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1, num_per_channel1, ifftshift_mask, Layer<Dtype>::matlab_hf_real[blob_id]->mutable_gpu_data() , Layer<Dtype>::matlab_hf_imag[blob_id]->mutable_gpu_data(), this->d_freq3,row_num, col_num,num_per_channel1);
ifft2(this->inverse_plan[blob_id],this->d_freq3,this->d_in3);
scale_factor=col_num*row_num;
hipLaunchKernelGGL(( scale_out_real), dim3(CAFFE_GET_BLOCKS(count2)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count2,this->d_in3,scale_factor);
// this->d_in3 now holds the spatial-domain filter
// top[0]->Reshape(1,Layer<Dtype>::KK_real[blob_id]->channels(),77,77);
// reset laplace_real and laplace_imag to zero
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1, laplace_real);
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1, laplace_imag);
mask=Layer<Dtype>::binary_mask[1]->mutable_gpu_data();
hipLaunchKernelGGL(( add_mask), dim3(CAFFE_GET_BLOCKS(count2)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count2, num_per_channel2,mask, this->d_in3, this->d_in_tmp3);
// keep a copy of the masked spatial-domain filter in H_masked
caffe_copy(Layer<Dtype>::H_masked[blob_id]->count(),(Dtype*)this->d_in_tmp3,Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data());
fft2(this->forward_plan[blob_id],this->d_in_tmp3,this->d_freq3);
// shift the spectrum back to the centered hf layout used together with samplesf
hipLaunchKernelGGL(( fftshift), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1,num_per_channel1,fftshift_mask,this->d_freq3,Layer<Dtype>::hf_tmp_real[blob_id]->mutable_gpu_data(),
Layer<Dtype>::hf_tmp_imag[blob_id]->mutable_gpu_data());
// top[0]->Reshape(2,Layer<Dtype>::first_layer_tmp_real1[blob_id]->channels(),Layer<Dtype>::first_layer_tmp_real1[blob_id]->height(),Layer<Dtype>::first_layer_tmp_real1[blob_id]->width());
mask=Layer<Dtype>::binary_mask_adaptive[1]->mutable_gpu_data();
hipLaunchKernelGGL(( add_mask), dim3(CAFFE_GET_BLOCKS(count2)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count2, num_per_channel2,mask, this->d_in3, this->d_in_tmp3);
fft2(this->forward_plan[blob_id],this->d_in_tmp3,this->d_freq3);
hipLaunchKernelGGL(( fftshift), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1,num_per_channel1,fftshift_mask,this->d_freq3,Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data(),
Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data());
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(count3)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count3,weighted_sample_real);
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(count3)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count3,weighted_sample_imag);
hipLaunchKernelGGL(( my_weight_sample_kernel), dim3(CAFFE_GET_BLOCKS(count3)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count3, sample_real, sample_imag,hf_real, hf_imag, weighted_sample_real,weighted_sample_imag,number_per_sample,num_per_channel1);
}
}
/*
Dtype* inner_product_result;
Dtype* tmp;
inner_product_result=Layer<Dtype>::inner_product_result[0]->mutable_gpu_data();
//weighted_sample_realweighted_sample_image
for(int blob_id=0;blob_id<feature_num;blob_id++)
{
number_per_sample=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->height()*Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->width();
for(int sample_id=0; sample_id<sample_num;sample_id++)
{
tmp=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->mutable_gpu_data()+number_per_sample*sample_id;
if(sample_id==0&&blob_id==0)
{
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, 1, 1, number_per_sample,
(Dtype)2*sample_weight_cpu[sample_id] , tmp, tmp, (Dtype)0., inner_product_result);
}
else
{
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, 1, 1, number_per_sample,
(Dtype)2*sample_weight_cpu[sample_id], tmp, tmp, (Dtype)1, inner_product_result);
}
//
tmp=Layer<Dtype>::first_layer_weighted_sample_imag[blob_id]->mutable_gpu_data()+number_per_sample*sample_id;
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, 1, 1, number_per_sample,
(Dtype)2*sample_weight_cpu[sample_id], tmp, tmp, (Dtype)1, inner_product_result);
}
}
*/
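// Accumulate the per-blob responses into a single sh_real/sh_imag. Blobs other
// than index 2 share the same spectrum size and are added directly; blob 2 is
// defined on a different grid, so add_different_layers uses L_index to map its
// bins into the common layout (entries with L_index==0 have no counterpart).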
Dtype* L_index=Layer<Dtype>::L_index[0]->mutable_gpu_data();
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(Layer<Dtype>::sh_real[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, Layer<Dtype>::sh_real[0]->count(),Layer<Dtype>::sh_real[0]->mutable_gpu_data());
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(Layer<Dtype>::sh_imag[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, Layer<Dtype>::sh_imag[0]->count(),Layer<Dtype>::sh_imag[0]->mutable_gpu_data());
Dtype* sh_real=Layer<Dtype>::sh_real[0]->mutable_gpu_data();
Dtype* sh_imag=Layer<Dtype>::sh_imag[0]->mutable_gpu_data();
for(int blob_id=0;blob_id<feature_num;blob_id++)
{
if(blob_id!=2)
{
caffe_gpu_add(Layer<Dtype>::sh_real[0]->count(),sh_real,Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->mutable_gpu_data(),sh_real);
caffe_gpu_add(Layer<Dtype>::sh_imag[0]->count(),sh_imag,Layer<Dtype>::first_layer_weighted_sample_imag[blob_id]->mutable_gpu_data(),sh_imag);
}
else
{
int count7=Layer<Dtype>::first_layer_weighted_sample_real[0]->count();
num_per_channel1=Layer<Dtype>::first_layer_hf_real[0]->height()*(Layer<Dtype>::first_layer_hf_real[0]->width());
num_per_channel2=Layer<Dtype>::first_layer_hf_real[2]->height()*(Layer<Dtype>::first_layer_hf_real[2]->width());
//printf("the value is %d %d\n\n",num_per_channel1,num_per_channel2);
hipLaunchKernelGGL(( add_different_layers), dim3(CAFFE_GET_BLOCKS(count7)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count7,num_per_channel1, num_per_channel2, L_index, Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->mutable_gpu_data(),Layer<Dtype>::first_layer_weighted_sample_imag[blob_id]->mutable_gpu_data(), sh_real, sh_imag);
}
}
// sh_real / sh_imag now hold the response accumulated over all blobs; multiply it back with samplesf per blob below
Dtype* L_index1=Layer<Dtype>::L_index1[0]->mutable_gpu_data();
for(int blob_id=0;blob_id<feature_num;blob_id++)
{
if(blob_id!=2)
{
count1=this->blobs_[blob_id]->channels()*Layer<Dtype>::first_layer_hf_real[blob_id]->height()*Layer<Dtype>::first_layer_hf_real[blob_id]->width();
num_per_channel1=Layer<Dtype>::first_layer_hf_real[blob_id]->height()*Layer<Dtype>::first_layer_hf_real[blob_id]->width();
number_per_sample=num_per_channel1*this->blobs_[blob_id]->channels();
KK_real=Layer<Dtype>::KK_real[blob_id]->mutable_gpu_data(); KK_imag=Layer<Dtype>::KK_imag[blob_id]->mutable_gpu_data();
sample_real=Layer<Dtype>::first_layer_samplef_real[blob_id]->mutable_gpu_data();
sample_imag=Layer<Dtype>::first_layer_samplef_imag[blob_id]->mutable_gpu_data();
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(Layer<Dtype>::KK_real[blob_id]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, Layer<Dtype>::KK_real[blob_id]->count(),KK_real);
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(Layer<Dtype>::KK_real[blob_id]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, Layer<Dtype>::KK_real[blob_id]->count(),KK_imag);
hipLaunchKernelGGL(( weight_sample_kernel_second), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1, sample_real, sample_imag, sh_real, sh_imag, KK_real,KK_imag,
sample_weight, number_per_sample,num_per_channel1, sample_num);
}
else
{
count1=this->blobs_[blob_id]->channels()*Layer<Dtype>::first_layer_hf_real[blob_id]->height()*Layer<Dtype>::first_layer_hf_real[blob_id]->width();
count2=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->count();
num_per_channel1=Layer<Dtype>::first_layer_hf_real[blob_id]->height()*Layer<Dtype>::first_layer_hf_real[blob_id]->width();
num_per_channel2=Layer<Dtype>::first_layer_hf_real[0]->height()*Layer<Dtype>::first_layer_hf_real[0]->width();
number_per_sample=num_per_channel1*this->blobs_[blob_id]->channels();
weighted_sample_real=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->mutable_gpu_data();
weighted_sample_imag=Layer<Dtype>::first_layer_weighted_sample_imag[blob_id]->mutable_gpu_data();
KK_real=Layer<Dtype>::KK_real[blob_id]->mutable_gpu_data(); KK_imag=Layer<Dtype>::KK_imag[blob_id]->mutable_gpu_data();
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(Layer<Dtype>::KK_real[blob_id]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, Layer<Dtype>::KK_real[blob_id]->count(),KK_real);
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(Layer<Dtype>::KK_real[blob_id]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, Layer<Dtype>::KK_real[blob_id]->count(),KK_imag);
sample_real=Layer<Dtype>::first_layer_samplef_real[blob_id]->mutable_gpu_data();
sample_imag=Layer<Dtype>::first_layer_samplef_imag[blob_id]->mutable_gpu_data();
hipLaunchKernelGGL(( crop_sample), dim3(CAFFE_GET_BLOCKS(count2)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count2,num_per_channel1,num_per_channel2, L_index1, sh_real, sh_imag, Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->mutable_gpu_data(), Layer<Dtype>::first_layer_weighted_sample_imag[blob_id]->mutable_gpu_data());
// multiply the cropped response with samplesf and accumulate over the weighted samples
hipLaunchKernelGGL(( weight_sample_kernel_second), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1, sample_real, sample_imag, weighted_sample_real, weighted_sample_imag, KK_real,KK_imag,sample_weight, number_per_sample,num_per_channel1, sample_num);
}
}
int count_H;
int num_per_channel_real;
Dtype* App;
// spatial-regularization term: compute ATAW_MC = A^T * (A * W) from the masked filter
Dtype* resolution_data=Layer<Dtype>::resolution_index[0]->mutable_cpu_data();
int resolution_index=resolution_data[0];
Dtype* ATAW_positive_index;
Dtype* ATAW_negative_index;
if(resolution_index==1)
{
//printf("we select this branch\n");
for(int blob_id=0; blob_id<feature_num;blob_id++)
{
count_H=Layer<Dtype>::H_transpose[blob_id]->count();
num_per_channel_real=Layer<Dtype>::H_transpose[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->width();
Dtype* H_masked=Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data();
Dtype* H_transpose=Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data();
hipLaunchKernelGGL(( compupte_H_transpose), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data(), Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),
num_per_channel_real, Layer<Dtype>::H_masked[blob_id]->height(), Layer<Dtype>::H_masked[blob_id]->width());
count_H=Layer<Dtype>::Ap[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
hipLaunchKernelGGL(( compute_AW), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, H_transpose, Layer<Dtype>::Ap[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW[blob_id]->mutable_gpu_data(), Layer<Dtype>::Ap[blob_id]->height(), Layer<Dtype>::Ap[blob_id]->width(), num_per_channel_real);
// accumulate the signed differences into ATAW_MC
ATAW_positive_index=Layer<Dtype>::ATAW_positive_index[blob_id]->mutable_gpu_data();
ATAW_negative_index=Layer<Dtype>::ATAW_negative_index[blob_id]->mutable_gpu_data();
//if(blob_id==1)
// {
// top[0]->Reshape(1,1,Layer<Dtype>::AW[blob_id]->width(),1);
// caffe_copy(top[0]->count(),Layer<Dtype>::AW[blob_id]->mutable_gpu_data(),top[0]->mutable_gpu_data());
// }
// clear ATAW_MC before accumulating
count_H=Layer<Dtype>::ATAW_MC[blob_id]->height()*Layer<Dtype>::ATAW_MC[blob_id]->width()*Layer<Dtype>::ATAW_MC[blob_id]->channels();
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data());
count_H=Layer<Dtype>::ATAW_positive_index[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
hipLaunchKernelGGL(( compute_ATAW_positive), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, ATAW_positive_index, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW[blob_id]->mutable_gpu_data() , Layer<Dtype>::Ap[blob_id]->height(), Layer<Dtype>::ATAW_positive_index[blob_id]->height(), Layer<Dtype>::ATAW_positive_index[blob_id]->width(), num_per_channel_real);
count_H=Layer<Dtype>::ATAW_negative_index[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
hipLaunchKernelGGL(( compute_ATAW_negative), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, ATAW_negative_index, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW[blob_id]->mutable_gpu_data() , Layer<Dtype>::Ap[blob_id]->height(), Layer<Dtype>::ATAW_negative_index[blob_id]->height(), Layer<Dtype>::ATAW_negative_index[blob_id]->width(), num_per_channel_real);
count_H=Layer<Dtype>::H_transpose[blob_id]->count();
hipLaunchKernelGGL(( compupte_H_transpose), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),
num_per_channel_real, Layer<Dtype>::H_masked[blob_id]->height(), Layer<Dtype>::H_masked[blob_id]->width());
// if(blob_id==4)
// {
// top[0]->Reshape(1,Layer<Dtype>::H_transpose[blob_id]->channels(),Layer<Dtype>::H_transpose[blob_id]->height(),Layer<Dtype>::H_transpose[blob_id]->width());
// caffe_copy(top[0]->count(),Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),top[0]->mutable_gpu_data());
// }
}
}
else
{
//printf("we select this branch\n");
for(int blob_id=0; blob_id<feature_num;blob_id++)
{
if(blob_id!=2)
{
count_H=Layer<Dtype>::H_transpose[blob_id]->count();
num_per_channel_real=Layer<Dtype>::H_transpose[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->width();
Dtype* H_masked=Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data();
Dtype* H_transpose=Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data();
hipLaunchKernelGGL(( compupte_H_transpose), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data(), Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),
num_per_channel_real, Layer<Dtype>::H_masked[blob_id]->height(), Layer<Dtype>::H_masked[blob_id]->width());
count_H=Layer<Dtype>::Ap1[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
hipLaunchKernelGGL(( compute_AW), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, H_transpose, Layer<Dtype>::Ap1[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW1[blob_id]->mutable_gpu_data(), Layer<Dtype>::Ap1[blob_id]->height(), Layer<Dtype>::Ap1[blob_id]->width(), num_per_channel_real);
// accumulate the signed differences into ATAW_MC
ATAW_positive_index=Layer<Dtype>::ATAW_positive_index1[blob_id]->mutable_gpu_data();
ATAW_negative_index=Layer<Dtype>::ATAW_negative_index1[blob_id]->mutable_gpu_data();
// clear ATAW_MC before accumulating
count_H=Layer<Dtype>::ATAW_MC[blob_id]->height()*Layer<Dtype>::ATAW_MC[blob_id]->width()*Layer<Dtype>::ATAW_MC[blob_id]->channels();
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data());
count_H=Layer<Dtype>::ATAW_positive_index1[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
hipLaunchKernelGGL(( compute_ATAW_positive), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, ATAW_positive_index, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW1[blob_id]->mutable_gpu_data() , Layer<Dtype>::Ap1[blob_id]->height(), Layer<Dtype>::ATAW_positive_index1[blob_id]->height(), Layer<Dtype>::ATAW_positive_index1[blob_id]->width(), num_per_channel_real);
count_H=Layer<Dtype>::ATAW_negative_index1[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
hipLaunchKernelGGL(( compute_ATAW_negative), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, ATAW_negative_index, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW1[blob_id]->mutable_gpu_data() , Layer<Dtype>::Ap1[blob_id]->height(), Layer<Dtype>::ATAW_negative_index1[blob_id]->height(), Layer<Dtype>::ATAW_negative_index1[blob_id]->width(), num_per_channel_real);
count_H=Layer<Dtype>::H_transpose[blob_id]->count();
hipLaunchKernelGGL(( compupte_H_transpose), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),
num_per_channel_real, Layer<Dtype>::H_masked[blob_id]->height(), Layer<Dtype>::H_masked[blob_id]->width());
// if(blob_id==4)
// {
// top[0]->Reshape(1,Layer<Dtype>::H_transpose[blob_id]->channels(),Layer<Dtype>::H_transpose[blob_id]->height(),Layer<Dtype>::H_transpose[blob_id]->width());
// caffe_copy(top[0]->count(),Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),top[0]->mutable_gpu_data());
// }
}
else
{
count_H=Layer<Dtype>::H_transpose[blob_id]->count();
num_per_channel_real=Layer<Dtype>::H_transpose[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->width();
Dtype* H_masked=Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data();
Dtype* H_transpose=Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data();
hipLaunchKernelGGL(( compupte_H_transpose), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data(), Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),
num_per_channel_real, Layer<Dtype>::H_masked[blob_id]->height(), Layer<Dtype>::H_masked[blob_id]->width());
count_H=Layer<Dtype>::Ap[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
hipLaunchKernelGGL(( compute_AW), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, H_transpose, Layer<Dtype>::Ap[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW[blob_id]->mutable_gpu_data(), Layer<Dtype>::Ap[blob_id]->height(), Layer<Dtype>::Ap[blob_id]->width(), num_per_channel_real);
// accumulate the signed differences into ATAW_MC
ATAW_positive_index=Layer<Dtype>::ATAW_positive_index[blob_id]->mutable_gpu_data();
ATAW_negative_index=Layer<Dtype>::ATAW_negative_index[blob_id]->mutable_gpu_data();
//if(blob_id==1)
// {
// top[0]->Reshape(1,1,Layer<Dtype>::AW[blob_id]->width(),1);
// caffe_copy(top[0]->count(),Layer<Dtype>::AW[blob_id]->mutable_gpu_data(),top[0]->mutable_gpu_data());
// }
// clear ATAW_MC before accumulating
count_H=Layer<Dtype>::ATAW_MC[blob_id]->height()*Layer<Dtype>::ATAW_MC[blob_id]->width()*Layer<Dtype>::ATAW_MC[blob_id]->channels();
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data());
count_H=Layer<Dtype>::ATAW_positive_index[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
hipLaunchKernelGGL(( compute_ATAW_positive), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, ATAW_positive_index, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW[blob_id]->mutable_gpu_data() , Layer<Dtype>::Ap[blob_id]->height(), Layer<Dtype>::ATAW_positive_index[blob_id]->height(), Layer<Dtype>::ATAW_positive_index[blob_id]->width(), num_per_channel_real);
count_H=Layer<Dtype>::ATAW_negative_index[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
hipLaunchKernelGGL(( compute_ATAW_negative), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, ATAW_negative_index, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW[blob_id]->mutable_gpu_data() , Layer<Dtype>::Ap[blob_id]->height(), Layer<Dtype>::ATAW_negative_index[blob_id]->height(), Layer<Dtype>::ATAW_negative_index[blob_id]->width(), num_per_channel_real);
count_H=Layer<Dtype>::H_transpose[blob_id]->count();
hipLaunchKernelGGL(( compupte_H_transpose), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),
num_per_channel_real, Layer<Dtype>::H_masked[blob_id]->height(), Layer<Dtype>::H_masked[blob_id]->width());
}
}
}
//second layer
Dtype lambda1=0.1; Dtype lambda2=1; Dtype lambda3=0.0;
Dtype* data1=Layer<Dtype>::mu[0]->mutable_cpu_data();
Dtype* data2=Layer<Dtype>::eta[0]->mutable_cpu_data();
Dtype mu=data1[0]; Dtype eta=data2[0];
Dtype* data11=Layer<Dtype>::factor[0]->mutable_cpu_data();
Dtype factor=data11[0];
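// Final update pass: for each blob, bring KK back to the spatial domain, apply
// binary_mask_adaptive, add mu * ATAW_MC (held in H_transpose) and eta *
// reg_window .* H_masked (H_reged), transform forward again, and store the
// resulting hf (real part, then imaginary part) into this->blobs_[blob_id].
// For blob 2 the mu term is additionally scaled by 'factor' and the eta term
// is given zero weight.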
for(int blob_id=0;blob_id<feature_num;blob_id++)
{
if(blob_id!=2)
{
count1=Layer<Dtype>::KK_real[blob_id]->count();
num_per_channel1=Layer<Dtype>::first_layer_hf_real[blob_id]->height()*Layer<Dtype>::first_layer_hf_real[blob_id]->width();
ifftshift_mask=Layer<Dtype>::ifftshift_mask[0]->mutable_gpu_data();
fftshift_mask=Layer<Dtype>::fftshift_mask[0]->mutable_gpu_data();
row_num=Layer<Dtype>::KK_real[blob_id]->height(); col_num=row_num;
num_per_channel2=row_num*col_num;
count2=num_per_channel2*this->blobs_[blob_id]->channels();
hipLaunchKernelGGL(( ifftshift_second), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1, num_per_channel1, ifftshift_mask, Layer<Dtype>::KK_real[blob_id]->mutable_gpu_data() , Layer<Dtype>::KK_imag[blob_id]->mutable_gpu_data(), this->d_freq2,row_num, col_num,num_per_channel1);
ifft2_second(this->inverse_plan[blob_id],this->d_freq2,this->d_in2);
scale_factor=col_num*row_num;
hipLaunchKernelGGL(( scale_out_real_second), dim3(CAFFE_GET_BLOCKS(count2)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count2,this->d_in2,scale_factor);
mask=Layer<Dtype>::binary_mask_adaptive[0]->mutable_gpu_data();
hipLaunchKernelGGL(( add_mask_second), dim3(CAFFE_GET_BLOCKS(count2)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count2, num_per_channel2,mask, this->d_in2, this->d_in_tmp2);
num_per_channel_real=Layer<Dtype>::H_masked[blob_id]->height()*Layer<Dtype>::H_masked[blob_id]->width();
count_H=Layer<Dtype>::H_masked[blob_id]->count();
mask=Layer<Dtype>::reg_window[0]->mutable_gpu_data();
hipLaunchKernelGGL(( add_reg_mask), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, num_per_channel_real, mask, Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data(),Layer<Dtype>::H_reged[blob_id]->mutable_gpu_data());
// combine the data term with mu * ATAW_MC (held in H_transpose) and eta * the regularized H_masked
caffe_gpu_add1(count2,(Dtype*) this->d_in_tmp2,Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),(Dtype)1.0,mu, (Dtype*)this->d_in_tmp2);
caffe_gpu_add1(count2,(Dtype*) this->d_in_tmp2,Layer<Dtype>::H_reged[blob_id]->mutable_gpu_data(),(Dtype)1.0,eta, (Dtype*)this->d_in_tmp2);
fft2_second(this->forward_plan[blob_id],this->d_in_tmp2,this->d_freq2);
hipLaunchKernelGGL(( fftshift_second), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1,num_per_channel1,fftshift_mask,this->d_freq2,Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data(),Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data());
// write the updated filter back into the layer blob: real part first, then imaginary part
caffe_copy(this->blobs_[blob_id]->count()/2,Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data(),this->blobs_[blob_id]->mutable_gpu_data());
caffe_copy(this->blobs_[blob_id]->count()/2,Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data(),this->blobs_[blob_id]->mutable_gpu_data()+this->blobs_[blob_id]->count()/2);
// printf("%d %d %d %d\n",this->blobs_[blob_id]->num(),this->blobs_[blob_id]->channels(),this->blobs_[blob_id]->height(),this->blobs_[blob_id]->width());
}
else
{
count1=Layer<Dtype>::KK_real[blob_id]->count();
num_per_channel1=Layer<Dtype>::first_layer_hf_real[blob_id]->height()*Layer<Dtype>::first_layer_hf_real[blob_id]->width();
ifftshift_mask=Layer<Dtype>::ifftshift_mask[1]->mutable_gpu_data();
fftshift_mask=Layer<Dtype>::fftshift_mask[1]->mutable_gpu_data();
row_num=Layer<Dtype>::KK_real[blob_id]->height(); col_num=row_num;
num_per_channel2=row_num*col_num;
count2=num_per_channel2*this->blobs_[blob_id]->channels();
hipLaunchKernelGGL(( ifftshift_second), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1, num_per_channel1, ifftshift_mask, Layer<Dtype>::KK_real[blob_id]->mutable_gpu_data() , Layer<Dtype>::KK_imag[blob_id]->mutable_gpu_data(), this->d_freq3,row_num, col_num,num_per_channel1);
ifft2_second(this->inverse_plan[blob_id],this->d_freq3,this->d_in3);
scale_factor=col_num*row_num;
hipLaunchKernelGGL(( scale_out_real_second), dim3(CAFFE_GET_BLOCKS(count2)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count2,this->d_in3,scale_factor);
mask=Layer<Dtype>::binary_mask_adaptive[1]->mutable_gpu_data();
hipLaunchKernelGGL(( add_mask_second), dim3(CAFFE_GET_BLOCKS(count2)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count2, num_per_channel2,mask, this->d_in3, this->d_in_tmp3);
num_per_channel_real=Layer<Dtype>::H_masked[blob_id]->height()*Layer<Dtype>::H_masked[blob_id]->width();
count_H=Layer<Dtype>::H_masked[blob_id]->count();
mask=Layer<Dtype>::reg_window[1]->mutable_gpu_data();
hipLaunchKernelGGL(( add_reg_mask), dim3(CAFFE_GET_BLOCKS(count_H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_H, num_per_channel_real, mask, Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data(),Layer<Dtype>::H_reged[blob_id]->mutable_gpu_data());
// combine the data term with factor * mu * ATAW_MC (held in H_transpose); the H_masked term is given zero weight here
caffe_gpu_add1(count2,(Dtype*)
this->d_in_tmp3,Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),(Dtype)1.0,(Dtype)factor*mu, (Dtype*)this->d_in_tmp3);
caffe_gpu_add1(count2,(Dtype*) this->d_in_tmp3,Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data(),(Dtype)1.0, (Dtype)0, (Dtype*)this->d_in_tmp3);
fft2_second(this->forward_plan[blob_id],this->d_in_tmp3,this->d_freq3);
hipLaunchKernelGGL(( fftshift_second), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1,num_per_channel1,fftshift_mask,this->d_freq3,Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data(),Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data());
// printf("the frame_id is %d\n",frame_id);
caffe_copy(this->blobs_[blob_id]->count()/2,Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data(),this->blobs_[blob_id]->mutable_gpu_data());
caffe_copy(this->blobs_[blob_id]->count()/2,Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data(),this->blobs_[blob_id]->mutable_gpu_data()+this->blobs_[blob_id]->count()/2);
}
}
top[0]->Reshape(Layer<Dtype>::matlab_hf_real[0]->num(),Layer<Dtype>::matlab_hf_real[0]->channels(),Layer<Dtype>::matlab_hf_real[0]->height(),Layer<Dtype>::matlab_hf_real[0]->width());
//caffe_copy(top[0]->count(),Layer<Dtype>::matlab_hf_real[0]->mutable_gpu_data(),top[0]->mutable_gpu_data());
caffe_copy(top[0]->count(),Layer<Dtype>::first_layer_hf_real[0]->mutable_gpu_data(),top[0]->mutable_gpu_data());
Dtype* clear_memory_cpu=Layer<Dtype>::clear_memory[0]->mutable_cpu_data();
if(clear_memory_cpu[0]>0.5) // release the FFT buffers and plans when requested
{
hipFree(this->d_in1); hipFree(this->d_in2); hipFree(this->d_in3); hipFree(this->d_in4);
hipFree(this->d_in_tmp1); hipFree(this->d_in_tmp2); hipFree(this->d_in_tmp3); hipFree(this->d_in_tmp4);
hipFree(this->d_freq1); hipFree(this->d_freq2); hipFree(this->d_freq3); hipFree(this->d_freq4);
hipFree(this->d_in_total1); hipFree(this->d_in_total2);
hipFree(this->d_freq_total1); hipFree(this->d_freq_total2);
hipFree(this->d_in_sub_total1); hipFree(this->d_in_sub_total2);
hipFree(this->d_freq_sub_total1); hipFree(this->d_freq_sub_total2);
hipfftDestroy(this->forward_plan[0]); hipfftDestroy(this->forward_plan[1]); hipfftDestroy(this->forward_plan[2]); hipfftDestroy(this->forward_plan[3]);
hipfftDestroy(this->forward_plan_total[0]); hipfftDestroy(this->forward_plan_total[1]);
hipfftDestroy(this->forward_plan_sub_total[0]); hipfftDestroy(this->forward_plan_sub_total[1]);
hipfftDestroy(this->inverse_plan[0]); hipfftDestroy(this->inverse_plan[1]); hipfftDestroy(this->inverse_plan[2]); hipfftDestroy(this->inverse_plan[3]);
hipfftDestroy(this->inverse_plan_total[0]); hipfftDestroy(this->inverse_plan_total[1]);
if(feature_num==5)
{ printf("the memory is released\n");
hipFree(this->d_in5);
hipFree(this->d_in_tmp5);
hipFree(this->d_freq5);
hipfftDestroy(this->forward_plan[4]);
hipfftDestroy(this->inverse_plan[4]);
}
}
//Dtype* sample_real_cpu=Layer<Dtype>::first_layer_samplef_real[0]->mutable_cpu_data();
//Dtype* sample_imag_cpu=Layer<Dtype>::first_layer_samplef_imag[0]->mutable_cpu_data();
//Dtype* sh_real_cpu=Layer<Dtype>::sh_real[0]->mutable_cpu_data();
//Dtype* sh_imag_cpu=Layer<Dtype>::sh_imag[0]->mutable_cpu_data();
//weighted_sample_real
//top[0]->Reshape(Layer<Dtype>::first_layer_weighted_sample_real[0]->num(),Layer<Dtype>::first_layer_weighted_sample_real[0]->channels(),Layer<Dtype>::first_layer_weighted_sample_real[0]->height(),
//Layer<Dtype>::first_layer_weighted_sample_real[0]->width());
//caffe_copy(top[0]->count(),sh_imag,top[0]->mutable_gpu_data());
//top[0]->Reshape(Layer<Dtype>::KK_real[2]->num(),Layer<Dtype>::KK_real[2]->channels(),Layer<Dtype>::KK_real[2]->height(),Layer<Dtype>::KK_real[2]->width());
//caffe_copy(top[0]->count(),Layer<Dtype>::KK_imag[2]->mutable_gpu_data(),top[0]->mutable_gpu_data());
// top[0]->Reshape(Layer<Dtype>::first_layer_weighted_sample_real[2]->num(),Layer<Dtype>::first_layer_weighted_sample_real[2]->channels(),Layer<Dtype>::first_layer_weighted_sample_real[2]->height(),Layer<Dtype>::first_layer_weighted_sample_real[2]->width());
//caffe_copy(top[0]->count(),Layer<Dtype>::first_layer_weighted_sample_real[2]->mutable_gpu_data(),top[0]->mutable_gpu_data());
}
template <typename Dtype>
void WtfLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
}
INSTANTIATE_LAYER_GPU_FUNCS(WtfLayer);
} // namespace caffe
| cc951c41268c161f847789637ef7a7903faac298.cu | #include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cufft.h>
#include "common/inc/helper_functions.h"
#include "common/inc/helper_cuda.h"
typedef float2 Complex;
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
namespace caffe {
void fft2_second(cufftHandle forward_plan, float* d_in, float2* d_freq)
{
cufftExecR2C(forward_plan, d_in, d_freq);
}
void ifft2_second(cufftHandle inverse_plan, float2* d_freq, float* d_out)
{
cufftExecC2R(inverse_plan, d_freq, d_out);
}
template <typename Dtype>
__global__ void ifftshift_second(const int n, int num_per_channel, Dtype* L_mask, Dtype* input_real, Dtype* input_imag, float2* output, int row_num, int col_num,int num_per_channel1) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel1;
int current_index=index%num_per_channel1;
if(L_mask[current_index]>0)
{int ori_index=L_mask[current_index]-1+channel_index*num_per_channel1;
output[index].x=input_real[ori_index];
output[index].y=input_imag[ori_index];
}
else
{ int ori_index=-L_mask[current_index]-1+channel_index*num_per_channel1;
output[index].x=input_real[ori_index];
output[index].y=-input_imag[ori_index];
}
}
}
template <typename Dtype>
__global__ void fftshift_second(const int n, int num_per_channel1, Dtype* L_mask, float2* input, Dtype* output_real, Dtype* output_imag) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel1;
int current_index=index%num_per_channel1;
if(L_mask[current_index]>-0.5)
{
int ori_index=L_mask[current_index]+channel_index*num_per_channel1;
output_real[index]=input[ori_index].x;
output_imag[index]=input[ori_index].y;
// output_real[index]=ori_index;
// output_imag[index]=ori_index;
}
else
{
int ori_index=-L_mask[current_index]+channel_index*num_per_channel1;
output_real[index]=input[ori_index].x;
output_imag[index]=-input[ori_index].y; // take the complex conjugate
//output_real[index]=ori_index;
//output_imag[index]=-ori_index; // take the complex conjugate
}
}
}
__global__ void scale_out_real_second(const int n, float* input, float scale_factor) {
CUDA_KERNEL_LOOP(index, n) {
input[index]=input[index]/scale_factor;
}
}
template <typename Dtype>
__global__ void add_mask_second(const int n, int num_per_channel, Dtype* mask, float* input, float * output) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel;
int current_index=index%num_per_channel;
output[index]=input[index]*mask[current_index];
}
}
template <typename Dtype>
__global__ void add_reg_mask(const int n, int num_per_channel, Dtype* mask, Dtype* input, Dtype * output) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel;
int current_index=index%num_per_channel;
output[index]=input[index]*mask[current_index];
}
}
void fft2(cufftHandle forward_plan, float* d_in, float2* d_freq)
{
cufftExecR2C(forward_plan, d_in, d_freq);
}
void ifft2(cufftHandle inverse_plan, float2* d_freq, float* d_out)
{
cufftExecC2R(inverse_plan, d_freq, d_out);
}
__global__ void copy_memory_to_blob(const int n, float2* mem1, float* tmp1, float* tmp2) {
CUDA_KERNEL_LOOP(index, n) {
}
}
__global__ void copy_memory_from_blob(const int n, float2* mem1, float* tmp1, float* tmp2) {
CUDA_KERNEL_LOOP(index, n) {
}
}
template <typename Dtype>
__global__ void set_zeros(const int n, Dtype* in_out) {
CUDA_KERNEL_LOOP(index, n) {
in_out[index]=0;
}
}
__global__ void scale_out_real(const int n, float* input, float scale_factor) {
CUDA_KERNEL_LOOP(index, n) {
input[index]=input[index]/scale_factor;
}
}
template <typename Dtype>
__global__ void add_mask(const int n, int num_per_channel, Dtype* mask, float* input, float * output) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel;
int current_index=index%num_per_channel;
output[index]=input[index]*mask[current_index];
}
}
template <typename Dtype>
__global__ void ifftshift(const int n, int num_per_channel, Dtype* L_mask, Dtype* input_real, Dtype* input_imag, float2* output, int row_num, int col_num,int num_per_channel1) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel1;
int current_index=index%num_per_channel1;
if(L_mask[current_index]>0)
{int ori_index=L_mask[current_index]-1+channel_index*num_per_channel1;
output[index].x=input_real[ori_index];
output[index].y=input_imag[ori_index];
}
else
{ int ori_index=-L_mask[current_index]-1+channel_index*num_per_channel1;
output[index].x=input_real[ori_index];
output[index].y=-input_imag[ori_index];
}
}
}
template <typename Dtype>
__global__ void fftshift(const int n, int num_per_channel1, Dtype* L_mask, float2* input, Dtype* output_real, Dtype* output_imag) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel1;
int current_index=index%num_per_channel1;
if(L_mask[current_index]>-0.5)
{
int ori_index=L_mask[current_index]+channel_index*num_per_channel1;
output_real[index]=input[ori_index].x;
output_imag[index]=input[ori_index].y;
// output_real[index]=ori_index;
// output_imag[index]=ori_index;
}
else
{
int ori_index=-L_mask[current_index]+channel_index*num_per_channel1;
output_real[index]=input[ori_index].x;
output_imag[index]=-input[ori_index].y; // take the complex conjugate
//output_real[index]=ori_index;
//output_imag[index]=-ori_index; // take the complex conjugate
}
}
}
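// obtain_output: sums the per-layer responses (real/imag) into the joint output; the third
// layer lives on a coarser grid, so L_mask (1-based, 0 = no matching position) maps its
// entries onto the common grid before they are added.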
template <typename Dtype>
__global__ void obtain_output(const int n,int number_per_sample1, int number_per_sample2,Dtype* L_mask, Dtype* real1, Dtype* real2, Dtype* real3, Dtype* real4, Dtype* real5,Dtype* imag1,Dtype* imag2, Dtype* imag3,Dtype* imag4,Dtype* imag5,Dtype* y_real, Dtype* y_imag) {
CUDA_KERNEL_LOOP(index, n) {
// first determine which sample the current index belongs to
int sample_index1=index/number_per_sample1;
int index1=index%number_per_sample1;
int index2=number_per_sample2*sample_index1+L_mask[index1]-1;
if(L_mask[index1]==0)
{
y_real[index]=real1[index]+real2[index]+real4[index]+real5[index];
y_imag[index]=imag1[index]+imag2[index]+imag4[index]+imag5[index];
}
else
{
y_real[index]=real1[index]+real2[index]+real3[index2]+real4[index]+real5[index];
y_imag[index]=imag1[index]+imag2[index]+imag3[index2]+imag4[index]+imag5[index];
}
}
}
template <typename Dtype>
__global__ void obtain_freq(const int n, float2* input, Dtype* output) {
CUDA_KERNEL_LOOP(index, n) {
output[index]=input[index].x;
output[index+n]=input[index].y;
}
}
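// pad_filter: intended to copy filter taps into a padded layout via pad_mask; note that as
// written index_ori is computed but unused and every output element is filled from element 0.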
template <typename Dtype>
__global__ void pad_filter(const int n,Dtype* pad_mask, int pad_h, int pad_w, int num_per_channel1, int num_per_channel2, int filter_h, int filter_w, int height, int width, int padded_height, int padded_width, Dtype* h_real_in, Dtype* h_imag_in , Dtype* h_real_out, Dtype* h_imag_out) {
CUDA_KERNEL_LOOP(index, n) {
// first determine the current height and width
int current_index=index%num_per_channel1;
int channel_index=index/num_per_channel1;
int index_ori=pad_mask[current_index]+channel_index*num_per_channel2;
h_real_out[index]=h_real_in[0];
h_imag_out[index]=h_imag_in[0];
}
}
template <typename Dtype>
__global__ void get_col(const int n, Dtype* col_mask, Dtype* h_real_in, Dtype* h_imag_in, Dtype* h_real_col, Dtype* h_imag_col) {
CUDA_KERNEL_LOOP(index, n) {
int index_ori=col_mask[index];
h_real_col[index]=h_real_in[index_ori];
h_imag_col[index]=h_imag_in[index_ori];
}
}
template <typename Dtype>
__global__ void get_freq(const int n, float2* freq, Dtype* top_data_real, Dtype* top_data_imag) {
CUDA_KERNEL_LOOP(index, n) {
top_data_real[index]=freq[index].x;
top_data_imag[index]=freq[index].y;
}
}
template <typename Dtype>
__global__ void set_freq(const int n, float2* freq, Dtype* input_data) {
CUDA_KERNEL_LOOP(index, n) {
freq[index].x=input_data[index];
freq[index].y=input_data[index+n];
}
}
template <typename Dtype>
__global__ void laplace_add(const int n, Dtype* input1, Dtype* input2, Dtype* output1, Dtype* output2,Dtype factor) {
CUDA_KERNEL_LOOP(index, n) {
output1[index]=output1[index]+factor*input1[index];
output2[index]=output2[index]+factor*input2[index];
}
}
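// my_weight_sample_kernel: for every spatial frequency, accumulates over channels the complex
// product weight * conj(sample), i.e. the correlation between the filter hf and one sample.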
template <typename Dtype>
__global__ void my_weight_sample_kernel(const int n, Dtype* sample_real, Dtype* sample_imag,
Dtype* weight_real, Dtype* weight_imag, Dtype* weighted_sample_real,Dtype* weighted_sample_imag, int number_per_sample,int number_per_channel) {
CUDA_KERNEL_LOOP(index, n) {
int channel_num=number_per_sample/number_per_channel;
int sample_index=index/number_per_channel;
int position_index=index%number_per_channel;
for(int i=0;i<channel_num;i++)
{int hf_base_position=position_index+i*number_per_channel;
//weighted_sample_real[0]= weighted_sample_real[0]+weight_real[0]*sample_real[0]+weight_imag[0]*sample_imag[0];
// weighted_sample_real[1]=hf_base_position;
// weighted_sample_real[0]=sample_real[0];
// printf("the index is %d\n",index);
weighted_sample_real[index]= weighted_sample_real[index]+weight_real[hf_base_position]*sample_real[hf_base_position+number_per_sample*sample_index]+weight_imag[hf_base_position]*sample_imag[hf_base_position+number_per_sample*sample_index];
weighted_sample_imag[index]= weighted_sample_imag[index]-weight_real[hf_base_position]*sample_imag[hf_base_position+number_per_sample*sample_index]+weight_imag[hf_base_position]*sample_real[hf_base_position+number_per_sample*sample_index];
}
}
}
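// weight_sample_kernel_second: accumulates over samples the complex product of the fused
// response (weighted_sample) with each sample, scaled by the per-sample weight, producing KK.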
template <typename Dtype>
__global__ void weight_sample_kernel_second(const int n, Dtype* sample_real, Dtype* sample_imag,
Dtype* weighted_sample_real, Dtype* weighted_sample_imag, Dtype* KK_real,Dtype* KK_imag,Dtype* sample_weight, int number_per_sample,int number_per_channel, int sample_num) {
CUDA_KERNEL_LOOP(index, n) {
int position_index=index%number_per_channel;
for(int i=0; i<sample_num;i++)
{
int weighted_sample_index=position_index+i*number_per_channel;
int index1=index+i*number_per_sample;
KK_real[index]=KK_real[index]+sample_weight[i]*(weighted_sample_real[weighted_sample_index]*sample_real[index1]-weighted_sample_imag[weighted_sample_index]*sample_imag[index1]);
KK_imag[index]=KK_imag[index]+sample_weight[i]*(weighted_sample_real[weighted_sample_index]*sample_imag[index1]+weighted_sample_imag[weighted_sample_index]*sample_real[index1]);
}
}
}
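// fuse_result: combines ten fragment responses per element — the first nine are added with
// weight 9 and the tenth is subtracted.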
template <typename Dtype>
__global__ void fuse_result(const int n, Dtype* input,Dtype* output, int channels, int num_per_channel2,int number_per_sample1 ) {
CUDA_KERNEL_LOOP(index, n) {
// first determine which fragment the current element belongs to
for(int frag_id=0;frag_id<10;frag_id++)
{ int position_index=index+number_per_sample1*frag_id;
if(frag_id<9)
{
output[index]=output[index]+9*input[position_index];
}
else
{
output[index]=output[index]-input[position_index];
}
}
}
}
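// add_different_layers: adds the coarser layer's real/imag response into the accumulators
// sh_real/sh_imag, using L_mask (1-based, 0 = no corresponding position) to map indices.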
template <typename Dtype>
__global__ void add_different_layers(const int n,int num_per_channel1, int num_per_channel2, Dtype* L_mask, Dtype* real,Dtype* imag, Dtype* sh_real, Dtype* sh_imag) {
CUDA_KERNEL_LOOP(index, n) {
// first determine which sample the current index belongs to
int channel_index=index/num_per_channel1;
int index1=index%num_per_channel1;
int index2=num_per_channel2*channel_index+L_mask[index1]-1;
if(L_mask[index1]==0)
{
sh_real[index]=sh_real[index];
sh_imag[index]=sh_imag[index];
}
else
{
sh_real[index]=sh_real[index]+real[index2];
sh_imag[index]=sh_imag[index]+imag[index2];
}
}
}
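// crop_sample: gathers a cropped sub-grid from sh_real/sh_imag into per-channel outputs using
// the 1-based index map L_mask1.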
template <typename Dtype>
__global__ void crop_sample(const int n,int num_per_channel1, int num_per_channel2, Dtype* L_mask1, Dtype* sh_real, Dtype* sh_imag, Dtype* output_real, Dtype* output_imag) {
CUDA_KERNEL_LOOP(index, n) {
// first determine which sample the current index belongs to
int position_index=index%num_per_channel1;
int channel_index=index/num_per_channel1;
int index1=(L_mask1[position_index]-1)+num_per_channel2*channel_index;
output_real[index]=sh_real[index1];
output_imag[index]=sh_imag[index1];
}
}
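// compupte_H_transpose (sic, "compute"): transposes each channel's spatial plane; width is
// used as the stride for both directions, so square planes are assumed.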
template <typename Dtype>
__global__ void compupte_H_transpose(const int n,Dtype* H, Dtype* H_transpose, int num_per_channel_real, int height, int width) {
CUDA_KERNEL_LOOP(index, n) {
int channel_id=index/num_per_channel_real;
int index1=index%num_per_channel_real;
int height_id=index1/width;
int width_id=index1%width;
int new_index=width_id*width+height_id+channel_id*num_per_channel_real;
H_transpose[new_index]=H[index];
}
}
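// compute_AW: for every row of the index matrix Ap, stores the difference between the two
// referenced elements of H_transpose per channel (effectively a sparse difference operator
// applied to the filter W).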
template <typename Dtype>
__global__ void compute_AW(const int n, Dtype* H_transpose, Dtype* Ap, Dtype* AW, int A_height, int A_width, int num_per_channel_real) {
CUDA_KERNEL_LOOP(index, n) {
int channel_id=index/A_height;
int current_index=index%A_height;
int index1=Ap[A_width*current_index]; int index2=Ap[A_width*current_index+1];
AW[index]=H_transpose[index1+channel_id*num_per_channel_real]-H_transpose[index2+channel_id*num_per_channel_real];
}
}
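// compute_ATAW_positive / compute_ATAW_negative: scatter-accumulate AW back into ATAW — each
// row of the index table lists a target position followed by AW entries (terminated by -1)
// that are added (positive variant) or subtracted (negative variant) at that position.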
template <typename Dtype>
__global__ void compute_ATAW_positive(const int n, Dtype* ATAW_positive_index, Dtype* ATAW, Dtype* AW, int AW_length_per_channel, int index_height, int index_width, int num_per_channel_real) {
CUDA_KERNEL_LOOP(index, n) {
int channel_id=index/index_height;
int current_index=index%index_height;
int H_index=ATAW_positive_index[current_index*index_width]+channel_id*num_per_channel_real;
int AW_base_index=AW_length_per_channel*channel_id;
for (int i=1;i<index_width;i++)
{
int AW_index=ATAW_positive_index[current_index*index_width+i];
if(AW_index==-1)
break;
AW_index=AW_index+AW_base_index;
ATAW[H_index]=ATAW[H_index]+AW[AW_index];
}
}
}
template <typename Dtype>
__global__ void compute_ATAW_negative(const int n, Dtype* ATAW_negative_index, Dtype* ATAW, Dtype* AW, int AW_length_per_channel, int index_height, int index_width, int num_per_channel_real) {
CUDA_KERNEL_LOOP(index, n) {
int channel_id=index/index_height;
int current_index=index%index_height;
int H_index=ATAW_negative_index[current_index*index_width]+channel_id*num_per_channel_real;
int AW_base_index=AW_length_per_channel*channel_id;
for (int i=1;i<index_width;i++)
{
int AW_index=ATAW_negative_index[current_index*index_width+i];
if(AW_index==-1)
break;
AW_index=AW_index+AW_base_index;
ATAW[H_index]=ATAW[H_index]-AW[AW_index];
}
}
}
template <typename Dtype>
__global__ void get_middle_line(const int n, Dtype* weight_sample, Dtype* middle_line, int height, int width) {
CUDA_KERNEL_LOOP(index, n) {
}
}
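// Forward_gpu: one forward pass of the filter update. Roughly, for every feature blob it
// (1) brings the current frequency-domain filter back to the spatial domain (ifftshift +
// inverse FFT + rescale), (2) applies the binary / adaptive masks and saves the masked
// filter in H_masked, (3) returns to the frequency domain and correlates the filter with
// the stored samples (my_weight_sample_kernel / weight_sample_kernel_second), (4) adds the
// ATAW_MC structure term and the spatially regularized filter, and (5) writes the updated
// frequency-domain filter back into the layer blobs. Blob index 2 uses the second set of
// masks and FFT plans because it lives on a different spatial resolution.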
template <typename Dtype>
void WtfLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* data=Layer<Dtype>::feature_num[0]->mutable_cpu_data();
int feature_num=data[0];
int count1; int count2; int count3; int number_per_sample; float scale_factor; int col_num; int row_num; int num_per_channel1;
int num_per_channel2;
Dtype* sample_weight=Layer<Dtype>::sample_weight[0]->mutable_gpu_data();
Dtype* sample_weight_cpu=Layer<Dtype>::sample_weight[0]->mutable_cpu_data();
int sample_num=Layer<Dtype>::sample_weight[0]->width();
Dtype* index_cpu=Layer<Dtype>::index[0]->mutable_cpu_data();
Dtype* index_cpu1=Layer<Dtype>::index1[0]->mutable_cpu_data();
int index[feature_num];
int index1[feature_num];
for(int i=0;i<feature_num;i++)
{
index[i]=index_cpu[i];
index1[i]=index_cpu1[i];
}
Dtype* ifftshift_mask;Dtype* fftshift_mask; Dtype* weighted_sample_real;Dtype* weighted_sample_imag;
Dtype* sample_real; Dtype* sample_imag;
Dtype* KK_real;Dtype* KK_imag;
Dtype* tmp_real1;Dtype* tmp_imag1;
Dtype* hf_real;
Dtype* hf_imag; Dtype* laplace_real; Dtype* laplace_imag; Dtype* mask;
//========================================================================================
for(int blob_id=0;blob_id<feature_num; blob_id++)
{
// printf("the value of blob_id is %d\n\n",blob_id);
if(blob_id!=2)
{
ifftshift_mask=Layer<Dtype>::ifftshift_mask[0]->mutable_gpu_data();
fftshift_mask=Layer<Dtype>::fftshift_mask[0]->mutable_gpu_data();
weighted_sample_real=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->mutable_gpu_data();
weighted_sample_imag=Layer<Dtype>::first_layer_weighted_sample_imag[blob_id]->mutable_gpu_data();
sample_real=Layer<Dtype>::first_layer_samplef_real[blob_id]->mutable_gpu_data();
sample_imag=Layer<Dtype>::first_layer_samplef_imag[blob_id]->mutable_gpu_data();
KK_real=Layer<Dtype>::KK_real[blob_id]->mutable_gpu_data();
KK_imag=Layer<Dtype>::KK_imag[blob_id]->mutable_gpu_data();
tmp_real1=Layer<Dtype>::first_layer_tmp_real1[blob_id]->mutable_gpu_data();
tmp_imag1=Layer<Dtype>::first_layer_tmp_imag1[blob_id]->mutable_gpu_data();
hf_real=Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data();
hf_imag=Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data();
laplace_real=Layer<Dtype>::laplace_real[blob_id]->mutable_gpu_data();
laplace_imag=Layer<Dtype>::laplace_imag[blob_id]->mutable_gpu_data();
col_num=Layer<Dtype>::first_layer_hf_real[blob_id]->height(); row_num=Layer<Dtype>::first_layer_hf_real[blob_id]->height(); num_per_channel1=row_num*(col_num/2+1);
num_per_channel2=row_num*col_num;
count1=this->blobs_[blob_id]->channels()*row_num*(col_num/2+1); // only half of the variables are considered for the inverse fftshift
count2=this->blobs_[blob_id]->channels()*row_num*col_num;
count3=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->count();
number_per_sample=this->blobs_[blob_id]->channels()*(col_num/2+1)*row_num;
ifftshift<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1, num_per_channel1, ifftshift_mask, Layer<Dtype>::matlab_hf_real[blob_id]->mutable_gpu_data() , Layer<Dtype>::matlab_hf_imag[blob_id]->mutable_gpu_data(), this->d_freq2,row_num, col_num,num_per_channel1);
ifft2(this->inverse_plan[blob_id],this->d_freq2,this->d_in2);
scale_factor=col_num*row_num;
scale_out_real<<<CAFFE_GET_BLOCKS(count2), CAFFE_CUDA_NUM_THREADS>>>(count2,this->d_in2,scale_factor);
// first verify that the computed d_in is correct
// top[0]->Reshape(1,Layer<Dtype>::KK_real[blob_id]->channels(),77,77);
// zero out laplace_real and laplace_imag
set_zeros<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1, laplace_real);
set_zeros<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1, laplace_imag);
// top[0]->Reshape(2,Layer<Dtype>::first_layer_tmp_real1[blob_id]->channels(),Layer<Dtype>::first_layer_tmp_real1[blob_id]->height(),Layer<Dtype>::first_layer_tmp_real1[blob_id]->width());
// printf("the size is %d %d\n",Layer<Dtype>::patch_mask[0]->height(),Layer<Dtype>::patch_mask[0]->width());
mask=Layer<Dtype>::binary_mask[0]->mutable_gpu_data();
add_mask<<<CAFFE_GET_BLOCKS(count2), CAFFE_CUDA_NUM_THREADS>>>(count2, num_per_channel2,mask, this->d_in2, this->d_in_tmp2);
// write the result into H_masked so it can be read out later
caffe_copy(Layer<Dtype>::H_masked[blob_id]->count(),(Dtype*)this->d_in_tmp2,Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data());
fft2(this->forward_plan[blob_id],this->d_in_tmp2,this->d_freq2);
// next compute the inner product between hf and samplesf
fftshift<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1,num_per_channel1,fftshift_mask,this->d_freq2,Layer<Dtype>::hf_tmp_real[blob_id]->mutable_gpu_data(),
Layer<Dtype>::hf_tmp_imag[blob_id]->mutable_gpu_data());
mask=Layer<Dtype>::binary_mask_adaptive[0]->mutable_gpu_data();
add_mask<<<CAFFE_GET_BLOCKS(count2), CAFFE_CUDA_NUM_THREADS>>>(count2, num_per_channel2,mask, this->d_in2, this->d_in_tmp2);
fft2(this->forward_plan[blob_id],this->d_in_tmp2,this->d_freq2);
// next compute the inner product between hf and samplesf
fftshift<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1,num_per_channel1,fftshift_mask,this->d_freq2,Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data(),
Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data());
set_zeros<<<CAFFE_GET_BLOCKS(count3), CAFFE_CUDA_NUM_THREADS>>>(count3,weighted_sample_real);
set_zeros<<<CAFFE_GET_BLOCKS(count3), CAFFE_CUDA_NUM_THREADS>>>(count3,weighted_sample_imag);
my_weight_sample_kernel<<<CAFFE_GET_BLOCKS(count3), CAFFE_CUDA_NUM_THREADS>>>(count3, sample_real, sample_imag,hf_real, hf_imag, weighted_sample_real,weighted_sample_imag,number_per_sample, num_per_channel1);
}
else
{
ifftshift_mask=Layer<Dtype>::ifftshift_mask[1]->mutable_gpu_data();
fftshift_mask=Layer<Dtype>::fftshift_mask[1]->mutable_gpu_data();
weighted_sample_real=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->mutable_gpu_data();
weighted_sample_imag=Layer<Dtype>::first_layer_weighted_sample_imag[blob_id]->mutable_gpu_data();
sample_real=Layer<Dtype>::first_layer_samplef_real[blob_id]->mutable_gpu_data();
sample_imag=Layer<Dtype>::first_layer_samplef_imag[blob_id]->mutable_gpu_data();
KK_real=Layer<Dtype>::KK_real[blob_id]->mutable_gpu_data();
KK_imag=Layer<Dtype>::KK_imag[blob_id]->mutable_gpu_data();
tmp_real1=Layer<Dtype>::first_layer_tmp_real1[blob_id]->mutable_gpu_data();
tmp_imag1=Layer<Dtype>::first_layer_tmp_imag1[blob_id]->mutable_gpu_data();
hf_real=Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data();
hf_imag=Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data();
laplace_real=Layer<Dtype>::laplace_real[blob_id]->mutable_gpu_data();
laplace_imag=Layer<Dtype>::laplace_imag[blob_id]->mutable_gpu_data();
col_num=Layer<Dtype>::first_layer_hf_real[blob_id]->height(); row_num=Layer<Dtype>::first_layer_hf_real[blob_id]->height(); num_per_channel1=row_num*(col_num/2+1);
num_per_channel2=row_num*col_num;
count1=this->blobs_[blob_id]->channels()*row_num*(col_num/2+1); // only half of the variables are considered for the inverse fftshift
count2=this->blobs_[blob_id]->channels()*row_num*col_num;
count3=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->count();
number_per_sample=this->blobs_[blob_id]->channels()*(col_num/2+1)*row_num;
ifftshift<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1, num_per_channel1, ifftshift_mask, Layer<Dtype>::matlab_hf_real[blob_id]->mutable_gpu_data() , Layer<Dtype>::matlab_hf_imag[blob_id]->mutable_gpu_data(), this->d_freq3,row_num, col_num,num_per_channel1);
ifft2(this->inverse_plan[blob_id],this->d_freq3,this->d_in3);
scale_factor=col_num*row_num;
scale_out_real<<<CAFFE_GET_BLOCKS(count2), CAFFE_CUDA_NUM_THREADS>>>(count2,this->d_in3,scale_factor);
// first verify that the computed d_in is correct
// top[0]->Reshape(1,Layer<Dtype>::KK_real[blob_id]->channels(),77,77);
// zero out laplace_real and laplace_imag
set_zeros<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1, laplace_real);
set_zeros<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1, laplace_imag);
mask=Layer<Dtype>::binary_mask[1]->mutable_gpu_data();
add_mask<<<CAFFE_GET_BLOCKS(count2), CAFFE_CUDA_NUM_THREADS>>>(count2, num_per_channel2,mask, this->d_in3, this->d_in_tmp3);
// write the result into H_masked so it can be read out later
caffe_copy(Layer<Dtype>::H_masked[blob_id]->count(),(Dtype*)this->d_in_tmp3,Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data());
fft2(this->forward_plan[blob_id],this->d_in_tmp3,this->d_freq3);
// next compute the inner product between hf and samplesf
fftshift<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1,num_per_channel1,fftshift_mask,this->d_freq3,Layer<Dtype>::hf_tmp_real[blob_id]->mutable_gpu_data(),
Layer<Dtype>::hf_tmp_imag[blob_id]->mutable_gpu_data());
// top[0]->Reshape(2,Layer<Dtype>::first_layer_tmp_real1[blob_id]->channels(),Layer<Dtype>::first_layer_tmp_real1[blob_id]->height(),Layer<Dtype>::first_layer_tmp_real1[blob_id]->width());
mask=Layer<Dtype>::binary_mask_adaptive[1]->mutable_gpu_data();
add_mask<<<CAFFE_GET_BLOCKS(count2), CAFFE_CUDA_NUM_THREADS>>>(count2, num_per_channel2,mask, this->d_in3, this->d_in_tmp3);
fft2(this->forward_plan[blob_id],this->d_in_tmp3,this->d_freq3);
fftshift<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1,num_per_channel1,fftshift_mask,this->d_freq3,Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data(),
Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data());
set_zeros<<<CAFFE_GET_BLOCKS(count3), CAFFE_CUDA_NUM_THREADS>>>(count3,weighted_sample_real);
set_zeros<<<CAFFE_GET_BLOCKS(count3), CAFFE_CUDA_NUM_THREADS>>>(count3,weighted_sample_imag);
my_weight_sample_kernel<<<CAFFE_GET_BLOCKS(count3), CAFFE_CUDA_NUM_THREADS>>>(count3, sample_real, sample_imag,hf_real, hf_imag, weighted_sample_real,weighted_sample_imag,number_per_sample,num_per_channel1);
}
}
/*
Dtype* inner_product_result;
Dtype* tmp;
inner_product_result=Layer<Dtype>::inner_product_result[0]->mutable_gpu_data();
// first compute the inner product of weighted_sample_real and weighted_sample_imag here
for(int blob_id=0;blob_id<feature_num;blob_id++)
{
number_per_sample=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->height()*Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->width();
for(int sample_id=0; sample_id<sample_num;sample_id++)
{
tmp=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->mutable_gpu_data()+number_per_sample*sample_id;
if(sample_id==0&&blob_id==0)
{
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, 1, 1, number_per_sample,
(Dtype)2*sample_weight_cpu[sample_id] , tmp, tmp, (Dtype)0., inner_product_result);
}
else
{
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, 1, 1, number_per_sample,
(Dtype)2*sample_weight_cpu[sample_id], tmp, tmp, (Dtype)1, inner_product_result);
}
// then add the imaginary part
tmp=Layer<Dtype>::first_layer_weighted_sample_imag[blob_id]->mutable_gpu_data()+number_per_sample*sample_id;
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, 1, 1, number_per_sample,
(Dtype)2*sample_weight_cpu[sample_id], tmp, tmp, (Dtype)1, inner_product_result);
}
}
*/
Dtype* L_index=Layer<Dtype>::L_index[0]->mutable_gpu_data();
set_zeros<<<CAFFE_GET_BLOCKS(Layer<Dtype>::sh_real[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(Layer<Dtype>::sh_real[0]->count(),Layer<Dtype>::sh_real[0]->mutable_gpu_data());
set_zeros<<<CAFFE_GET_BLOCKS(Layer<Dtype>::sh_imag[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(Layer<Dtype>::sh_imag[0]->count(),Layer<Dtype>::sh_imag[0]->mutable_gpu_data());
Dtype* sh_real=Layer<Dtype>::sh_real[0]->mutable_gpu_data();
Dtype* sh_imag=Layer<Dtype>::sh_imag[0]->mutable_gpu_data();
for(int blob_id=0;blob_id<feature_num;blob_id++)
{
if(blob_id!=2)
{
caffe_gpu_add(Layer<Dtype>::sh_real[0]->count(),sh_real,Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->mutable_gpu_data(),sh_real);
caffe_gpu_add(Layer<Dtype>::sh_imag[0]->count(),sh_imag,Layer<Dtype>::first_layer_weighted_sample_imag[blob_id]->mutable_gpu_data(),sh_imag);
}
else
{
int count7=Layer<Dtype>::first_layer_weighted_sample_real[0]->count();
num_per_channel1=Layer<Dtype>::first_layer_hf_real[0]->height()*(Layer<Dtype>::first_layer_hf_real[0]->width());
num_per_channel2=Layer<Dtype>::first_layer_hf_real[2]->height()*(Layer<Dtype>::first_layer_hf_real[2]->width());
//printf("the value is %d %d\n\n",num_per_channel1,num_per_channel2);
add_different_layers<<<CAFFE_GET_BLOCKS(count7), CAFFE_CUDA_NUM_THREADS>>>(count7,num_per_channel1, num_per_channel2, L_index, Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->mutable_gpu_data(),Layer<Dtype>::first_layer_weighted_sample_imag[blob_id]->mutable_gpu_data(), sh_real, sh_imag);
}
}
// next use sh_real and sh_imag to compute the output
Dtype* L_index1=Layer<Dtype>::L_index1[0]->mutable_gpu_data();
for(int blob_id=0;blob_id<feature_num;blob_id++)
{
if(blob_id!=2)
{
count1=this->blobs_[blob_id]->channels()*Layer<Dtype>::first_layer_hf_real[blob_id]->height()*Layer<Dtype>::first_layer_hf_real[blob_id]->width();
num_per_channel1=Layer<Dtype>::first_layer_hf_real[blob_id]->height()*Layer<Dtype>::first_layer_hf_real[blob_id]->width();
number_per_sample=num_per_channel1*this->blobs_[blob_id]->channels();
KK_real=Layer<Dtype>::KK_real[blob_id]->mutable_gpu_data(); KK_imag=Layer<Dtype>::KK_imag[blob_id]->mutable_gpu_data();
sample_real=Layer<Dtype>::first_layer_samplef_real[blob_id]->mutable_gpu_data();
sample_imag=Layer<Dtype>::first_layer_samplef_imag[blob_id]->mutable_gpu_data();
set_zeros<<<CAFFE_GET_BLOCKS(Layer<Dtype>::KK_real[blob_id]->count()), CAFFE_CUDA_NUM_THREADS>>>(Layer<Dtype>::KK_real[blob_id]->count(),KK_real);
set_zeros<<<CAFFE_GET_BLOCKS(Layer<Dtype>::KK_real[blob_id]->count()), CAFFE_CUDA_NUM_THREADS>>>(Layer<Dtype>::KK_real[blob_id]->count(),KK_imag);
weight_sample_kernel_second<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1, sample_real, sample_imag, sh_real, sh_imag, KK_real,KK_imag,
sample_weight, number_per_sample,num_per_channel1, sample_num);
}
else
{
count1=this->blobs_[blob_id]->channels()*Layer<Dtype>::first_layer_hf_real[blob_id]->height()*Layer<Dtype>::first_layer_hf_real[blob_id]->width();
count2=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->count();
num_per_channel1=Layer<Dtype>::first_layer_hf_real[blob_id]->height()*Layer<Dtype>::first_layer_hf_real[blob_id]->width();
num_per_channel2=Layer<Dtype>::first_layer_hf_real[0]->height()*Layer<Dtype>::first_layer_hf_real[0]->width();
number_per_sample=num_per_channel1*this->blobs_[blob_id]->channels();
weighted_sample_real=Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->mutable_gpu_data();
weighted_sample_imag=Layer<Dtype>::first_layer_weighted_sample_imag[blob_id]->mutable_gpu_data();
KK_real=Layer<Dtype>::KK_real[blob_id]->mutable_gpu_data(); KK_imag=Layer<Dtype>::KK_imag[blob_id]->mutable_gpu_data();
set_zeros<<<CAFFE_GET_BLOCKS(Layer<Dtype>::KK_real[blob_id]->count()), CAFFE_CUDA_NUM_THREADS>>>(Layer<Dtype>::KK_real[blob_id]->count(),KK_real);
set_zeros<<<CAFFE_GET_BLOCKS(Layer<Dtype>::KK_real[blob_id]->count()), CAFFE_CUDA_NUM_THREADS>>>(Layer<Dtype>::KK_real[blob_id]->count(),KK_imag);
sample_real=Layer<Dtype>::first_layer_samplef_real[blob_id]->mutable_gpu_data();
sample_imag=Layer<Dtype>::first_layer_samplef_imag[blob_id]->mutable_gpu_data();
crop_sample<<<CAFFE_GET_BLOCKS(count2), CAFFE_CUDA_NUM_THREADS>>>(count2,num_per_channel1,num_per_channel2, L_index1, sh_real, sh_imag, Layer<Dtype>::first_layer_weighted_sample_real[blob_id]->mutable_gpu_data(), Layer<Dtype>::first_layer_weighted_sample_imag[blob_id]->mutable_gpu_data());
// then perform the second inner product with the samples
weight_sample_kernel_second<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1, sample_real, sample_imag, weighted_sample_real, weighted_sample_imag, KK_real,KK_imag,sample_weight, number_per_sample,num_per_channel1, sample_num);
}
}
int count_H;
int num_per_channel_real;
Dtype* App;
// compute ATAW_MC
Dtype* resolution_data=Layer<Dtype>::resolution_index[0]->mutable_cpu_data();
int resolution_index=resolution_data[0];
Dtype* ATAW_positive_index;
Dtype* ATAW_negative_index;
if(resolution_index==1)
{
//printf("we select this branch\n");
for(int blob_id=0; blob_id<feature_num;blob_id++)
{
count_H=Layer<Dtype>::H_transpose[blob_id]->count();
num_per_channel_real=Layer<Dtype>::H_transpose[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->width();
Dtype* H_masked=Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data();
Dtype* H_transpose=Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data();
compupte_H_transpose<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data(), Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),
num_per_channel_real, Layer<Dtype>::H_masked[blob_id]->height(), Layer<Dtype>::H_masked[blob_id]->width());
count_H=Layer<Dtype>::Ap[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
compute_AW<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, H_transpose, Layer<Dtype>::Ap[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW[blob_id]->mutable_gpu_data(), Layer<Dtype>::Ap[blob_id]->height(), Layer<Dtype>::Ap[blob_id]->width(), num_per_channel_real);
// next compute ATAW_MC
ATAW_positive_index=Layer<Dtype>::ATAW_positive_index[blob_id]->mutable_gpu_data();
ATAW_negative_index=Layer<Dtype>::ATAW_negative_index[blob_id]->mutable_gpu_data();
//if(blob_id==1)
// {
// top[0]->Reshape(1,1,Layer<Dtype>::AW[blob_id]->width(),1);
// caffe_copy(top[0]->count(),Layer<Dtype>::AW[blob_id]->mutable_gpu_data(),top[0]->mutable_gpu_data());
// }
// first zero the output variable
count_H=Layer<Dtype>::ATAW_MC[blob_id]->height()*Layer<Dtype>::ATAW_MC[blob_id]->width()*Layer<Dtype>::ATAW_MC[blob_id]->channels();
set_zeros<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data());
count_H=Layer<Dtype>::ATAW_positive_index[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
compute_ATAW_positive<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, ATAW_positive_index, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW[blob_id]->mutable_gpu_data() , Layer<Dtype>::Ap[blob_id]->height(), Layer<Dtype>::ATAW_positive_index[blob_id]->height(), Layer<Dtype>::ATAW_positive_index[blob_id]->width(), num_per_channel_real);
count_H=Layer<Dtype>::ATAW_negative_index[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
compute_ATAW_negative<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, ATAW_negative_index, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW[blob_id]->mutable_gpu_data() , Layer<Dtype>::Ap[blob_id]->height(), Layer<Dtype>::ATAW_negative_index[blob_id]->height(), Layer<Dtype>::ATAW_negative_index[blob_id]->width(), num_per_channel_real);
count_H=Layer<Dtype>::H_transpose[blob_id]->count();
compupte_H_transpose<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),
num_per_channel_real, Layer<Dtype>::H_masked[blob_id]->height(), Layer<Dtype>::H_masked[blob_id]->width());
// if(blob_id==4)
// {
// top[0]->Reshape(1,Layer<Dtype>::H_transpose[blob_id]->channels(),Layer<Dtype>::H_transpose[blob_id]->height(),Layer<Dtype>::H_transpose[blob_id]->width());
// caffe_copy(top[0]->count(),Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),top[0]->mutable_gpu_data());
// }
}
}
else
{
//printf("we select this branch\n");
for(int blob_id=0; blob_id<feature_num;blob_id++)
{
if(blob_id!=2)
{
count_H=Layer<Dtype>::H_transpose[blob_id]->count();
num_per_channel_real=Layer<Dtype>::H_transpose[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->width();
Dtype* H_masked=Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data();
Dtype* H_transpose=Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data();
compupte_H_transpose<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data(), Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),
num_per_channel_real, Layer<Dtype>::H_masked[blob_id]->height(), Layer<Dtype>::H_masked[blob_id]->width());
count_H=Layer<Dtype>::Ap1[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
compute_AW<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, H_transpose, Layer<Dtype>::Ap1[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW1[blob_id]->mutable_gpu_data(), Layer<Dtype>::Ap1[blob_id]->height(), Layer<Dtype>::Ap1[blob_id]->width(), num_per_channel_real);
// next compute ATAW_MC
ATAW_positive_index=Layer<Dtype>::ATAW_positive_index1[blob_id]->mutable_gpu_data();
ATAW_negative_index=Layer<Dtype>::ATAW_negative_index1[blob_id]->mutable_gpu_data();
// first zero the output variable
count_H=Layer<Dtype>::ATAW_MC[blob_id]->height()*Layer<Dtype>::ATAW_MC[blob_id]->width()*Layer<Dtype>::ATAW_MC[blob_id]->channels();
set_zeros<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data());
count_H=Layer<Dtype>::ATAW_positive_index1[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
compute_ATAW_positive<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, ATAW_positive_index, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW1[blob_id]->mutable_gpu_data() , Layer<Dtype>::Ap1[blob_id]->height(), Layer<Dtype>::ATAW_positive_index1[blob_id]->height(), Layer<Dtype>::ATAW_positive_index1[blob_id]->width(), num_per_channel_real);
count_H=Layer<Dtype>::ATAW_negative_index1[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
compute_ATAW_negative<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, ATAW_negative_index, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW1[blob_id]->mutable_gpu_data() , Layer<Dtype>::Ap1[blob_id]->height(), Layer<Dtype>::ATAW_negative_index1[blob_id]->height(), Layer<Dtype>::ATAW_negative_index1[blob_id]->width(), num_per_channel_real);
count_H=Layer<Dtype>::H_transpose[blob_id]->count();
compupte_H_transpose<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),
num_per_channel_real, Layer<Dtype>::H_masked[blob_id]->height(), Layer<Dtype>::H_masked[blob_id]->width());
// if(blob_id==4)
// {
// top[0]->Reshape(1,Layer<Dtype>::H_transpose[blob_id]->channels(),Layer<Dtype>::H_transpose[blob_id]->height(),Layer<Dtype>::H_transpose[blob_id]->width());
// caffe_copy(top[0]->count(),Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),top[0]->mutable_gpu_data());
// }
}
else
{
count_H=Layer<Dtype>::H_transpose[blob_id]->count();
num_per_channel_real=Layer<Dtype>::H_transpose[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->width();
Dtype* H_masked=Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data();
Dtype* H_transpose=Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data();
compupte_H_transpose<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data(), Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),
num_per_channel_real, Layer<Dtype>::H_masked[blob_id]->height(), Layer<Dtype>::H_masked[blob_id]->width());
count_H=Layer<Dtype>::Ap[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
compute_AW<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, H_transpose, Layer<Dtype>::Ap[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW[blob_id]->mutable_gpu_data(), Layer<Dtype>::Ap[blob_id]->height(), Layer<Dtype>::Ap[blob_id]->width(), num_per_channel_real);
// next compute ATAW_MC
ATAW_positive_index=Layer<Dtype>::ATAW_positive_index[blob_id]->mutable_gpu_data();
ATAW_negative_index=Layer<Dtype>::ATAW_negative_index[blob_id]->mutable_gpu_data();
//if(blob_id==1)
// {
// top[0]->Reshape(1,1,Layer<Dtype>::AW[blob_id]->width(),1);
// caffe_copy(top[0]->count(),Layer<Dtype>::AW[blob_id]->mutable_gpu_data(),top[0]->mutable_gpu_data());
// }
// first zero the output variable
count_H=Layer<Dtype>::ATAW_MC[blob_id]->height()*Layer<Dtype>::ATAW_MC[blob_id]->width()*Layer<Dtype>::ATAW_MC[blob_id]->channels();
set_zeros<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data());
count_H=Layer<Dtype>::ATAW_positive_index[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
compute_ATAW_positive<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, ATAW_positive_index, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW[blob_id]->mutable_gpu_data() , Layer<Dtype>::Ap[blob_id]->height(), Layer<Dtype>::ATAW_positive_index[blob_id]->height(), Layer<Dtype>::ATAW_positive_index[blob_id]->width(), num_per_channel_real);
count_H=Layer<Dtype>::ATAW_negative_index[blob_id]->height()*Layer<Dtype>::H_transpose[blob_id]->channels();
compute_ATAW_negative<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, ATAW_negative_index, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::AW[blob_id]->mutable_gpu_data() , Layer<Dtype>::Ap[blob_id]->height(), Layer<Dtype>::ATAW_negative_index[blob_id]->height(), Layer<Dtype>::ATAW_negative_index[blob_id]->width(), num_per_channel_real);
count_H=Layer<Dtype>::H_transpose[blob_id]->count();
compupte_H_transpose<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, Layer<Dtype>::ATAW_MC[blob_id]->mutable_gpu_data(), Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),
num_per_channel_real, Layer<Dtype>::H_masked[blob_id]->height(), Layer<Dtype>::H_masked[blob_id]->width());
}
}
}
// second layer merged into this file
Dtype lambda1=0.1; Dtype lambda2=1; Dtype lambda3=0.0;
Dtype* data1=Layer<Dtype>::mu[0]->mutable_cpu_data();
Dtype* data2=Layer<Dtype>::eta[0]->mutable_cpu_data();
Dtype mu=data1[0]; Dtype eta=data2[0];
Dtype* data11=Layer<Dtype>::factor[0]->mutable_cpu_data();
Dtype factor=data11[0];
for(int blob_id=0;blob_id<feature_num;blob_id++)
{
if(blob_id!=2)
{
count1=Layer<Dtype>::KK_real[blob_id]->count();
num_per_channel1=Layer<Dtype>::first_layer_hf_real[blob_id]->height()*Layer<Dtype>::first_layer_hf_real[blob_id]->width();
ifftshift_mask=Layer<Dtype>::ifftshift_mask[0]->mutable_gpu_data();
fftshift_mask=Layer<Dtype>::fftshift_mask[0]->mutable_gpu_data();
row_num=Layer<Dtype>::KK_real[blob_id]->height(); col_num=row_num;
num_per_channel2=row_num*col_num;
count2=num_per_channel2*this->blobs_[blob_id]->channels();
ifftshift_second<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1, num_per_channel1, ifftshift_mask, Layer<Dtype>::KK_real[blob_id]->mutable_gpu_data() , Layer<Dtype>::KK_imag[blob_id]->mutable_gpu_data(), this->d_freq2,row_num, col_num,num_per_channel1);
ifft2_second(this->inverse_plan[blob_id],this->d_freq2,this->d_in2);
scale_factor=col_num*row_num;
scale_out_real_second<<<CAFFE_GET_BLOCKS(count2), CAFFE_CUDA_NUM_THREADS>>>(count2,this->d_in2,scale_factor);
mask=Layer<Dtype>::binary_mask_adaptive[0]->mutable_gpu_data();
add_mask_second<<<CAFFE_GET_BLOCKS(count2), CAFFE_CUDA_NUM_THREADS>>>(count2, num_per_channel2,mask, this->d_in2, this->d_in_tmp2);
num_per_channel_real=Layer<Dtype>::H_masked[blob_id]->height()*Layer<Dtype>::H_masked[blob_id]->width();
count_H=Layer<Dtype>::H_masked[blob_id]->count();
mask=Layer<Dtype>::reg_window[0]->mutable_gpu_data();
add_reg_mask<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, num_per_channel_real, mask, Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data(),Layer<Dtype>::H_reged[blob_id]->mutable_gpu_data());
// weighted sum of the current output, H_masked and ATAW_MC
caffe_gpu_add1(count2,(Dtype*) this->d_in_tmp2,Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),(Dtype)1.0,mu, (Dtype*)this->d_in_tmp2);
caffe_gpu_add1(count2,(Dtype*) this->d_in_tmp2,Layer<Dtype>::H_reged[blob_id]->mutable_gpu_data(),(Dtype)1.0,eta, (Dtype*)this->d_in_tmp2);
fft2_second(this->forward_plan[blob_id],this->d_in_tmp2,this->d_freq2);
fftshift_second<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1,num_per_channel1,fftshift_mask,this->d_freq2,Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data(),Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data());
// store the result into the blob
caffe_copy(this->blobs_[blob_id]->count()/2,Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data(),this->blobs_[blob_id]->mutable_gpu_data());
caffe_copy(this->blobs_[blob_id]->count()/2,Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data(),this->blobs_[blob_id]->mutable_gpu_data()+this->blobs_[blob_id]->count()/2);
// printf("%d %d %d %d\n",this->blobs_[blob_id]->num(),this->blobs_[blob_id]->channels(),this->blobs_[blob_id]->height(),this->blobs_[blob_id]->width());
}
else
{
count1=Layer<Dtype>::KK_real[blob_id]->count();
num_per_channel1=Layer<Dtype>::first_layer_hf_real[blob_id]->height()*Layer<Dtype>::first_layer_hf_real[blob_id]->width();
ifftshift_mask=Layer<Dtype>::ifftshift_mask[1]->mutable_gpu_data();
fftshift_mask=Layer<Dtype>::fftshift_mask[1]->mutable_gpu_data();
row_num=Layer<Dtype>::KK_real[blob_id]->height(); col_num=row_num;
num_per_channel2=row_num*col_num;
count2=num_per_channel2*this->blobs_[blob_id]->channels();
ifftshift_second<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1, num_per_channel1, ifftshift_mask, Layer<Dtype>::KK_real[blob_id]->mutable_gpu_data() , Layer<Dtype>::KK_imag[blob_id]->mutable_gpu_data(), this->d_freq3,row_num, col_num,num_per_channel1);
ifft2_second(this->inverse_plan[blob_id],this->d_freq3,this->d_in3);
scale_factor=col_num*row_num;
scale_out_real_second<<<CAFFE_GET_BLOCKS(count2), CAFFE_CUDA_NUM_THREADS>>>(count2,this->d_in3,scale_factor);
mask=Layer<Dtype>::binary_mask_adaptive[1]->mutable_gpu_data();
add_mask_second<<<CAFFE_GET_BLOCKS(count2), CAFFE_CUDA_NUM_THREADS>>>(count2, num_per_channel2,mask, this->d_in3, this->d_in_tmp3);
num_per_channel_real=Layer<Dtype>::H_masked[blob_id]->height()*Layer<Dtype>::H_masked[blob_id]->width();
count_H=Layer<Dtype>::H_masked[blob_id]->count();
mask=Layer<Dtype>::reg_window[1]->mutable_gpu_data();
add_reg_mask<<<CAFFE_GET_BLOCKS(count_H), CAFFE_CUDA_NUM_THREADS>>>(count_H, num_per_channel_real, mask, Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data(),Layer<Dtype>::H_reged[blob_id]->mutable_gpu_data());
// weighted sum of the current output, H_masked and ATAW_MC
caffe_gpu_add1(count2,(Dtype*)
this->d_in_tmp3,Layer<Dtype>::H_transpose[blob_id]->mutable_gpu_data(),(Dtype)1.0,(Dtype)factor*mu, (Dtype*)this->d_in_tmp3);
caffe_gpu_add1(count2,(Dtype*) this->d_in_tmp3,Layer<Dtype>::H_masked[blob_id]->mutable_gpu_data(),(Dtype)1.0, (Dtype)0, (Dtype*)this->d_in_tmp3);
fft2_second(this->forward_plan[blob_id],this->d_in_tmp3,this->d_freq3);
fftshift_second<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1,num_per_channel1,fftshift_mask,this->d_freq3,Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data(),Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data());
// printf("the frame_id is %d\n",frame_id);
caffe_copy(this->blobs_[blob_id]->count()/2,Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data(),this->blobs_[blob_id]->mutable_gpu_data());
caffe_copy(this->blobs_[blob_id]->count()/2,Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data(),this->blobs_[blob_id]->mutable_gpu_data()+this->blobs_[blob_id]->count()/2);
}
}
top[0]->Reshape(Layer<Dtype>::matlab_hf_real[0]->num(),Layer<Dtype>::matlab_hf_real[0]->channels(),Layer<Dtype>::matlab_hf_real[0]->height(),Layer<Dtype>::matlab_hf_real[0]->width());
//caffe_copy(top[0]->count(),Layer<Dtype>::matlab_hf_real[0]->mutable_gpu_data(),top[0]->mutable_gpu_data());
caffe_copy(top[0]->count(),Layer<Dtype>::first_layer_hf_real[0]->mutable_gpu_data(),top[0]->mutable_gpu_data());
Dtype* clear_memory_cpu=Layer<Dtype>::clear_memory[0]->mutable_cpu_data();
if(clear_memory_cpu[0]>0.5) // free the allocated GPU memory and FFT plans
{
cudaFree(this->d_in1); cudaFree(this->d_in2); cudaFree(this->d_in3); cudaFree(this->d_in4);
cudaFree(this->d_in_tmp1); cudaFree(this->d_in_tmp2); cudaFree(this->d_in_tmp3); cudaFree(this->d_in_tmp4);
cudaFree(this->d_freq1); cudaFree(this->d_freq2); cudaFree(this->d_freq3); cudaFree(this->d_freq4);
cudaFree(this->d_in_total1); cudaFree(this->d_in_total2);
cudaFree(this->d_freq_total1); cudaFree(this->d_freq_total2);
cudaFree(this->d_in_sub_total1); cudaFree(this->d_in_sub_total2);
cudaFree(this->d_freq_sub_total1); cudaFree(this->d_freq_sub_total2);
cufftDestroy(this->forward_plan[0]); cufftDestroy(this->forward_plan[1]); cufftDestroy(this->forward_plan[2]); cufftDestroy(this->forward_plan[3]);
cufftDestroy(this->forward_plan_total[0]); cufftDestroy(this->forward_plan_total[1]);
cufftDestroy(this->forward_plan_sub_total[0]); cufftDestroy(this->forward_plan_sub_total[1]);
cufftDestroy(this->inverse_plan[0]); cufftDestroy(this->inverse_plan[1]); cufftDestroy(this->inverse_plan[2]); cufftDestroy(this->inverse_plan[3]);
cufftDestroy(this->inverse_plan_total[0]); cufftDestroy(this->inverse_plan_total[1]);
if(feature_num==5)
{ printf("the memory is released\n");
cudaFree(this->d_in5);
cudaFree(this->d_in_tmp5);
cudaFree(this->d_freq5);
cufftDestroy(this->forward_plan[4]);
cufftDestroy(this->inverse_plan[4]);
}
}
//Dtype* sample_real_cpu=Layer<Dtype>::first_layer_samplef_real[0]->mutable_cpu_data();
//Dtype* sample_imag_cpu=Layer<Dtype>::first_layer_samplef_imag[0]->mutable_cpu_data();
//Dtype* sh_real_cpu=Layer<Dtype>::sh_real[0]->mutable_cpu_data();
//Dtype* sh_imag_cpu=Layer<Dtype>::sh_imag[0]->mutable_cpu_data();
// next we try to sum the weighted_sample_real buffers together
//top[0]->Reshape(Layer<Dtype>::first_layer_weighted_sample_real[0]->num(),Layer<Dtype>::first_layer_weighted_sample_real[0]->channels(),Layer<Dtype>::first_layer_weighted_sample_real[0]->height(),
//Layer<Dtype>::first_layer_weighted_sample_real[0]->width());
//caffe_copy(top[0]->count(),sh_imag,top[0]->mutable_gpu_data());
//top[0]->Reshape(Layer<Dtype>::KK_real[2]->num(),Layer<Dtype>::KK_real[2]->channels(),Layer<Dtype>::KK_real[2]->height(),Layer<Dtype>::KK_real[2]->width());
//caffe_copy(top[0]->count(),Layer<Dtype>::KK_imag[2]->mutable_gpu_data(),top[0]->mutable_gpu_data());
// top[0]->Reshape(Layer<Dtype>::first_layer_weighted_sample_real[2]->num(),Layer<Dtype>::first_layer_weighted_sample_real[2]->channels(),Layer<Dtype>::first_layer_weighted_sample_real[2]->height(),Layer<Dtype>::first_layer_weighted_sample_real[2]->width());
//caffe_copy(top[0]->count(),Layer<Dtype>::first_layer_weighted_sample_real[2]->mutable_gpu_data(),top[0]->mutable_gpu_data());
}
template <typename Dtype>
void WtfLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
}
INSTANTIATE_LAYER_GPU_FUNCS(WtfLayer);
} // namespace caffe
|
33f9dc3cfd90f0f0569bfd2d14a2edc74fde6d8c.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <array/DataTypeUtils.h>
#include <array/PrimaryPointerDeallocator.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <exceptions/cuda_exception.h>
#include <execution/AffinityManager.h>
#include <execution/LaunchContext.h>
#include <helpers/ConstantHelper.h>
#include <helpers/logger.h>
#include <helpers/shape.h>
#include <ops/specials.h>
#define CONSTANT_LIMIT 49152
__constant__ char deviceConstantMemory[CONSTANT_LIMIT];
namespace sd {
static void *getConstantSpace() {
sd::Pointer dConstAddr;
auto dZ = hipGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0) throw cuda_exception::build("hipGetSymbolAddress(...) failed", dZ);
return dConstAddr;
}
int ConstantHelper::getCurrentDevice() { return AffinityManager::currentDeviceId(); }
int ConstantHelper::getNumberOfDevices() { return AffinityManager::numberOfDevices(); }
ConstantHelper::ConstantHelper() {
auto initialDevice = getCurrentDevice();
auto numDevices = getNumberOfDevices();
_devicePointers.resize(numDevices);
_deviceOffsets.resize(numDevices);
_cache.resize(numDevices);
_counters.resize(numDevices);
// filling all pointers
for (int e = 0; e < numDevices; e++) {
auto res = hipSetDevice(e);
if (res != 0) throw cuda_exception::build("hipSetDevice failed", res);
auto constant = getConstantSpace();
SD_MAP_IMPL<ConstantDescriptor, ConstantHolder *> devCache;
_devicePointers[e] = constant;
_deviceOffsets[e] = 0;
_cache[e] = devCache;
_counters[e] = 0L;
}
//
auto res = hipSetDevice(initialDevice);
if (res != 0) throw cuda_exception::build("Final hipSetDevice failed", res);
}
ConstantHelper::~ConstantHelper() {
for (const auto &v : _cache) {
for (const auto &c : v) {
delete c.second;
}
}
}
ConstantHelper &ConstantHelper::getInstance() {
static ConstantHelper instance;
return instance;
}
void *ConstantHelper::replicatePointer(void *src, size_t numBytes, memory::Workspace *workspace) {
std::lock_guard<std::mutex> lock(_mutex);
auto deviceId = getCurrentDevice();
sd::Pointer constantPtr = nullptr;
sd::LongType constantOffset = 0L;
if (_devicePointers[deviceId] == 0) {
auto constant = getConstantSpace();
// filling default ptr, which will be 0 probably
_devicePointers[deviceId] = constant;
_deviceOffsets[deviceId] = 0;
constantPtr = constant;
} else {
constantPtr = _devicePointers[deviceId];
constantOffset = _deviceOffsets[deviceId];
}
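// If this request no longer fits into the remaining __constant__ space, fall back to a plain
// device allocation; otherwise copy into deviceConstantMemory at the current per-device
// offset, which is kept 8-byte aligned by rounding numBytes up.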
if (constantOffset + numBytes >= CONSTANT_LIMIT) {
int8_t *ptr = nullptr;
ALLOCATE_SPECIAL(ptr, workspace, numBytes, int8_t);
auto res = hipMemcpy(ptr, src, numBytes, hipMemcpyHostToDevice);
if (res != 0) throw cuda_exception::build("hipMemcpy failed", res);
return ptr;
} else {
auto originalBytes = numBytes;
auto rem = numBytes % 8;
if (rem != 0) numBytes += 8 - rem;
_deviceOffsets[deviceId] += numBytes;
auto res = hipMemcpyToSymbol(deviceConstantMemory, const_cast<const void *>(src), originalBytes, constantOffset,
hipMemcpyHostToDevice);
if (res != 0) throw cuda_exception::build("hipMemcpyToSymbol failed", res);
return reinterpret_cast<int8_t *>(constantPtr) + constantOffset;
}
}
ConstantDataBuffer *ConstantHelper::constantBuffer(const ConstantDescriptor &descriptor, sd::DataType dataType) {
const auto deviceId = getCurrentDevice();
// all cache modifications are synchronous
_mutexHolder.lock();
if (_cache[deviceId].count(descriptor) == 0) {
_cache[deviceId][descriptor] = new ConstantHolder();
}
auto holder = _cache[deviceId][descriptor];
// release cache lock
_mutexHolder.unlock();
ConstantDataBuffer *result;
// access to this holder instance is synchronous
std::lock_guard<std::mutex> lock(*holder->mutex());
if (holder->hasBuffer(dataType)) {
result = holder->getConstantDataBuffer(dataType);
} else {
auto numBytes = descriptor.length() * DataTypeUtils::sizeOf(dataType);
auto cbuff = std::make_shared<PointerWrapper>(new int8_t[numBytes], std::make_shared<PrimaryPointerDeallocator>());
_counters[deviceId] += numBytes;
// create buffer with this dtype
if (descriptor.isFloat()) {
BUILD_DOUBLE_SELECTOR(
sd::DataType::DOUBLE, dataType, sd::SpecialTypeConverter::convertGeneric,
(nullptr, const_cast<double *>(descriptor.floatValues().data()), descriptor.length(), cbuff->pointer()),
(sd::DataType::DOUBLE, double), SD_COMMON_TYPES);
} else if (descriptor.isInteger()) {
BUILD_DOUBLE_SELECTOR(sd::DataType::INT64, dataType, sd::SpecialTypeConverter::convertGeneric,
(nullptr, const_cast<sd::LongType *>(descriptor.integerValues().data()),
descriptor.length(), cbuff->pointer()),
(sd::DataType::INT64, sd::LongType), SD_COMMON_TYPES);
}
// we don't have deallocator here.
// TODO: we probably want to make use deallocator here, if we're not using constant memory
auto dbuff = std::make_shared<PointerWrapper>(
replicatePointer(cbuff->pointer(), descriptor.length() * DataTypeUtils::sizeOf(dataType)));
ConstantDataBuffer dataBuffer(cbuff, dbuff, descriptor.length(), dataType);
holder->addBuffer(dataBuffer, dataType);
result = holder->getConstantDataBuffer(dataType);
}
return result;
}
sd::LongType ConstantHelper::getCachedAmount(int deviceId) {
int numDevices = getNumberOfDevices();
if (deviceId > numDevices || deviceId < 0)
return 0L;
else
return _counters[deviceId];
}
} // namespace sd
| 33f9dc3cfd90f0f0569bfd2d14a2edc74fde6d8c.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <array/DataTypeUtils.h>
#include <array/PrimaryPointerDeallocator.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <exceptions/cuda_exception.h>
#include <execution/AffinityManager.h>
#include <execution/LaunchContext.h>
#include <helpers/ConstantHelper.h>
#include <helpers/logger.h>
#include <helpers/shape.h>
#include <ops/specials.h>
#define CONSTANT_LIMIT 49152
__constant__ char deviceConstantMemory[CONSTANT_LIMIT];
namespace sd {
static void *getConstantSpace() {
sd::Pointer dConstAddr;
auto dZ = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0) throw cuda_exception::build("cudaGetSymbolAddress(...) failed", dZ);
return dConstAddr;
}
int ConstantHelper::getCurrentDevice() { return AffinityManager::currentDeviceId(); }
int ConstantHelper::getNumberOfDevices() { return AffinityManager::numberOfDevices(); }
ConstantHelper::ConstantHelper() {
auto initialDevice = getCurrentDevice();
auto numDevices = getNumberOfDevices();
_devicePointers.resize(numDevices);
_deviceOffsets.resize(numDevices);
_cache.resize(numDevices);
_counters.resize(numDevices);
// filling all pointers
for (int e = 0; e < numDevices; e++) {
auto res = cudaSetDevice(e);
if (res != 0) throw cuda_exception::build("cudaSetDevice failed", res);
auto constant = getConstantSpace();
SD_MAP_IMPL<ConstantDescriptor, ConstantHolder *> devCache;
_devicePointers[e] = constant;
_deviceOffsets[e] = 0;
_cache[e] = devCache;
_counters[e] = 0L;
}
//
auto res = cudaSetDevice(initialDevice);
if (res != 0) throw cuda_exception::build("Final cudaSetDevice failed", res);
}
ConstantHelper::~ConstantHelper() {
for (const auto &v : _cache) {
for (const auto &c : v) {
delete c.second;
}
}
}
ConstantHelper &ConstantHelper::getInstance() {
static ConstantHelper instance;
return instance;
}
void *ConstantHelper::replicatePointer(void *src, size_t numBytes, memory::Workspace *workspace) {
std::lock_guard<std::mutex> lock(_mutex);
auto deviceId = getCurrentDevice();
sd::Pointer constantPtr = nullptr;
sd::LongType constantOffset = 0L;
if (_devicePointers[deviceId] == 0) {
auto constant = getConstantSpace();
// filling default ptr, which will be 0 probably
_devicePointers[deviceId] = constant;
_deviceOffsets[deviceId] = 0;
constantPtr = constant;
} else {
constantPtr = _devicePointers[deviceId];
constantOffset = _deviceOffsets[deviceId];
}
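// If this request no longer fits into the remaining __constant__ space, fall back to a plain
// device allocation; otherwise copy into deviceConstantMemory at the current per-device
// offset, which is kept 8-byte aligned by rounding numBytes up.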
if (constantOffset + numBytes >= CONSTANT_LIMIT) {
int8_t *ptr = nullptr;
ALLOCATE_SPECIAL(ptr, workspace, numBytes, int8_t);
auto res = cudaMemcpy(ptr, src, numBytes, cudaMemcpyHostToDevice);
if (res != 0) throw cuda_exception::build("cudaMemcpy failed", res);
return ptr;
} else {
auto originalBytes = numBytes;
auto rem = numBytes % 8;
if (rem != 0) numBytes += 8 - rem;
_deviceOffsets[deviceId] += numBytes;
auto res = cudaMemcpyToSymbol(deviceConstantMemory, const_cast<const void *>(src), originalBytes, constantOffset,
cudaMemcpyHostToDevice);
if (res != 0) throw cuda_exception::build("cudaMemcpyToSymbol failed", res);
return reinterpret_cast<int8_t *>(constantPtr) + constantOffset;
}
}
ConstantDataBuffer *ConstantHelper::constantBuffer(const ConstantDescriptor &descriptor, sd::DataType dataType) {
const auto deviceId = getCurrentDevice();
// all cache modifications are synchronous
_mutexHolder.lock();
if (_cache[deviceId].count(descriptor) == 0) {
_cache[deviceId][descriptor] = new ConstantHolder();
}
auto holder = _cache[deviceId][descriptor];
// release cache lock
_mutexHolder.unlock();
ConstantDataBuffer *result;
// access to this holder instance is synchronous
std::lock_guard<std::mutex> lock(*holder->mutex());
if (holder->hasBuffer(dataType)) {
result = holder->getConstantDataBuffer(dataType);
} else {
auto numBytes = descriptor.length() * DataTypeUtils::sizeOf(dataType);
auto cbuff = std::make_shared<PointerWrapper>(new int8_t[numBytes], std::make_shared<PrimaryPointerDeallocator>());
_counters[deviceId] += numBytes;
// create buffer with this dtype
if (descriptor.isFloat()) {
BUILD_DOUBLE_SELECTOR(
sd::DataType::DOUBLE, dataType, sd::SpecialTypeConverter::convertGeneric,
(nullptr, const_cast<double *>(descriptor.floatValues().data()), descriptor.length(), cbuff->pointer()),
(sd::DataType::DOUBLE, double), SD_COMMON_TYPES);
} else if (descriptor.isInteger()) {
BUILD_DOUBLE_SELECTOR(sd::DataType::INT64, dataType, sd::SpecialTypeConverter::convertGeneric,
(nullptr, const_cast<sd::LongType *>(descriptor.integerValues().data()),
descriptor.length(), cbuff->pointer()),
(sd::DataType::INT64, sd::LongType), SD_COMMON_TYPES);
}
// we don't have deallocator here.
// TODO: we probably want to make use deallocator here, if we're not using constant memory
auto dbuff = std::make_shared<PointerWrapper>(
replicatePointer(cbuff->pointer(), descriptor.length() * DataTypeUtils::sizeOf(dataType)));
ConstantDataBuffer dataBuffer(cbuff, dbuff, descriptor.length(), dataType);
holder->addBuffer(dataBuffer, dataType);
result = holder->getConstantDataBuffer(dataType);
}
return result;
}
sd::LongType ConstantHelper::getCachedAmount(int deviceId) {
int numDevices = getNumberOfDevices();
if (deviceId > numDevices || deviceId < 0)
return 0L;
else
return _counters[deviceId];
}
} // namespace sd
|
f5cf2e7899ca237eabc9346198e0a5b194df49bb.hip | // !!! This is a file automatically generated by hipify!!!
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <hip/hip_runtime.h>
#define N 512
//
// kernel routine
//
__global__ void add_threads(int *a, int *b, int *c)
{
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
//
// main code
//
//int main(int argc, char **argv)
int main(void)
{
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
int size = N * sizeof(int);
time_t t;
printf("DEBUG: Size of 'int' type: %lu\n", sizeof(int));
srand((unsigned) time(&t));
// initialise card
//cutilDeviceInit(argc, argv);
// allocate device copies of a, b, c
hipMalloc( (void**)&dev_a, size );
hipMalloc( (void**)&dev_b, size );
hipMalloc( (void**)&dev_c, size );
a = (int*)malloc( size );
b = (int*)malloc( size );
c = (int*)malloc( size );
for (int i=0; i < N; ++i)
{
#if 0
a[i] = rand()%N;
b[i] = rand()%N;
#else
a[i] = 5;
b[i] = 5;
#endif
}
printf("DEBUG: a[%d]=%d, b[%d]=%d\n",0, a[0], 0, b[0]);
printf("DEBUG: a[%d]=%d, b[%d]=%d\n",N-1, a[N-1], N-1, b[N-1]);
// copy inputs to device
hipMemcpy( dev_a, a, size, hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, size, hipMemcpyHostToDevice );
printf("INFO: Launching CUDA kernel: add_block with blocks=%d, threads=%d...", 1, N);
// launch add() kernel with N parallel blocks
hipLaunchKernelGGL(( add_threads), dim3(1), dim3(N) , 0, 0, dev_a, dev_b, dev_c );
printf(" Done\n");
// copy device result back to host copy of c
hipMemcpy( c, dev_c, size, hipMemcpyDeviceToHost );
#if 1
for (int i=0; i<N; i++)
{
if (fabs(a[i]+b[i]-c[i]) > 1e-5)
{
printf("ERROR: *** FAILED ***\n");
break;
} else
{
if (i == (N -1))
printf("INFO: PASS\n");
}
//printf("Checking results %d\n", a[i]+b[i]-c[i]);
}
#endif
#if 1
printf("DEBUG: a[0]=%d, b[0]=%d, c[0]=%d\n", a[0], b[0], c[0]);
printf("DEBUG: a[%d]=%d, b[%d]=%d, c[%d]=%d\n", N-1, a[N-1], N-1, b[N-1], N-1, c[N-1]);
//printf("Checking results %d\n", a[0]+b[0]-c[0]);
#endif
free( a );
free( b );
free( c );
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
hipDeviceReset();
return 0;
}
| f5cf2e7899ca237eabc9346198e0a5b194df49bb.cu | //
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <cuda_runtime.h>
#define N 512
//
// kernel routine
//
__global__ void add_threads(int *a, int *b, int *c)
{
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
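// Note: each thread handles exactly one element via threadIdx.x, so this kernel only works
// when launched as a single block whose size equals the vector length (N = 512 here, well
// under the usual 1024-thread-per-block limit).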
//
// main code
//
//int main(int argc, char **argv)
int main(void)
{
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
int size = N * sizeof(int);
time_t t;
printf("DEBUG: Size of 'int' type: %lu\n", sizeof(int));
srand((unsigned) time(&t));
// initialise card
//cutilDeviceInit(argc, argv);
// allocate device copies of a, b, c
cudaMalloc( (void**)&dev_a, size );
cudaMalloc( (void**)&dev_b, size );
cudaMalloc( (void**)&dev_c, size );
a = (int*)malloc( size );
b = (int*)malloc( size );
c = (int*)malloc( size );
for (int i=0; i < N; ++i)
{
#if 0
a[i] = rand()%N;
b[i] = rand()%N;
#else
a[i] = 5;
b[i] = 5;
#endif
}
printf("DEBUG: a[%d]=%d, b[%d]=%d\n",0, a[0], 0, b[0]);
printf("DEBUG: a[%d]=%d, b[%d]=%d\n",N-1, a[N-1], N-1, b[N-1]);
// copy inputs to device
cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice );
printf("INFO: Launching CUDA kernel: add_block with blocks=%d, threads=%d...", 1, N);
// launch add() kernel with N parallel blocks
add_threads<<< 1, N >>>( dev_a, dev_b, dev_c );
printf(" Done\n");
// copy device result back to host copy of c
cudaMemcpy( c, dev_c, size, cudaMemcpyDeviceToHost );
#if 1
for (int i=0; i<N; i++)
{
if (fabs(a[i]+b[i]-c[i]) > 1e-5)
{
printf("ERROR: *** FAILED ***\n");
break;
} else
{
if (i == (N -1))
printf("INFO: PASS\n");
}
//printf("Checking results %d\n", a[i]+b[i]-c[i]);
}
#endif
#if 1
printf("DEBUG: a[0]=%d, b[0]=%d, c[0]=%d\n", a[0], b[0], c[0]);
printf("DEBUG: a[%d]=%d, b[%d]=%d, c[%d]=%d\n", N-1, a[N-1], N-1, b[N-1], N-1, c[N-1]);
//printf("Checking results %d\n", a[0]+b[0]-c[0]);
#endif
free( a );
free( b );
free( c );
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
cudaDeviceReset();
return 0;
}
|
61cdd34f1a4d667c949b4d0d58b70175368a4246.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <chrono>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <stdio.h>
#define LENGTH 10
#define TIME 5
#define STEP_X 0.05
#define STEP_T 0.001
#ifdef WIN32
#define GNUPLOT_NAME "pgnuplot -persist"
#else
#define GNUPLOT_NAME "gnuplot -persist"
#endif
static double *hostData = nullptr, *hostBuffer = nullptr;
static double *devData = nullptr, *devBuffer = nullptr;
static void _free() {
if (::hostData != nullptr)
std::free((void *)::hostData);
if (::hostBuffer != nullptr)
std::free((void *)::hostBuffer);
if (::devData != nullptr)
hipFree((void *)::devData);
if (::devBuffer != nullptr)
hipFree((void *)::devBuffer);
}
/*
* CUDA errors catching block
*/
static void _checkCudaErrorAux(const char *, unsigned, const char *, hipError_t);
#define cudaCheck(value) _checkCudaErrorAux(__FILE__, __LINE__, #value, value)
static void _checkCudaErrorAux(const char *file, unsigned line, const char *statement, hipError_t err) {
if (err == hipSuccess)
return;
std::cerr << statement << " returned " << hipGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl;
system("pause");
_free();
exit(1);
}
/*
* CUDA kernel block
*/
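// The kernels below advance the 1D heat equation u_t = u_xx with the explicit (FTCS) update
// u_i <- u_i + (dt/dx^2) * (u_{i+1} - 2*u_i + u_{i-1}), holding the right boundary at 5.0.
// Stability requires dt/dx^2 <= 1/2; with the defaults above, STEP_T/STEP_X^2 = 0.001/0.0025 = 0.4,
// so the scheme is stable.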
__global__ void gpuWork(double *data, double *buffer, const std::size_t size,
const double stepX, const double stepT, const double maxTime) {
auto idx = threadIdx.x + blockIdx.x * blockDim.x;
for (auto i = 0.0; i < maxTime; i += stepT) {
buffer[size - 1] = 5.0;
if (idx < size - 1 && idx > 0)
buffer[idx] = ((data[idx + 1] - 2.0 * data[idx] + data[idx - 1]) * stepT / (stepX * stepX)) + data[idx];
__syncthreads();
if (idx < size)
data[idx] = buffer[idx];
__syncthreads();
}
}
__global__ void gpuWorkOptimized(double * __restrict__ data, double * __restrict__ buffer, const std::size_t size,
const double stepX, const double stepT, const double maxTime) {
for (auto i = 0.0; i < maxTime; i += stepT) {
auto idx = threadIdx.x + blockIdx.x * blockDim.x;
buffer[size - 1] = 5.0;
if (idx < size - 1 && idx > 0)
buffer[idx] = ((data[idx + 1] - 2.0 * data[idx] + data[idx - 1]) * stepT / (stepX * stepX)) + data[idx];
__syncthreads();
if (idx < size)
data[idx] = buffer[idx];
__syncthreads();
}
}
/*
* CPU block
*/
#pragma omp parallel
void cpuWork(double *data, double *buffer, const std::size_t size,
const double stepX, const double stepT, const double maxTime) {
#pragma omp for
for (auto i = 0.0; i < maxTime; i += stepT) {
buffer[size - 1] = 5.0;
#pragma omp for
for (auto i = 1; i < size - 1; i++)
buffer[i] = ((data[i + 1] - 2.0 * data[i] + data[i - 1]) * stepT / (stepX * stepX)) + data[i];
std::copy(buffer, buffer + size, data);
}
}
/*
* Init
*/
int init(std::size_t size) {
::hostData = (double *)std::calloc(size, sizeof(double));
if (!::hostData)
return 1;
::hostBuffer = (double *)std::calloc(size, sizeof(double));
if (!::hostBuffer)
return 1;
cudaCheck(hipMalloc((void **)&::devData, size * sizeof(double)));
cudaCheck(hipMalloc((void **)&::devBuffer, size * sizeof(double)));
std::memset((void *)::hostData, 0, size * sizeof(double));
std::memset((void *)::hostBuffer, 0, size * sizeof(double));
return 0;
}
/*
* Print
*/
void consolePrint(const double *result, std::size_t size, double stepX) {
auto x = 0.0;
for (auto i = 0; i < size; i++) {
std::cout << std::setw(6) << std::left << x << std::right << result[i] << std::endl;
x += stepX;
}
}
int gnuplotPrint(const double *result, std::size_t size, double stepX) {
FILE *gpPipe = nullptr;
#if defined _WIN32
gpPipe = _popen(GNUPLOT_NAME, "w");
#else
gpPipe = popen(GNUPLOT_NAME, "w");
#endif
if (gpPipe == NULL)
return 1;
fprintf(gpPipe, "plot '-'\n");
auto x = 0.0;
for (auto i = 0; i < size; i++) {
std::cout << x << " " << hostData[i] << std::endl;
fprintf(gpPipe, "%f\t%f\n", x, hostData[i]);
x += stepX;
}
std::cout << std::endl;
fprintf(gpPipe, "%s\n", "e");
fflush(gpPipe);
// Waiting for user key input
std::cin.clear();
std::cin.ignore(std::cin.rdbuf()->in_avail());
std::cin.get();
#if defined _WIN32
_pclose(gpPipe);
#else
pclose(gpPipe);
#endif
return 0;
}
int filePrint(const char *filename, const double *result, const std::size_t size, double stepX) {
std::ofstream ofs(filename, std::ios_base::out | std::ios_base::trunc);
if (!ofs.is_open())
return 1;
ofs << "plot '-'" << std::endl;
auto x = 0.0;
for (auto i = 0; i < size; i++) {
ofs << x << "\t" << result[i] << std::endl;
x += stepX;
}
ofs << "e" << std::endl;
return 0;
}
/*
* Main
*/
int main() {
const size_t length = LENGTH;
const double time = TIME;
const double stepX = STEP_X; // Length (x coord) increment
const double stepT = STEP_T; // Time increment
const std::size_t nPoints = static_cast<std::size_t>(length / stepX);
const std::size_t size = nPoints * sizeof(double);
if (init(nPoints)) {
_free();
return 1;
}
cudaCheck(hipMemcpy(devData, hostData, size, hipMemcpyHostToDevice));
cudaCheck(hipMemcpy(devBuffer, hostBuffer, size, hipMemcpyHostToDevice));
/*
* CPU test
*/
auto beginTime = std::chrono::steady_clock::now();
cpuWork(hostData, hostBuffer, nPoints, stepX, stepT, time);
auto chronoTimeCPU = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - beginTime).count();
if (filePrint("cpu_plot.txt", hostData, nPoints, stepX)) {
_free();
return 1;
}
/*
* GPU test
*/
dim3 nThreads(256);
dim3 nBlocks(1);
/*
* Default kernel function
*/
beginTime = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( gpuWork) , dim3(nBlocks), dim3(nThreads), 0, 0, devData, devBuffer, nPoints, stepX, stepT, time);
// kernel launches are asynchronous: wait for completion before stopping the timer
cudaCheck(hipDeviceSynchronize());
auto chronoTimeGPU = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - beginTime).count();
// Result to array
cudaCheck(hipMemcpy(hostData, devData, size, hipMemcpyDeviceToHost));
/*
* Kernel function optimization
*/
hipFuncSetCacheConfig(gpuWorkOptimized, hipFuncCachePreferL1);
beginTime = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( gpuWorkOptimized) , dim3(nBlocks), dim3(nThreads), 0, 0, devData, devBuffer, nPoints, stepX, stepT, time);
// kernel launches are asynchronous: wait for completion before stopping the timer
cudaCheck(hipDeviceSynchronize());
auto chronoTimeGPUOp = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - beginTime).count();
/*
* Output
*/
consolePrint(hostData, nPoints, stepX);
std::cout << std::endl << std::endl;
/*if (gnuplotPrint(hostData, nPoints, stepX)) {
_free();
return 1;
}*/
if (filePrint("gpu_plot.txt", hostData, nPoints, stepX)) {
_free();
return 1;
}
std::cout << std::setw(20) << std::left << "CPU time " << chronoTimeCPU << " us" << std::endl;
std::cout << std::setw(20) << std::left << "GPU time " << chronoTimeGPU << " us" << std::endl;
std::cout << std::setw(20) << std::left << "GPU optimized time " << chronoTimeGPUOp << " us" << std::endl;
/*
* Memory free
*/
cudaCheck(hipFree((void *)devData));
cudaCheck(hipFree((void *)devBuffer));
_free();
system("pause");
return 0;
}
| 61cdd34f1a4d667c949b4d0d58b70175368a4246.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <chrono>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <stdio.h>
#define LENGTH 10
#define TIME 5
#define STEP_X 0.05
#define STEP_T 0.001
#ifdef WIN32
#define GNUPLOT_NAME "pgnuplot -persist"
#else
#define GNUPLOT_NAME "gnuplot -persist"
#endif
static double *hostData = nullptr, *hostBuffer = nullptr;
static double *devData = nullptr, *devBuffer = nullptr;
static void _free() {
if (::hostData != nullptr)
std::free((void *)::hostData);
if (::hostBuffer != nullptr)
std::free((void *)::hostBuffer);
if (::devData != nullptr)
cudaFree((void *)::devData);
if (::devBuffer != nullptr)
cudaFree((void *)::devBuffer);
}
/*
* CUDA errors catching block
*/
static void _checkCudaErrorAux(const char *, unsigned, const char *, cudaError_t);
#define cudaCheck(value) _checkCudaErrorAux(__FILE__, __LINE__, #value, value)
static void _checkCudaErrorAux(const char *file, unsigned line, const char *statement, cudaError_t err) {
if (err == cudaSuccess)
return;
std::cerr << statement << " returned " << cudaGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl;
system("pause");
_free();
exit(1);
}
/*
* CUDA kernel block
*/
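// The kernels below advance the 1D heat equation u_t = u_xx with the explicit (FTCS) update
// u_i <- u_i + (dt/dx^2) * (u_{i+1} - 2*u_i + u_{i-1}), holding the right boundary at 5.0.
// Stability requires dt/dx^2 <= 1/2; with the defaults above, STEP_T/STEP_X^2 = 0.001/0.0025 = 0.4,
// so the scheme is stable.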
__global__ void gpuWork(double *data, double *buffer, const std::size_t size,
const double stepX, const double stepT, const double maxTime) {
auto idx = threadIdx.x + blockIdx.x * blockDim.x;
for (auto i = 0.0; i < maxTime; i += stepT) {
buffer[size - 1] = 5.0;
if (idx < size - 1 && idx > 0)
buffer[idx] = ((data[idx + 1] - 2.0 * data[idx] + data[idx - 1]) * stepT / (stepX * stepX)) + data[idx];
__syncthreads();
if (idx < size)
data[idx] = buffer[idx];
__syncthreads();
}
}
__global__ void gpuWorkOptimized(double * __restrict__ data, double * __restrict__ buffer, const std::size_t size,
const double stepX, const double stepT, const double maxTime) {
for (auto i = 0.0; i < maxTime; i += stepT) {
auto idx = threadIdx.x + blockIdx.x * blockDim.x;
buffer[size - 1] = 5.0;
if (idx < size - 1 && idx > 0)
buffer[idx] = ((data[idx + 1] - 2.0 * data[idx] + data[idx - 1]) * stepT / (stepX * stepX)) + data[idx];
__syncthreads();
if (idx < size)
data[idx] = buffer[idx];
__syncthreads();
}
}
/*
* CPU block
*/
#pragma omp parallel
void cpuWork(double *data, double *buffer, const std::size_t size,
const double stepX, const double stepT, const double maxTime) {
#pragma omp for
for (auto i = 0.0; i < maxTime; i += stepT) {
buffer[size - 1] = 5.0;
#pragma omp for
for (auto i = 1; i < size - 1; i++)
buffer[i] = ((data[i + 1] - 2.0 * data[i] + data[i - 1]) * stepT / (stepX * stepX)) + data[i];
std::copy(buffer, buffer + size, data);
}
}
/*
* Init
*/
int init(std::size_t size) {
::hostData = (double *)std::calloc(size, sizeof(double));
if (!::hostData)
return 1;
::hostBuffer = (double *)std::calloc(size, sizeof(double));
if (!::hostBuffer)
return 1;
cudaCheck(cudaMalloc((void **)&::devData, size * sizeof(double)));
cudaCheck(cudaMalloc((void **)&::devBuffer, size * sizeof(double)));
std::memset((void *)::hostData, 0, size * sizeof(double));
std::memset((void *)::hostBuffer, 0, size * sizeof(double));
return 0;
}
/*
* Print
*/
void consolePrint(const double *result, std::size_t size, double stepX) {
auto x = 0.0;
for (auto i = 0; i < size; i++) {
std::cout << std::setw(6) << std::left << x << std::right << result[i] << std::endl;
x += stepX;
}
}
int gnuplotPrint(const double *result, std::size_t size, double stepX) {
FILE *gpPipe = nullptr;
#if defined _WIN32
gpPipe = _popen(GNUPLOT_NAME, "w");
#else
gpPipe = popen(GNUPLOT_NAME, "w");
#endif
if (gpPipe == NULL)
return 1;
fprintf(gpPipe, "plot '-'\n");
auto x = 0.0;
for (auto i = 0; i < size; i++) {
std::cout << x << " " << hostData[i] << std::endl;
fprintf(gpPipe, "%f\t%f\n", x, hostData[i]);
x += stepX;
}
std::cout << std::endl;
fprintf(gpPipe, "%s\n", "e");
fflush(gpPipe);
// Waiting for user key input
std::cin.clear();
std::cin.ignore(std::cin.rdbuf()->in_avail());
std::cin.get();
#if defined _WIN32
_pclose(gpPipe);
#else
pclose(gpPipe);
#endif
return 0;
}
int filePrint(const char *filename, const double *result, const std::size_t size, double stepX) {
std::ofstream ofs(filename, std::ios_base::out | std::ios_base::trunc);
if (!ofs.is_open())
return 1;
ofs << "plot '-'" << std::endl;
auto x = 0.0;
for (auto i = 0; i < size; i++) {
ofs << x << "\t" << result[i] << std::endl;
x += stepX;
}
ofs << "e" << std::endl;
return 0;
}
/*
* Main
*/
int main() {
const size_t length = LENGTH;
const double time = TIME;
const double stepX = STEP_X; // Length (x coord) increment
const double stepT = STEP_T; // Time increment
const std::size_t nPoints = static_cast<std::size_t>(length / stepX);
const std::size_t size = nPoints * sizeof(double);
if (init(nPoints)) {
_free();
return 1;
}
cudaCheck(cudaMemcpy(devData, hostData, size, cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpy(devBuffer, hostBuffer, size, cudaMemcpyHostToDevice));
/*
* CPU test
*/
auto beginTime = std::chrono::steady_clock::now();
cpuWork(hostData, hostBuffer, nPoints, stepX, stepT, time);
auto chronoTimeCPU = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - beginTime).count();
if (filePrint("cpu_plot.txt", hostData, nPoints, stepX)) {
_free();
return 1;
}
/*
* GPU test
*/
dim3 nThreads(256);
dim3 nBlocks(1);
/*
* Default kernel function
*/
beginTime = std::chrono::steady_clock::now();
gpuWork <<<nBlocks, nThreads>>> (devData, devBuffer, nPoints, stepX, stepT, time);
// kernel launches are asynchronous: wait for completion before stopping the timer
cudaCheck(cudaDeviceSynchronize());
auto chronoTimeGPU = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - beginTime).count();
// Result to array
cudaCheck(cudaMemcpy(hostData, devData, size, cudaMemcpyDeviceToHost));
/*
* Kernel function optimization
*/
cudaFuncSetCacheConfig(gpuWorkOptimized, cudaFuncCachePreferL1);
beginTime = std::chrono::steady_clock::now();
gpuWorkOptimized <<<nBlocks, nThreads>>> (devData, devBuffer, nPoints, stepX, stepT, time);
// kernel launches are asynchronous: wait for completion before stopping the timer
cudaCheck(cudaDeviceSynchronize());
auto chronoTimeGPUOp = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - beginTime).count();
/*
* Output
*/
consolePrint(hostData, nPoints, stepX);
std::cout << std::endl << std::endl;
/*if (gnuplotPrint(hostData, nPoints, stepX)) {
_free();
return 1;
}*/
if (filePrint("gpu_plot.txt", hostData, nPoints, stepX)) {
_free();
return 1;
}
std::cout << std::setw(20) << std::left << "CPU time " << chronoTimeCPU << " us" << std::endl;
std::cout << std::setw(20) << std::left << "GPU time " << chronoTimeGPU << " us" << std::endl;
std::cout << std::setw(20) << std::left << "GPU optimized time " << chronoTimeGPUOp << " us" << std::endl;
/*
* Memory free
*/
cudaCheck(cudaFree((void *)devData));
cudaCheck(cudaFree((void *)devBuffer));
_free();
system("pause");
return 0;
}
|
ec1a106473b0cfa042c956fd86262a8c35042df2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include <inttypes.h>
#include <stdio.h>
#include "dada_cuda.h"
#include "mopsr_cuda.h"
#define BEAMS_PER_LOOP 8
#define WARP_SIZE 32
#define NWARPS_PER_BLOCK 32
#define NSUM 32
//#define _GDEBUG 1
#ifdef __CUDA_ARCH__
#if (__CUDA_ARCH__ >= 300)
#define HAVE_SHFL
#else
#define NO_SHFL
#endif
#endif
// large parts of these kernels require SHFL instructions that are
// only available in sm_30 (kepler) or greater
// each thread loads 16 x 2bytes into shm
__global__ void input_transpose_TFS_to_FST (
const int16_t * input, int16_t * output,
const unsigned nchan, const unsigned nant,
const unsigned nval, const unsigned nval_per_thread,
const unsigned in_block_stride, const unsigned nsamp_per_block,
const unsigned out_chanant_stride)
{
extern __shared__ int16_t sdata[];
const unsigned warp_num = threadIdx.x / WARP_SIZE;
const unsigned warp_idx = threadIdx.x % WARP_SIZE;
const unsigned offset = (warp_num * (WARP_SIZE * nval_per_thread)) + warp_idx;
unsigned in_idx = (blockIdx.x * blockDim.x * nval_per_thread) + offset;
unsigned sin_idx = offset;
unsigned ival;
for (ival=0; ival<nval_per_thread; ival++)
{
if (in_idx < nval * nval_per_thread)
sdata[sin_idx] = input[in_idx];
else
sdata[sin_idx] = 0;
in_idx += WARP_SIZE;
sin_idx += WARP_SIZE;
}
__syncthreads();
// at this point we have had 1024 threads each load 40 bytes (20 * 2) 40960 bytes / block
// the sdata is order as TFS
// For 40 channels, 16 ant, this is 32 time samples (nice)
// for 40 channels, 4 ant, this is 128 time samples
// each thread in a warp will write out 20 sets of time samples (40 chan, 16 ant => 640)
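// worked example (nchan=40, nant=16, 1024 threads): nval_per_thread = (32*640)/1024 = 20,
// so warp w writes chanants [20w, 20w+20) and lane t within the warp writes time sample
// (blockIdx.x * 32 + t) of each of those chanants, one out_chanant_stride apart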
unsigned nchanant = nchan * nant;
// starting (first of the 20 ichan ant)
unsigned ichanant = warp_num * nval_per_thread;
// the time sample this thread will write out
unsigned isamp = warp_idx;
// determine which shared memory index for this output ichan and isamp
unsigned sout_idx = (isamp * nchanant) + ichanant;
// determine the output index for this thread
unsigned out_idx = (ichanant * out_chanant_stride) + (blockIdx.x * nsamp_per_block) + (isamp);
for (ival=0; ival<nval_per_thread; ival++)
{
output[out_idx] = sdata[sout_idx];
sout_idx ++;
out_idx += out_chanant_stride;
}
return;
}
__global__ void input_transpose_TFS_to_FST_hires (
int16_t * in, int16_t * out,
const unsigned nchan, const unsigned nant,
const unsigned nval, const unsigned nval_per_thread,
const unsigned samp_stride, const unsigned chan_block_stride,
const unsigned out_chanant_stride)
{
// for loaded data samples
extern __shared__ int16_t sdata[];
const int nsamp_per_block = 32;
const int nchan_per_block = 16;
const int nchanant_per_block = nant * nchan_per_block;
const int warp_num = threadIdx.x / 32;
const int warp_idx = threadIdx.x & 0x1F; // % 32
// each warp reads a time sample, with the warp threads each reading the antenna and channels required
// offsets time sample offset + channel block offset + the chanant
unsigned idx = (blockIdx.x * nsamp_per_block + warp_num) * samp_stride + (blockIdx.y * chan_block_stride) + warp_idx;
// shm write offset: this warp's time-sample row plus this thread's chanant within that row
unsigned sdx = (nchanant_per_block * warp_num) + warp_idx;// + (warp_num * 2);
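// each warp owns one of the 32 time samples in this block and loads that sample's chanants
// for this channel block, 32 values per iteration of the loop below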
// read the TFS input to TFS shared memory
for (unsigned i=0; i<nval_per_thread; i++)
{
if (idx < nval)
{
sdata[sdx] = in[idx];
idx += 32;
sdx += 32;
}
}
__syncthreads();
// each warp will write out 32 time samples for a single antenna, for a number of channels
const int ant = warp_num % nant;
int ichan = nval_per_thread * (warp_num / nant);
int ichanant = ichan * nant + ant;
// offset for this thread in shared memory
// sample * sample_stride_in_shm + chanant offset + shm bank trick
sdx = (warp_idx * nant * nchan_per_block) + ichanant;// + (warp_idx * 2);
// output chanant for this warp
const int ochanant = (blockIdx.y * nchan_per_block * nant) + ichanant;
int osamp = (blockIdx.x * nsamp_per_block) + warp_idx;
int64_t odx = ochanant * out_chanant_stride + osamp;
// loop over channels
for (unsigned i=0; i<nval_per_thread; i++)
{
out[odx] = sdata[sdx];
sdx += nant;
odx += out_chanant_stride * nant;
}
}
/*
* Transpose a block of data from TFS to FST
*/
void mopsr_input_transpose_TFS_to_FST (hipStream_t stream,
void * d_in, void * d_out, uint64_t nbytes, unsigned nchan, unsigned nant)
{
const unsigned ndim = 2;
unsigned nthread = 1024;
// have issues with this kernel if the nant is 1 and nchan 40, try
// changing nthread to be a divisor of nval_per_block
// since we want a warp of 32 threads to write out just 1 chunk
const unsigned nsamp_per_block = 32;
const unsigned nval_per_block = nsamp_per_block * nchan * nant;
// special case where not a clean multiple [TODO validate this!]
if (nval_per_block % nthread)
{
unsigned numerator = nval_per_block;
while ( numerator > nthread )
numerator /= 2;
nthread = numerator;
}
unsigned nval_per_thread = nval_per_block / nthread;
const uint64_t ndat = nbytes / (nchan * nant * ndim);
// the total number of values we have to process is
const uint64_t nval = nbytes / (ndim * nval_per_thread);
int nblocks = nval / nthread;
if (nval % nthread)
nblocks++;
const size_t sdata_bytes = nthread * ndim * nval_per_thread;
const unsigned in_block_stride = nthread * nval_per_thread;
const unsigned out_chanant_stride = ndat;
#ifdef _GDEBUG
fprintf (stderr, "input_transpose_TFS_to_FST: nval_per_block=%u, nval_per_thread=%u\n", nval_per_block, nval_per_thread);
fprintf (stderr, "input_transpose_TFS_to_FST: nbytes=%lu, ndat=%lu, nval=%lu\n", nbytes, ndat, nval);
fprintf (stderr, "input_transpose_TFS_to_FST: nthread=%d, nblocks=%d\n", nthread, nblocks);
fprintf (stderr, "input_transpose_TFS_to_FST: input=%p output=%p sdata_bytes=%ld, in_block_stride=%d, nsamp_per_block=%u out_chan_stride=%u\n", d_in, d_out, sdata_bytes, in_block_stride, nsamp_per_block, out_chanant_stride);
#endif
hipLaunchKernelGGL(( input_transpose_TFS_to_FST), dim3(nblocks),dim3(nthread),sdata_bytes,stream, (int16_t *)d_in, (int16_t *) d_out, nchan, nant, nval, nval_per_thread, in_block_stride, nsamp_per_block, out_chanant_stride);
}
void mopsr_input_transpose_TFS_to_FST_hires (hipStream_t stream,
void * d_in, void * d_out, uint64_t nbytes, unsigned nchan, unsigned nant)
{
const unsigned ndim = 2;
unsigned nthread = 1024;
// process 32 samples and 16 channels in a block
const unsigned nsamp_per_block = 32;
const unsigned nchan_per_block = 16;
const unsigned nchanblocks = nchan / nchan_per_block;
const unsigned nval_per_block = nsamp_per_block * nchan_per_block * nant;
const uint64_t nsamp = nbytes / (nchan * nant * ndim);
if (nval_per_block % nthread)
{
unsigned numerator = nval_per_block;
while ( numerator > nthread )
numerator /= 2;
nthread = numerator;
}
unsigned nval_per_thread = nval_per_block / nthread;
// the total number of values we have to process is
const uint64_t nval = nbytes / ndim;
// the total number of samples is
dim3 blocks = dim3 (nsamp / nsamp_per_block, nchanblocks);
if (nsamp % nsamp_per_block)
blocks.x++;
const size_t sdata_bytes = (nsamp_per_block * nchan_per_block * nant * ndim) + 256;
// nbytes of bytes different (for input) between each block of data
const unsigned samp_stride = nchan * nant;
const unsigned chan_block_stride = nchan_per_block * nant;
#ifdef _GDEBUG
fprintf (stderr, "input_transpose_TFS_to_FST: nval_per_block=%u, nval_per_thread=%u\n", nval_per_block, nval_per_thread);
fprintf (stderr, "input_transpose_TFS_to_FST: nbytes=%lu, nval=%lu\n", nbytes, nval);
fprintf (stderr, "input_transpose_TFS_to_FST: input=%p output=%p sdata_bytes=%ld, nsamp_per_block=%u\n", d_in, d_out, sdata_bytes, nsamp_per_block);
#endif
hipLaunchKernelGGL(( input_transpose_TFS_to_FST_hires), dim3(blocks),dim3(nthread),sdata_bytes,stream, (int16_t *) d_in,
(int16_t *) d_out, nchan, nant, nval, nval_per_thread, samp_stride, chan_block_stride, nsamp);
#ifdef _GDEBUG
check_error_stream("input_transpose_TFS_to_FST_hires", stream);
#endif
}
// this will work best in FST format
__global__ void mopsr_input_ant_sum_kernel (const int16_t * input, int16_t * output, const uint64_t nsamp, const unsigned nchan, const unsigned nant)
{
const unsigned ichan = blockIdx.y;
const uint64_t isamp = blockIdx.x * blockDim.x + threadIdx.x;
if (isamp >= nsamp)
return;
unsigned i_idx = (ichan * nsamp * nant) + isamp;
const unsigned o_idx = (ichan * nsamp) + isamp;
// each thread will load data for nant into register memory
int16_t ant16[MOPSR_MAX_NANT_PER_AQ];
unsigned iant;
for (iant=0; iant<nant; iant++)
{
ant16[iant] = input[i_idx];
i_idx += nsamp;
}
float re = 0;
float im = 0;
int8_t * ant8 = (int8_t *) ant16;
for (iant=0; iant<nant; iant++)
{
if (iant % 2 == 0)
{
re += (float) ant8[2*iant];
im += (float) ant8[2*iant+1];
}
}
ant8[0] = (int8_t) rintf (re);
ant8[1] = (int8_t) rintf (im);
output[o_idx] = ant16[0];
}
//
// sum all modules in stream together [ORDER FST -> FT]
//
void mopsr_input_sum_ant (hipStream_t stream, void * d_in, void * d_out, uint64_t nbytes, unsigned nchan, unsigned nant)
{
const unsigned ndim = 2;
const uint64_t nsamp = nbytes / (nchan * nant * ndim);
// number of threads that actually load data
const unsigned nthread = 1024;
dim3 blocks (nsamp / nthread, nchan);
if (nsamp % nthread)
blocks.x++;
#ifdef _GDEBUG
fprintf (stderr, "input_ant_sum: bytes=%lu nsamp=%lu\n", nbytes, nsamp);
fprintf (stderr, "input_ant_sum: nchan=%u nant=%u\n", nchan, nant);
fprintf (stderr, "input_ant_sum: blocks.x=%d, blocks.y=%d, blocks.z=%d\n", blocks.x, blocks.y, blocks.z);
#endif
hipLaunchKernelGGL(( mopsr_input_ant_sum_kernel), dim3(blocks), dim3(nthread), 0, stream, (const int16_t *) d_in, (int16_t *) d_out, nsamp, nchan, nant);
#ifdef _GDEBUG
check_error_stream("mopsr_input_ant_sum_kernel", stream);
#endif
}
__global__ void input_transpose_FT_to_TF_kernel (
const int16_t * input, int16_t * output, const uint64_t nsamp,
const unsigned nchan, const unsigned nval, const unsigned nval_per_thread,
const unsigned nsamp_per_block)
{
extern __shared__ int16_t sdata[];
const unsigned warp_num = threadIdx.x / WARP_SIZE;
const unsigned warp_idx = threadIdx.x % WARP_SIZE;
//const unsigned nwarp = blockDim.x / WARP_SIZE;
const unsigned nwarp_chunk_per_chan = nsamp_per_block / WARP_SIZE;
const unsigned iwarp_chunk = warp_num * nval_per_thread;
unsigned ichan = iwarp_chunk / nwarp_chunk_per_chan;
unsigned ichunk = iwarp_chunk % nwarp_chunk_per_chan;
// offset from base pointer to the chanant this warp starts at
uint64_t in_idx = (ichan * nsamp) + (blockIdx.x * nsamp_per_block) + (ichunk * WARP_SIZE) + warp_idx;
// to avoid shm bank conflicts add some padding
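// (the + 2*ichan below skews each channel by one 4-byte bank, so the strided reads in the
// write-out phase hit different banks across a warp)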
unsigned sin_idx = (warp_num * WARP_SIZE * nval_per_thread) + warp_idx + (2 * ichan);
unsigned ival;
//int8_t * tmp = (int8_t*) sdata;
for (ival=0; ival<nval_per_thread; ival++)
{
if (in_idx < nval * nval_per_thread)
sdata[sin_idx] = input[in_idx];
else
sdata[sin_idx] = 0;
//if ((blockIdx.x == 0) && (warp_num == 1))
// printf ("%d.%d.%d sdata[%d]=%d ichunk=%u ichan0=%u\n", blockIdx.x, threadIdx.x, ival, sin_idx, tmp[2*sin_idx], ichunk, ichan);
// shared memory increases linearly
sin_idx += WARP_SIZE;
in_idx += WARP_SIZE;
ichunk++;
// if we are moving channel
if (ichunk >= nwarp_chunk_per_chan)
{
in_idx += (nsamp - nsamp_per_block);
sin_idx += 2;
ichunk = 0;
}
}
__syncthreads();
// starting ichan and isamp or this thread/warp to write out
const unsigned ichan0 = warp_idx;
const unsigned isamp0 = (warp_num * WARP_SIZE * nval_per_thread) / nchan;
const unsigned nchansamp_block = nchan * nsamp_per_block;
// block offset isamp warp offset thread offset
uint64_t out_idx = (blockIdx.x * nchansamp_block) + (isamp0 * nchan) + ichan0;
// chan_offset sample offset
unsigned sout_idx = (ichan0 * nsamp_per_block) + isamp0;
const unsigned thread_stride = WARP_SIZE * nsamp_per_block;
const unsigned thread_rewind = nchansamp_block - 1;
unsigned warp_idat = warp_idx;
for (ival=0; ival<nval_per_thread; ival++)
{
ichan = warp_idat % nchan;
output[out_idx] = sdata[sout_idx + (2*ichan)];
// update the output index
out_idx += WARP_SIZE;
// update our warp idat so we can keep track of ichan
warp_idat += WARP_SIZE;
// update our shared memory output index
sout_idx += thread_stride;
if (sout_idx >= nchansamp_block)
sout_idx -= thread_rewind;
}
}
void mopsr_input_transpose_FT_to_TF (hipStream_t stream, void * d_in, void * d_out, uint64_t nbytes, unsigned nchan)
{
const unsigned ndim = 2;
unsigned nthread = 1024;
// since we want a warp of 32 threads to write out just 1 chunk
const unsigned nsamp_per_block = WARP_SIZE * 4;
const unsigned nval_per_block = nsamp_per_block * nchan;
// special case where not a clean multiple [TODO validate this!]
if (nval_per_block % nthread)
{
unsigned numerator = nval_per_block;
while ( numerator > nthread )
numerator /= 2;
nthread = numerator;
}
unsigned nval_per_thread = nval_per_block / nthread;
const uint64_t nsamp = nbytes / (ndim * nchan);
// the total number of values we have to process is
const uint64_t nval = nbytes / (ndim * nval_per_thread);
int nblocks = nval / nthread;
if (nval % nthread)
nblocks++;
const size_t sdata_bytes = nthread * ndim * nval_per_thread + (2 * nchan);
#ifdef _GDEBUG
fprintf (stderr, "input_transpose_FT_to_TF: nsamp_per_block=%u nval_per_block=%u, nval_per_thread=%u\n", nsamp_per_block, nval_per_block, nval_per_thread);
fprintf (stderr, "input_transpose_FT_to_TF: nbytes=%lu, nsamp=%lu, nval=%lu\n", nbytes, nsamp, nval);
fprintf (stderr, "input_transpose_FT_to_TF: nthread=%d, nblocks=%d\n", nthread, nblocks);
fprintf (stderr, "input_transpose_FT_to_TF: input=%p output=%p sdata_bytes=%ld\n", d_in, d_out, sdata_bytes);
#endif
hipLaunchKernelGGL(( input_transpose_FT_to_TF_kernel), dim3(nblocks),dim3(nthread),sdata_bytes,stream, (int16_t *) d_in, (int16_t *) d_out, nsamp, nchan, nval, nval_per_thread, nsamp_per_block);
#ifdef _GDEBUG
check_error_stream ("input_transpose_FT_to_TF", stream);
#endif
}
__global__ void input_transpose_FST_to_STF (
const int16_t * input, int16_t * output,
const unsigned nchan, const unsigned nant,
const unsigned nval, const unsigned nval_per_thread,
const unsigned nsamp_per_block, const uint64_t out_ant_stride)
{
extern __shared__ int16_t sdata[];
const unsigned warp_num = threadIdx.x / WARP_SIZE;
const unsigned warp_idx = threadIdx.x % WARP_SIZE;
const uint64_t nsamp = nsamp_per_block * gridDim.x;
unsigned isamp = warp_idx;
// offset from base pointer to the chanant this warp starts at
uint64_t in_idx = (blockIdx.x * WARP_SIZE) + (warp_num * nsamp * nval_per_thread) + warp_idx;
unsigned sin_dat = warp_num * nsamp_per_block * nval_per_thread + isamp;
const unsigned nantsamp_block = nant * nsamp_per_block;
const unsigned nchansamp_block = nchan * nsamp_per_block;
const unsigned nchanantsamp_block = nant * nchan * nsamp_per_block;
unsigned ival, ichan, iant, sin_idx;
for (ival=0; ival<nval_per_thread; ival++)
{
ichan = sin_dat / nantsamp_block;
iant = (sin_dat % nantsamp_block) / nsamp_per_block;
// note that we add ichan to the shm index to avoid shm bank conflicts on shm read (later)
sin_idx = (ichan * nantsamp_block) + (iant * nsamp_per_block) + isamp + (2 * ichan);
if (in_idx < nval * nval_per_thread)
sdata[sin_idx] = input[in_idx];
else
sdata[sin_idx] = 0;
sin_dat += nsamp_per_block;
in_idx += nsamp;
}
__syncthreads();
// antenna for this WARP
iant = (warp_num * nant) / WARP_SIZE;
// shared memory strides
const unsigned swarp_stride = nval_per_thread * WARP_SIZE; // number of dats per warp
const unsigned sant_base = iant * nsamp_per_block;
// starting ichan and isamp or this thread/warp to write out
const unsigned ichan0 = warp_idx;
const unsigned nchansamp_per_warp = (WARP_SIZE * nval_per_thread) / nchan;
const unsigned isamp0 = (warp_num * nchansamp_per_warp) % nsamp_per_block;
const unsigned out_warp_offset = warp_num % (WARP_SIZE / nant);
// ant offset block offset isamp warp offset
uint64_t out_idx = (iant * out_ant_stride) + (blockIdx.x * nchansamp_block) + (out_warp_offset * swarp_stride) + ichan0;
// chan_offset ant offset sample offset
unsigned sout_idx = (ichan0 * nantsamp_block) + sant_base + isamp0;
const unsigned thread_stride = WARP_SIZE * nsamp_per_block * nant;
const unsigned thread_rewind = nchanantsamp_block - 1;
unsigned warp_idat = warp_idx;
for (ival=0; ival<nval_per_thread; ival++)
{
ichan = warp_idat % nchan;
if ((blockIdx.x == 16) && (threadIdx.x < 32))
printf ("[%u] output[%u] = sdata[%d]\n", threadIdx.x, out_idx, sout_idx + 2*ichan);
//output[out_idx] = sdata[sout_idx + 2*ichan];
// update the output index
out_idx += WARP_SIZE;
// update our warp idat so we can keep track of ichan
warp_idat += WARP_SIZE;
// update our shared memory output index
sout_idx += thread_stride;
if (sout_idx >= nchanantsamp_block)
sout_idx -= thread_rewind;
}
}
void mopsr_input_transpose_FST_to_STF (hipStream_t stream,
void * d_in, void * d_out, uint64_t nbytes, unsigned nchan, unsigned nant)
{
const unsigned ndim = 2;
unsigned nthread = 1024;
// have issues with this kernel if the nant is 1 and nchan 40, try
// changing nthread to be a divisor of nval_per_block
// since we want a warp of 32 threads to write out just 1 chunk
const unsigned nsamp_per_block = WARP_SIZE;
const unsigned nval_per_block = nsamp_per_block * nchan * nant;
// special case where not a clean multiple [TODO validate this!]
if (nval_per_block % nthread)
{
unsigned numerator = nval_per_block;
while ( numerator > nthread )
numerator /= 2;
nthread = numerator;
}
unsigned nval_per_thread = nval_per_block / nthread;
// the total number of values we have to process is
const uint64_t nval = nbytes / (ndim * nval_per_thread);
int nblocks = nval / nthread;
if (nval % nthread)
nblocks++;
const size_t sdata_bytes = nthread * ndim * nval_per_thread + (2 * nchan);
const unsigned out_ant_stride = nbytes / (nant * ndim);
// TODO might need to pass nsamp to kernel!!!
#ifdef _GDEBUG
const uint64_t ndat = nbytes / (nchan * nant * ndim);
fprintf (stderr, "input_transpose_FST_to_STF: nval_per_block=%u, nval_per_thread=%u\n", nval_per_block, nval_per_thread);
fprintf (stderr, "input_transpose_FST_to_STF: nbytes=%lu, ndat=%lu, nval=%lu\n", nbytes, ndat, nval);
fprintf (stderr, "input_transpose_FST_to_STF: nthread=%d, nblocks=%d\n", nthread, nblocks);
fprintf (stderr, "input_transpose_FST_to_STF: input=%p output=%p sdata_bytes=%ld,nsamp_per_block=%u out_ant_stride=%u\n", d_in, d_out, sdata_bytes, nsamp_per_block, out_ant_stride);
#endif
hipLaunchKernelGGL(( input_transpose_FST_to_STF), dim3(nblocks),dim3(nthread),sdata_bytes,stream, (int16_t *)d_in, (int16_t *) d_out, nchan, nant, nval, nval_per_thread, nsamp_per_block, out_ant_stride);
#ifdef _GDEBUG
check_error_stream ("input_transpose_FST_to_STF", stream);
#endif
}
// perform a transpose from FST to FTS order, but with 2 samples paired
// with each other in 32-bit word
__global__ void input_transpose_FST_to_FTS (
const int32_t * input, int32_t * output,
const unsigned nchan, const unsigned nant,
const unsigned nsamp_per_block, const unsigned chan_stride)
{
// to hold 16+1 32-bit packed samples, for all antenna
extern __shared__ int32_t sdata_32t[];
const unsigned warp_num = threadIdx.x / nsamp_per_block;
const unsigned warp_idx = threadIdx.x % nsamp_per_block;
const int warps_per_block = blockDim.x / nsamp_per_block;
const uint64_t nsamp = nsamp_per_block * gridDim.x;
// input offset (ichan * chan_stride) + (iant * nsamp) + (iblock * nsamp_per_block) + isamp
uint64_t idx = (blockIdx.y * chan_stride) + (warp_num * nsamp) + (blockIdx.x * nsamp_per_block) + warp_idx;
// shm offset (iant * nsamp per block) + isamp
unsigned sdx = (warp_num * nsamp_per_block) + warp_idx;
// loop offsets
const unsigned ant_stride = nsamp * warps_per_block;
//const unsigned sin_stride = (WARP_SIZE + 1);
const unsigned sin_stride = nsamp_per_block * warps_per_block;
// read ST data straight into shm, with 1 sample stride offset for bank conflicts
for (unsigned iant=warp_num; iant<nant; iant+=warps_per_block)
{
sdata_32t[sdx] = input[idx];
idx += ant_stride;
sdx += sin_stride;
}
// ensure all blocks have loaded nant * nsamp_per_block samples
__syncthreads();
// write out transposed data, each warp writing out nsamp_per_block antenna
idx = (blockIdx.y * chan_stride) + (blockIdx.x * nsamp_per_block * nant) + threadIdx.x;
// assume nthread == nantenna, output antenna = threadId.x
sdx = threadIdx.x * nsamp_per_block;
//sdx = threadIdx.x * (WARP_SIZE + 1);
for (unsigned isamp=0; isamp<nsamp_per_block; isamp++)
{
output[idx] = sdata_32t[sdx];
// increment to next output sample
idx += nant;
sdx++;
}
}
// transpose each channel from ST to TS order
void mopsr_input_transpose_FST_to_FTS (hipStream_t stream,
void * d_in, void * d_out, uint64_t nbytes, unsigned nchan, unsigned nant)
{
const unsigned ndim = 2;
const uint64_t ndat = nbytes / (nchan * nant * ndim) / 2;
unsigned nthread = nant;
// each block reads 16x2 time samples for each antenna
const unsigned nsamp_per_block = 16;
const unsigned chan_stride = nant * ndat;
dim3 blocks = dim3 (ndat / nsamp_per_block, nchan, 1);
const size_t sdata_bytes = nant * sizeof(int32_t) * (nsamp_per_block + 1);
fprintf (stderr, "input_transpose_FST_to_FTS: nbytes=%lu, ndat=%lu, nant=%u nchan=%u \n", nbytes, ndat, nant, nchan);
fprintf (stderr, "input_transpose_FST_to_FTS: nthread=%d, blocks.x=%d\n", nthread, blocks.x);
fprintf (stderr, "input_transpose_FST_to_FTS: input=%p output=%p sdata_bytes=%ld nsamp_per_block=%u chan_stride=%u\n", d_in, d_out, sdata_bytes, nsamp_per_block, chan_stride);
hipLaunchKernelGGL(( input_transpose_FST_to_FTS), dim3(blocks),dim3(nthread),sdata_bytes,stream, (int32_t *)d_in, (int32_t *) d_out, nchan, nant, nsamp_per_block, chan_stride);
#ifdef _GDEBUG
check_error_stream ("input_transpose_FST_to_FTS", stream);
#endif
}
// scaling factors for antenna
__device__ __constant__ float d_ant_scales [MOPSR_MAX_NANT_PER_AQ];
__global__ void input_rephase (int16_t * input, cuFloatComplex const * __restrict__ corrections,
uint64_t nbytes, const unsigned chan_stride, const unsigned ant_stride)
{
extern __shared__ cuFloatComplex corr_sh[];
const unsigned isamp = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned iant = blockIdx.y;
const unsigned ichan = blockIdx.z;
const unsigned idx = (ichan * chan_stride + iant*ant_stride + isamp);
const unsigned icorr = isamp % MOPSR_UNIQUE_CORRECTIONS;
// all threads in this block will be using the same ant and channel, so we only need
// to load into shm the 32 co-efficients for this ant and channel
if (threadIdx.x < MOPSR_UNIQUE_CORRECTIONS)
corr_sh[icorr] = corrections[ichan*MOPSR_UNIQUE_CORRECTIONS + icorr];
__syncthreads();
// coalesced int16_t read from global memory
int16_t load16 = input[idx];
int8_t * load8 = (int8_t *) &load16;
cuFloatComplex val = make_cuComplex((float) load8[0], (float) load8[1]);
cuFloatComplex res = cuCmulf(val, corr_sh[icorr]);
const float scale = d_ant_scales[iant];
load8[0] = (int8_t) (cuCrealf(res) * scale);
load8[1] = (int8_t) (cuCimagf(res) * scale);
input[idx] = load16;
}
void mopsr_input_rephase (hipStream_t stream, void * d_data, void * d_corrections,
uint64_t nbytes, unsigned nchan, unsigned nant)
{
const unsigned ndim = 2;
const uint64_t ndat = nbytes / (nchan * nant * ndim);
const size_t sdata_bytes = MOPSR_UNIQUE_CORRECTIONS * sizeof(cuFloatComplex);
const unsigned nthread = 1024;
dim3 blocks (ndat / nthread, nant, nchan);
if (ndat % nthread)
blocks.x++;
#ifdef _GDEBUG
fprintf (stderr, "input_rephase: bytes=%lu ndat=%lu\n", nbytes, ndat);
fprintf (stderr, "input_rephase: blocks.x=%d, blocks.y=%d, blocks.z=%d\n", blocks.x, blocks.y, blocks.z);
#endif
const unsigned chan_stride = nant * ndat;
const unsigned ant_stride = ndat;
hipLaunchKernelGGL(( input_rephase), dim3(blocks), dim3(nthread), sdata_bytes, stream, (int16_t*) d_data,
(cuFloatComplex *) d_corrections, nbytes, chan_stride, ant_stride);
#ifdef _GDEBUG
check_error_stream("input_rephase", stream);
#endif
}
__global__ void input_rephase_TFS (int16_t * input, uint64_t nval, const unsigned nchan,
const unsigned nant, const unsigned chan_offset)
{
const unsigned samp_stride = nchan * nant;
const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nval)
return;
const unsigned isamp = idx / samp_stride; // sample number
const unsigned ipos = isamp % 32; // sample position for FIR filter
const unsigned ichan = (idx % samp_stride) / nant; //
const unsigned iant = idx % nant;
// load the 16 bit value from global memory
int16_t load16 = input[idx];
int8_t * load8 = (int8_t *) &load16;
// calculate the rephasing factor for this channel and sample
float ratio = 2 * M_PI * (5.0 / 32.0);
float theta = (chan_offset + ichan) * ratio * ipos;
cuFloatComplex rephase = make_cuComplex(sinf(theta), -1 * cos(theta));
cuFloatComplex val = make_cuComplex((float) load8[0] + 0.5, (float) load8[1] + 0.5);
cuFloatComplex res = cuCmulf(val, rephase);
const float scale = d_ant_scales[iant];
load8[0] = (int8_t) rintf ((cuCrealf(res) * scale) - 0.5);
load8[1] = (int8_t) rintf ((cuCimagf(res) * scale) - 0.5);
// write out to global memory (in-place)
input[idx] = load16;
}
void mopsr_input_rephase_TFS (hipStream_t stream, void * d_data, uint64_t nbytes,
unsigned nchan, unsigned nant, unsigned chan_offset)
{
const unsigned ndim = 2;
const uint64_t nval = nbytes / ndim;
const unsigned nthread = 1024;
unsigned nblocks = (unsigned) (nval / nthread);
if (nval % nthread)
nblocks++;
#ifdef _GDEBUG
fprintf (stderr, "input_rephase_TFS: bytes=%lu nval=%lu\n", nbytes, nval);
fprintf (stderr, "input_rephase_TFS: blocks=%u\n", nblocks);
#endif
hipLaunchKernelGGL(( input_rephase_TFS), dim3(nblocks), dim3(nthread), 0, stream, (int16_t*) d_data, nval, nchan, nant, chan_offset);
#ifdef _GDEBUG
check_error_stream("input_rephase_TFS", stream);
#endif
}
void mopsr_input_rephase_scales (hipStream_t stream, float * h_ant_scales, size_t nbytes)
{
hipMemcpyToSymbolAsync (d_ant_scales, (void *) h_ant_scales, nbytes, 0, hipMemcpyHostToDevice, stream);
}
// apply a fractional delay correction to a channel / antenna, warps will always
__global__ void input_delay (int8_t * input, int8_t * output, int8_t * overlap, float * delays,
unsigned nthread_run, uint64_t ndat, const unsigned chan_stride,
const unsigned ant_stride, const unsigned ntap)
{
extern __shared__ float sdata_delay[];
float * filter = sdata_delay;
float * reals = filter + ntap;
float * imags = reals + blockDim.x;
const unsigned half_ntap = (ntap / 2);
const unsigned in_offset = 2 * half_ntap;
const unsigned isamp = blockIdx.x * nthread_run + threadIdx.x;
const unsigned iant = blockIdx.y;
const unsigned nant = blockDim.y;
const unsigned ichan = blockIdx.z;
const unsigned ndim = 2;
if (threadIdx.x < ndat)
return;
// calculate the filter coefficients for the delay
if (threadIdx.x < ntap)
{
float x = (threadIdx.x - half_ntap) + delays[ichan*nant + iant];
if (x == 0)
filter[threadIdx.x] = 1;
else
{
x *= M_PI;
filter[threadIdx.x] = sinf(x) / x;
}
}
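// i.e. filter[i] = sinc(i - ntap/2 + delay): a sinc interpolation kernel centred on the
// fractional delay for this channel/antenna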
// each thread must also load its data from main memory
unsigned data_idx = ichan*chan_stride + iant*ant_stride + isamp;
// the first block needs to load data from the overlap buffer, not from input block - in_offset
if (blockIdx.x == 0)
{
if (threadIdx.x < in_offset)
{
const unsigned overlap_idx = (ichan*nant*ntap + iant*ntap + isamp) * ndim;
reals[threadIdx.x] = (float) overlap[overlap_idx + 0];
imags[threadIdx.x] = (float) overlap[overlap_idx + 1];
}
else
{
reals[threadIdx.x] = (float) input[2*(data_idx - in_offset)];
imags[threadIdx.x] = (float) input[2*(data_idx - in_offset)+1];
}
}
else
{
reals[threadIdx.x] = (float) input[2*(data_idx - in_offset)];
imags[threadIdx.x] = (float) input[2*(data_idx - in_offset)+1];
}
__syncthreads();
// there are 2 * half_ntap threads that don't calculate anything
if (threadIdx.x < nthread_run)
{
float re = 0;
float im = 0;
unsigned i;
for (i=0; i<ntap; i++)
{
re += reals[i] * filter[i];
im += imags[i] * filter[i];
}
output[2*data_idx] = (int8_t) floor(re + 0.5);
output[2*data_idx+1] = (int8_t) floor(im + 0.5);
}
}
//
// Perform fractional delay correction, out-of-place
//
void mopsr_input_delay_fractional (hipStream_t stream, void * d_in,
void * d_out, void * d_overlap,
float * d_delays, uint64_t nbytes,
unsigned nchan, unsigned nant, unsigned ntap)
{
fprintf (stderr, "mopsr_input_delay_fractional()\n");
const unsigned ndim = 2;
const uint64_t ndat = nbytes / (nchan * nant * ndim);
const unsigned half_ntap = ntap / 2;
// number of threads that actually load data
const unsigned nthread_load = 1024;
const unsigned nthread_run = nthread_load - (2 * half_ntap);
// need shared memory to load the ntap coefficients + nthread_load data points
const size_t sdata_bytes = (ntap + 2 * nthread_load) * sizeof(float);
dim3 blocks (ndat / nthread_run, nant, nchan);
if (ndat % nthread_run)
blocks.x++;
fprintf (stderr, "mopsr_input_delay: bytes=%lu ndat=%lu\n", nbytes, ndat);
fprintf (stderr, "mopsr_input_delay: blocks.x=%d, blocks.y=%d, blocks.z=%d\n", blocks.x, blocks.y, blocks.z);
const unsigned chan_stride = nant * ndat;
const unsigned ant_stride = ndat;
hipLaunchKernelGGL(( input_delay), dim3(blocks), dim3(nthread_load), sdata_bytes, stream, (int8_t *) d_in, (int8_t *) d_out,
(int8_t *) d_overlap, (float *) d_delays, nthread_run, ndat, chan_stride, ant_stride, ntap);
#ifdef _GDEBUG
check_error_stream("input_delay", stream);
#endif
}
__global__ void tile_beams_kernel (int16_t * input, float * output,
float * beam_sin_thetas,
float * ant_factors,
unsigned nbeam, uint64_t ndat, unsigned nant)
{
extern __shared__ int16_t sdata_tb[];
float * sh_ant_factors = (float *) (sdata_tb + (32 * nant));
//const unsigned ndim = 2;
const unsigned sample = blockIdx.x * blockDim.x + threadIdx.x;
//unsigned warp_idx = threadIdx.x % WARP_SIZE;
unsigned warp_num = threadIdx.x / WARP_SIZE;
unsigned iant = warp_num;
unsigned i_idx = (iant * ndat) + sample;
unsigned s_idx = threadIdx.x;
const uint64_t in_stride = WARP_SIZE * ndat;
while (iant < nant)
{
if (sample < ndat)
{
//if ((blockIdx.x == 0) && (warp_num == 0))
// printf ("[%d][%d] reading [%d] = [%d]\n", blockIdx.x, threadIdx.x, s_idx, i_idx);
sdata_tb[s_idx] = input[i_idx];
s_idx += blockDim.x;
i_idx += in_stride;
}
iant += WARP_SIZE;
}
// load the antenna factors into shared memory
if (threadIdx.x < nant)
{
sh_ant_factors[threadIdx.x] = ant_factors[threadIdx.x];
}
// load input data to shared memory such that
// [s0t0 s0t1 s0t2 ... s0t31]
// [s1t0 s1t1 t1t2 ... s1t31]
// [ ... ]
// [s351t0 s351t1 ... s351t31]
__syncthreads();
// Form tied array beams, detecting and summing as we go,
// only use as many beams as there are threads
int8_t * sdata_tb_re = (int8_t *) sdata_tb;
int8_t * sdata_tb_im = sdata_tb_re + 1;
cuFloatComplex phasor, samp_sum;
// TODO change beam_thetas to be sin(beam_thetas on CPU)
unsigned ibeam = threadIdx.x;
if (ibeam < nbeam)
{
// a simple 1 time load from gmem, coalesced
const float sin_theta = beam_sin_thetas[ibeam];
cuFloatComplex beam_sum = make_cuComplex(0,0);
s_idx = 0;
for (unsigned iant=0; iant<nant; iant++)
{
sincosf(sin_theta * sh_ant_factors[iant], &(phasor.y), &(phasor.x));
samp_sum = make_cuComplex(0,0);
for (unsigned isamp=0; isamp<32; isamp++)
{
samp_sum.x += (float) sdata_tb_re[2*isamp];
samp_sum.y += (float) sdata_tb_im[2*isamp];
}
s_idx += 128;
beam_sum = cuCaddf( beam_sum, cuCmulf (samp_sum, phasor));
}
output[(blockIdx.x * nbeam) + ibeam] = cuCabsf(beam_sum);
}
}
#ifdef HAVE_SHFL
__device__ __forceinline__ cuFloatComplex shflComplex( cuFloatComplex r, int lane )
{
return make_cuComplex ( __shfl( r.x, lane ), __shfl( r.y, lane ) );
}
__device__ __forceinline__ cuFloatComplex shfl_xor_Complex ( cuFloatComplex r, int lane )
{
return make_cuComplex ( __shfl_xor( r.x, lane ), __shfl_xor( r.y, lane ) );
}
#endif
#ifdef EIGHT_BIT_PHASORS
__global__ void tile_beams_kernel_2048(
const __restrict__ int32_t * input, float * output, int8_t * phasors,
unsigned nbeam, unsigned ndat, unsigned nant)
#else
__global__ void tile_beams_kernel_2048(
const __restrict__ int32_t * input, float * output, float * phasors,
unsigned nbeam, unsigned ndat, unsigned nant)
#endif
{
extern __shared__ float sdata_tb_c[];
float * re_phasors = sdata_tb_c + (2 * 32 * BEAMS_PER_LOOP);
float * im_phasors = re_phasors + (nant * BEAMS_PER_LOOP);
#ifdef HAVE_SHFL
const int warp_num = threadIdx.x / WARP_SIZE;
const int warp_idx = threadIdx.x & 0x1F;
float power;
#endif
int32_t val32;
int8_t * ptr8 = (int8_t *) &val32;
const unsigned nbeamant = nbeam * nant;
//const float scale = 127.5;
cuFloatComplex b1s[BEAMS_PER_LOOP];
cuFloatComplex b2s[BEAMS_PER_LOOP];
cuFloatComplex val;
// each pass of this loop computes BEAMS_PER_LOOP beams across the samples handled by this block
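// outline: (1) stage this beam-group's phasors in shm; (2) each thread accumulates its two
// time samples across all antennas into BEAMS_PER_LOOP complex sums (the very first beam is
// the incoherent sum of detected antenna powers); (3) each warp detects and __shfl_down-reduces
// its 64 samples to one value per beam; (4) one warp per beam then sums groups of 8 of those
// 32 warp values, giving 4 output samples per block (steps 3 and 4 need HAVE_SHFL)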
for (unsigned ibeam=0; ibeam<nbeam; ibeam += BEAMS_PER_LOOP)
{
unsigned ibeamant = ibeam * nant;
// use all threads in the warp to load the load phasors for this beam
// and all antenna into shared memory.
for (unsigned i=threadIdx.x; i<nant*BEAMS_PER_LOOP; i+=blockDim.x)
{
re_phasors[i] = (float) phasors[ibeamant + i];
im_phasors[i] = (float) phasors[nbeamant + ibeamant + i];
}
__syncthreads();
// for all the antenna, perform complex multiplications on the 4 beams
unsigned idx = (blockIdx.x * blockDim.x) + threadIdx.x;
//for (unsigned i=0; i<nbeam_loop; i++)
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
b1s[i].x = 0;
b1s[i].y = 0;
b2s[i].x = 0;
b2s[i].y = 0;
}
for (unsigned iant=0; iant<nant; iant++)
{
// load 4 x 8bit values (2 complex samples) for this time sample and antenna
val32 = input[idx];
// make a complex float from this input
//val = make_cuComplex (((float) ptr8[0]) / scale, ((float) ptr8[1]) / scale);
val = make_cuComplex ((float) ptr8[0], (float) ptr8[1]);
unsigned pidx = iant;
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
// multiply by phasor and add to the beam (yes this is a += operation)
if (ibeam == 0 && i == 0)
b1s[0].x = fmaf( val.x, val.x, fmaf (val.y, val.y, b1s[0].x));
else
b1s[i] = cuCfmaf (make_cuComplex(re_phasors[pidx], im_phasors[pidx]), val, b1s[i]);
pidx += nant;
}
//val = make_cuComplex (((float) ptr8[2] / scale), ((float) ptr8[3]) / scale);
val = make_cuComplex ((float) ptr8[2], (float) ptr8[3]);
pidx = iant;
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
// multiply by phasor and add to the beam (yes this is a += operation)
if (ibeam == 0 && i == 0)
b2s[0].x = fmaf( val.x, val.x, fmaf (val.y, val.y, b2s[0].x));
else
b2s[i] = cuCfmaf (make_cuComplex(re_phasors[pidx],im_phasors[pidx]), val, b2s[i]);
pidx += nant;
}
idx += ndat/2;
}
#ifdef HAVE_SHFL
// detect each sample and integrate across the warp (factor of 32 in time)
// this takes us from 1.28us to 81.92
unsigned sdx = warp_num;
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
if (ibeam == 0 && i == 0)
power = b1s[i].x;
else
power = (b1s[i].x * b1s[i].x) + (b1s[i].y * b1s[i].y);
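// warp-wide tree reduction: each __shfl_down halves the contributing lanes, so after five
// steps lane 0 holds the sum over all 32 lanes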
power += __shfl_down (power, 16);
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
// power now contains the integrated power for this warp (i.e. 40.96 us samples
// we write these to shared memory in ST order. T=64, S=8
if (warp_idx == 0)
{
sdata_tb_c[sdx] = power;
}
if (ibeam == 0 && i == 0)
power = b2s[i].x;
else
power = (b2s[i].x * b2s[i].x) + (b2s[i].y * b2s[i].y);
power += __shfl_down (power, 16);
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
// power now contains the integrated power for this warp (i.e. 40.96 us samples
// we write these to shared memory in ST order. T=32, S=8
if (warp_idx == 0)
{
sdata_tb_c[sdx] += power;
sdx += 32;
}
}
__syncthreads();
// one warp per output beam
if (warp_num < BEAMS_PER_LOOP)
{
// threads to generate 4 x 655.36 time samples from 32 x 81.92)
power = sdata_tb_c[(warp_num * WARP_SIZE) + warp_idx];
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
// warp_idxs 0, 8, 16, 24 have the 4 time samples for beam warp_num
if (warp_idx % 8 == 0)
{
const unsigned obeam = ibeam + warp_num;
const unsigned ndat_out = ndat / 512;
const unsigned osamp = warp_idx / 8;
// output is in ST format
unsigned out_idx = (obeam * ndat_out) + (blockIdx.x * 4) + osamp;
output[out_idx] = power;
}
}
#endif
}
}
// load 2048 samples per block, form beams, scrunch down x32 to write out
// 64 samples. input FST output SFT
__global__ void tile_beams_kernel_2048_32scr (
const __restrict__ int32_t * input, float * output, float * phasors,
unsigned nbeam, unsigned ndat, unsigned nant)
{
extern __shared__ float sdata_tb_c[];
//const float scale = 127.5;
float * re_phasors = sdata_tb_c + (2 * 32 * BEAMS_PER_LOOP);
float * im_phasors = re_phasors + (nant * BEAMS_PER_LOOP);
const int warp_num = threadIdx.x / WARP_SIZE;
const int warp_idx = threadIdx.x & 0x1F;
#ifdef HAVE_SHFL
float power;
#endif
int32_t val32;
int8_t * ptr8 = (int8_t *) &val32;
const unsigned nbeamant = nbeam * nant;
cuFloatComplex b1s[BEAMS_PER_LOOP];
cuFloatComplex b2s[BEAMS_PER_LOOP];
cuFloatComplex val;
// shift phasors pointer by ichan * chan_stride
phasors += blockIdx.y * nant * nbeam * 2;
// shift input by ndat/2 (since int32_t *)
input += blockIdx.y * nant * (ndat / 2);
// shift output by output_ndat to align to right channel
output += blockIdx.y * (ndat / 32);
// each pass of this loop computes BEAMS_PER_LOOP beams across the samples handled by this block
for (unsigned ibeam=0; ibeam<nbeam; ibeam += BEAMS_PER_LOOP)
{
unsigned ibeamant = ibeam * nant;
// use all threads in the warp to load the load phasors for this beam
// and all antenna into shared memory.
for (unsigned i=threadIdx.x; i<nant*BEAMS_PER_LOOP; i+=blockDim.x)
{
re_phasors[i] = (float) phasors[ibeamant + i];
im_phasors[i] = (float) phasors[nbeamant + ibeamant + i];
}
__syncthreads();
// for all the antenna, perform complex multiplications on the 4 beams
unsigned idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
b1s[i].x = 0;
b1s[i].y = 0;
b2s[i].x = 0;
b2s[i].y = 0;
}
for (unsigned iant=0; iant<nant; iant++)
{
// load 4 x 8bit values (2 complex samples) for this time sample and antenna
val32 = input[idx];
// make a complex float from this input
//val = make_cuComplex ((float) ptr8[0] / scale, (float) ptr8[1] / scale);
val = make_cuComplex ((float) ptr8[0], (float) ptr8[1]);
unsigned pidx = iant;
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
// multiply by phasor and add to the beam (yes this is a += operation)
if (ibeam == 0 && i == 0)
b1s[0].x = fmaf( val.x, val.x, fmaf (val.y, val.y, b1s[0].x));
else
{
//b1s[i] = cuCaddf (val, b1s[i]);
b1s[i] = cuCfmaf (make_cuComplex(re_phasors[pidx], im_phasors[pidx]), val, b1s[i]);
}
//if (blockIdx.x == 0 && ibeam == 0 && i == 1 && threadIdx.x == 0)
// printf("OLD: samp=%d ant=%d antenna=(%f,%f) weight=(%f,%f) sum=(%f,%f)\n",
// 2*threadIdx.x, iant,
// val.x, val.y, re_phasors[pidx], im_phasors[pidx], b1s[i].x, b1s[i].y);
pidx += nant;
}
//val = make_cuComplex ((float) ptr8[2] / scale, (float) ptr8[3] / scale);
val = make_cuComplex ((float) ptr8[2], (float) ptr8[3]);
pidx = iant;
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
// multiply by phasor and add to the beam (yes this is a += operation)
if (ibeam == 0 && i == 0)
b2s[0].x = fmaf( val.x, val.x, fmaf (val.y, val.y, b2s[0].x));
else
{
//b2s[i] = cuCaddf (val, b2s[i]);
b2s[i] = cuCfmaf (make_cuComplex(re_phasors[pidx],im_phasors[pidx]), val, b2s[i]);
}
//if (blockIdx.x == 0 && ibeam == 0 && i == 1)
// printf("OLD: samp=%d ant=%d antenna=(%f,%f) sum=(%f,%f)\n",
// 2*threadIdx.x+1, iant,
// val.x, val.y, b2s[i].x, b2s[i].y);
pidx += nant;
}
idx += ndat/2;
}
// detect each sample and integrate across the warp (factor of 32 in time)
// this takes us from 10.24 to 327.68 us
#ifdef HAVE_SHFL
unsigned sdx = 2 * warp_num;
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
if (ibeam == 0 && i == 0)
power = b1s[i].x;
else
power = (b1s[i].x * b1s[i].x) + (b1s[i].y * b1s[i].y);
//if ((blockIdx.y == 0) && (ibeam == 0) && (i == 1) && (blockIdx.x == 0) && (warp_num == 0))
// printf("GPU %d %f %f %f\n", 2*threadIdx.x+0, b1s[i].x, b1s[i].y, power);
// since the consecutive samples are spread across b1 and b2
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
// power now contains the integrated power for this warp (i.e. 327.68 us
// samples). Write these to shared memory in ST order. T=64, S=8
if (warp_idx == 0 || warp_idx == 16)
{
sdata_tb_c[sdx + warp_idx/16] = power; // sample sdx + 0 and 1
}
if (ibeam == 0 && i == 0)
power = b2s[i].x;
else
power = (b2s[i].x * b2s[i].x) + (b2s[i].y * b2s[i].y);
//if ((blockIdx.y == 0) && (ibeam == 0) && (i == 1) && (blockIdx.x == 0) && (warp_num == 0))
// printf("GPU %d %f %f %f\n", 2*threadIdx.x+1, b2s[i].x, b2s[i].y, power);
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
if (warp_idx == 0 || warp_idx == 16)
{
sdata_tb_c[sdx+warp_idx/16] += power;
//if ((blockIdx.y == 0) && (ibeam == 0) && (i == 1) && (blockIdx.x == 0) && (warp_num == 0))
// printf("GPU %d sdata_tb_c[%d]==%f\n", warp_idx, sdx+warp_idx/16, sdata_tb_c[sdx+warp_idx/16]);
sdx += 64;
}
}
__syncthreads();
#endif
// there are now 8 beams * (2 * 32) samples in SHM to write
// out to gmem, using 1 warp per beam
if (warp_num < BEAMS_PER_LOOP)
{
// ibeam * 64 + warp_idx
unsigned sdx = (warp_num * WARP_SIZE * 2) + warp_idx;
const unsigned obeam = ibeam + warp_num;
// obeam * nchan * ndat_out + osamp_block + osamp
unsigned odx = (obeam * gridDim.y * (ndat / 32)) + (blockIdx.x * 64) + warp_idx;
// write out the 64 samples for this beam
output[odx] = sdata_tb_c[sdx];
output[odx+32] = sdata_tb_c[sdx+32];
}
}
}
/*
__host__ __device__
cuFloatComplex ComplexInt8_to_cuFloatComplex(ComplexInt8 input)
{
return make_cuFloatComplex((float)(input.x),(float)(input.y));
}
__host__ __device__
cuFloatComplex char2_to_cuFloatComplex(char2 input)
{
return make_cuFloatComplex(float(input.x)/127.0,float(input.y)/127.0);
}
// each block processes 1024 samples, producing 32 output samples
// and 32 beams
// each warp converts 32 samples -> 1 sample, and each block
// therefore processes 32 x 32 = 1024 samples
__global__ void tile_beams_kernel_32scr_TS (
const int * __restrict__ input,
float * output,
const half2 * __restrict__ phasors,
unsigned nbeam, unsigned ndat, unsigned nant)
{
extern __shared__ half2 shared_phasors[];
const int warp_idx = threadIdx.x / WARP_SIZE; // isamp
const int lane_idx = threadIdx.x & 0x1F; // ibeam
// the beam this thread is accumulating
const unsigned ibeam = (blockIdx.z * WARP_SIZE) + lane_idx;
// beam-forming phasors offset for this block
int phasors_offset = (blockIdx.y * nant * nbeam) + ibeam;
int shared_phasors_offset = lane_idx;
// each iteration of the loop, loads phasors for these beams + ants
// phasors are read/stored in ant,beam order
for (int iant = lane_idx; iant < nant; iant+=WARP_SIZE)
{
// storage iant * nbeam + ibeam
//shared_phasors[shared_phasors_offset] = phasors[phasors_offset];
shared_phasors[shared_phasors_offset] = __floats2half2_rn(1.0f, 0.0f);
phasors_offset += nbeam;
shared_phasors_offset += WARP_SIZE;
}
__syncthreads();
// output sample offset, each warp loads a set of 32 samples that are condensed to 1 sample
const int osamp = (blockIdx.x * NWARPS_PER_BLOCK) + warp_idx;
// input sample offset /2 since loading 2 antenna at a time
int sample_offset = NSUM * osamp / 2;
// accumulated output power
float power = 0.0f;
int two_samples;
// pointer to access antenna
// TODO check whether other_ant is really necessary
char4 * samples = (char4 *) &two_samples;
// samples to be processed
for (int isamp = sample_offset; isamp < sample_offset + (NSUM/2); ++isamp)
{
// compute the offset of this block on the input
// channel offset + samp offset
// (blockIdx.y * nant * nsamp)/2 + (isamp * nant)/2 + lane_idx;
const int idx = nant * (blockIdx.y * ndat + isamp) / 2 + lane_idx;
// set the complex accumulator for the tied-array beam
cuFloatComplex tb = make_cuComplex(0.0f, 0.0f);
// loop over nant (/2 since dual load)
for (int iant=0; iant<nant; iant+=WARP_SIZE)
{
// load 2 complex samples at once
two_samples = input[idx + iant];
cuFloatComplex sample1 = make_cuFloatComplex(float(samples->x),float(samples->y));
cuFloatComplex sample2 = make_cuFloatComplex(float(samples->z),float(samples->w));
cuFloatComplex sample;
//if (blockIdx.y == 0 && blockIdx.x == 0 && blockIdx.z == 0 && warp_idx == 0 && iant < 64)
// printf("[%d][%d] load isamp=%d offset=%d\n", threadIdx.x, iant, isamp, idx + iant);
// other antenna pair for shuffling
//int other_two_samples;
// use warp shuffle to sum these antenna into the TB result and share
for (int other_lane_idx=0; other_lane_idx < WARP_SIZE; ++other_lane_idx)
{
cuFloatComplex weight = __half22float2(shared_phasors[iant*WARP_SIZE+lane_idx]);
//cuFloatComplex weight = make_cuFloatComplex(1.0f,0.0f);
#ifdef HAVE_SHFL
// shuffle the first sample across
sample.x = __shfl(sample1.x, other_lane_idx);
sample.y = __shfl(sample1.y, other_lane_idx);
//other_two_samples = __shfl(two_samples, other_lane_idx);
#endif
// first antenna of the pair
if (ibeam == 0)
tb.x = fmaf( sample.x, sample.x, fmaf (sample.y, sample.y, tb.x));
else
tb = cuCfmaf(sample, weight, tb);
#ifdef HAVE_SHFL
sample.x = __shfl(sample2.x, other_lane_idx);
sample.y = __shfl(sample2.y, other_lane_idx);
#endif
// second antenna of the pair
if (ibeam == 0)
tb.x = fmaf( sample.x, sample.x, fmaf (sample.y, sample.y, tb.x));
else
tb = cuCfmaf(sample, weight, tb);
//if (blockIdx.y == 0 && blockIdx.x == 0 && blockIdx.z == 0 && ibeam == 1 && sample_offset == 0)
// printf("NEW: samp=%d iant=%d sample=(%d,%d) weight=(%f,%f) sum=(%f,%f)\n",
// 2*isamp, iant+other_lane_idx, samples->x, samples->y, weight.x, weight.y, tb.x, tb.y);
//if (blockIdx.y == 0 && blockIdx.x == 0 && blockIdx.z == 0 && ibeam == 1 && sample_offset == 0)
// printf("NEW: samp=%d iant=%d sample=(%d,%d) weight=(%f,%f) sum=(%f,%f)\n",
// 2*isamp+1, iant+other_lane_idx, samples->w, samples->z, weight.x, weight.y, tb.x, tb.y);
}
}
// now that all antennae are summed, detect and accumulate the power for this sample / beam
float amplitude;
if (ibeam == 0)
amplitude = tb.x;
else
amplitude = (tb.x * tb.x) + (tb.y * tb.y);
power += amplitude;
// if (blockIdx.y == 0 && blockIdx.x == 0 && blockIdx.z == 0 && ibeam == 1 && threadIdx.x == 1)
// printf("NEW amplitude=%f power=%f\n", amplitude, power);
}
// the warp_idx corresponds to the output sample
// the lane_idx corresponds to the output beam
// order SFT for Fan Beams
// (ibeam * nchan * n_osamp) + (ichan * n_osamp) + osamp
//unsigned odx = (ndat/32) * (ibeam * gridDim.y + blockIdx.y) + osamp;
unsigned odx = threadIdx.x;
//if (blockIdx.y == 0 && blockIdx.x == 0 && blockIdx.z == 0 && ibeam == 1 && threadIdx.x == 1)
//printf("OUTPUT %u %u %u %f ibeam=%u nchan=%u\n", odx, blockIdx.x, threadIdx.x, power, ibeam, gridDim.y);
output[odx] = power;
}
*/
void mopsr_tile_beams_precomp (hipStream_t stream, void * d_in, void * d_fbs, void * d_phasors,
uint64_t bytes, unsigned nbeam, unsigned nant, unsigned nchan)
{
const unsigned ndim = 2;
const uint64_t ndat = bytes / (nchan * nant * ndim);
const unsigned nthread = 1024;
const unsigned nbeam_block = BEAMS_PER_LOOP;
unsigned ndat_per_block = 2048;
dim3 blocks = dim3 (ndat / ndat_per_block, nchan, 1);
//unsigned nblocks = ndat / ndat_per_block;
if (ndat % ndat_per_block)
fprintf (stderr, "WARNING: ndat not divisible by %d\n", ndat_per_block);
size_t sdata_bytes = (nbeam_block * (nthread/WARP_SIZE) * sizeof(float) * 2) +
(nbeam_block * nant * sizeof (float _Complex));
#ifdef _GDEBUG
fprintf (stderr, "bytes=%lu ndat=%lu blocks=(%u,%u,%u) threads=%u shm=%ld\n", bytes, ndat, blocks.x, blocks.y, blocks.z, nthread, sdata_bytes);
#endif
#ifdef EIGHT_BIT_PHASORS
hipLaunchKernelGGL(( tile_beams_kernel_2048), dim3(blocks), dim3(nthread), sdata_bytes, stream, (int32_t *) d_in, (float *) d_fbs, (int8_t *) d_phasors, nbeam, ndat, nant);
#else
#ifdef HIRES
hipLaunchKernelGGL(( tile_beams_kernel_2048_32scr), dim3(blocks), dim3(nthread), sdata_bytes, stream, (int32_t *) d_in, (float *) d_fbs, (float *) d_phasors, nbeam, ndat, nant);
#else
hipLaunchKernelGGL(( tile_beams_kernel_2048), dim3(blocks), dim3(nthread), sdata_bytes, stream, (int32_t *) d_in, (float *) d_fbs, (float *) d_phasors, nbeam, ndat, nant);
#endif
#endif
#ifdef _GDEBUG
check_error_stream("tile_beams_kernel_2048", stream);
#endif
}
/*
// assumes data has been transposed to FTS order
void mopsr_tile_beams_transpose (hipStream_t stream, void * d_in, void * d_fbs, void * d_phasors,
uint64_t bytes, unsigned nbeam, unsigned nant, unsigned nchan)
{
const unsigned ndim = 2;
const uint64_t ndat = bytes / (nchan * nant * ndim);
const unsigned nthread = 1024;
const unsigned nwarps_per_block = nthread / WARP_SIZE;
const unsigned nbeam_per_block = 32;
const uint64_t ndat_per_block = nwarps_per_block * 32;
dim3 blocks = dim3 (ndat / ndat_per_block, nchan, nbeam / nbeam_per_block);
if (ndat % ndat_per_block)
fprintf (stderr, "WARNING: ndat not divisible by %d\n", ndat_per_block);
if (nbeam % 32)
fprintf (stderr, "WARNING: nbeam not divisible by 32\n");
size_t sdata_bytes = 32 * nant * sizeof(half2);
#ifdef _GDEBUG
fprintf (stderr, "mopsr_tile_beams_transpose: bytes=%lu ndat=%lu blocks=(%u,%u,%u) threads=%u shm=%ld\n", bytes, ndat, blocks.x, blocks.y, blocks.z, nthread, sdata_bytes);
#endif
#ifdef HIRES
tile_beams_kernel_32scr_TS<<<blocks, nthread, sdata_bytes, stream>>>((int *) d_in, (float *) d_fbs, (half2 *) d_phasors, nbeam, ndat, nant);
#endif
#ifdef _GDEBUG
check_error_stream("tile_beams_kernel_32scr_TS", stream);
#endif
}
*/
__global__ void tie_beam_kernel (int16_t * in, cuFloatComplex * out, cuFloatComplex * d_phasors, uint64_t ndat, unsigned nant)
{
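// one thread per time sample; blockIdx.y selects the channel and each thread
// sums the phase-rotated antennas into a single tied-array beam sample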
extern __shared__ cuFloatComplex s_phasors[];
const unsigned ichan = blockIdx.y;
int16_t val16;
int8_t * ptr8 = (int8_t *) &val16;
cuFloatComplex tied_beam = make_cuComplex(0.0,0.0);
// load nant phasors for the ichan
if (threadIdx.x < nant)
{
s_phasors[threadIdx.x] = d_phasors[ichan*nant + threadIdx.x];
}
__syncthreads();
// idat
const unsigned idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= ndat)
return;
//const uint64_t out_off = ndat * ichan;
// output in TF order (isamp * nchan) + ichan
const uint64_t odx = idx * gridDim.y + ichan;
const uint64_t in_off = ndat * ichan * nant;
// increment to the right channel
in += in_off;
//out += out_off;
// step through the antennas, forming tied array beam
for (unsigned iant=0; iant<nant; iant++)
{
val16 = in[idx];
// make a complex float from this input
cuFloatComplex val = make_cuComplex ((float) ptr8[0], (float) ptr8[1]);
tied_beam = cuCfmaf (s_phasors[iant], val, tied_beam);
in += ndat;
}
// output ordering is good!
//out[idx] = tied_beam;
out[odx] = tied_beam;
}
/*
* steer a tied array beam to the target position
*/
void mopsr_tie_beam (hipStream_t stream, void * d_in, void * d_out, void * d_phasors,
uint64_t bytes, unsigned nant, unsigned nchan)
{
const unsigned ndim = 2;
const uint64_t ndat = bytes / (nchan * nant * ndim);
// input order is FST, output is FT
unsigned nthreads = 1024;
dim3 blocks = dim3 (ndat / nthreads, nchan, 1);
size_t shm_bytes = nant * sizeof(cuFloatComplex);
if (ndat % nthreads)
blocks.x++;
#ifdef _GDEBUG
fprintf (stderr, "bytes=%lu ndat=%lu nthreads=%u blocks=(%u,%u,%u) shm_bytes=%ld\n",
bytes, ndat, nthreads, blocks.x, blocks.y, blocks.z, shm_bytes);
#endif
hipLaunchKernelGGL(( tie_beam_kernel), dim3(blocks), dim3(nthreads), shm_bytes, stream, (int16_t *) d_in, (cuFloatComplex *) d_out, (cuFloatComplex *) d_phasors, ndat, nant);
#ifdef _GDEBUG
check_error_stream("tie_beam_kernel", stream);
#endif
}
// integrate 64 samples together from each antenna
__global__ void mod_beam_kernel_64 (int16_t * in, float * out, uint64_t ndat, unsigned nant)
{
extern __shared__ float block_power_sums[];
const unsigned warp_num = threadIdx.x / WARP_SIZE;
const unsigned warp_idx = threadIdx.x % WARP_SIZE;
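// assumes a 1024-thread block: each warp sums 32 samples, then warp 0 adds
// warp sums in pairs (lane i and lane i+16) to produce 16 outputs per block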
// offset [ant_offset + block_offset]
const unsigned offset = (blockIdx.y * ndat) + (blockIdx.x * blockDim.x);
const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= ndat)
return;
int16_t val16;
int8_t * ptr8 = (int8_t *) &val16;
// load the value for this time sample into a local variable
val16 = in[offset + threadIdx.x];
// make a complex float from this input
cuFloatComplex val = make_cuComplex ((float) ptr8[0], (float) ptr8[1]);
// detect
float power = val.x * val.x + val.y * val.y;
// add all of the time samples from this warp together
#ifdef HAVE_SHFL
power += __shfl_down (power, 16);
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
#endif
if (warp_idx == 0)
block_power_sums[warp_num] = power;
__syncthreads();
if (warp_num == 0)
{
power = block_power_sums[warp_idx];
#ifdef HAVE_SHFL
power += __shfl_down (power, 16);
#endif
if (warp_idx < 16)
{
out[offset/64 + warp_idx] = power;
}
}
}
// integrate 512 time samples together from each antenna
__global__ void mod_beam_kernel_512 (int16_t * in, float * out, uint64_t ndat)
{
extern __shared__ float block_power_sums[];
const unsigned warp_num = threadIdx.x / WARP_SIZE;
const unsigned warp_idx = threadIdx.x % WARP_SIZE;
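// assumes a 1024-thread block: each warp sums 32 consecutive samples, then
// warp 0 combines 16 warp sums into each of 2 outputs of 512 samples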
// offset [ant_offset + block_offset]
const unsigned offset = (blockIdx.z * ndat) + (blockIdx.x * blockDim.x);
const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= ndat)
return;
int16_t val16;
int8_t * ptr8 = (int8_t *) &val16;
// load the value for this time sample into a local variable
val16 = in[offset + threadIdx.x];
// make a complex float from this input
cuFloatComplex val = make_cuComplex ((float) ptr8[0], (float) ptr8[1]);
// detect
float power = val.x * val.x + val.y * val.y;
// add all of the time samples from this warp together
#ifdef HAVE_SHFL
power += __shfl_down (power, 16);
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
#endif
if (warp_idx == 0)
block_power_sums[warp_num] = power;
__syncthreads();
if (warp_num == 0)
{
power = block_power_sums[warp_idx];
#ifdef HAVE_SHFL
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
#endif
if (warp_idx == 0 || warp_idx == 16)
{
out[offset/512 + warp_idx/16] = power;
}
}
}
// integrate 32 time samples together from each antenna, FST -> SFT
__global__ void mod_beam_kernel_32 (int16_t * in, float * out, uint64_t ndat)
{
extern __shared__ float block_power_sums[];
const unsigned warp_num = threadIdx.x / WARP_SIZE;
const unsigned warp_idx = threadIdx.x % WARP_SIZE;
const unsigned ichan = blockIdx.y;
const unsigned nchan = gridDim.y;
const unsigned iant = blockIdx.z;
const unsigned nant = gridDim.z;
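// each warp integrates 32 consecutive time samples; warp 0 writes the 32 warp sums for this block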
// input sample number
const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= ndat)
return;
const uint64_t ndat_out = ndat / 32;
// input offset in FST [chan_offset + ant_offset + block_offset]
const unsigned in_offset = (ichan * ndat * nant) + (iant * ndat) + (blockIdx.x * blockDim.x);
// output offset in SFT [ant_offset + chan_offset + block_offset]
const unsigned out_offset = (iant * nchan * ndat_out) + (ichan * ndat_out) + (blockIdx.x * blockDim.x/32);
int16_t val16;
int8_t * ptr8 = (int8_t *) &val16;
// load the value for this time sample into a local variable
val16 = in[in_offset + threadIdx.x];
// make a complex float from this input
cuFloatComplex val = make_cuComplex ((float) ptr8[0], (float) ptr8[1]);
// detect
float power = val.x * val.x + val.y * val.y;
// add all of the time samples from this warp together
#ifdef HAVE_SHFL
power += __shfl_down (power, 16);
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
#endif
if (warp_idx == 0)
block_power_sums[warp_num] = power;
__syncthreads();
if (warp_num == 0)
out[out_offset + warp_idx] = block_power_sums[warp_idx];
}
void mopsr_mod_beams (hipStream_t stream, void * d_in, void * d_out, uint64_t bytes,
unsigned nant, unsigned nchan, unsigned tdec)
{
const unsigned ndim = 2;
const uint64_t ndat = bytes / (nchan * nant * ndim);
unsigned threads = 1024;
dim3 blocks (ndat / threads, nchan, nant);
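// shared memory holds one partial sum per warp (up to WARP_SIZE warps per block)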
size_t shm_bytes = sizeof(float) * WARP_SIZE;
if (ndat % threads)
blocks.x++;
#ifdef _GDEBUG
fprintf (stderr, "ndat=%lu threads=%u blocks=(%u,%u)\n", ndat, threads, blocks.x, blocks.y);
#endif
if (tdec == 512)
hipLaunchKernelGGL(( mod_beam_kernel_512), dim3(blocks), dim3(threads), shm_bytes, stream, (int16_t *) d_in, (float *) d_out, ndat);
else if (tdec == 32)
hipLaunchKernelGGL(( mod_beam_kernel_32), dim3(blocks), dim3(threads), shm_bytes, stream, (int16_t *) d_in, (float *) d_out, ndat);
else
fprintf (stderr, "mopsr_mod_beams: unrecognized TDEC\n");
}
__global__ void mopsr_transpose_BFST_FST_kernel (int16_t * in, int16_t * out, unsigned ndat)
{
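// each thread copies the 8 antennas of one input block for a single time sample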
const unsigned idat = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned in_chan_stride = ndat * 8;
// iblock * nchan * in_chan_stride + ichan * in_chan_stride + idat
uint64_t idx = (blockIdx.z * gridDim.y * in_chan_stride) + (blockIdx.y * in_chan_stride) + idat;
// ndat * nant (total)
const unsigned out_chan_stride = ndat * gridDim.z * 8;
// ichan * out_chan_stride + iant * out_ant_stride + idat
uint64_t odx = (blockIdx.y * out_chan_stride) + (blockIdx.z * 8 * ndat) + idat;
for (unsigned i=0; i<8; i++)
{
out[odx] = in[idx];
idx += ndat;
odx += ndat;
}
}
/* transpose from FST (in 8-antenna blocks) to FST */
void mopsr_transpose_BFST_FST (hipStream_t stream, void * d_in, void * d_out, uint64_t bytes,
unsigned nant, unsigned nchan)
{
const unsigned ndim = 2;
const uint64_t ndat = bytes / (nchan * nant * ndim);
unsigned nthreads = 1024;
dim3 blocks (ndat / nthreads, nchan, nant / 8);
hipLaunchKernelGGL(( mopsr_transpose_BFST_FST_kernel), dim3(blocks), dim3(nthreads), 0, stream, (int16_t *) d_in, (int16_t *) d_out, ndat);
}
/*
using namespace half_float;
void mopsr_convert_float_to_half(void * in, void * out, size_t num)
{
float * in_ptr = (float *) in;
half_float::half * out_ptr = (half_float::half *) out;
for (unsigned i=0; i<num; i++)
{
out_ptr[i] = half_cast<half_float::half,std::numeric_limits<float>::round_style>(in_ptr[i]);
}
}
*/
| ec1a106473b0cfa042c956fd86262a8c35042df2.cu |
#include <cuda_runtime.h>
#include <cuComplex.h>
#include <inttypes.h>
#include <stdio.h>
#include "dada_cuda.h"
#include "mopsr_cuda.h"
#define BEAMS_PER_LOOP 8
#define WARP_SIZE 32
#define NWARPS_PER_BLOCK 32
#define NSUM 32
//#define _GDEBUG 1
#ifdef __CUDA_ARCH__
#if (__CUDA_ARCH__ >= 300)
#define HAVE_SHFL
#else
#define NO_SHFL
#endif
#endif
// large parts of these kernels require SHFL instructions that are
// only available in sm_30 (kepler) or greater
// each thread loads 16 x 2bytes into shm
__global__ void input_transpose_TFS_to_FST (
const int16_t * input, int16_t * output,
const unsigned nchan, const unsigned nant,
const unsigned nval, const unsigned nval_per_thread,
const unsigned in_block_stride, const unsigned nsamp_per_block,
const unsigned out_chanant_stride)
{
extern __shared__ int16_t sdata[];
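// stage nval_per_thread complex (int8 re, int8 im) samples per thread into
// shared memory, preserving the input ordering of this block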
const unsigned warp_num = threadIdx.x / WARP_SIZE;
const unsigned warp_idx = threadIdx.x % WARP_SIZE;
const unsigned offset = (warp_num * (WARP_SIZE * nval_per_thread)) + warp_idx;
unsigned in_idx = (blockIdx.x * blockDim.x * nval_per_thread) + offset;
unsigned sin_idx = offset;
unsigned ival;
for (ival=0; ival<nval_per_thread; ival++)
{
if (in_idx < nval * nval_per_thread)
sdata[sin_idx] = input[in_idx];
else
sdata[sin_idx] = 0;
in_idx += WARP_SIZE;
sin_idx += WARP_SIZE;
}
__syncthreads();
// at this point we have had 1024 threads each load 40 bytes (20 * 2) 40960 bytes / block
// the sdata is order as TFS
// For 40 channels, 16 ant, this is 32 time samples (nice)
// for 40 channels, 4 ant, this is 128 time samples
// each thread in a warp will write out 20 sets of time samples (40 chan, 16 ant => 640)
unsigned nchanant = nchan * nant;
// starting (first of the 20 ichan ant)
unsigned ichanant = warp_num * nval_per_thread;
// the time sample this thread will write out
unsigned isamp = warp_idx;
// determine which shared memory index for this output ichan and isamp
unsigned sout_idx = (isamp * nchanant) + ichanant;
// determine the output index for this thread
unsigned out_idx = (ichanant * out_chanant_stride) + (blockIdx.x * nsamp_per_block) + (isamp);
for (ival=0; ival<nval_per_thread; ival++)
{
output[out_idx] = sdata[sout_idx];
sout_idx ++;
out_idx += out_chanant_stride;
}
return;
}
__global__ void input_transpose_TFS_to_FST_hires (
int16_t * in, int16_t * out,
const unsigned nchan, const unsigned nant,
const unsigned nval, const unsigned nval_per_thread,
const unsigned samp_stride, const unsigned chan_block_stride,
const unsigned out_chanant_stride)
{
// for loaded data samples
extern __shared__ int16_t sdata[];
const int nsamp_per_block = 32;
const int nchan_per_block = 16;
const int nchanant_per_block = nant * nchan_per_block;
const int warp_num = threadIdx.x / 32;
const int warp_idx = threadIdx.x & 0x1F; // % 32
// each warp reads a time sample, with the warp threads each reading the antenna and channels required
// offsets time sample offset + channel block offset + the chanant
unsigned idx = (blockIdx.x * nsamp_per_block + warp_num) * samp_stride + (blockIdx.y * chan_block_stride) + warp_idx;
// the right time sample in shm the chanant bank conflict trick
unsigned sdx = (nchanant_per_block * warp_num) + warp_idx;// + (warp_num * 2);
// read the TFS input to TFS shared memory
for (unsigned i=0; i<nval_per_thread; i++)
{
if (idx < nval)
{
sdata[sdx] = in[idx];
idx += 32;
sdx += 32;
}
}
__syncthreads();
// each warp will write out 32 time samples for a single antenna, for a number of channels
const int ant = warp_num % nant;
int ichan = nval_per_thread * (warp_num / nant);
int ichanant = ichan * nant + ant;
// offset for this thread in shared memory
// sample * sample_stride_in_shm + chanant offset + shm bank trick
sdx = (warp_idx * nant * nchan_per_block) + ichanant;// + (warp_idx * 2);
// output chanant for this warp
const int ochanant = (blockIdx.y * nchan_per_block * nant) + ichanant;
int osamp = (blockIdx.x * nsamp_per_block) + warp_idx;
int64_t odx = ochanant * out_chanant_stride + osamp;
// loop over channels
for (unsigned i=0; i<nval_per_thread; i++)
{
out[odx] = sdata[sdx];
sdx += nant;
odx += out_chanant_stride * nant;
}
}
/*
* Transpose a block of data from TFS to FST
*/
void mopsr_input_transpose_TFS_to_FST (cudaStream_t stream,
void * d_in, void * d_out, uint64_t nbytes, unsigned nchan, unsigned nant)
{
const unsigned ndim = 2;
unsigned nthread = 1024;
// this kernel has issues if nant is 1 and nchan is 40; try changing
// nthread to be a divisor of nval_per_block, since we want a warp of
// 32 threads to write out just 1 chunk
const unsigned nsamp_per_block = 32;
const unsigned nval_per_block = nsamp_per_block * nchan * nant;
// special case where not a clean multiple [TODO validate this!]
if (nval_per_block % nthread)
{
unsigned numerator = nval_per_block;
while ( numerator > nthread )
numerator /= 2;
nthread = numerator;
}
unsigned nval_per_thread = nval_per_block / nthread;
const uint64_t ndat = nbytes / (nchan * nant * ndim);
// the total number of values we have to process is
const uint64_t nval = nbytes / (ndim * nval_per_thread);
int nblocks = nval / nthread;
if (nval % nthread)
nblocks++;
const size_t sdata_bytes = nthread * ndim * nval_per_thread;
const unsigned in_block_stride = nthread * nval_per_thread;
const unsigned out_chanant_stride = ndat;
#ifdef _GDEBUG
fprintf (stderr, "input_transpose_TFS_to_FST: nval_per_block=%u, nval_per_thread=%u\n", nval_per_block, nval_per_thread);
fprintf (stderr, "input_transpose_TFS_to_FST: nbytes=%lu, ndat=%lu, nval=%lu\n", nbytes, ndat, nval);
fprintf (stderr, "input_transpose_TFS_to_FST: nthread=%d, nblocks=%d\n", nthread, nblocks);
fprintf (stderr, "input_transpose_TFS_to_FST: input=%p output=%p sdata_bytes=%ld, in_block_stride=%d, nsamp_per_block=%u out_chan_stride=%u\n", d_in, d_out, sdata_bytes, in_block_stride, nsamp_per_block, out_chanant_stride);
#endif
input_transpose_TFS_to_FST<<<nblocks,nthread,sdata_bytes,stream>>>((int16_t *)d_in, (int16_t *) d_out, nchan, nant, nval, nval_per_thread, in_block_stride, nsamp_per_block, out_chanant_stride);
}
void mopsr_input_transpose_TFS_to_FST_hires (cudaStream_t stream,
void * d_in, void * d_out, uint64_t nbytes, unsigned nchan, unsigned nant)
{
const unsigned ndim = 2;
unsigned nthread = 1024;
// process 32 samples and 16 channels in a block
const unsigned nsamp_per_block = 32;
const unsigned nchan_per_block = 16;
const unsigned nchanblocks = nchan / nchan_per_block;
const unsigned nval_per_block = nsamp_per_block * nchan_per_block * nant;
const uint64_t nsamp = nbytes / (nchan * nant * ndim);
if (nval_per_block % nthread)
{
unsigned numerator = nval_per_block;
while ( numerator > nthread )
numerator /= 2;
nthread = numerator;
}
unsigned nval_per_thread = nval_per_block / nthread;
// the total number of values we have to process is
const uint64_t nval = nbytes / ndim;
// the total number of samples is
dim3 blocks = dim3 (nsamp / nsamp_per_block, nchanblocks);
if (nsamp % nsamp_per_block)
blocks.x++;
const size_t sdata_bytes = (nsamp_per_block * nchan_per_block * nant * ndim) + 256;
// nbytes of bytes different (for input) between each block of data
const unsigned samp_stride = nchan * nant;
const unsigned chan_block_stride = nchan_per_block * nant;
#ifdef _GDEBUG
fprintf (stderr, "input_transpose_TFS_to_FST: nval_per_block=%u, nval_per_thread=%u\n", nval_per_block, nval_per_thread);
fprintf (stderr, "input_transpose_TFS_to_FST: nbytes=%lu, nval=%lu\n", nbytes, nval);
fprintf (stderr, "input_transpose_TFS_to_FST: input=%p output=%p sdata_bytes=%ld, nsamp_per_block=%u\n", d_in, d_out, sdata_bytes, nsamp_per_block);
#endif
input_transpose_TFS_to_FST_hires<<<blocks,nthread,sdata_bytes,stream>>>((int16_t *) d_in,
(int16_t *) d_out, nchan, nant, nval, nval_per_thread, samp_stride, chan_block_stride, nsamp);
#ifdef _GDEBUG
check_error_stream("input_transpose_TFS_to_FST_hires", stream);
#endif
}
// this will work best in FST format
__global__ void mopsr_input_ant_sum_kernel (const int16_t * input, int16_t * output, const uint64_t nsamp, const unsigned nchan, const unsigned nant)
{
const unsigned ichan = blockIdx.y;
const uint64_t isamp = blockIdx.x * blockDim.x + threadIdx.x;
if (isamp >= nsamp)
return;
unsigned i_idx = (ichan * nsamp * nant) + isamp;
const unsigned o_idx = (ichan * nsamp) + isamp;
// each thread will load data for nant into register memory
int16_t ant16[MOPSR_MAX_NANT_PER_AQ];
unsigned iant;
for (iant=0; iant<nant; iant++)
{
ant16[iant] = input[i_idx];
i_idx += nsamp;
}
float re = 0;
float im = 0;
int8_t * ant8 = (int8_t *) ant16;
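// accumulate the real and imaginary parts; note that only even-numbered
// antennas contribute to this sum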
for (iant=0; iant<nant; iant++)
{
if (iant % 2 == 0)
{
re += (float) ant8[2*iant];
im += (float) ant8[2*iant+1];
}
}
ant8[0] = (int8_t) rintf (re);
ant8[1] = (int8_t) rintf (im);
output[o_idx] = ant16[0];
}
//
// sum all modules in stream together [ORDER FST -> FT]
//
void mopsr_input_sum_ant (cudaStream_t stream, void * d_in, void * d_out, uint64_t nbytes, unsigned nchan, unsigned nant)
{
const unsigned ndim = 2;
const uint64_t nsamp = nbytes / (nchan * nant * ndim);
// number of threads that actually load data
const unsigned nthread = 1024;
dim3 blocks (nsamp / nthread, nchan);
if (nsamp % nthread)
blocks.x++;
#ifdef _GDEBUG
fprintf (stderr, "input_ant_sum: bytes=%lu nsamp=%lu\n", nbytes, nsamp);
fprintf (stderr, "input_ant_sum: nchan=%u nant=%u\n", nchan, nant);
fprintf (stderr, "input_ant_sum: blocks.x=%d, blocks.y=%d, blocks.z=%d\n", blocks.x, blocks.y, blocks.z);
#endif
mopsr_input_ant_sum_kernel<<<blocks, nthread, 0, stream>>>((const int16_t *) d_in, (int16_t *) d_out, nsamp, nchan, nant);
#ifdef _GDEBUG
check_error_stream("mopsr_input_ant_sum_kernel", stream);
#endif
}
__global__ void input_transpose_FT_to_TF_kernel (
const int16_t * input, int16_t * output, const uint64_t nsamp,
const unsigned nchan, const unsigned nval, const unsigned nval_per_thread,
const unsigned nsamp_per_block)
{
extern __shared__ int16_t sdata[];
const unsigned warp_num = threadIdx.x / WARP_SIZE;
const unsigned warp_idx = threadIdx.x % WARP_SIZE;
//const unsigned nwarp = blockDim.x / WARP_SIZE;
const unsigned nwarp_chunk_per_chan = nsamp_per_block / WARP_SIZE;
const unsigned iwarp_chunk = warp_num * nval_per_thread;
unsigned ichan = iwarp_chunk / nwarp_chunk_per_chan;
unsigned ichunk = iwarp_chunk % nwarp_chunk_per_chan;
// offset from base pointer to the chanant this warp starts at
uint64_t in_idx = (ichan * nsamp) + (blockIdx.x * nsamp_per_block) + (ichunk * WARP_SIZE) + warp_idx;
// to avoid shm bank conflicts add some padding
unsigned sin_idx = (warp_num * WARP_SIZE * nval_per_thread) + warp_idx + (2 * ichan);
unsigned ival;
//int8_t * tmp = (int8_t*) sdata;
for (ival=0; ival<nval_per_thread; ival++)
{
if (in_idx < nval * nval_per_thread)
sdata[sin_idx] = input[in_idx];
else
sdata[sin_idx] = 0;
//if ((blockIdx.x == 0) && (warp_num == 1))
// printf ("%d.%d.%d sdata[%d]=%d ichunk=%u ichan0=%u\n", blockIdx.x, threadIdx.x, ival, sin_idx, tmp[2*sin_idx], ichunk, ichan);
// shared memory increases linearly
sin_idx += WARP_SIZE;
in_idx += WARP_SIZE;
ichunk++;
// if we are moving channel
if (ichunk >= nwarp_chunk_per_chan)
{
in_idx += (nsamp - nsamp_per_block);
sin_idx += 2;
ichunk = 0;
}
}
__syncthreads();
// starting ichan and isamp or this thread/warp to write out
const unsigned ichan0 = warp_idx;
const unsigned isamp0 = (warp_num * WARP_SIZE * nval_per_thread) / nchan;
const unsigned nchansamp_block = nchan * nsamp_per_block;
// block offset isamp warp offset thread offset
uint64_t out_idx = (blockIdx.x * nchansamp_block) + (isamp0 * nchan) + ichan0;
// chan_offset sample offset
unsigned sout_idx = (ichan0 * nsamp_per_block) + isamp0;
const unsigned thread_stride = WARP_SIZE * nsamp_per_block;
const unsigned thread_rewind = nchansamp_block - 1;
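// when the shared-memory index passes the end of the block it is rewound to
// the start, advanced by one time sample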
unsigned warp_idat = warp_idx;
for (ival=0; ival<nval_per_thread; ival++)
{
ichan = warp_idat % nchan;
output[out_idx] = sdata[sout_idx + (2*ichan)];
// update the output index
out_idx += WARP_SIZE;
// update our warp idat so we can keep track of ichan
warp_idat += WARP_SIZE;
// update our shared memory output index
sout_idx += thread_stride;
if (sout_idx >= nchansamp_block)
sout_idx -= thread_rewind;
}
}
void mopsr_input_transpose_FT_to_TF (cudaStream_t stream, void * d_in, void * d_out, uint64_t nbytes, unsigned nchan)
{
const unsigned ndim = 2;
unsigned nthread = 1024;
// since we want a warp of 32 threads to write out just 1 chunk
const unsigned nsamp_per_block = WARP_SIZE * 4;
const unsigned nval_per_block = nsamp_per_block * nchan;
// special case where not a clean multiple [TODO validate this!]
if (nval_per_block % nthread)
{
unsigned numerator = nval_per_block;
while ( numerator > nthread )
numerator /= 2;
nthread = numerator;
}
unsigned nval_per_thread = nval_per_block / nthread;
const uint64_t nsamp = nbytes / (ndim * nchan);
// the total number of values we have to process is
const uint64_t nval = nbytes / (ndim * nval_per_thread);
int nblocks = nval / nthread;
if (nval % nthread)
nblocks++;
const size_t sdata_bytes = nthread * ndim * nval_per_thread + (2 * nchan);
#ifdef _GDEBUG
fprintf (stderr, "input_transpose_FT_to_TF: nsamp_per_block=%u nval_per_block=%u, nval_per_thread=%u\n", nsamp_per_block, nval_per_block, nval_per_thread);
fprintf (stderr, "input_transpose_FT_to_TF: nbytes=%lu, nsamp=%lu, nval=%lu\n", nbytes, nsamp, nval);
fprintf (stderr, "input_transpose_FT_to_TF: nthread=%d, nblocks=%d\n", nthread, nblocks);
fprintf (stderr, "input_transpose_FT_to_TF: input=%p output=%p sdata_bytes=%ld\n", d_in, d_out, sdata_bytes);
#endif
input_transpose_FT_to_TF_kernel<<<nblocks,nthread,sdata_bytes,stream>>>((int16_t *) d_in, (int16_t *) d_out, nsamp, nchan, nval, nval_per_thread, nsamp_per_block);
#ifdef _GDEBUG
check_error_stream ("input_transpose_FT_to_TF", stream);
#endif
}
__global__ void input_transpose_FST_to_STF (
const int16_t * input, int16_t * output,
const unsigned nchan, const unsigned nant,
const unsigned nval, const unsigned nval_per_thread,
const unsigned nsamp_per_block, const uint64_t out_ant_stride)
{
extern __shared__ int16_t sdata[];
const unsigned warp_num = threadIdx.x / WARP_SIZE;
const unsigned warp_idx = threadIdx.x % WARP_SIZE;
const uint64_t nsamp = nsamp_per_block * gridDim.x;
unsigned isamp = warp_idx;
// offset from base pointer to the chanant this warp starts at
uint64_t in_idx = (blockIdx.x * WARP_SIZE) + (warp_num * nsamp * nval_per_thread) + warp_idx;
unsigned sin_dat = warp_num * nsamp_per_block * nval_per_thread + isamp;
const unsigned nantsamp_block = nant * nsamp_per_block;
const unsigned nchansamp_block = nchan * nsamp_per_block;
const unsigned nchanantsamp_block = nant * nchan * nsamp_per_block;
unsigned ival, ichan, iant, sin_idx;
for (ival=0; ival<nval_per_thread; ival++)
{
ichan = sin_dat / nantsamp_block;
iant = (sin_dat % nantsamp_block) / nsamp_per_block;
// note that we add ichan to the shm index to avoid shm bank conflicts on shm read (later)
sin_idx = (ichan * nantsamp_block) + (iant * nsamp_per_block) + isamp + (2 * ichan);
if (in_idx < nval * nval_per_thread)
sdata[sin_idx] = input[in_idx];
else
sdata[sin_idx] = 0;
sin_dat += nsamp_per_block;
in_idx += nsamp;
}
__syncthreads();
// antenna for this WARP
iant = (warp_num * nant) / WARP_SIZE;
// shared memory strides
const unsigned swarp_stride = nval_per_thread * WARP_SIZE; // number of dats per warp
const unsigned sant_base = iant * nsamp_per_block;
// starting ichan and isamp or this thread/warp to write out
const unsigned ichan0 = warp_idx;
const unsigned nchansamp_per_warp = (WARP_SIZE * nval_per_thread) / nchan;
const unsigned isamp0 = (warp_num * nchansamp_per_warp) % nsamp_per_block;
const unsigned out_warp_offset = warp_num % (WARP_SIZE / nant);
// ant offset block offset isamp warp offset
uint64_t out_idx = (iant * out_ant_stride) + (blockIdx.x * nchansamp_block) + (out_warp_offset * swarp_stride) + ichan0;
// chan_offset ant offset sample offset
unsigned sout_idx = (ichan0 * nantsamp_block) + sant_base + isamp0;
const unsigned thread_stride = WARP_SIZE * nsamp_per_block * nant;
const unsigned thread_rewind = nchanantsamp_block - 1;
unsigned warp_idat = warp_idx;
for (ival=0; ival<nval_per_thread; ival++)
{
ichan = warp_idat % nchan;
//if ((blockIdx.x == 16) && (threadIdx.x < 32))
//  printf ("[%u] output[%u] = sdata[%d]\n", threadIdx.x, out_idx, sout_idx + 2*ichan);
output[out_idx] = sdata[sout_idx + 2*ichan];
// update the output index
out_idx += WARP_SIZE;
// update our warp idat so we can keep track of ichan
warp_idat += WARP_SIZE;
// update our shared memory output index
sout_idx += thread_stride;
if (sout_idx >= nchanantsamp_block)
sout_idx -= thread_rewind;
}
}
void mopsr_input_transpose_FST_to_STF (cudaStream_t stream,
void * d_in, void * d_out, uint64_t nbytes, unsigned nchan, unsigned nant)
{
const unsigned ndim = 2;
unsigned nthread = 1024;
// this kernel has issues if nant is 1 and nchan is 40; try changing
// nthread to be a divisor of nval_per_block, since we want a warp of
// 32 threads to write out just 1 chunk
const unsigned nsamp_per_block = WARP_SIZE;
const unsigned nval_per_block = nsamp_per_block * nchan * nant;
// special case where not a clean multiple [TODO validate this!]
if (nval_per_block % nthread)
{
unsigned numerator = nval_per_block;
while ( numerator > nthread )
numerator /= 2;
nthread = numerator;
}
unsigned nval_per_thread = nval_per_block / nthread;
// the total number of values we have to process is
const uint64_t nval = nbytes / (ndim * nval_per_thread);
int nblocks = nval / nthread;
if (nval % nthread)
nblocks++;
const size_t sdata_bytes = nthread * ndim * nval_per_thread + (2 * nchan);
const unsigned out_ant_stride = nbytes / (nant * ndim);
// TODO might need to pass nsamp to kernel!!!
#ifdef _GDEBUG
const uint64_t ndat = nbytes / (nchan * nant * ndim);
fprintf (stderr, "input_transpose_FST_to_STF: nval_per_block=%u, nval_per_thread=%u\n", nval_per_block, nval_per_thread);
fprintf (stderr, "input_transpose_FST_to_STF: nbytes=%lu, ndat=%lu, nval=%lu\n", nbytes, ndat, nval);
fprintf (stderr, "input_transpose_FST_to_STF: nthread=%d, nblocks=%d\n", nthread, nblocks);
fprintf (stderr, "input_transpose_FST_to_STF: input=%p output=%p sdata_bytes=%ld,nsamp_per_block=%u out_ant_stride=%u\n", d_in, d_out, sdata_bytes, nsamp_per_block, out_ant_stride);
#endif
input_transpose_FST_to_STF<<<nblocks,nthread,sdata_bytes,stream>>>((int16_t *)d_in, (int16_t *) d_out, nchan, nant, nval, nval_per_thread, nsamp_per_block, out_ant_stride);
#ifdef _GDEBUG
check_error_stream ("input_transpose_FST_to_STF", stream);
#endif
}
// perform a transpose from FST to FTS order, but with 2 samples paired
// with each other in 32-bit word
__global__ void input_transpose_FST_to_FTS (
const int32_t * input, int32_t * output,
const unsigned nchan, const unsigned nant,
const unsigned nsamp_per_block, const unsigned chan_stride)
{
// to hold 16+1 32-bit packed samples, for all antenna
extern __shared__ int32_t sdata_32t[];
const unsigned warp_num = threadIdx.x / nsamp_per_block;
const unsigned warp_idx = threadIdx.x % nsamp_per_block;
const int warps_per_block = blockDim.x / nsamp_per_block;
const uint64_t nsamp = nsamp_per_block * gridDim.x;
// input offset (ichan * chan_stride) + (iant * nsamp) + (iblock * nsamp_per_block) + isamp
uint64_t idx = (blockIdx.y * chan_stride) + (warp_num * nsamp) + (blockIdx.x * nsamp_per_block) + warp_idx;
// shm offset (iant * nsamp per block) + isamp
unsigned sdx = (warp_num * nsamp_per_block) + warp_idx;
// loop offsets
const unsigned ant_stride = nsamp * warps_per_block;
//const unsigned sin_stride = (WARP_SIZE + 1);
const unsigned sin_stride = nsamp_per_block * warps_per_block;
// read ST data straight into shm, with 1 sample stride offset for bank conflicts
for (unsigned iant=warp_num; iant<nant; iant+=warps_per_block)
{
sdata_32t[sdx] = input[idx];
idx += ant_stride;
sdx += sin_stride;
}
// ensure all blocks have loaded nant * nsamp_per_block samples
__syncthreads();
// write out transposed data, each warp writing out nsamp_per_block antenna
idx = (blockIdx.y * chan_stride) + (blockIdx.x * nsamp_per_block * nant) + threadIdx.x;
// assume nthread == nantenna, output antenna = threadId.x
sdx = threadIdx.x * nsamp_per_block;
//sdx = threadIdx.x * (WARP_SIZE + 1);
for (unsigned isamp=0; isamp<nsamp_per_block; isamp++)
{
output[idx] = sdata_32t[sdx];
// increment to next output sample
idx += nant;
sdx++;
}
}
// transpose each channel from ST to TS order
void mopsr_input_transpose_FST_to_FTS (cudaStream_t stream,
void * d_in, void * d_out, uint64_t nbytes, unsigned nchan, unsigned nant)
{
const unsigned ndim = 2;
const uint64_t ndat = nbytes / (nchan * nant * ndim) / 2;
unsigned nthread = nant;
// each block reads 16x2 time samples for each antenna
const unsigned nsamp_per_block = 16;
const unsigned chan_stride = nant * ndat;
dim3 blocks = dim3 (ndat / nsamp_per_block, nchan, 1);
const size_t sdata_bytes = nant * sizeof(int32_t) * (nsamp_per_block + 1);
fprintf (stderr, "input_transpose_FST_to_FTS: nbytes=%lu, ndat=%lu, nant=%u nchan=%u \n", nbytes, ndat, nant, nchan);
fprintf (stderr, "input_transpose_FST_to_FTS: nthread=%d, blocks.x=%d\n", nthread, blocks.x);
fprintf (stderr, "input_transpose_FST_to_FTS: input=%p output=%p sdata_bytes=%ld nsamp_per_block=%u chan_stride=%u\n", d_in, d_out, sdata_bytes, nsamp_per_block, chan_stride);
input_transpose_FST_to_FTS<<<blocks,nthread,sdata_bytes,stream>>>((int32_t *)d_in, (int32_t *) d_out, nchan, nant, nsamp_per_block, chan_stride);
#ifdef _GDEBUG
check_error_stream ("input_transpose_FST_to_FTS", stream);
#endif
}
// scaling factors for antenna
__device__ __constant__ float d_ant_scales [MOPSR_MAX_NANT_PER_AQ];
__global__ void input_rephase (int16_t * input, cuFloatComplex const * __restrict__ corrections,
uint64_t nbytes, const unsigned chan_stride, const unsigned ant_stride)
{
extern __shared__ cuFloatComplex corr_sh[];
const unsigned isamp = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned iant = blockIdx.y;
const unsigned ichan = blockIdx.z;
const unsigned idx = (ichan * chan_stride + iant*ant_stride + isamp);
const unsigned icorr = isamp % MOPSR_UNIQUE_CORRECTIONS;
// all threads in this block will be using the same ant and channel, so we only need
// to load into shm the 32 co-efficients for this ant and channel
if (threadIdx.x < MOPSR_UNIQUE_CORRECTIONS)
corr_sh[icorr] = corrections[ichan*MOPSR_UNIQUE_CORRECTIONS + icorr];
__syncthreads();
// coalesced int16_t read from global memory
int16_t load16 = input[idx];
int8_t * load8 = (int8_t *) &load16;
cuFloatComplex val = make_cuComplex((float) load8[0], (float) load8[1]);
cuFloatComplex res = cuCmulf(val, corr_sh[icorr]);
const float scale = d_ant_scales[iant];
load8[0] = (int8_t) (cuCrealf(res) * scale);
load8[1] = (int8_t) (cuCimagf(res) * scale);
input[idx] = load16;
}
void mopsr_input_rephase (cudaStream_t stream, void * d_data, void * d_corrections,
uint64_t nbytes, unsigned nchan, unsigned nant)
{
const unsigned ndim = 2;
const uint64_t ndat = nbytes / (nchan * nant * ndim);
const size_t sdata_bytes = MOPSR_UNIQUE_CORRECTIONS * sizeof(cuFloatComplex);
const unsigned nthread = 1024;
dim3 blocks (ndat / nthread, nant, nchan);
if (ndat % nthread)
blocks.x++;
#ifdef _GDEBUG
fprintf (stderr, "input_rephase: bytes=%lu ndat=%lu\n", nbytes, ndat);
fprintf (stderr, "input_rephase: blocks.x=%d, blocks.y=%d, blocks.z=%d\n", blocks.x, blocks.y, blocks.z);
#endif
const unsigned chan_stride = nant * ndat;
const unsigned ant_stride = ndat;
input_rephase<<<blocks, nthread, sdata_bytes, stream>>>((int16_t*) d_data,
(cuFloatComplex *) d_corrections, nbytes, chan_stride, ant_stride);
#ifdef _GDEBUG
check_error_stream("input_rephase", stream);
#endif
}
__global__ void input_rephase_TFS (int16_t * input, uint64_t nval, const unsigned nchan,
const unsigned nant, const unsigned chan_offset)
{
const unsigned samp_stride = nchan * nant;
const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nval)
return;
const unsigned isamp = idx / samp_stride; // sample number
const unsigned ipos = isamp % 32; // sample position for FIR filter
const unsigned ichan = (idx % samp_stride) / nant; // channel number
const unsigned iant = idx % nant;
// load the 16 bit value from global memory
int16_t load16 = input[idx];
int8_t * load8 = (int8_t *) &load16;
// calculate the rephasing factor for this channel and sample
float ratio = 2 * M_PI * (5.0 / 32.0);
float theta = (chan_offset + ichan) * ratio * ipos;
cuFloatComplex rephase = make_cuComplex(sinf(theta), -1 * cos(theta));
cuFloatComplex val = make_cuComplex((float) load8[0] + 0.5, (float) load8[1] + 0.5);
cuFloatComplex res = cuCmulf(val, rephase);
const float scale = d_ant_scales[iant];
load8[0] = (int8_t) rintf ((cuCrealf(res) * scale) - 0.5);
load8[1] = (int8_t) rintf ((cuCimagf(res) * scale) - 0.5);
// write out to global memory (in-place)
input[idx] = load16;
}
void mopsr_input_rephase_TFS (cudaStream_t stream, void * d_data, uint64_t nbytes,
unsigned nchan, unsigned nant, unsigned chan_offset)
{
const unsigned ndim = 2;
const uint64_t nval = nbytes / ndim;
const unsigned nthread = 1024;
unsigned nblocks = (unsigned) (nval / nthread);
if (nval % nthread)
nblocks++;
#ifdef _GDEBUG
fprintf (stderr, "input_rephase_TFS: bytes=%lu nval=%lu\n", nbytes, nval);
fprintf (stderr, "input_rephase_TFS: blocks=%u\n", nblocks);
#endif
input_rephase_TFS<<<nblocks, nthread, 0, stream>>>((int16_t*) d_data, nval, nchan, nant, chan_offset);
#ifdef _GDEBUG
check_error_stream("input_rephase_TFS", stream);
#endif
}
void mopsr_input_rephase_scales (cudaStream_t stream, float * h_ant_scales, size_t nbytes)
{
cudaMemcpyToSymbolAsync (d_ant_scales, (void *) h_ant_scales, nbytes, 0, cudaMemcpyHostToDevice, stream);
}
// apply a fractional delay correction to a channel / antenna, warps will always
__global__ void input_delay (int8_t * input, int8_t * output, int8_t * overlap, float * delays,
unsigned nthread_run, uint64_t ndat, const unsigned chan_stride,
const unsigned ant_stride, const unsigned ntap)
{
extern __shared__ float sdata_delay[];
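// shared memory layout: ntap filter coefficients, then blockDim.x real
// samples, then blockDim.x imaginary samples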
float * filter = sdata_delay;
float * reals = filter + ntap;
float * imags = reals + blockDim.x;
const unsigned half_ntap = (ntap / 2);
const unsigned in_offset = 2 * half_ntap;
const unsigned isamp = blockIdx.x * nthread_run + threadIdx.x;
const unsigned iant = blockIdx.y;
const unsigned nant = blockDim.y;
const unsigned ichan = blockIdx.z;
const unsigned ndim = 2;
if (isamp >= ndat)
return;
// calculate the filter coefficients for the delay
if (threadIdx.x < ntap)
{
float x = ((float) threadIdx.x - (float) half_ntap) + delays[ichan*nant + iant];
if (x == 0)
filter[threadIdx.x] = 1;
else
{
x *= M_PI;
filter[threadIdx.x] = sinf(x) / x;
}
}
// each thread must also load its data from main memory
unsigned data_idx = ichan*chan_stride + iant*ant_stride + isamp;
// the first block needs to load data from the overlap buffer, not from input block - in_offset
if (blockIdx.x == 0)
{
if (threadIdx.x < in_offset)
{
const unsigned overlap_idx = (ichan*nant*ntap + iant*ntap + isamp) * ndim;
reals[threadIdx.x] = (float) overlap[overlap_idx + 0];
imags[threadIdx.x] = (float) overlap[overlap_idx + 1];
}
else
{
reals[threadIdx.x] = (float) input[2*(data_idx - in_offset)];
imags[threadIdx.x] = (float) input[2*(data_idx - in_offset)+1];
}
}
else
{
reals[threadIdx.x] = (float) input[2*(data_idx - in_offset)];
imags[threadIdx.x] = (float) input[2*(data_idx - in_offset)+1];
}
__syncthreads();
// there are 2 * half_ntap threads that dont calculate anything
if (threadIdx.x < nthread_run)
{
float re = 0;
float im = 0;
unsigned i;
for (i=0; i<ntap; i++)
{
re += reals[i] * filter[i];
im += imags[i] * filter[i];
}
output[2*data_idx] = (int8_t) floor(re + 0.5);
output[2*data_idx+1] = (int8_t) floor(im + 0.5);
}
}
//
// Perform fractional delay correction, out-of-place
//
void mopsr_input_delay_fractional (cudaStream_t stream, void * d_in,
void * d_out, void * d_overlap,
float * d_delays, uint64_t nbytes,
unsigned nchan, unsigned nant, unsigned ntap)
{
fprintf (stderr, "mopsr_input_delay_fractional()\n");
const unsigned ndim = 2;
const uint64_t ndat = nbytes / (nchan * nant * ndim);
const unsigned half_ntap = ntap / 2;
// number of threads that actually load data
const unsigned nthread_load = 1024;
const unsigned nthread_run = nthread_load - (2 * half_ntap);
// need shared memory for the ntap filter coefficients plus nthread_load
// complex data points (separate real and imaginary float arrays)
const size_t sdata_bytes = (ntap + (2 * nthread_load)) * sizeof(float);
dim3 blocks (ndat / nthread_run, nant, nchan);
if (ndat % nthread_run)
blocks.x++;
fprintf (stderr, "mopsr_input_delay: bytes=%lu ndat=%lu\n", nbytes, ndat);
fprintf (stderr, "mopsr_input_delay: blocks.x=%d, blocks.y=%d, blocks.z=%d\n", blocks.x, blocks.y, blocks.z);
const unsigned chan_stride = nant * ndat;
const unsigned ant_stride = ndat;
input_delay<<<blocks, nthread_load, sdata_bytes, stream>>>((int8_t *) d_in, (int8_t *) d_out,
(int8_t *) d_overlap, (float *) d_delays, nthread_run, ndat, chan_stride, ant_stride, ntap);
#ifdef _GDEBUG
check_error_stream("input_delay", stream);
#endif
}
__global__ void tile_beams_kernel (int16_t * input, float * output,
float * beam_sin_thetas,
float * ant_factors,
unsigned nbeam, uint64_t ndat, unsigned nant)
{
extern __shared__ int16_t sdata_tb[];
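// shared memory: 32 * nant packed int16 complex samples, followed by the
// per-antenna factors (floats)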
float * sh_ant_factors = (float *) (sdata_tb + (32 * nant));
//const unsigned ndim = 2;
const unsigned sample = blockIdx.x * blockDim.x + threadIdx.x;
//unsigned warp_idx = threadIdx.x % WARP_SIZE;
unsigned warp_num = threadIdx.x / WARP_SIZE;
unsigned iant = warp_num;
unsigned i_idx = (iant * ndat) + sample;
unsigned s_idx = threadIdx.x;
const uint64_t in_stride = WARP_SIZE * ndat;
while (iant < nant)
{
if (sample < ndat)
{
//if ((blockIdx.x == 0) && (warp_num == 0))
// printf ("[%d][%d] reading [%d] = [%d]\n", blockIdx.x, threadIdx.x, s_idx, i_idx);
sdata_tb[s_idx] = input[i_idx];
s_idx += blockDim.x;
i_idx += in_stride;
}
iant += WARP_SIZE;
}
// load the antenna factors into shared memory
if (threadIdx.x < nant)
{
sh_ant_factors[threadIdx.x] = ant_factors[threadIdx.x];
}
// load input data to shared memory such that
// [s0t0 s0t1 s0t2 ... s0t31]
// [s1t0 s1t1 t1t2 ... s1t31]
// [ ... ]
// [s351t0 s351t1 ... s351t31]
__syncthreads();
// Form tied array beams, detecting and summing as we go,
// only use as many beams as there are threads
int8_t * sdata_tb_re = (int8_t *) sdata_tb;
int8_t * sdata_tb_im = sdata_tb_re + 1;
cuFloatComplex phasor, samp_sum;
// TODO change beam_thetas to be sin(beam_thetas on CPU)
unsigned ibeam = threadIdx.x;
if (ibeam < nbeam)
{
// a simple 1 time load from gmem, coalesced
const float sin_theta = beam_sin_thetas[ibeam];
cuFloatComplex beam_sum = make_cuComplex(0,0);
s_idx = 0;
for (unsigned iant=0; iant<nant; iant++)
{
sincosf(sin_theta * sh_ant_factors[iant], &(phasor.y), &(phasor.x));
samp_sum = make_cuComplex(0,0);
for (unsigned isamp=0; isamp<32; isamp++)
{
samp_sum.x += (float) sdata_tb_re[2*isamp];
samp_sum.y += (float) sdata_tb_im[2*isamp];
}
s_idx += 128;
beam_sum = cuCaddf( beam_sum, cuCmulf (samp_sum, phasor));
}
output[(blockIdx.x * nbeam) + ibeam] = cuCabsf(beam_sum);
}
}
#ifdef HAVE_SHFL
__device__ __forceinline__ cuFloatComplex shflComplex( cuFloatComplex r, int lane )
{
return make_cuComplex ( __shfl( r.x, lane ), __shfl( r.y, lane ) );
}
__device__ __forceinline__ cuFloatComplex shfl_xor_Complex ( cuFloatComplex r, int lane )
{
return make_cuComplex ( __shfl_xor( r.x, lane ), __shfl_xor( r.y, lane ) );
}
#endif
#ifdef EIGHT_BIT_PHASORS
__global__ void tile_beams_kernel_2048(
const __restrict__ int32_t * input, float * output, int8_t * phasors,
unsigned nbeam, unsigned ndat, unsigned nant)
#else
__global__ void tile_beams_kernel_2048(
const __restrict__ int32_t * input, float * output, float * phasors,
unsigned nbeam, unsigned ndat, unsigned nant)
#endif
{
extern __shared__ float sdata_tb_c[];
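// shared memory layout: (2 * 32 * BEAMS_PER_LOOP) floats reserved for the
// warp-integrated powers, followed by BEAMS_PER_LOOP * nant real phasors and
// then the matching imaginary phasors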
float * re_phasors = sdata_tb_c + (2 * 32 * BEAMS_PER_LOOP);
float * im_phasors = re_phasors + (nant * BEAMS_PER_LOOP);
#ifdef HAVE_SHFL
const int warp_num = threadIdx.x / WARP_SIZE;
const int warp_idx = threadIdx.x & 0x1F;
float power;
#endif
int32_t val32;
int8_t * ptr8 = (int8_t *) &val32;
const unsigned nbeamant = nbeam * nant;
//const float scale = 127.5;
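// per-beam complex accumulators for the two packed time samples handled by
// each thread (b1s: first sample of the pair, b2s: second sample)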
cuFloatComplex b1s[BEAMS_PER_LOOP];
cuFloatComplex b2s[BEAMS_PER_LOOP];
cuFloatComplex val;
// this loop computes BEAMS_PER_LOOP beams at a time for this block of samples
for (unsigned ibeam=0; ibeam<nbeam; ibeam += BEAMS_PER_LOOP)
{
unsigned ibeamant = ibeam * nant;
// use all threads in the block to load the phasors for these beams
// and all antennas into shared memory.
for (unsigned i=threadIdx.x; i<nant*BEAMS_PER_LOOP; i+=blockDim.x)
{
re_phasors[i] = (float) phasors[ibeamant + i];
im_phasors[i] = (float) phasors[nbeamant + ibeamant + i];
}
__syncthreads();
// for all the antenna, perform complex multiplications on the 4 beams
unsigned idx = (blockIdx.x * blockDim.x) + threadIdx.x;
//for (unsigned i=0; i<nbeam_loop; i++)
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
b1s[i].x = 0;
b1s[i].y = 0;
b2s[i].x = 0;
b2s[i].y = 0;
}
for (unsigned iant=0; iant<nant; iant++)
{
// load 4 x 8bit values (2 complex samples) for this time sample and antenna
val32 = input[idx];
// make a complex float from this input
//val = make_cuComplex (((float) ptr8[0]) / scale, ((float) ptr8[1]) / scale);
val = make_cuComplex ((float) ptr8[0], (float) ptr8[1]);
unsigned pidx = iant;
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
// multiply by phasor and add to the beam (yes this is a += operation)
if (ibeam == 0 && i == 0)
b1s[0].x = fmaf( val.x, val.x, fmaf (val.y, val.y, b1s[0].x));
else
b1s[i] = cuCfmaf (make_cuComplex(re_phasors[pidx], im_phasors[pidx]), val, b1s[i]);
pidx += nant;
}
//val = make_cuComplex (((float) ptr8[2] / scale), ((float) ptr8[3]) / scale);
val = make_cuComplex ((float) ptr8[2], (float) ptr8[3]);
pidx = iant;
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
// multiply by phasor and add to the beam (yes this is a += operation)
if (ibeam == 0 && i == 0)
b2s[0].x = fmaf( val.x, val.x, fmaf (val.y, val.y, b2s[0].x));
else
b2s[i] = cuCfmaf (make_cuComplex(re_phasors[pidx],im_phasors[pidx]), val, b2s[i]);
pidx += nant;
}
idx += ndat/2;
}
#ifdef HAVE_SHFL
// detect each sample and integrate across the warp (factor of 32 in time)
// this takes us from 1.28us to 81.92us
unsigned sdx = warp_num;
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
if (ibeam == 0 && i == 0)
power = b1s[i].x;
else
power = (b1s[i].x * b1s[i].x) + (b1s[i].y * b1s[i].y);
power += __shfl_down (power, 16);
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
// power now contains the integrated power for this warp (i.e. 40.96 us samples).
// we write these to shared memory in ST order. T=64, S=8
if (warp_idx == 0)
{
sdata_tb_c[sdx] = power;
}
if (ibeam == 0 && i == 0)
power = b2s[i].x;
else
power = (b2s[i].x * b2s[i].x) + (b2s[i].y * b2s[i].y);
power += __shfl_down (power, 16);
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
// power now contains the integrated power for this warp (i.e. 40.96 us samples).
// we write these to shared memory in ST order. T=32, S=8
if (warp_idx == 0)
{
sdata_tb_c[sdx] += power;
sdx += 32;
}
}
__syncthreads();
// one warp per output beam
if (warp_num < BEAMS_PER_LOOP)
{
// threads to generate 4 x 655.36 time samples from 32 x 81.92)
power = sdata_tb_c[(warp_num * WARP_SIZE) + warp_idx];
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
// warp_idxs 0, 8, 16, 24 have the 4 time samples for beam warp_num
if (warp_idx % 8 == 0)
{
const unsigned obeam = ibeam + warp_num;
const unsigned ndat_out = ndat / 512;
const unsigned osamp = warp_idx / 8;
// output is in ST format
unsigned out_idx = (obeam * ndat_out) + (blockIdx.x * 4) + osamp;
output[out_idx] = power;
}
}
#endif
}
}
// load 2048 samples per block, form beams, scrunch down x32 to write out
// 64 samples. input FST output SFT
__global__ void tile_beams_kernel_2048_32scr (
const __restrict__ int32_t * input, float * output, float * phasors,
unsigned nbeam, unsigned ndat, unsigned nant)
{
extern __shared__ float sdata_tb_c[];
//const float scale = 127.5;
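// shared memory layout: (2 * 32 * BEAMS_PER_LOOP) floats reserved for the
// warp-integrated powers, followed by BEAMS_PER_LOOP * nant real phasors and
// then the matching imaginary phasors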
float * re_phasors = sdata_tb_c + (2 * 32 * BEAMS_PER_LOOP);
float * im_phasors = re_phasors + (nant * BEAMS_PER_LOOP);
const int warp_num = threadIdx.x / WARP_SIZE;
const int warp_idx = threadIdx.x & 0x1F;
#ifdef HAVE_SHFL
float power;
#endif
int32_t val32;
int8_t * ptr8 = (int8_t *) &val32;
const unsigned nbeamant = nbeam * nant;
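// per-beam complex accumulators for the two packed time samples handled by
// each thread (b1s: first sample of the pair, b2s: second sample)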
cuFloatComplex b1s[BEAMS_PER_LOOP];
cuFloatComplex b2s[BEAMS_PER_LOOP];
cuFloatComplex val;
// shift phasors pointer by ichan * chan_stride
phasors += blockIdx.y * nant * nbeam * 2;
// shift input by ndat/2 (since int32_t *)
input += blockIdx.y * nant * (ndat / 2);
// shift output by output_ndat to align to right channel
output += blockIdx.y * (ndat / 32);
// this loop computes BEAMS_PER_LOOP beams at a time for this block of samples
for (unsigned ibeam=0; ibeam<nbeam; ibeam += BEAMS_PER_LOOP)
{
unsigned ibeamant = ibeam * nant;
// use all threads in the block to load the phasors for these beams
// and all antennas into shared memory.
for (unsigned i=threadIdx.x; i<nant*BEAMS_PER_LOOP; i+=blockDim.x)
{
re_phasors[i] = (float) phasors[ibeamant + i];
im_phasors[i] = (float) phasors[nbeamant + ibeamant + i];
}
__syncthreads();
// for all the antenna, perform complex multiplications on the 4 beams
unsigned idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
b1s[i].x = 0;
b1s[i].y = 0;
b2s[i].x = 0;
b2s[i].y = 0;
}
for (unsigned iant=0; iant<nant; iant++)
{
// load 4 x 8bit values (2 complex samples) for this time sample and antenna
val32 = input[idx];
// make a complex float from this input
//val = make_cuComplex ((float) ptr8[0] / scale, (float) ptr8[1] / scale);
val = make_cuComplex ((float) ptr8[0], (float) ptr8[1]);
unsigned pidx = iant;
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
// multiply by phasor and add to the beam (yes this is a += operation)
if (ibeam == 0 && i == 0)
b1s[0].x = fmaf( val.x, val.x, fmaf (val.y, val.y, b1s[0].x));
else
{
//b1s[i] = cuCaddf (val, b1s[i]);
b1s[i] = cuCfmaf (make_cuComplex(re_phasors[pidx], im_phasors[pidx]), val, b1s[i]);
}
//if (blockIdx.x == 0 && ibeam == 0 && i == 1 && threadIdx.x == 0)
// printf("OLD: samp=%d ant=%d antenna=(%f,%f) weight=(%f,%f) sum=(%f,%f)\n",
// 2*threadIdx.x, iant,
// val.x, val.y, re_phasors[pidx], im_phasors[pidx], b1s[i].x, b1s[i].y);
pidx += nant;
}
//val = make_cuComplex ((float) ptr8[2] / scale, (float) ptr8[3] / scale);
val = make_cuComplex ((float) ptr8[2], (float) ptr8[3]);
pidx = iant;
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
// multiply by phasor and add to the beam (yes this is a += operation)
if (ibeam == 0 && i == 0)
b2s[0].x = fmaf( val.x, val.x, fmaf (val.y, val.y, b2s[0].x));
else
{
//b2s[i] = cuCaddf (val, b2s[i]);
b2s[i] = cuCfmaf (make_cuComplex(re_phasors[pidx],im_phasors[pidx]), val, b2s[i]);
}
//if (blockIdx.x == 0 && ibeam == 0 && i == 1)
// printf("OLD: samp=%d ant=%d antenna=(%f,%f) sum=(%f,%f)\n",
// 2*threadIdx.x+1, iant,
// val.x, val.y, b2s[i].x, b2s[i].y);
pidx += nant;
}
idx += ndat/2;
}
// detect each sample and integrate across the warp (factor of 32 in time)
// this takes us from 10.24 to 327.68 us
#ifdef HAVE_SHFL
unsigned sdx = 2 * warp_num;
for (unsigned i=0; i<BEAMS_PER_LOOP; i++)
{
if (ibeam == 0 && i == 0)
power = b1s[i].x;
else
power = (b1s[i].x * b1s[i].x) + (b1s[i].y * b1s[i].y);
//if ((blockIdx.y == 0) && (ibeam == 0) && (i == 1) && (blockIdx.x == 0) && (warp_num == 0))
// printf("GPU %d %f %f %f\n", 2*threadIdx.x+0, b1s[i].x, b1s[i].y, power);
// since the consecutive samples are spread across b1 and b2
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
// power now contains the integrated power for this warp (i.e. 327.68 us
// samples). Write these to shared memory in ST order. T=64, S=8
if (warp_idx == 0 || warp_idx == 16)
{
sdata_tb_c[sdx + warp_idx/16] = power; // sample sdx + 0 and 1
}
if (ibeam == 0 && i == 0)
power = b2s[i].x;
else
power = (b2s[i].x * b2s[i].x) + (b2s[i].y * b2s[i].y);
//if ((blockIdx.y == 0) && (ibeam == 0) && (i == 1) && (blockIdx.x == 0) && (warp_num == 0))
// printf("GPU %d %f %f %f\n", 2*threadIdx.x+1, b2s[i].x, b2s[i].y, power);
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
if (warp_idx == 0 || warp_idx == 16)
{
sdata_tb_c[sdx+warp_idx/16] += power;
//if ((blockIdx.y == 0) && (ibeam == 0) && (i == 1) && (blockIdx.x == 0) && (warp_num == 0))
// printf("GPU %d sdata_tb_c[%d]==%f\n", warp_idx, sdx+warp_idx/16, sdata_tb_c[sdx+warp_idx/16]);
sdx += 64;
}
}
__syncthreads();
#endif
// there are now 8 beams * (2 * 32) samples in SHM to write
// out to gmem, using 1 warp per beam
if (warp_num < BEAMS_PER_LOOP)
{
// warp_num * 64 + warp_idx (beam within this loop)
unsigned sdx = (warp_num * WARP_SIZE * 2) + warp_idx;
const unsigned obeam = ibeam + warp_num;
// obeam * nchan * ndat_out + osamp_block + osamp
unsigned odx = (obeam * gridDim.y * (ndat / 32)) + (blockIdx.x * 64) + warp_idx;
// write out the 64 samples for this beam
output[odx] = sdata_tb_c[sdx];
output[odx+32] = sdata_tb_c[sdx+32];
}
}
}
/*
__host__ __device__
cuFloatComplex ComplexInt8_to_cuFloatComplex(ComplexInt8 input)
{
return make_cuFloatComplex((float)(input.x),(float)(input.y));
}
__host__ __device__
cuFloatComplex char2_to_cuFloatComplex(char2 input)
{
return make_cuFloatComplex(float(input.x)/127.0,float(input.y)/127.0);
}
// each block processes 1024 samples, producing 32 output samples
// and 32 beams
// each warp converts 32 samples -> 1 sample, and each block
// therefore processes 32 x 32 = 1024 samples
__global__ void tile_beams_kernel_32scr_TS (
const int * __restrict__ input,
float * output,
const half2 * __restrict__ phasors,
unsigned nbeam, unsigned ndat, unsigned nant)
{
extern __shared__ half2 shared_phasors[];
const int warp_idx = threadIdx.x / WARP_SIZE; // isamp
const int lane_idx = threadIdx.x & 0x1F; // ibeam
// the beam this thread is accumulating
const unsigned ibeam = (blockIdx.z * WARP_SIZE) + lane_idx;
// beam-forming phasors offset for this block
int phasors_offset = (blockIdx.y * nant * nbeam) + ibeam;
int shared_phasors_offset = lane_idx;
// each iteration of the loop, loads phasors for these beams + ants
// phasors are read/stored in ant,beam order
for (int iant = lane_idx; iant < nant; iant+=WARP_SIZE)
{
// storage iant * nbeam + ibeam
//shared_phasors[shared_phasors_offset] = phasors[phasors_offset];
shared_phasors[shared_phasors_offset] = __floats2half2_rn(1.0f, 0.0f);
phasors_offset += nbeam;
shared_phasors_offset += WARP_SIZE;
}
__syncthreads();
// output sample offset, each warp loads a set of 32 samples that are condensed to 1 sample
const int osamp = (blockIdx.x * NWARPS_PER_BLOCK) + warp_idx;
// input sample offset /2 since loading 2 antenna at a time
int sample_offset = NSUM * osamp / 2;
// accumulated output power
float power = 0.0f;
int two_samples;
// pointer to access antenna
// TODO check whether other_ant is really necessary
char4 * samples = (char4 *) &two_samples;
// samples to be processed
for (int isamp = sample_offset; isamp < sample_offset + (NSUM/2); ++isamp)
{
// compute the offset of this block on the input
// channel offset + samp offset
// (blockIdx.y * nant * nsamp)/2 + (isamp * nant)/2 + lane_idx;
const int idx = nant * (blockIdx.y * ndat + isamp) / 2 + lane_idx;
// set the complex accumulator for the tied-array beam
cuFloatComplex tb = make_cuComplex(0.0f, 0.0f);
// loop over nant (/2 since dual load)
for (int iant=0; iant<nant; iant+=WARP_SIZE)
{
// load 2 complex samples at once
two_samples = input[idx + iant];
cuFloatComplex sample1 = make_cuFloatComplex(float(samples->x),float(samples->y));
cuFloatComplex sample2 = make_cuFloatComplex(float(samples->z),float(samples->w));
cuFloatComplex sample;
//if (blockIdx.y == 0 && blockIdx.x == 0 && blockIdx.z == 0 && warp_idx == 0 && iant < 64)
// printf("[%d][%d] load isamp=%d offset=%d\n", threadIdx.x, iant, isamp, idx + iant);
// other antenna pair for shuffling
//int other_two_samples;
// use warp shuffle to sum these antenna into the TB result and share
for (int other_lane_idx=0; other_lane_idx < WARP_SIZE; ++other_lane_idx)
{
cuFloatComplex weight = __half22float2(shared_phasors[iant*WARP_SIZE+lane_idx]);
//cuFloatComplex weight = make_cuFloatComplex(1.0f,0.0f);
#ifdef HAVE_SHFL
// shuffle the first sample across
sample.x = __shfl(sample1.x, other_lane_idx);
sample.y = __shfl(sample1.y, other_lane_idx);
//other_two_samples = __shfl(two_samples, other_lane_idx);
#endif
// first antenna of the pair (beam 0 just accumulates detected power)
if (ibeam == 0)
  tb.x = fmaf( sample.x, sample.x, fmaf (sample.y, sample.y, tb.x));
else
tb = cuCfmaf(sample, weight, tb);
#ifdef HAVE_SHFL
sample.x = __shfl(sample2.x, other_lane_idx);
sample.y = __shfl(sample2.y, other_lane_idx);
#endif
// second antenna of the pair
if (ibeam == 0)
  tb.x = fmaf( sample.x, sample.x, fmaf (sample.y, sample.y, tb.x));
else
tb = cuCfmaf(sample, weight, tb);
//if (blockIdx.y == 0 && blockIdx.x == 0 && blockIdx.z == 0 && ibeam == 1 && sample_offset == 0)
// printf("NEW: samp=%d iant=%d sample=(%d,%d) weight=(%f,%f) sum=(%f,%f)\n",
// 2*isamp, iant+other_lane_idx, samples->x, samples->y, weight.x, weight.y, tb.x, tb.y);
//if (blockIdx.y == 0 && blockIdx.x == 0 && blockIdx.z == 0 && ibeam == 1 && sample_offset == 0)
// printf("NEW: samp=%d iant=%d sample=(%d,%d) weight=(%f,%f) sum=(%f,%f)\n",
// 2*isamp+1, iant+other_lane_idx, samples->w, samples->z, weight.x, weight.y, tb.x, tb.y);
}
}
// now that all antennae are summed, detect and accumulate the power for this sample / beam
float amplitude;
if (ibeam == 0)
amplitude = tb.x;
else
amplitude = (tb.x * tb.x) + (tb.y * tb.y);
power += amplitude;
// if (blockIdx.y == 0 && blockIdx.x == 0 && blockIdx.z == 0 && ibeam == 1 && threadIdx.x == 1)
// printf("NEW amplitude=%f power=%f\n", amplitude, power);
}
// the warp_idx corresponds to the output sample
// the lane_idx corresponds to the output beam
// order SFT for Fan Beams
// (ibeam * nchan * n_osamp) + (ichan * n_osamp) + osamp
//unsigned odx = (ndat/32) * (ibeam * gridDim.y + blockIdx.y) + osamp;
unsigned odx = threadIdx.x;
//if (blockIdx.y == 0 && blockIdx.x == 0 && blockIdx.z == 0 && ibeam == 1 && threadIdx.x == 1)
//printf("OUTPUT %u %u %u %f ibeam=%u nchan=%u\n", odx, blockIdx.x, threadIdx.x, power, ibeam, gridDim.y);
output[odx] = power;
}
*/
void mopsr_tile_beams_precomp (cudaStream_t stream, void * d_in, void * d_fbs, void * d_phasors,
uint64_t bytes, unsigned nbeam, unsigned nant, unsigned nchan)
{
const unsigned ndim = 2;
const uint64_t ndat = bytes / (nchan * nant * ndim);
const unsigned nthread = 1024;
const unsigned nbeam_block = BEAMS_PER_LOOP;
unsigned ndat_per_block = 2048;
dim3 blocks = dim3 (ndat / ndat_per_block, nchan, 1);
//unsigned nblocks = ndat / ndat_per_block;
if (ndat % ndat_per_block)
fprintf (stderr, "WARNING: ndat not divisible by %d\n", ndat_per_block);
size_t sdata_bytes = (nbeam_block * (nthread/WARP_SIZE) * sizeof(float) * 2) +
(nbeam_block * nant * sizeof (float _Complex));
#ifdef _GDEBUG
fprintf (stderr, "bytes=%lu ndat=%lu blocks=(%u,%u,%u) threads=%u shm=%ld\n", bytes, ndat, blocks.x, blocks.y, blocks.z, nthread, sdata_bytes);
#endif
#ifdef EIGHT_BIT_PHASORS
tile_beams_kernel_2048<<<blocks, nthread, sdata_bytes, stream>>>((int32_t *) d_in, (float *) d_fbs, (int8_t *) d_phasors, nbeam, ndat, nant);
#else
#ifdef HIRES
tile_beams_kernel_2048_32scr<<<blocks, nthread, sdata_bytes, stream>>>((int32_t *) d_in, (float *) d_fbs, (float *) d_phasors, nbeam, ndat, nant);
#else
tile_beams_kernel_2048<<<blocks, nthread, sdata_bytes, stream>>>((int32_t *) d_in, (float *) d_fbs, (float *) d_phasors, nbeam, ndat, nant);
#endif
#endif
#ifdef _GDEBUG
check_error_stream("tile_beams_kernel_2048", stream);
#endif
}
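// Shared-memory sizing check for the launch above (illustrative values only:
// BEAMS_PER_LOOP and nant are not fixed in this excerpt; assume BEAMS_PER_LOOP = 8
// and nant = 352 for the arithmetic): with nthread = 1024 and WARP_SIZE = 32,
// sdata_bytes = 8 * (1024/32) * 4 * 2 + 8 * 352 * 8 = 2048 + 22528 = 24576 bytes,
// i.e. 24 KB, within the default 48 KB dynamic shared-memory limit per block.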
/*
// assumes data has been transposed to FTS order
void mopsr_tile_beams_transpose (cudaStream_t stream, void * d_in, void * d_fbs, void * d_phasors,
uint64_t bytes, unsigned nbeam, unsigned nant, unsigned nchan)
{
const unsigned ndim = 2;
const uint64_t ndat = bytes / (nchan * nant * ndim);
const unsigned nthread = 1024;
const unsigned nwarps_per_block = nthread / WARP_SIZE;
const unsigned nbeam_per_block = 32;
const uint64_t ndat_per_block = nwarps_per_block * 32;
dim3 blocks = dim3 (ndat / ndat_per_block, nchan, nbeam / nbeam_per_block);
if (ndat % ndat_per_block)
fprintf (stderr, "WARNING: ndat not divisible by %d\n", ndat_per_block);
if (nbeam % 32)
fprintf (stderr, "WARNING: nbeam not divisible by 32\n");
size_t sdata_bytes = 32 * nant * sizeof(half2);
#ifdef _GDEBUG
fprintf (stderr, "mopsr_tile_beams_transpose: bytes=%lu ndat=%lu blocks=(%u,%u,%u) threads=%u shm=%ld\n", bytes, ndat, blocks.x, blocks.y, blocks.z, nthread, sdata_bytes);
#endif
#ifdef HIRES
tile_beams_kernel_32scr_TS<<<blocks, nthread, sdata_bytes, stream>>>((int *) d_in, (float *) d_fbs, (half2 *) d_phasors, nbeam, ndat, nant);
#endif
#ifdef _GDEBUG
check_error_stream("tile_beams_kernel_32scr_TS", stream);
#endif
}
*/
__global__ void tie_beam_kernel (int16_t * in, cuFloatComplex * out, cuFloatComplex * d_phasors, uint64_t ndat, unsigned nant)
{
extern __shared__ cuFloatComplex s_phasors[];
const unsigned ichan = blockIdx.y;
int16_t val16;
int8_t * ptr8 = (int8_t *) &val16;
cuFloatComplex tied_beam = make_cuComplex(0.0,0.0);
// load nant phasors for the ichan
if (threadIdx.x < nant)
{
s_phasors[threadIdx.x] = d_phasors[ichan*nant + threadIdx.x];
}
__syncthreads();
// idat
const unsigned idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= ndat)
return;
//const uint64_t out_off = ndat * ichan;
// output in TF order (isamp * nchan) + ichan
const uint64_t odx = idx * gridDim.y + ichan;
const uint64_t in_off = ndat * ichan * nant;
// increment to the right channel
in += in_off;
//out += out_off;
// step through the antennas, forming tied array beam
for (unsigned iant=0; iant<nant; iant++)
{
val16 = in[idx];
// make a complex float from this input
cuFloatComplex val = make_cuComplex ((float) ptr8[0], (float) ptr8[1]);
tied_beam = cuCfmaf (s_phasors[iant], val, tied_beam);
in += ndat;
}
// output ordering is good!
//out[idx] = tied_beam;
out[odx] = tied_beam;
}
/*
* create/steer a tied-array beam to the target position
*/
void mopsr_tie_beam (cudaStream_t stream, void * d_in, void * d_out, void * d_phasors,
uint64_t bytes, unsigned nant, unsigned nchan)
{
const unsigned ndim = 2;
const uint64_t ndat = bytes / (nchan * nant * ndim);
// input order is FST, output is FT
unsigned nthreads = 1024;
dim3 blocks = dim3 (ndat / nthreads, nchan, 1);
size_t shm_bytes = nant * sizeof(cuFloatComplex);
if (ndat % nthreads)
blocks.x++;
#ifdef _GDEBUG
fprintf (stderr, "bytes=%lu ndat=%lu nthreads=%u blocks=(%u,%u,%u) shm_bytes=%ld\n",
bytes, ndat, nthreads, blocks.x, blocks.y, blocks.z, shm_bytes);
#endif
tie_beam_kernel<<<blocks, nthreads, shm_bytes, stream>>>((int16_t *) d_in, (cuFloatComplex *) d_out, (cuFloatComplex *) d_phasors, ndat, nant);
#ifdef _GDEBUG
check_error_stream("tie_beam_kernel", stream);
#endif
}
// integrate 64 samples together from each antenna
__global__ void mod_beam_kernel_64 (int16_t * in, float * out, uint64_t ndat, unsigned nant)
{
extern __shared__ float block_power_sums[];
const unsigned warp_num = threadIdx.x / WARP_SIZE;
const unsigned warp_idx = threadIdx.x % WARP_SIZE;
// offset [ant_offset + block_offset]
const unsigned offset = (blockIdx.y * ndat) + (blockIdx.x * blockDim.x);
const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= ndat)
return;
int16_t val16;
int8_t * ptr8 = (int8_t *) &val16;
// load the value from this time sample into a local variable
val16 = in[offset + threadIdx.x];
// make a complex float from this input
cuFloatComplex val = make_cuComplex ((float) ptr8[0], (float) ptr8[1]);
// detect
float power = val.x * val.x + val.y * val.y;
// add all of the time samples from this warp together
#ifdef HAVE_SHFL
power += __shfl_down (power, 16);
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
#endif
if (warp_idx == 0)
block_power_sums[warp_num] = power;
__syncthreads();
if (warp_num == 0)
{
power = block_power_sums[warp_idx];
#ifdef HAVE_SHFL
power += __shfl_down (power, 16);
#endif
if (warp_idx < 16)
{
out[offset/64 + warp_idx] = power;
}
}
}
// integrate 512 time samples together from each antenna
__global__ void mod_beam_kernel_512 (int16_t * in, float * out, uint64_t ndat)
{
extern __shared__ float block_power_sums[];
const unsigned warp_num = threadIdx.x / WARP_SIZE;
const unsigned warp_idx = threadIdx.x % WARP_SIZE;
// offset [ant_offset + block_offset]
const unsigned offset = (blockIdx.z * ndat) + (blockIdx.x * blockDim.x);
const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= ndat)
return;
int16_t val16;
int8_t * ptr8 = (int8_t *) &val16;
// load the value from this time sample into a local variable
val16 = in[offset + threadIdx.x];
// make a complex float from this input
cuFloatComplex val = make_cuComplex ((float) ptr8[0], (float) ptr8[1]);
// detect
float power = val.x * val.x + val.y * val.y;
// add all of the time samples from this warp together
#ifdef HAVE_SHFL
power += __shfl_down (power, 16);
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
#endif
if (warp_idx == 0)
block_power_sums[warp_num] = power;
__syncthreads();
if (warp_num == 0)
{
power = block_power_sums[warp_idx];
#ifdef HAVE_SHFL
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
#endif
if (warp_idx == 0 || warp_idx == 16)
{
out[offset/512 + warp_idx/16] = power;
}
}
}
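// Reduction shape for the kernel above: 32 warps each sum 32 detected samples,
// warp 0 then reduces the 32 partial sums so that lanes 0 and 16 each hold the
// sum of 16 partials, i.e. 16 * 32 = 512 input samples per output value; a
// 1024-thread block therefore emits 2 output samples.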
// integrate 32 time samples together from each antenna, FST -> SFT
__global__ void mod_beam_kernel_32 (int16_t * in, float * out, uint64_t ndat)
{
extern __shared__ float block_power_sums[];
const unsigned warp_num = threadIdx.x / WARP_SIZE;
const unsigned warp_idx = threadIdx.x % WARP_SIZE;
const unsigned ichan = blockIdx.y;
const unsigned nchan = gridDim.y;
const unsigned iant = blockIdx.z;
const unsigned nant = gridDim.z;
// input sample number
const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= ndat)
return;
const uint64_t ndat_out = ndat / 32;
// input offset in FST [chan_offset + ant_offset + block_offset]
const unsigned in_offset = (ichan * ndat * nant) + (iant * ndat) + (blockIdx.x * blockDim.x);
// output offset in SFT [ant_offset + chan_offset + block_offset]
const unsigned out_offset = (iant * nchan * ndat_out) + (ichan * ndat_out) + (blockIdx.x * blockDim.x/32);
int16_t val16;
int8_t * ptr8 = (int8_t *) &val16;
// load the value from this time sample into a local variable
val16 = in[in_offset + threadIdx.x];
// make a complex float from this input
cuFloatComplex val = make_cuComplex ((float) ptr8[0], (float) ptr8[1]);
// detect
float power = val.x * val.x + val.y * val.y;
// add all of the time samples from this warp together
#ifdef HAVE_SHFL
power += __shfl_down (power, 16);
power += __shfl_down (power, 8);
power += __shfl_down (power, 4);
power += __shfl_down (power, 2);
power += __shfl_down (power, 1);
#endif
if (warp_idx == 0)
block_power_sums[warp_num] = power;
__syncthreads();
if (warp_num == 0)
out[out_offset + warp_idx] = block_power_sums[warp_idx];
}
void mopsr_mod_beams (cudaStream_t stream, void * d_in, void * d_out, uint64_t bytes,
unsigned nant, unsigned nchan, unsigned tdec)
{
const unsigned ndim = 2;
const uint64_t ndat = bytes / (nchan * nant * ndim);
unsigned threads = 1024;
dim3 blocks (ndat / threads, nchan, nant);
size_t shm_bytes = sizeof(float) * WARP_SIZE;
if (ndat % threads)
blocks.x++;
#ifdef _GDEBUG
fprintf (stderr, "ndat=%lu threads=%u blocks=(%u,%u)\n", ndat, threads, blocks.x, blocks.y);
#endif
if (tdec == 512)
mod_beam_kernel_512<<<blocks, threads, shm_bytes, stream>>>((int16_t *) d_in, (float *) d_out, ndat);
else if (tdec == 32)
mod_beam_kernel_32<<<blocks, threads, shm_bytes, stream>>>((int16_t *) d_in, (float *) d_out, ndat);
else
fprintf (stderr, "mopsr_mod_beams: unrecognized TDEC\n");
}
__global__ void mopsr_transpose_BFST_FST_kernel (int16_t * in, int16_t * out, unsigned ndat)
{
const unsigned idat = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned in_chan_stride = ndat * 8;
// iblock * nchan * in_chan_stride + ichan * in_chan_stride + idat
uint64_t idx = (blockIdx.z * gridDim.y * in_chan_stride) + (blockIdx.y * in_chan_stride) + idat;
// ndat * nant (total)
const unsigned out_chan_stride = ndat * gridDim.z * 8;
// ichan * out_chan_stride + iant * out_ant_stride + idat
uint64_t odx = (blockIdx.y * out_chan_stride) + (blockIdx.z * 8 * ndat) + idat;
for (unsigned i=0; i<8; i++)
{
out[odx] = in[idx];
idx += ndat;
odx += ndat;
}
}
/* transpose from FST (in blocks of 8 antennas) to FST */
void mopsr_transpose_BFST_FST (cudaStream_t stream, void * d_in, void * d_out, uint64_t bytes,
unsigned nant, unsigned nchan)
{
const unsigned ndim = 2;
const uint64_t ndat = bytes / (nchan * nant * ndim);
unsigned nthreads = 1024;
dim3 blocks (ndat / nthreads, nchan, nant / 8);
mopsr_transpose_BFST_FST_kernel<<<blocks, nthreads, 0, stream>>>((int16_t *) d_in, (int16_t *) d_out, ndat);
}
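// Index mapping performed by mopsr_transpose_BFST_FST_kernel above: for antenna
// block b (= blockIdx.z, 8 antennas per block), channel c (= blockIdx.y) and
// antenna a within the block, input element ((b * nchan + c) * 8 + a) * ndat + idat
// is written to output element (c * nant + b * 8 + a) * ndat + idat, i.e. FST
// ordered over all nant antennas.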
/*
using namespace half_float;
void mopsr_convert_float_to_half(void * in, void * out, size_t num)
{
float * in_ptr = (float *) in;
half_float::half * out_ptr = (half_float::half *) out;
for (unsigned i=0; i<num; i++)
{
out_ptr[i] = half_cast<half_float::half,std::numeric_limits<float>::round_style>(in_ptr[i]);
}
}
*/
|
6f43d307e95425d5d99dcc5fd6de89fd609f65e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/device_functions.h>
#include "ma.h"
// Parameters for CUDA kernel executions
#define BLOCKX 16
#define BLOCKY 16
#define BLOCKSIZE 64
#define TILE_DIM 32
#define BLOCK_ROWS 8
/****** Global Variables *******/
short2 **maTextures; // Six textures used to compute 2D Voronoi Diagram
int *dindex,*dreindex;
int *hindex,*hreindex;
int *pnt,*npnt,*pppl,*icpl;
int maMemSize; // Size (in bytes) of a texture
int maTexSize; // Texture size (squared texture)
texture<short2> maTexColor;
texture<short2> maTexLinks;
texture<short2> maTexP1;
texture<short2> maTexIcp;
texture<short2> maTexPpp;
texture<int> maTexIndex;
texture<int> maTexReindex;
texture<int> maTexPnt;
texture<int> maTexNpnt;
texture<int> maTexPppl;
texture<int> maTexIcpl;
/********* Kernels ********/
#include "maKernel.h"
///////////////////////////////////////////////////////////////////////////
//
// Initialize necessary memory for 2D Voronoi Diagram computation
// - textureSize: The size of the Discrete Voronoi Diagram (width = height)
//
///////////////////////////////////////////////////////////////////////////
void makePattern(int step)
{
int mstep=step,mstep2;
int num=0;
for (int i=0;i<maTexSize;i+=step)
{
hindex[i]=num;
hreindex[num]=i;
num++;
}
while(mstep>=2)
{
mstep2 = mstep/2;
for (int i=mstep2;i<maTexSize;i+=mstep)
{
hindex[i]=num;
hreindex[num]=i;
num++;
}
mstep = mstep2;
}
hipMemcpy(dindex,hindex,maTexSize*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dreindex,hreindex,maTexSize*sizeof(int),hipMemcpyHostToDevice);
}
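// Example: for maTexSize = 8 and step = 4 the loops above visit indices in the
// order 0,4, 2,6, 1,3,5,7, giving
//   hindex   = {0, 4, 2, 5, 1, 6, 3, 7}   (index -> rank)
//   hreindex = {0, 4, 2, 6, 1, 3, 5, 7}   (rank -> index)
// so hreindex is the inverse permutation of hindex.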
void maInitialization(int textureSize)
{
maTexSize = textureSize;
maMemSize = maTexSize *( maTexSize) * sizeof(short2) ;
maTextures = (short2 **) malloc(6 * sizeof(short2 *));
// Allocate 6 textures
hipMalloc((void **) &maTextures[0], maMemSize);
hipMalloc((void **) &maTextures[1], maMemSize);
hipMalloc((void **) &maTextures[2], maMemSize);
hipMalloc((void **) &maTextures[3], maMemSize);
hipMalloc((void **) &maTextures[4], maMemSize);
hipMalloc((void **) &maTextures[5], maMemSize);
hipMalloc((void **) &dindex, textureSize*sizeof(int));
hipMalloc((void **) &dreindex, textureSize*sizeof(int));
hipMalloc((void **) &pnt, textureSize*sizeof(int));
hipMalloc((void **) &npnt, textureSize*sizeof(int));
hipMalloc((void **) &pppl, textureSize*sizeof(int));
hipMalloc((void **) &icpl, textureSize*sizeof(int));
hindex = (int *) malloc(maTexSize*sizeof(int));
hreindex = (int *) malloc(maTexSize*sizeof(int));
hipMemset(icpl,-1,textureSize*sizeof(int));
hipMemset(maTextures[2],127,maMemSize);
}
///////////////////////////////////////////////////////////////////////////
//
// Deallocate all allocated memory
//
///////////////////////////////////////////////////////////////////////////
void maDeInitialization()
{
hipFree(maTextures[0]);
hipFree(maTextures[1]);
hipFree(maTextures[2]);
hipFree(maTextures[3]);
hipFree(maTextures[4]);
hipFree(maTextures[5]);
free(maTextures);
hipFree(dindex);
hipFree(dreindex);
hipFree(pnt);
hipFree(npnt);
hipFree(pppl);
hipFree(icpl);
}
// Copy input to GPU
void maInitializeInput(short *input)
{
hipMemcpy(maTextures[0], input, maMemSize, hipMemcpyHostToDevice);
}
// In-place transpose a squared texture.
// Block orders are modified to optimize memory access.
// Point coordinates are also swapped.
/*void maTranspose(short2 *texture)
{
dim3 block(TILE_DIM, BLOCK_ROWS);
dim3 grid(maTexSize / TILE_DIM, maTexSize / TILE_DIM);
hipBindTexture(0, maTexColor, texture);
maKernelTranspose<<< grid, block >>>(texture, maTexSize);
hipUnbindTexture(maTexColor);
}*/
void maTranspose(short2 *texture)
{
short *tmp1 = (short *)malloc(maMemSize);
short *tmp2 = (short *)malloc(maMemSize);
hipMemcpy(tmp1,texture,maMemSize,hipMemcpyDeviceToHost);
for (int i=0;i<maTexSize;i++)
{
for (int j=0;j<maTexSize;j++)
{
int idx1 = i + j*maTexSize;
int idx2 = j + i*maTexSize;
tmp2[idx2*2] = tmp1[idx1*2+1];
tmp2[idx2*2+1] = tmp1[idx1*2];
}
}
hipMemcpy(texture,tmp2,maMemSize,hipMemcpyHostToDevice);
// release the host staging buffers to avoid leaking on every call
free(tmp1);
free(tmp2);
}
// Phase 1 of PBA. m1 must divide the texture size
void maPhase1()
{
dim3 block = dim3(BLOCKSIZE);
dim3 grid = dim3((maTexSize / block.x));
// Flood vertically in their own bands
hipBindTexture(0, maTexColor, maTextures[0]);
hipLaunchKernelGGL(( maKernelFloodDown), dim3(grid), dim3(block) , 0, 0, maTextures[1], maTexSize);
hipUnbindTexture(maTexColor);
hipBindTexture(0, maTexColor, maTextures[1]);
hipLaunchKernelGGL(( maKernelFloodUp), dim3(grid), dim3(block) , 0, 0, maTextures[1], maTexSize);
hipUnbindTexture(maTexColor);
}
// Phase 2 of PBA. m2 must divide the texture size
void maPhase2(int STEP)
{
int step = STEP;
makePattern(step);
hipBindTexture(0,maTexIndex,dindex);
hipBindTexture(0,maTexReindex,dreindex);
hipBindTexture(0,maTexColor,maTextures[1]);
dim3 block,grid;
block = dim3(BLOCKSIZE);
grid = dim3(((maTexSize/STEP)/block.x) + 1 );
hipLaunchKernelGGL(( maKernelColorInit1), dim3(grid),dim3(block), 0, 0, maTextures[2],icpl,maTexSize,0,step);
hipUnbindTexture(maTexColor);
hipBindTexture(0,maTexColor,maTextures[2]);
hipBindTexture(0,maTexIcpl,icpl);
hipLaunchKernelGGL(( maKernelColorInit2), dim3(grid),dim3(block), 0, 0, maTextures[0],maTexSize,0,step);
hipUnbindTexture(maTexIcpl);
hipUnbindTexture(maTexColor);
while (step>=2)
{
int step2 = step/2;
int taskNum = (maTexSize - step2)/step;
taskNum++;
block = dim3(BLOCKSIZE);
grid = dim3((taskNum/BLOCKSIZE) );
hipBindTexture(0,maTexColor,maTextures[1]);
hipBindTexture(0,maTexLinks,maTextures[0]);
hipLaunchKernelGGL(( maKernelColor1), dim3(grid),dim3(block), 0, 0, maTextures[0],maTextures[3],pnt,maTextures[4],npnt,maTexSize,step2,step);
hipUnbindTexture(maTexLinks);
hipUnbindTexture(maTexColor);
block = dim3(BLOCKSIZE);
grid = dim3((taskNum/BLOCKSIZE) );
hipBindTexture(0,maTexNpnt,npnt);
hipBindTexture(0,maTexPnt,pnt);
hipBindTexture(0,maTexIcpl,icpl);
hipBindTexture(0,maTexColor,maTextures[3]);
hipBindTexture(0,maTexLinks,maTextures[4]);
hipBindTexture(0,maTexP1,maTextures[1]);
hipBindTexture(0,maTexIcp,maTextures[2]);
hipLaunchKernelGGL(( maKernelColor15), dim3(grid),dim3(block), 0, 0, maTextures[5],pppl,maTexSize,step2,step);
hipUnbindTexture(maTexNpnt);
hipUnbindTexture(maTexPnt);
hipUnbindTexture(maTexColor);
hipUnbindTexture(maTexLinks);
hipUnbindTexture(maTexIcp);
hipUnbindTexture(maTexIcpl);
hipUnbindTexture(maTexP1);
block = dim3(BLOCKSIZE);
grid = dim3((taskNum/BLOCKSIZE) );
hipBindTexture(0,maTexPppl,pppl);
hipBindTexture(0,maTexPpp,maTextures[5]);
hipLaunchKernelGGL(( maKernelColor2), dim3(grid),dim3(block), 0, 0, maTextures[2],icpl,maTexSize,step2,step);
hipUnbindTexture(maTexPppl);
hipUnbindTexture(maTexPpp);
block = dim3(BLOCKSIZE);
grid = dim3((taskNum/BLOCKSIZE) );
hipBindTexture(0,maTexIcp,maTextures[2]);
hipBindTexture(0,maTexIcpl,icpl);
hipBindTexture(0,maTexColor,maTextures[3]);
hipBindTexture(0,maTexPnt,pnt);
hipLaunchKernelGGL(( maKernelColor3), dim3(grid),dim3(block), 0, 0, maTextures[0],maTexSize,step2,step);
hipUnbindTexture(maTexColor);
hipUnbindTexture(maTexIcp);
hipUnbindTexture(maTexIcpl);
hipUnbindTexture(maTexPnt);
step = step2;
}
hipUnbindTexture(maTexIndex);
hipUnbindTexture(maTexReindex);
/**
int step = STEP;
hipBindTexture(0,maTexColor,maTextures[1]);
hipBindTexture(0,maTexIndex,index);
dim3 block,grid;
block = dim3(BLOCKSIZE);
grid = dim3(((maTexSize/STEP)/block.x)+1);
maKernelColorInit<<<grid,block>>>(maTextures[0],maTextures[2],maTexSize,0,step);
while(step>=2)
{
int step2 = step/2;
int taskNum = (maTexSize-step2)/step;
taskNum+=1;
block = dim3(BLOCKSIZE);
grid = dim3((taskNum/BLOCKSIZE)+1);
hipBindTexture(0,maTexLinks,maTextures[0]);
maKernelColorLine<<<grid,block>>>(maTextures[0],maTextures[2],maTexSize,step2,step);
hipUnbindTexture(maTexLinks);
step=step2;
}
hipUnbindTexture(maTexColor);
hipUnbindTexture(maTexIndex);
/**/
/**
hipBindTexture(0,maTexColor,maTextures[1]);
dim3 block,grid;
block = dim3(BLOCKSIZE);
grid = dim3(maTexSize/block.x);
maKernelTest1<<<grid,block>>>(maTextures[2],index,maTexSize);
hipBindTexture(0,maTexLinks,maTextures[2]);
hipBindTexture(0,maTexIndex,index);
maKernelTest2<<<grid,block>>>(maTextures[0],maTexSize);
hipUnbindTexture(maTexColor);
hipUnbindTexture(maTexLinks);
hipUnbindTexture(maTexIndex);/**/
}
void maCompute(int STEP,short* output)
{
//hipMemcpy(xx, maTextures[0], maMemSize, hipMemcpyDeviceToHost);
maPhase1();
//hipMemcpy(xx, maTextures[1], maMemSize, hipMemcpyDeviceToHost);
maTranspose(maTextures[1]);
// hipMemcpy(xx, maTextures[1], maMemSize, hipMemcpyDeviceToHost);
maPhase2(STEP);
maTranspose(maTextures[0]);
}
void rerange(short *outData)
{
short *tmp = (short *)malloc(maMemSize);
for (int i=0;i<maTexSize;i++)
{
int k = hreindex[i];
for (int j=0;j<maTexSize;j++)
{
int idtmp = j + k*maTexSize;
int id = j+ i*maTexSize;
tmp[idtmp*2] = outData[id*2];
tmp[idtmp*2+1] = outData[id*2+1];
}
}
memcpy(outData,tmp,maMemSize);
// release the temporary buffer
free(tmp);
}
// Compute 2D Voronoi diagram
// Input: a 2D texture. Each pixel is represented as two "short" integers.
// For each site at (x, y), the pixel at coordinate (x, y) should contain
// the pair (x, y). Pixels that are not sites should contain the pair (MARKER, MARKER)
// See original paper for the effect of the three parameters:
// phase1Band, phase2Band, phase3Band
// Parameters must divide textureSize
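// Minimal host-side usage sketch (illustrative; assumes MARKER is the non-site
// sentinel defined in ma.h and that the (x, y) packing matches the comment above):
//   int n = 256, x = 10, y = 20;
//   short *in  = (short *) malloc(n * n * 2 * sizeof(short));
//   short *out = (short *) malloc(n * n * 2 * sizeof(short));
//   for (int i = 0; i < n * n; i++) { in[2*i] = MARKER; in[2*i+1] = MARKER; }
//   in[2 * (y * n + x)] = x;  in[2 * (y * n + x) + 1] = y;  // one site at (x, y)
//   maInitialization(n);
//   ma(in, out, 16);   // STEP = 16 divides n
//   maDeInitialization();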
void ma(short *input, short *output, int STEP)
{
// Initialization
maInitializeInput(input);
makePattern(STEP);
// Computation
maCompute(STEP,output);
// Copy back the result
hipMemcpy(output, maTextures[0], maMemSize, hipMemcpyDeviceToHost);
rerange(output);
} | 6f43d307e95425d5d99dcc5fd6de89fd609f65e3.cu | #include <device_functions.h>
#include "ma.h"
// Parameters for CUDA kernel executions
#define BLOCKX 16
#define BLOCKY 16
#define BLOCKSIZE 64
#define TILE_DIM 32
#define BLOCK_ROWS 8
/****** Global Variables *******/
short2 **maTextures; // Six textures used to compute 2D Voronoi Diagram
int *dindex,*dreindex;
int *hindex,*hreindex;
int *pnt,*npnt,*pppl,*icpl;
int maMemSize; // Size (in bytes) of a texture
int maTexSize; // Texture size (squared texture)
texture<short2> maTexColor;
texture<short2> maTexLinks;
texture<short2> maTexP1;
texture<short2> maTexIcp;
texture<short2> maTexPpp;
texture<int> maTexIndex;
texture<int> maTexReindex;
texture<int> maTexPnt;
texture<int> maTexNpnt;
texture<int> maTexPppl;
texture<int> maTexIcpl;
/********* Kernels ********/
#include "maKernel.h"
///////////////////////////////////////////////////////////////////////////
//
// Initialize necessary memory for 2D Voronoi Diagram computation
// - textureSize: The size of the Discrete Voronoi Diagram (width = height)
//
///////////////////////////////////////////////////////////////////////////
void makePattern(int step)
{
int mstep=step,mstep2;
int num=0;
for (int i=0;i<maTexSize;i+=step)
{
hindex[i]=num;
hreindex[num]=i;
num++;
}
while(mstep>=2)
{
mstep2 = mstep/2;
for (int i=mstep2;i<maTexSize;i+=mstep)
{
hindex[i]=num;
hreindex[num]=i;
num++;
}
mstep = mstep2;
}
cudaMemcpy(dindex,hindex,maTexSize*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dreindex,hreindex,maTexSize*sizeof(int),cudaMemcpyHostToDevice);
}
void maInitialization(int textureSize)
{
maTexSize = textureSize;
maMemSize = maTexSize *( maTexSize) * sizeof(short2) ;
maTextures = (short2 **) malloc(6 * sizeof(short2 *));
// Allocate 6 textures
cudaMalloc((void **) &maTextures[0], maMemSize);
cudaMalloc((void **) &maTextures[1], maMemSize);
cudaMalloc((void **) &maTextures[2], maMemSize);
cudaMalloc((void **) &maTextures[3], maMemSize);
cudaMalloc((void **) &maTextures[4], maMemSize);
cudaMalloc((void **) &maTextures[5], maMemSize);
cudaMalloc((void **) &dindex, textureSize*sizeof(int));
cudaMalloc((void **) &dreindex, textureSize*sizeof(int));
cudaMalloc((void **) &pnt, textureSize*sizeof(int));
cudaMalloc((void **) &npnt, textureSize*sizeof(int));
cudaMalloc((void **) &pppl, textureSize*sizeof(int));
cudaMalloc((void **) &icpl, textureSize*sizeof(int));
hindex = (int *) malloc(maTexSize*sizeof(int));
hreindex = (int *) malloc(maTexSize*sizeof(int));
cudaMemset(icpl,-1,textureSize*sizeof(int));
cudaMemset(maTextures[2],127,maMemSize);
}
///////////////////////////////////////////////////////////////////////////
//
// Deallocate all allocated memory
//
///////////////////////////////////////////////////////////////////////////
void maDeInitialization()
{
cudaFree(maTextures[0]);
cudaFree(maTextures[1]);
cudaFree(maTextures[2]);
cudaFree(maTextures[3]);
cudaFree(maTextures[4]);
cudaFree(maTextures[5]);
free(maTextures);
cudaFree(dindex);
cudaFree(dreindex);
cudaFree(pnt);
cudaFree(npnt);
cudaFree(pppl);
cudaFree(icpl);
}
// Copy input to GPU
void maInitializeInput(short *input)
{
cudaMemcpy(maTextures[0], input, maMemSize, cudaMemcpyHostToDevice);
}
// In-place transpose a squared texture.
// Block orders are modified to optimize memory access.
// Point coordinates are also swapped.
/*void maTranspose(short2 *texture)
{
dim3 block(TILE_DIM, BLOCK_ROWS);
dim3 grid(maTexSize / TILE_DIM, maTexSize / TILE_DIM);
cudaBindTexture(0, maTexColor, texture);
maKernelTranspose<<< grid, block >>>(texture, maTexSize);
cudaUnbindTexture(maTexColor);
}*/
void maTranspose(short2 *texture)
{
short *tmp1 = (short *)malloc(maMemSize);
short *tmp2 = (short *)malloc(maMemSize);
cudaMemcpy(tmp1,texture,maMemSize,cudaMemcpyDeviceToHost);
for (int i=0;i<maTexSize;i++)
{
for (int j=0;j<maTexSize;j++)
{
int idx1 = i + j*maTexSize;
int idx2 = j + i*maTexSize;
tmp2[idx2*2] = tmp1[idx1*2+1];
tmp2[idx2*2+1] = tmp1[idx1*2];
}
}
cudaMemcpy(texture,tmp2,maMemSize,cudaMemcpyHostToDevice);
// release the host staging buffers to avoid leaking on every call
free(tmp1);
free(tmp2);
}
// Phase 1 of PBA. m1 must divide the texture size
void maPhase1()
{
dim3 block = dim3(BLOCKSIZE);
dim3 grid = dim3((maTexSize / block.x));
// Flood vertically in their own bands
cudaBindTexture(0, maTexColor, maTextures[0]);
maKernelFloodDown<<< grid, block >>>(maTextures[1], maTexSize);
cudaUnbindTexture(maTexColor);
cudaBindTexture(0, maTexColor, maTextures[1]);
maKernelFloodUp<<< grid, block >>>(maTextures[1], maTexSize);
cudaUnbindTexture(maTexColor);
}
// Phase 2 of PBA. m2 must divide the texture size
void maPhase2(int STEP)
{
int step = STEP;
makePattern(step);
cudaBindTexture(0,maTexIndex,dindex);
cudaBindTexture(0,maTexReindex,dreindex);
cudaBindTexture(0,maTexColor,maTextures[1]);
dim3 block,grid;
block = dim3(BLOCKSIZE);
grid = dim3(((maTexSize/STEP)/block.x) + 1 );
maKernelColorInit1<<<grid,block>>>(maTextures[2],icpl,maTexSize,0,step);
cudaUnbindTexture(maTexColor);
cudaBindTexture(0,maTexColor,maTextures[2]);
cudaBindTexture(0,maTexIcpl,icpl);
maKernelColorInit2<<<grid,block>>>(maTextures[0],maTexSize,0,step);
cudaUnbindTexture(maTexIcpl);
cudaUnbindTexture(maTexColor);
while (step>=2)
{
int step2 = step/2;
int taskNum = (maTexSize - step2)/step;
taskNum++;
block = dim3(BLOCKSIZE);
grid = dim3((taskNum/BLOCKSIZE) );
cudaBindTexture(0,maTexColor,maTextures[1]);
cudaBindTexture(0,maTexLinks,maTextures[0]);
maKernelColor1<<<grid,block>>>(maTextures[0],maTextures[3],pnt,maTextures[4],npnt,maTexSize,step2,step);
cudaUnbindTexture(maTexLinks);
cudaUnbindTexture(maTexColor);
block = dim3(BLOCKSIZE);
grid = dim3((taskNum/BLOCKSIZE) );
cudaBindTexture(0,maTexNpnt,npnt);
cudaBindTexture(0,maTexPnt,pnt);
cudaBindTexture(0,maTexIcpl,icpl);
cudaBindTexture(0,maTexColor,maTextures[3]);
cudaBindTexture(0,maTexLinks,maTextures[4]);
cudaBindTexture(0,maTexP1,maTextures[1]);
cudaBindTexture(0,maTexIcp,maTextures[2]);
maKernelColor15<<<grid,block>>>(maTextures[5],pppl,maTexSize,step2,step);
cudaUnbindTexture(maTexNpnt);
cudaUnbindTexture(maTexPnt);
cudaUnbindTexture(maTexColor);
cudaUnbindTexture(maTexLinks);
cudaUnbindTexture(maTexIcp);
cudaUnbindTexture(maTexIcpl);
cudaUnbindTexture(maTexP1);
block = dim3(BLOCKSIZE);
grid = dim3((taskNum/BLOCKSIZE) );
cudaBindTexture(0,maTexPppl,pppl);
cudaBindTexture(0,maTexPpp,maTextures[5]);
maKernelColor2<<<grid,block>>>(maTextures[2],icpl,maTexSize,step2,step);
cudaUnbindTexture(maTexPppl);
cudaUnbindTexture(maTexPpp);
block = dim3(BLOCKSIZE);
grid = dim3((taskNum/BLOCKSIZE) );
cudaBindTexture(0,maTexIcp,maTextures[2]);
cudaBindTexture(0,maTexIcpl,icpl);
cudaBindTexture(0,maTexColor,maTextures[3]);
cudaBindTexture(0,maTexPnt,pnt);
maKernelColor3<<<grid,block>>>(maTextures[0],maTexSize,step2,step);
cudaUnbindTexture(maTexColor);
cudaUnbindTexture(maTexIcp);
cudaUnbindTexture(maTexIcpl);
cudaUnbindTexture(maTexPnt);
step = step2;
}
cudaUnbindTexture(maTexIndex);
cudaUnbindTexture(maTexReindex);
/**
int step = STEP;
cudaBindTexture(0,maTexColor,maTextures[1]);
cudaBindTexture(0,maTexIndex,index);
dim3 block,grid;
block = dim3(BLOCKSIZE);
grid = dim3(((maTexSize/STEP)/block.x)+1);
maKernelColorInit<<<grid,block>>>(maTextures[0],maTextures[2],maTexSize,0,step);
while(step>=2)
{
int step2 = step/2;
int taskNum = (maTexSize-step2)/step;
taskNum+=1;
block = dim3(BLOCKSIZE);
grid = dim3((taskNum/BLOCKSIZE)+1);
cudaBindTexture(0,maTexLinks,maTextures[0]);
maKernelColorLine<<<grid,block>>>(maTextures[0],maTextures[2],maTexSize,step2,step);
cudaUnbindTexture(maTexLinks);
step=step2;
}
cudaUnbindTexture(maTexColor);
cudaUnbindTexture(maTexIndex);
/**/
/**
cudaBindTexture(0,maTexColor,maTextures[1]);
dim3 block,grid;
block = dim3(BLOCKSIZE);
grid = dim3(maTexSize/block.x);
maKernelTest1<<<grid,block>>>(maTextures[2],index,maTexSize);
cudaBindTexture(0,maTexLinks,maTextures[2]);
cudaBindTexture(0,maTexIndex,index);
maKernelTest2<<<grid,block>>>(maTextures[0],maTexSize);
cudaUnbindTexture(maTexColor);
cudaUnbindTexture(maTexLinks);
cudaUnbindTexture(maTexIndex);/**/
}
void maCompute(int STEP,short* output)
{
//cudaMemcpy(xx, maTextures[0], maMemSize, cudaMemcpyDeviceToHost);
maPhase1();
//cudaMemcpy(xx, maTextures[1], maMemSize, cudaMemcpyDeviceToHost);
maTranspose(maTextures[1]);
// cudaMemcpy(xx, maTextures[1], maMemSize, cudaMemcpyDeviceToHost);
maPhase2(STEP);
maTranspose(maTextures[0]);
}
void rerange(short *outData)
{
short *tmp = (short *)malloc(maMemSize);
for (int i=0;i<maTexSize;i++)
{
int k = hreindex[i];
for (int j=0;j<maTexSize;j++)
{
int idtmp = j + k*maTexSize;
int id = j+ i*maTexSize;
tmp[idtmp*2] = outData[id*2];
tmp[idtmp*2+1] = outData[id*2+1];
}
}
memcpy(outData,tmp,maMemSize);
// release the temporary buffer
free(tmp);
}
// Compute 2D Voronoi diagram
// Input: a 2D texture. Each pixel is represented as two "short" integers.
// For each site at (x, y), the pixel at coordinate (x, y) should contain
// the pair (x, y). Pixels that are not sites should contain the pair (MARKER, MARKER)
// See original paper for the effect of the three parameters:
// phase1Band, phase2Band, phase3Band
// Parameters must divide textureSize
void ma(short *input, short *output, int STEP)
{
// Initialization
maInitializeInput(input);
makePattern(STEP);
// Computation
maCompute(STEP,output);
// Copy back the result
cudaMemcpy(output, maTextures[0], maMemSize, cudaMemcpyDeviceToHost);
rerange(output);
} |
180b4e95b18181cf408dcaf7613f17ff160f1be3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data,
int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
const Dtype* const bottom = bottom_data + ((n * channels + c) * height * width);
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if ((float)(bottom[h * width + w]) > maxval) {
maxidx = h * width + w;
maxval = (float)(bottom[maxidx]);
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_data[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
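// Note on the divisor in AvePoolForward above: pool_size is computed from the
// padded window before clamping, so a corner window with kernel 3x3, stride 2
// and pad 1 spans hstart = wstart = -1 and hend = wend = 2, giving pool_size = 9
// even though only the 4 in-bounds pixels are summed; the average is sum / 9.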
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* rand_idx, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h;
int hend = min(hstart + kernel_h, height);
int wstart = pw * stride_w;
int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
}
}
float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_data[h * width + w];
return;
}
}
}
}
}
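// Worked example for the sampling above: for a window holding {1, 2, 3} the
// first pass gives cumsum = 6; with rand_idx = 0.6 the threshold is 3.6, and
// the second pass stops at the running sums 1, 3, 6 on the value 3, so each
// activation is selected with probability proportional to its magnitude (3/6).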
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h;
int hend = min(hstart + kernel_h, height);
int wstart = pw * stride_w;
int wend = min(wstart + kernel_w, width);
// We set cumsum to FLT_MIN to avoid divide-by-zero problems
Dtype cumsum = FLT_MIN;
Dtype cumvalues = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w];
}
}
top_data[index] = cumvalues / cumsum;
}
}
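// At test time the kernel above outputs sum(x^2) / sum(x), the expected value
// of the training-time stochastic selection; for the {1, 2, 3} window this is
// (1 + 4 + 9) / 6 = 14/6, roughly 2.33.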
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
int count = (*top)[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top->size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = (*top)[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (Caffe::phase() == Caffe::TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff,
const int* mask, const Dtype* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
int phend = min((h + pad_h) / stride_h + 1, pooled_height);
int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
int offset = (n * channels + c) * pooled_height * pooled_width;
top_diff += offset;
if (mask) {
mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
} else {
top_mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
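// Worked example for the window inversion above: with kernel_h = 3, stride_h = 2
// and pad_h = 0, input row h = 4 falls inside the pooling windows starting at
// rows 2 and 4, and the formulas give phstart = (4 - 3) / 2 + 1 = 1 and
// phend = min(4 / 2 + 1, pooled_height) = 3, i.e. output rows 1 and 2.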
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, pooled_height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* rand_idx, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, pooled_height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
rand_idx += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const int count = (*bottom)[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_CLASS(PoolingLayer);
} // namespace caffe
| 180b4e95b18181cf408dcaf7613f17ff160f1be3.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data,
int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
const Dtype* const bottom = bottom_data + ((n * channels + c) * height * width);
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if ((float)(bottom[h * width + w]) > maxval) {
maxidx = h * width + w;
maxval = (float)(bottom[maxidx]);
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_data[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* rand_idx, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h;
int hend = min(hstart + kernel_h, height);
int wstart = pw * stride_w;
int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
}
}
float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_data[h * width + w];
return;
}
}
}
}
}
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h;
int hend = min(hstart + kernel_h, height);
int wstart = pw * stride_w;
int wend = min(wstart + kernel_w, width);
// We set cumsum to FLT_MIN to avoid divide-by-zero problems
Dtype cumsum = FLT_MIN;
Dtype cumvalues = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w];
}
}
top_data[index] = cumvalues / cumsum;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
int count = (*top)[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top->size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = (*top)[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (Caffe::phase() == Caffe::TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff,
const int* mask, const Dtype* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
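// [phstart, phend) x [pwstart, pwend) spans the pooled outputs whose
// pooling window covers bottom element (h, w)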
int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
int phend = min((h + pad_h) / stride_h + 1, pooled_height);
int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
int offset = (n * channels + c) * pooled_height * pooled_width;
top_diff += offset;
if (mask) {
mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
} else {
top_mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
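// (h, w) include the padding offset here; the ranges below span the pooled
// outputs whose (padded) window covers this bottom element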
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, pooled_height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* rand_idx, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, pooled_height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
rand_idx += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
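// the gradient of each pooled output is routed only to the bottom element
// that was stochastically sampled in the forward pass (its index is stored in rand_idx)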
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const int count = (*bottom)[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_CLASS(PoolingLayer);
} // namespace caffe
|
fa216ec00c24182fe1821ab780c6f7627755021a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// 2D Ising model simulation via Metropolis-Hastings algorithm
// parallel setup ~ single checkerboard: preventing race conditions
// include header(s)
#include <random>
#include <cmath>
#include <numeric>
#include <string>
#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
#include <stdio.h>
// time measurement
#include <chrono>
// cuRAND
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand.h>
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// constants
// spatial size of simulation table (use > 1 and even)
const int spatialSize = 1024;
// integration time
const int intTime = (int)1e4;
// coupling
const float coupling = (float)0.45;
// file name to save data
const std::string fileName = "C:\\Users\\david\\Desktop\\MSc\\Ising model\\RENORM_HW1\\magnetisation.txt";
// number of threads per block
const int nThread = 64;
// block size
const int sizeInBlocks = 16;
// number of blocks
const int nBlock = sizeInBlocks * sizeInBlocks;
// size of a single block
const int blockSize = spatialSize / sizeInBlocks;
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// calculate the energy difference due to a single flip
__device__ int DeltaE(int *table, int row, int col, int dim)
{
// spin in question
int s = table[row * dim + col];
// periodic boundary conditions
int rowRight = (row + 1) % dim, rowLeft = (row + dim - 1) % dim, colDown = (col + 1) % dim, colUp = (col + dim - 1) % dim;
// neighbours
int right = table[rowRight * spatialSize + col], left = table[rowLeft * spatialSize + col], down = table[row * spatialSize + colDown], up = table[row * spatialSize + colUp];
// return energy difference (divided by J)
return 2 * s * (up + down + left + right);
}
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// calculate rate
__device__ float Rate(int *table, int row, int col, int dim, float coupling)
{
// energy difference due to flip (divided by J)
int deltaE = DeltaE(table, row, col, dim);
// calculate rate
if (deltaE < 0)
return 1.;
else if (deltaE == 0)
return 0.5;
else
return expf(-coupling * deltaE);
}
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// square function for integers
__host__ __device__ int Square(int x) { return x * x; }
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// spin flip ~ site visit for given (row, col)
__device__ void SpinFlip(int *table, float coupling, hiprandState_t &state, int row, int col)
{
// random number for flipping
float randVal = hiprand_uniform(&state);
// rate
float rate = Rate(table, row, col, spatialSize, coupling);
// flip or not to flip...
if (rate > randVal)
table[row * spatialSize + col] *= -1;
}
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// kernel for Metropolis sweep ~ even sites
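// every neighbour of an "even" (checkerboard) site is an "odd" site, so all even
// sites can be updated in parallel without racing on the spins read in DeltaE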
__global__ void KernelMetropolisEven(int *table, hiprandState_t *states, float coupling, int sweep)
{
// thread index inside the block
int id = threadIdx.x;
// block index
int bid = blockIdx.x;
// thread index
int tid = bid * blockDim.x + id;
// initialize cuRAND
hiprand_init(2 * sweep, tid, 0, &states[tid]);
// locate block and thread
int minRow = (int)(bid / sizeInBlocks) * blockSize;
int minCol = bid * blockSize - sizeInBlocks * minRow;
// move to thread
minRow += id * blockSize / nThread;
for (int irow = minRow; irow < minRow + blockSize / nThread; irow++)
{
// columns for even sites only
for (int icol = (((irow % 2) == 0) ? minCol : minCol + 1); icol < minCol + blockSize; icol += 2)
{
SpinFlip(table, coupling, states[tid], irow, icol);
}
}
}
// kernel for Metropolis sweep ~ odd sites
__global__ void KernelMetropolisOdd(int *table, hiprandState_t *states, float coupling, int sweep)
{
// thread index inside the block
int id = threadIdx.x;
// block index
int bid = blockIdx.x;
// thread index
int tid = bid * blockDim.x + id;
// initialize cuRAND
hiprand_init(2 * sweep + 1, tid, 0, &states[tid]);
// locate block and thread
int minRow = (int)(bid / sizeInBlocks) * blockSize;
int minCol = bid * blockSize - sizeInBlocks * minRow;
// move to thread
minRow += id * blockSize / nThread;
for (int irow = minRow; irow < minRow + blockSize / nThread; irow++)
{
// columns for odd sites only
for (int icol = (((irow % 2) == 0) ? minCol + 1 : minCol); icol < minCol + blockSize; icol += 2)
{
SpinFlip(table, coupling, states[tid], irow, icol);
}
}
}
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// main function
int main(int, char **)
{
// random number generation
std::random_device rd{};
std::mt19937 gen(rd());
// [0, 1] ~ real
std::uniform_real_distribution<double> distrReal(0., 1.);
// vector of time measurements
std::vector<float> timeMeasurement;
// initialize spins (cold start)
// host
std::vector<int> table(Square(spatialSize), 1);
// device
int *tableDev = nullptr;
// cuRAND states
hiprandState_t *statesDev = nullptr;
// container for magnetisation values
std::vector<double> m(intTime, 0.);
// simulation
// Metropolis sweeps
for (int iSweep = 0; iSweep < intTime; iSweep++)
{
// device
tableDev = nullptr;
// cuRAND states
statesDev = nullptr;
// CUDA error handling
hipError_t err = hipSuccess;
// memory allocation for the device
err = hipMalloc((void **)&tableDev, Square(spatialSize) * sizeof(int));
if (err != hipSuccess)
{
std::cout << "Error allocating CUDA memory (TABLE): " << hipGetErrorString(err) << std::endl;
return -1;
}
err = hipMalloc((void **)&statesDev, nBlock * nThread * sizeof(hiprandState_t));
if (err != hipSuccess)
{
std::cout << "Error allocating CUDA memory (cuRAND): " << hipGetErrorString(err) << std::endl;
return -1;
}
// copy data onto device
err = hipMemcpy(tableDev, table.data(), Square(spatialSize) * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess)
{
std::cout << "Error copying memory to device (TABLE): " << hipGetErrorString(err) << std::endl;
return -1;
}
// even kernel
hipLaunchKernelGGL(( KernelMetropolisEven), dim3(nBlock), dim3(nThread), 0, 0, tableDev, statesDev, coupling, iSweep);
// odd kernel
hipLaunchKernelGGL(( KernelMetropolisOdd), dim3(nBlock), dim3(nThread), 0, 0, tableDev, statesDev, coupling, iSweep);
// get errors from run
err = hipGetLastError();
if (err != hipSuccess)
{
std::cout << "CUDA error in kernel call: " << hipGetErrorString(err) << std::endl;
return -1;
}
// copy data from device
err = hipMemcpy(table.data(), tableDev, Square(spatialSize) * sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
std::cout << "Error copying memory to host: " << hipGetErrorString(err) << std::endl;
return -1;
}
// free memory
err = hipFree(tableDev);
if (err != hipSuccess)
{
std::cout << "Error freeing allocation (TABLE): " << hipGetErrorString(err) << std::endl;
return -1;
}
err = hipFree(statesDev);
if (err != hipSuccess)
{
std::cout << "Error freeing allocation (cuRAND): " << hipGetErrorString(err) << std::endl;
return -1;
}
// compute magnetisation
m[iSweep] = std::accumulate(table.begin(), table.end(), 0.) / Square(spatialSize);
}
// write magnetisation results to file
// file
std::ofstream file;
file.open(fileName);
for (int im = 0; im < intTime; im++)
{
file << m[im] << std::endl;
}
file.close();
} | fa216ec00c24182fe1821ab780c6f7627755021a.cu | // 2D Ising model simulation via Metropolis-Hastings algorithm
// parallel setup ~ single checkerboard: preventing race conditions
// include header(s)
#include <random>
#include <cmath>
#include <numeric>
#include <string>
#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
#include <stdio.h>
// time measurement
#include <chrono>
// cuRAND
#include <curand_kernel.h>
#include <curand.h>
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// constants
// spatial size of simulation table (use > 1 and even)
const int spatialSize = 1024;
// integration time
const int intTime = (int)1e4;
// coupling
const float coupling = (float)0.45;
// file name to save data
const std::string fileName = "C:\\Users\\david\\Desktop\\MSc\\Ising model\\RENORM_HW1\\magnetisation.txt";
// number of threads per block
const int nThread = 64;
// block size
const int sizeInBlocks = 16;
// number of blocks
const int nBlock = sizeInBlocks * sizeInBlocks;
// size of a single block
const int blockSize = spatialSize / sizeInBlocks;
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// calculate the energy difference due to a single flip
__device__ int DeltaE(int *table, int row, int col, int dim)
{
// spin in question
int s = table[row * dim + col];
// periodic boundary conditions
int rowRight = (row + 1) % dim, rowLeft = (row + dim - 1) % dim, colDown = (col + 1) % dim, colUp = (col + dim - 1) % dim;
// neighbours
int right = table[rowRight * spatialSize + col], left = table[rowLeft * spatialSize + col], down = table[row * spatialSize + colDown], up = table[row * spatialSize + colUp];
// return energy difference (divided by J)
return 2 * s * (up + down + left + right);
}
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// calculate rate
__device__ float Rate(int *table, int row, int col, int dim, float coupling)
{
// energy difference due to flip (divided by J)
int deltaE = DeltaE(table, row, col, dim);
// calculate rate
if (deltaE < 0)
return 1.;
else if (deltaE == 0)
return 0.5;
else
return expf(-coupling * deltaE);
}
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// square function for integers
__host__ __device__ int Square(int x) { return x * x; }
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// spin flip ~ site visit for given (row, col)
__device__ void SpinFlip(int *table, float coupling, curandState &state, int row, int col)
{
// random number for flipping
float randVal = curand_uniform(&state);
// rate
float rate = Rate(table, row, col, spatialSize, coupling);
// flip or not to flip...
if (rate > randVal)
table[row * spatialSize + col] *= -1;
}
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// kernel for Metropolis sweep ~ even sites
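// every neighbour of an "even" (checkerboard) site is an "odd" site, so all even
// sites can be updated in parallel without racing on the spins read in DeltaE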
__global__ void KernelMetropolisEven(int *table, curandState *states, float coupling, int sweep)
{
// thread index inside the block
int id = threadIdx.x;
// block index
int bid = blockIdx.x;
// thread index
int tid = bid * blockDim.x + id;
// initialize cuRAND
curand_init(2 * sweep, tid, 0, &states[tid]);
// locate block and thread
int minRow = (int)(bid / sizeInBlocks) * blockSize;
int minCol = bid * blockSize - sizeInBlocks * minRow;
// move to thread
minRow += id * blockSize / nThread;
for (int irow = minRow; irow < minRow + blockSize / nThread; irow++)
{
// columns for even sites only
for (int icol = (((irow % 2) == 0) ? minCol : minCol + 1); icol < minCol + blockSize; icol += 2)
{
SpinFlip(table, coupling, states[tid], irow, icol);
}
}
}
// kernel for Metropolis sweep ~ odd sites
__global__ void KernelMetropolisOdd(int *table, curandState *states, float coupling, int sweep)
{
// thread index inside the block
int id = threadIdx.x;
// block index
int bid = blockIdx.x;
// thread index
int tid = bid * blockDim.x + id;
// initialize cuRAND
curand_init(2 * sweep + 1, tid, 0, &states[tid]);
// locate block and thread
int minRow = (int)(bid / sizeInBlocks) * blockSize;
int minCol = bid * blockSize - sizeInBlocks * minRow;
// move to thread
minRow += id * blockSize / nThread;
for (int irow = minRow; irow < minRow + blockSize / nThread; irow++)
{
// columns for odd sites only
for (int icol = (((irow % 2) == 0) ? minCol + 1 : minCol); icol < minCol + blockSize; icol += 2)
{
SpinFlip(table, coupling, states[tid], irow, icol);
}
}
}
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// main function
int main(int, char **)
{
// random number generation
std::random_device rd{};
std::mt19937 gen(rd());
// [0, 1] ~ real
std::uniform_real_distribution<double> distrReal(0., 1.);
// vector of time measurements
std::vector<float> timeMeasurement;
// initialize spins (cold start)
// host
std::vector<int> table(Square(spatialSize), 1);
// device
int *tableDev = nullptr;
// cuRAND states
curandState *statesDev = nullptr;
// container for magnetisation values
std::vector<double> m(intTime, 0.);
// simulation
// Metropolis sweeps
for (int iSweep = 0; iSweep < intTime; iSweep++)
{
// device
tableDev = nullptr;
// cuRAND states
statesDev = nullptr;
// CUDA error handling
cudaError_t err = cudaSuccess;
// memory allocation for the device
err = cudaMalloc((void **)&tableDev, Square(spatialSize) * sizeof(int));
if (err != cudaSuccess)
{
std::cout << "Error allocating CUDA memory (TABLE): " << cudaGetErrorString(err) << std::endl;
return -1;
}
err = cudaMalloc((void **)&statesDev, nBlock * nThread * sizeof(curandState));
if (err != cudaSuccess)
{
std::cout << "Error allocating CUDA memory (cuRAND): " << cudaGetErrorString(err) << std::endl;
return -1;
}
// copy data onto device
err = cudaMemcpy(tableDev, table.data(), Square(spatialSize) * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
std::cout << "Error copying memory to device (TABLE): " << cudaGetErrorString(err) << std::endl;
return -1;
}
// even kernel
KernelMetropolisEven<<<nBlock, nThread>>>(tableDev, statesDev, coupling, iSweep);
// odd kernel
KernelMetropolisOdd<<<nBlock, nThread>>>(tableDev, statesDev, coupling, iSweep);
// get errors from run
err = cudaGetLastError();
if (err != cudaSuccess)
{
std::cout << "CUDA error in kernel call: " << cudaGetErrorString(err) << std::endl;
return -1;
}
// copy data from device
err = cudaMemcpy(table.data(), tableDev, Square(spatialSize) * sizeof(int), cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
std::cout << "Error copying memory to host: " << cudaGetErrorString(err) << std::endl;
return -1;
}
// free memory
err = cudaFree(tableDev);
if (err != cudaSuccess)
{
std::cout << "Error freeing allocation (TABLE): " << cudaGetErrorString(err) << std::endl;
return -1;
}
err = cudaFree(statesDev);
if (err != cudaSuccess)
{
std::cout << "Error freeing allocation (cuRAND): " << cudaGetErrorString(err) << std::endl;
return -1;
}
// compute magnetisation
m[iSweep] = std::accumulate(table.begin(), table.end(), 0.) / Square(spatialSize);
}
// write magnetisation results to file
// file
std::ofstream file;
file.open(fileName);
for (int im = 0; im < intTime; im++)
{
file << m[im] << std::endl;
}
file.close();
} |
53c8048fb637f418f3a78d4c75ab10a0396c46bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
__shared__ float Mds[BUFFER_SIZE][TILED_WIDTH][TILED_WIDTH];
__shared__ float Nds[BUFFER_SIZE][TILED_WIDTH][TILED_WIDTH];
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int row = by * TILED_WIDTH + ty;
int col = bx * TILED_WIDTH + tx;
float p_sum=0.0f;
int m=0;
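// preload tile 0; in the loop below tile m is staged into buffer m % BUFFER_SIZE
// while the partial sum is accumulated from buffer (m-1) % BUFFER_SIZE
// (software double buffering -- assumes BUFFER_SIZE >= 2, as defined in matrixmul.h)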
if((m*TILED_WIDTH+tx)<M.width&&row<M.height)
Mds[m%BUFFER_SIZE][ty][tx] = M.elements[row*M.width + (m*TILED_WIDTH + tx)];
else
Mds[m%BUFFER_SIZE][ty][tx] = 0.0;
if((m*TILED_WIDTH+ty)<N.height&&col<N.width)
Nds[m%BUFFER_SIZE][ty][tx] = N.elements[(m*TILED_WIDTH + ty) * N.width + col];
else
Nds[m%BUFFER_SIZE][ty][tx] = 0.0;
__syncthreads();
for(m=1; m<(M.width+TILED_WIDTH-1)/TILED_WIDTH; m++)
{
if((m*TILED_WIDTH+tx)<M.width&&row<M.height)
Mds[m%BUFFER_SIZE][ty][tx] = M.elements[row*M.width + (m*TILED_WIDTH + tx)];
else
Mds[m%BUFFER_SIZE][ty][tx] = 0.0;
if((m*TILED_WIDTH+ty)<N.height&&col<N.width)
Nds[m%BUFFER_SIZE][ty][tx] = N.elements[(m*TILED_WIDTH + ty) * N.width + col];
else
Nds[m%BUFFER_SIZE][ty][tx] = 0.0;
for(int n=0; n<TILED_WIDTH; n++)
p_sum += Mds[(m-1)%BUFFER_SIZE][ty][n] * Nds[(m-1)%BUFFER_SIZE][n][tx];
__syncthreads();
}
for(int k=0; k<TILED_WIDTH; k++)
p_sum += Mds[(m-1)%BUFFER_SIZE][ty][k]*Nds[(m-1)%BUFFER_SIZE][k][tx];
__syncthreads();
if(row<P.height&&col<P.width)
P.elements[ row*P.width + col ] = p_sum;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| 53c8048fb637f418f3a78d4c75ab10a0396c46bc.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
__shared__ float Mds[BUFFER_SIZE][TILED_WIDTH][TILED_WIDTH];
__shared__ float Nds[BUFFER_SIZE][TILED_WIDTH][TILED_WIDTH];
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int row = by * TILED_WIDTH + ty;
int col = bx * TILED_WIDTH + tx;
float p_sum=0.0f;
int m=0;
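// preload tile 0; in the loop below tile m is staged into buffer m % BUFFER_SIZE
// while the partial sum is accumulated from buffer (m-1) % BUFFER_SIZE
// (software double buffering -- assumes BUFFER_SIZE >= 2, as defined in matrixmul.h)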
if((m*TILED_WIDTH+tx)<M.width&&row<M.height)
Mds[m%BUFFER_SIZE][ty][tx] = M.elements[row*M.width + (m*TILED_WIDTH + tx)];
else
Mds[m%BUFFER_SIZE][ty][tx] = 0.0;
if((m*TILED_WIDTH+ty)<N.height&&col<N.width)
Nds[m%BUFFER_SIZE][ty][tx] = N.elements[(m*TILED_WIDTH + ty) * N.width + col];
else
Nds[m%BUFFER_SIZE][ty][tx] = 0.0;
__syncthreads();
for(m=1; m<(M.width+TILED_WIDTH-1)/TILED_WIDTH; m++)
{
if((m*TILED_WIDTH+tx)<M.width&&row<M.height)
Mds[m%BUFFER_SIZE][ty][tx] = M.elements[row*M.width + (m*TILED_WIDTH + tx)];
else
Mds[m%BUFFER_SIZE][ty][tx] = 0.0;
if((m*TILED_WIDTH+ty)<N.height&&col<N.width)
Nds[m%BUFFER_SIZE][ty][tx] = N.elements[(m*TILED_WIDTH + ty) * N.width + col];
else
Nds[m%BUFFER_SIZE][ty][tx] = 0.0;
for(int n=0; n<TILED_WIDTH; n++)
p_sum += Mds[(m-1)%BUFFER_SIZE][ty][n] * Nds[(m-1)%BUFFER_SIZE][n][tx];
__syncthreads();
}
for(int k=0; k<TILED_WIDTH; k++)
p_sum += Mds[(m-1)%BUFFER_SIZE][ty][k]*Nds[(m-1)%BUFFER_SIZE][k][tx];
__syncthreads();
if(row<P.height&&col<P.width)
P.elements[ row*P.width + col ] = p_sum;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
d2ef8500a48a20d30fc6ed5c0467cc4eb0d56c9c.hip | // !!! This is a file automatically generated by hipify!!!
#define LIMIT -999
#define BLOCK_SIZE 16
#define MAX_SEQ_LEN 2100
#define MAX_SEQ_NUM 1024
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "needle.h"
#include "needle_cpu.h"
// includes, kernels
#include "needle_cpu.c"
#include "needle_kernel_dynamic.cu"
#include "needle_kernel_diagonal.cu"
inline void cudaCheckError(int line, hipError_t ce)
{
if (ce != hipSuccess){
printf("Error: line %d %s\n", line, hipGetErrorString(ce));
exit(1);
}
}
int validation(int *score_matrix_cpu, int *score_matrix, unsigned int length)
{
unsigned int i = 0;
while (i!=length){
if ( score_matrix_cpu[i]==score_matrix[i] ){
++i;
continue;
}
else {
printf("i = %d\n",i);
return 0;
}
}
return 1;
}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
double gettime(){
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <pair number> <penalty> \n", argv[0]);
fprintf(stderr, "\t<pair number> - times of comparison\n");
fprintf(stderr, "\t<penalty> - penalty(negative integer)\n");
exit(1);
}
void runTest( int argc, char** argv)
{
double time, end_time;
int pair_num;
short penalty;
char sequence_set1[MAX_SEQ_LEN*MAX_SEQ_NUM] = {0}, sequence_set2[MAX_SEQ_LEN*MAX_SEQ_NUM] = {0};
unsigned int pos1[MAX_SEQ_NUM] = {0}, pos2[MAX_SEQ_NUM] = {0}, pos_matrix[MAX_SEQ_NUM] = {0};
int *score_matrix;
int *score_matrix_cpu;
char *d_sequence_set1, *d_sequence_set2;
unsigned int *d_pos1, *d_pos2, *d_pos_matrix;
int *d_score_matrix;
int seq1_len, seq2_len;
if (argc == 3)
{
pair_num = atoi(argv[1]);
penalty = atoi(argv[2]);
if (pair_num>MAX_SEQ_NUM){
fprintf(stderr, "\t<pair number> - times of comparison should be less than %d\n",MAX_SEQ_NUM);
exit(1);
}
}
else{
usage(argc, argv);
}
// first API
time = gettime();
cudaCheckError( __LINE__, hipSetDevice(0) );
end_time = gettime();
fprintf(stdout,"First API,%lf\n",end_time-time);
time = end_time;
// Get input data
srand ( 7 );
pos_matrix[0] = pos1[0] = pos2[0] = 0;
for (int i=0; i<pair_num; ++i){
//please define your own sequence 1
seq1_len = 2048; //64+rand() % 20;
//printf("Seq1 length: %d\n", seq1_len);
for (int j=0; j<seq1_len; ++j)
sequence_set1[ pos1[i] + j ] = rand() % 20 + 1;
pos1[i+1] = pos1[i] + seq1_len;
//please define your own sequence 2.
seq2_len = 2048;//64+rand() % 20;
//printf("Seq2 length: %d\n\n", seq2_len);
for (int j=0; j<seq2_len; ++j)
sequence_set2[ pos2[i] +j ] = rand() % 20 + 1;
pos2[i+1] = pos2[i] + seq2_len;
//printf("Matrix size increase: %d\n", (seq1_len+1) * (seq2_len+1));
pos_matrix[i+1] = pos_matrix[i] + (seq1_len+1) * (seq2_len+1);
}
score_matrix = (int *)malloc( pos_matrix[pair_num]*sizeof(int));
// score_matrix_cpu = (int *)malloc( pos_matrix[pair_num]*sizeof(int));
// needleman_cpu(sequence_set1, sequence_set2, pos1, pos2, score_matrix_cpu, pos_matrix, pair_num, penalty);
// printf("Start Needleman-Wunsch\n");
cudaCheckError( __LINE__, hipMalloc( (void**)&d_sequence_set1, sizeof(char)*pos1[pair_num] ) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_sequence_set2, sizeof(char)*pos2[pair_num] ) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_score_matrix, sizeof(int)*pos_matrix[pair_num]) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_pos1, sizeof(unsigned int)*(pair_num+1) ) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_pos2, sizeof(unsigned int)*(pair_num+1) ) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_pos_matrix, sizeof(unsigned int)*(pair_num+1) ) );
// CPU phases
end_time = gettime();
fprintf(stdout,"CPU,%lf\n",end_time-time);
time = end_time;
// Memcpy to device
cudaCheckError( __LINE__, hipMemcpy( d_sequence_set1, sequence_set1, sizeof(char)*pos1[pair_num], hipMemcpyHostToDevice ) );
cudaCheckError( __LINE__, hipMemcpy( d_sequence_set2, sequence_set2, sizeof(char)*pos2[pair_num], hipMemcpyHostToDevice ) );
cudaCheckError( __LINE__, hipMemcpy( d_pos1, pos1, sizeof(unsigned int)*(pair_num+1), hipMemcpyHostToDevice ) );
cudaCheckError( __LINE__, hipMemcpy( d_pos2, pos2, sizeof(unsigned int)*(pair_num+1), hipMemcpyHostToDevice ) );
cudaCheckError( __LINE__, hipMemcpy( d_pos_matrix, pos_matrix, sizeof(unsigned int)*(pair_num+1), hipMemcpyHostToDevice ) );
//end_time = gettime();
//fprintf(stdout,"Memcpy to device,%lf\n",end_time-time);
//time = end_time;
// the threads in block should equal to the STRIDE_SIZE
/* needleman_cuda_dynamic<<<14, 128>>>(d_sequence_set1, d_sequence_set2,
d_pos1, d_pos2,
d_score_matrix, d_pos_matrix,
pair_num, penalty);
*/
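// one block (512 threads) per sequence pair; needle_kernel_diagonal.cu (not shown
// here) presumably fills each score matrix wavefront-style along anti-diagonals,
// whose cells are mutually independent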
hipLaunchKernelGGL(( needleman_cuda_diagonal), dim3(pair_num),dim3(512), 0, 0, d_sequence_set1, d_sequence_set2,
d_pos1, d_pos2,
d_score_matrix, d_pos_matrix,
pair_num, penalty);
cudaCheckError( __LINE__, hipDeviceSynchronize() );
//end_time = gettime();
//fprintf(stdout,"kernel,%lf\n",end_time-time);
//time = end_time;
// Memcpy to host
cudaCheckError( __LINE__, hipMemcpy( score_matrix, d_score_matrix, sizeof(int)*pos_matrix[pair_num], hipMemcpyDeviceToHost ) );
end_time = gettime();
//fprintf(stdout,"Memcpy to host,%lf\n",end_time-time);
fprintf(stdout,"Total CUDA implementation time, %lf\n",end_time-time);
time = end_time;
/* if ( validation(score_matrix_cpu, score_matrix, pos_matrix[pair_num]) )
printf("Validation: PASS\n");
else
printf("Validation: FAIL\n");
*/
#ifdef TRACEBACK
for (int i = max_rows - 2, j = max_rows - 2; i>=0, j>=0;){
int nw, n, w, traceback;
if ( i == max_rows - 2 && j == max_rows - 2 ) {
//fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element
}
if ( i == 0 && j == 0 )
break;
if ( i > 0 && j > 0 ){
nw = output_itemsets[(i - 1) * max_cols + j - 1];
w = output_itemsets[ i * max_cols + j - 1 ];
n = output_itemsets[(i - 1) * max_cols + j];
}
else if ( i == 0 ){
nw = n = LIMIT;
w = output_itemsets[ i * max_cols + j - 1 ];
}
else if ( j == 0 ){
nw = w = LIMIT;
n = output_itemsets[(i - 1) * max_cols + j];
}
else{
}
//traceback = maximum(nw, w, n);
int new_nw, new_w, new_n;
new_nw = nw + referrence[i * max_cols + j];
new_w = w - penalty;
new_n = n - penalty;
traceback = maximum(new_nw, new_w, new_n);
if(traceback == new_nw)
traceback = nw;
if(traceback == new_w)
traceback = w;
if(traceback == new_n)
traceback = n;
//fprintf(fpo, "%d ", traceback);
if(traceback == nw )
{i--; j--; continue;}
else if(traceback == w )
{j--; continue;}
else if(traceback == n )
{i--; continue;};
}
#endif
// fclose(fpo);
hipFree(d_sequence_set1);
hipFree(d_sequence_set2);
hipFree(d_pos1);
hipFree(d_pos2);
hipFree(d_pos_matrix);
hipFree(d_score_matrix);
free(score_matrix);
}
| d2ef8500a48a20d30fc6ed5c0467cc4eb0d56c9c.cu | #define LIMIT -999
#define BLOCK_SIZE 16
#define MAX_SEQ_LEN 2100
#define MAX_SEQ_NUM 1024
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <sys/time.h>
#include "needle.h"
#include "needle_cpu.h"
// includes, kernels
#include "needle_cpu.c"
#include "needle_kernel_dynamic.cu"
#include "needle_kernel_diagonal.cu"
inline void cudaCheckError(int line, cudaError_t ce)
{
if (ce != cudaSuccess){
printf("Error: line %d %s\n", line, cudaGetErrorString(ce));
exit(1);
}
}
int validation(int *score_matrix_cpu, int *score_matrix, unsigned int length)
{
unsigned int i = 0;
while (i!=length){
if ( score_matrix_cpu[i]==score_matrix[i] ){
++i;
continue;
}
else {
printf("i = %d\n",i);
return 0;
}
}
return 1;
}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
double gettime(){
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <pair number> <penalty> \n", argv[0]);
fprintf(stderr, "\t<pair number> - times of comparison\n");
fprintf(stderr, "\t<penalty> - penalty(negative integer)\n");
exit(1);
}
void runTest( int argc, char** argv)
{
double time, end_time;
int pair_num;
short penalty;
char sequence_set1[MAX_SEQ_LEN*MAX_SEQ_NUM] = {0}, sequence_set2[MAX_SEQ_LEN*MAX_SEQ_NUM] = {0};
unsigned int pos1[MAX_SEQ_NUM] = {0}, pos2[MAX_SEQ_NUM] = {0}, pos_matrix[MAX_SEQ_NUM] = {0};
int *score_matrix;
int *score_matrix_cpu;
char *d_sequence_set1, *d_sequence_set2;
unsigned int *d_pos1, *d_pos2, *d_pos_matrix;
int *d_score_matrix;
int seq1_len, seq2_len;
if (argc == 3)
{
pair_num = atoi(argv[1]);
penalty = atoi(argv[2]);
if (pair_num>MAX_SEQ_NUM){
fprintf(stderr, "\t<pair number> - times of comparison should be less than %d\n",MAX_SEQ_NUM);
exit(1);
}
}
else{
usage(argc, argv);
}
// first API
time = gettime();
cudaCheckError( __LINE__, cudaSetDevice(0) );
end_time = gettime();
fprintf(stdout,"First API,%lf\n",end_time-time);
time = end_time;
// Get input data
srand ( 7 );
pos_matrix[0] = pos1[0] = pos2[0] = 0;
for (int i=0; i<pair_num; ++i){
//please define your own sequence 1
seq1_len = 2048; //64+rand() % 20;
//printf("Seq1 length: %d\n", seq1_len);
for (int j=0; j<seq1_len; ++j)
sequence_set1[ pos1[i] + j ] = rand() % 20 + 1;
pos1[i+1] = pos1[i] + seq1_len;
//please define your own sequence 2.
seq2_len = 2048;//64+rand() % 20;
//printf("Seq2 length: %d\n\n", seq2_len);
for (int j=0; j<seq2_len; ++j)
sequence_set2[ pos2[i] +j ] = rand() % 20 + 1;
pos2[i+1] = pos2[i] + seq2_len;
//printf("Matrix size increase: %d\n", (seq1_len+1) * (seq2_len+1));
pos_matrix[i+1] = pos_matrix[i] + (seq1_len+1) * (seq2_len+1);
}
score_matrix = (int *)malloc( pos_matrix[pair_num]*sizeof(int));
// score_matrix_cpu = (int *)malloc( pos_matrix[pair_num]*sizeof(int));
// needleman_cpu(sequence_set1, sequence_set2, pos1, pos2, score_matrix_cpu, pos_matrix, pair_num, penalty);
// printf("Start Needleman-Wunsch\n");
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_sequence_set1, sizeof(char)*pos1[pair_num] ) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_sequence_set2, sizeof(char)*pos2[pair_num] ) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_score_matrix, sizeof(int)*pos_matrix[pair_num]) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_pos1, sizeof(unsigned int)*(pair_num+1) ) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_pos2, sizeof(unsigned int)*(pair_num+1) ) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_pos_matrix, sizeof(unsigned int)*(pair_num+1) ) );
// CPU phases
end_time = gettime();
fprintf(stdout,"CPU,%lf\n",end_time-time);
time = end_time;
// Memcpy to device
cudaCheckError( __LINE__, cudaMemcpy( d_sequence_set1, sequence_set1, sizeof(char)*pos1[pair_num], cudaMemcpyHostToDevice ) );
cudaCheckError( __LINE__, cudaMemcpy( d_sequence_set2, sequence_set2, sizeof(char)*pos2[pair_num], cudaMemcpyHostToDevice ) );
cudaCheckError( __LINE__, cudaMemcpy( d_pos1, pos1, sizeof(unsigned int)*(pair_num+1), cudaMemcpyHostToDevice ) );
cudaCheckError( __LINE__, cudaMemcpy( d_pos2, pos2, sizeof(unsigned int)*(pair_num+1), cudaMemcpyHostToDevice ) );
cudaCheckError( __LINE__, cudaMemcpy( d_pos_matrix, pos_matrix, sizeof(unsigned int)*(pair_num+1), cudaMemcpyHostToDevice ) );
//end_time = gettime();
//fprintf(stdout,"Memcpy to device,%lf\n",end_time-time);
//time = end_time;
// the threads in block should equal to the STRIDE_SIZE
/* needleman_cuda_dynamic<<<14, 128>>>(d_sequence_set1, d_sequence_set2,
d_pos1, d_pos2,
d_score_matrix, d_pos_matrix,
pair_num, penalty);
*/
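// one block (512 threads) per sequence pair; needle_kernel_diagonal.cu (not shown
// here) presumably fills each score matrix wavefront-style along anti-diagonals,
// whose cells are mutually independent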
needleman_cuda_diagonal<<<pair_num,512>>>(d_sequence_set1, d_sequence_set2,
d_pos1, d_pos2,
d_score_matrix, d_pos_matrix,
pair_num, penalty);
cudaCheckError( __LINE__, cudaDeviceSynchronize() );
//end_time = gettime();
//fprintf(stdout,"kernel,%lf\n",end_time-time);
//time = end_time;
// Memcpy to host
cudaCheckError( __LINE__, cudaMemcpy( score_matrix, d_score_matrix, sizeof(int)*pos_matrix[pair_num], cudaMemcpyDeviceToHost ) );
end_time = gettime();
//fprintf(stdout,"Memcpy to host,%lf\n",end_time-time);
fprintf(stdout,"Total CUDA implementation time, %lf\n",end_time-time);
time = end_time;
/* if ( validation(score_matrix_cpu, score_matrix, pos_matrix[pair_num]) )
printf("Validation: PASS\n");
else
printf("Validation: FAIL\n");
*/
#ifdef TRACEBACK
for (int i = max_rows - 2, j = max_rows - 2; i>=0, j>=0;){
int nw, n, w, traceback;
if ( i == max_rows - 2 && j == max_rows - 2 ) {
//fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element
}
if ( i == 0 && j == 0 )
break;
if ( i > 0 && j > 0 ){
nw = output_itemsets[(i - 1) * max_cols + j - 1];
w = output_itemsets[ i * max_cols + j - 1 ];
n = output_itemsets[(i - 1) * max_cols + j];
}
else if ( i == 0 ){
nw = n = LIMIT;
w = output_itemsets[ i * max_cols + j - 1 ];
}
else if ( j == 0 ){
nw = w = LIMIT;
n = output_itemsets[(i - 1) * max_cols + j];
}
else{
}
//traceback = maximum(nw, w, n);
int new_nw, new_w, new_n;
new_nw = nw + referrence[i * max_cols + j];
new_w = w - penalty;
new_n = n - penalty;
traceback = maximum(new_nw, new_w, new_n);
if(traceback == new_nw)
traceback = nw;
if(traceback == new_w)
traceback = w;
if(traceback == new_n)
traceback = n;
//fprintf(fpo, "%d ", traceback);
if(traceback == nw )
{i--; j--; continue;}
else if(traceback == w )
{j--; continue;}
else if(traceback == n )
{i--; continue;};
}
#endif
// fclose(fpo);
cudaFree(d_sequence_set1);
cudaFree(d_sequence_set2);
cudaFree(d_pos1);
cudaFree(d_pos2);
cudaFree(d_pos_matrix);
cudaFree(d_score_matrix);
free(score_matrix);
}
|
069186f1db275879c8ea16e0ecb0630b62f7154b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include "naive_gpu_multiply.cuh"
void DoNaiveGpuMatrixMult()
{
const size_t kMatWidth = MAT_WIDTH;
const size_t kMatHeight = MAT_HEIGHT;
hipError_t cuErr = NaiveGpuMult( MatrixUtil::GetMatrix1(), kMatWidth, kMatHeight,
MatrixUtil::GetMatrix2(), kMatWidth, kMatHeight,
MatrixUtil::GetResultMat() );
if( cuErr != hipSuccess )
{
fprintf( stderr, "NaiveGpuMult Failed!\n" );
return;
}
}
hipError_t NaiveGpuMult(float* matA, const size_t A_width, const size_t A_height,
float* matB, const size_t B_width, const size_t B_height,
float* resMat)
{
hipError_t cuErr;
// Device pointers to matrix data
float* dev_A;
float* dev_B;
float* dev_res;
// Choose which GPU to run on, change this on a multi-GPU system.
cuErr = hipSetDevice(0);
if (cuErr != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
return cuErr;
}
/////////////////////////////////////////////
// Allocate memory for matrices on the device
cuErr = hipMalloc((void**)&dev_A, A_width * A_height * sizeof(float));
if (cuErr != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cuErr = hipMalloc((void**)&dev_B, B_width * B_height * sizeof(float));
if (cuErr != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cuErr = hipMalloc((void**)&dev_res, A_width * B_height * sizeof(float));
if (cuErr != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
////////////////////////////////////////////
// Copy matrices A and B to device
cuErr = hipMemcpy(dev_A, matA, A_width * A_height * sizeof(float), hipMemcpyHostToDevice);
if (cuErr != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cuErr = hipMemcpy(dev_B, matB, B_width * B_height * sizeof(float), hipMemcpyHostToDevice);
if (cuErr != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
dim3 BlockDimensions( NAIVE_BLOCK_X, NAIVE_BLOCK_Y, NAIVE_BLOCK_Z );
dim3 NumBlocks( ceil(MAT_WIDTH / (float)NAIVE_BLOCK_X), ceil(MAT_HEIGHT / (float)NAIVE_BLOCK_Y), 1);
/////////////////////////////////////////
// Launch the Naive kernel
hipLaunchKernelGGL(( NaiveMatMultKern), dim3(NumBlocks), dim3(BlockDimensions), 0, 0, dev_A,
dev_B,
dev_res,
MAT_WIDTH);
// Check for any errors launching the kernel
cuErr = hipGetLastError();
if (cuErr != hipSuccess) {
fprintf(stderr, "Naive kernel launch failed: %s\n", hipGetErrorString(cuErr));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cuErr = hipDeviceSynchronize();
if (cuErr != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching Naive Kernel!\n", cuErr);
goto Error;
}
// Copy result from GPU buffer to host memory.
cuErr = hipMemcpy(resMat, dev_res, A_width * B_height * sizeof(float), hipMemcpyDeviceToHost);
if (cuErr != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cuErr = hipDeviceSynchronize();
MatrixUtil::PrintMatrix(resMat, MAT_WIDTH, MAT_HEIGHT);
// Labels are stupid
Error:
hipFree(dev_A);
hipFree(dev_B);
hipFree(dev_res);
return cuErr;
}
// The kernel code that runs on the device
// (note: despite the "Naive" name, this is a shared-memory tiled multiply using TILE_WIDTH x TILE_WIDTH tiles)
__global__ void NaiveMatMultKern(float* dev_A_in, float* dev_B_in, float* dev_res_in, size_t width)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float Pvalue = 0.0f;
for (int m = 0; m < width/TILE_WIDTH; ++m)
{
// Collaborative loading into shared memory
Mds[ty][tx] = dev_A_in[row * width + m * TILE_WIDTH + tx];
Nds[ty][tx] = dev_B_in[ (m * TILE_WIDTH + ty) * width + col ];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
{
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
dev_res_in[row * width + col] = Pvalue;
} | 069186f1db275879c8ea16e0ecb0630b62f7154b.cu | #include <cmath>
#include "naive_gpu_multiply.cuh"
void DoNaiveGpuMatrixMult()
{
const size_t kMatWidth = MAT_WIDTH;
const size_t kMatHeight = MAT_HEIGHT;
cudaError_t cuErr = NaiveGpuMult( MatrixUtil::GetMatrix1(), kMatWidth, kMatHeight,
MatrixUtil::GetMatrix2(), kMatWidth, kMatHeight,
MatrixUtil::GetResultMat() );
if( cuErr != cudaSuccess )
{
fprintf( stderr, "NaiveGpuMult Failed!\n" );
return;
}
}
cudaError_t NaiveGpuMult(float* matA, const size_t A_width, const size_t A_height,
float* matB, const size_t B_width, const size_t B_height,
float* resMat)
{
cudaError_t cuErr;
// Device pointers to matrix data
float* dev_A;
float* dev_B;
float* dev_res;
// Choose which GPU to run on, change this on a multi-GPU system.
cuErr = cudaSetDevice(0);
if (cuErr != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
return cuErr;
}
/////////////////////////////////////////////
// Allocate memory for matrices on the device
cuErr = cudaMalloc((void**)&dev_A, A_width * A_height * sizeof(float));
if (cuErr != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cuErr = cudaMalloc((void**)&dev_B, B_width * B_height * sizeof(float));
if (cuErr != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cuErr = cudaMalloc((void**)&dev_res, A_width * B_height * sizeof(float));
if (cuErr != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
////////////////////////////////////////////
// Copy matrices A and B to device
cuErr = cudaMemcpy(dev_A, matA, A_width * A_height * sizeof(float), cudaMemcpyHostToDevice);
if (cuErr != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cuErr = cudaMemcpy(dev_B, matB, B_width * B_height * sizeof(float), cudaMemcpyHostToDevice);
if (cuErr != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
dim3 BlockDimensions( NAIVE_BLOCK_X, NAIVE_BLOCK_Y, NAIVE_BLOCK_Z );
dim3 NumBlocks( ceil(MAT_WIDTH / (float)NAIVE_BLOCK_X), ceil(MAT_HEIGHT / (float)NAIVE_BLOCK_Y), 1);
/////////////////////////////////////////
// Launch the Naive kernel
NaiveMatMultKern<<< NumBlocks, BlockDimensions>>>(dev_A,
dev_B,
dev_res,
MAT_WIDTH);
// Check for any errors launching the kernel
cuErr = cudaGetLastError();
if (cuErr != cudaSuccess) {
fprintf(stderr, "Naive kernel launch failed: %s\n", cudaGetErrorString(cuErr));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cuErr = cudaDeviceSynchronize();
if (cuErr != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching Naive Kernel!\n", cuErr);
goto Error;
}
// Copy result from GPU buffer to host memory.
cuErr = cudaMemcpy(resMat, dev_res, A_width * B_height * sizeof(float), cudaMemcpyDeviceToHost);
if (cuErr != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cuErr = cudaDeviceSynchronize();
MatrixUtil::PrintMatrix(resMat, MAT_WIDTH, MAT_HEIGHT);
// Labels are stupid
Error:
cudaFree(dev_A);
cudaFree(dev_B);
cudaFree(dev_res);
return cuErr;
}
// The kernel code that runs on the device
// (note: despite the "Naive" name, this is a shared-memory tiled multiply using TILE_WIDTH x TILE_WIDTH tiles)
__global__ void NaiveMatMultKern(float* dev_A_in, float* dev_B_in, float* dev_res_in, size_t width)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float Pvalue = 0.0f;
for (int m = 0; m < width/TILE_WIDTH; ++m)
{
// Collaborative loading into shared memory
Mds[ty][tx] = dev_A_in[row * width + m * TILE_WIDTH + tx];
Nds[ty][tx] = dev_B_in[ (m * TILE_WIDTH + ty) * width + col ];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
{
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
dev_res_in[row * width + col] = Pvalue;
} |
1bfcbc06c0fb7695ba7ee296c5d80fc52a02a2d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "psc_cuda.h"
#define BLOCKSIZE_X 1
#define BLOCKSIZE_Y 4
#define BLOCKSIZE_Z 4
#define PFX(x) cuda_bnd_##x
#include "constants.c"
#define SW (2) // FIXME
// OPT lots of optimization opportunity in the single-proc/patch ones,
// but they may not be that important for real production
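// fill_ghosts_periodic_yz: each ghost cell copies the value of its periodic
// image inside the interior; threads on interior cells return early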
__global__ static void
fill_ghosts_periodic_yz(real *d_flds, int mb, int me)
{
int iy = blockIdx.x * blockDim.x + threadIdx.x;
int iz = blockIdx.y * blockDim.y + threadIdx.y;
if (!(iy < d_consts.mx[1] && iz < d_consts.mx[2]))
return;
bool inside = true;
int jy = iy, jz = iz;
if (jy < SW ) { jy += d_consts.mx[1] - 2*SW; inside = false; }
if (jy >= d_consts.mx[1] - SW) { jy -= d_consts.mx[1] - 2*SW; inside = false; }
if (jz < SW ) { jz += d_consts.mx[2] - 2*SW; inside = false; }
if (jz >= d_consts.mx[2] - SW) { jz -= d_consts.mx[2] - 2*SW; inside = false; }
if (inside)
return;
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy-SW,iz-SW) = F3_DEV(m, 0,jy-SW,jz-SW);
}
}
EXTERN_C void
cuda_fill_ghosts_periodic_yz(int p, struct psc_fields *pf, int mb, int me)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
cuda_bnd_set_constants(NULL, pf);
struct psc_patch *patch = &ppsc->patch[p];
int dimBlock[2] = { BLOCKSIZE_Y, BLOCKSIZE_Z };
int dimGrid[2] = { (patch->ldims[1] + 2*SW + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(patch->ldims[2] + 2*SW + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
RUN_KERNEL(dimGrid, dimBlock,
fill_ghosts_periodic_yz, (pfc->d_flds, mb, me));
}
__global__ static void
fill_ghosts_periodic_z(real *d_flds, int mb, int me)
{
int iy = blockIdx.x * blockDim.x + threadIdx.x;
int iz = blockIdx.y * blockDim.y + threadIdx.y;
if (!(iy < d_consts.mx[1] && iz < d_consts.mx[2]))
return;
bool inside = true;
int jy = iy, jz = iz;
if (jz < SW ) { jz += d_consts.mx[2] - 2*SW; inside = false; }
if (jz >= d_consts.mx[2] - SW) { jz -= d_consts.mx[2] - 2*SW; inside = false; }
if (inside)
return;
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy-SW,iz-SW) = F3_DEV(m, 0,jy-SW,jz-SW);
}
}
EXTERN_C void
cuda_fill_ghosts_periodic_z(int p, struct psc_fields *pf, int mb, int me)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
cuda_bnd_set_constants(NULL, pf);
struct psc_patch *patch = &ppsc->patch[p];
int dimBlock[2] = { BLOCKSIZE_Y, BLOCKSIZE_Z };
int dimGrid[2] = { (patch->ldims[1] + 2*SW + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(patch->ldims[2] + 2*SW + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
RUN_KERNEL(dimGrid, dimBlock,
fill_ghosts_periodic_z, (pfc->d_flds, mb, me));
}
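// add_ghosts_periodic_yz: values deposited into ghost cells are accumulated
// back onto the corresponding interior cells on the opposite (periodic) side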
__global__ static void
add_ghosts_periodic_yz(real *d_flds, int mb, int me)
{
int iy = blockIdx.x * blockDim.x + threadIdx.x;
int iz = blockIdx.y * blockDim.y + threadIdx.y;
if (!(iy < d_consts.mx[1] - 2*SW && iz < d_consts.mx[2] - 2*SW))
return;
if (iy < SW) {
int jy = iy + (d_consts.mx[1] - 2*SW);
int jz = iz;
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
if (iz < SW) {
jz = iz + (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
if (iz >= d_consts.mx[2] - 3*SW) {
jz = iz - (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
}
if (iy >= d_consts.mx[1] - 3*SW) {
int jy = iy - (d_consts.mx[1] - 2*SW);
int jz = iz;
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
if (iz < SW) {
jz = iz + (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
if (iz >= d_consts.mx[2] - 3*SW) {
jz = iz - (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
}
if (iz < SW) {
int jy = iy, jz = iz + (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
if (iz >= d_consts.mx[2] - 3*SW) {
int jy = iy, jz = iz - (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
}
EXTERN_C void
cuda_add_ghosts_periodic_yz(int p, struct psc_fields *pf, int mb, int me)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
cuda_bnd_set_constants(NULL, pf);
struct psc_patch *patch = &ppsc->patch[p];
int dimBlock[2] = { BLOCKSIZE_Y, BLOCKSIZE_Z };
int dimGrid[2] = { (patch->ldims[1] + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(patch->ldims[2] + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
RUN_KERNEL(dimGrid, dimBlock,
add_ghosts_periodic_yz, (pfc->d_flds, mb, me));
}
__global__ static void
add_ghosts_periodic_z(real *d_flds, int mb, int me)
{
int iy = blockIdx.x * blockDim.x + threadIdx.x;
int iz = blockIdx.y * blockDim.y + threadIdx.y;
if (!(iy < d_consts.mx[1] - 2*SW && iz < d_consts.mx[2] - 2*SW))
return;
if (iz < SW) {
int jy = iy, jz = iz + (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
if (iz >= d_consts.mx[2] - 3*SW) {
int jy = iy, jz = iz - (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
}
EXTERN_C void
cuda_add_ghosts_periodic_z(int p, struct psc_fields *pf, int mb, int me)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
cuda_bnd_set_constants(NULL, pf);
struct psc_patch *patch = &ppsc->patch[p];
int dimBlock[2] = { BLOCKSIZE_Y, BLOCKSIZE_Z };
int dimGrid[2] = { (patch->ldims[1] + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(patch->ldims[2] + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
RUN_KERNEL(dimGrid, dimBlock,
add_ghosts_periodic_z, (pfc->d_flds, mb, me));
}
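// The conducting-wall kernels below (behaviour inferred from their bodies): the
// lo/hi template flags select the low/high y edge. Ghost H and E values are filled
// by symmetric/antisymmetric reflection across the wall, the tangential E
// components on the wall itself are zeroed, and the boundary JYI current is folded
// back into the interior.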
template<bool lo, bool hi>
__global__ static void
conducting_wall_H_y(real *d_flds)
{
int iz = blockIdx.x * blockDim.x + threadIdx.x - SW;
if (iz >= d_consts.mx[2] - SW)
return;
int my = d_consts.mx[1] - 2*SW;
if (lo) {
F3_DEV(HY, 0,-1,iz) = F3_DEV(HY, 0, 1,iz);
F3_DEV(HX, 0,-1,iz) = -F3_DEV(HX, 0, 0,iz);
F3_DEV(HZ, 0,-1,iz) = -F3_DEV(HZ, 0, 0,iz);
}
if (hi) {
F3_DEV(HY, 0,my+1,iz) = F3_DEV(HY, 0,my-1,iz);
F3_DEV(HX, 0,my ,iz) = -F3_DEV(HX, 0,my-1,iz);
F3_DEV(HZ, 0,my ,iz) = -F3_DEV(HZ, 0,my-1,iz);
}
}
template<bool lo, bool hi>
__global__ static void
conducting_wall_E_y(real *d_flds)
{
int iz = blockIdx.x * blockDim.x + threadIdx.x - SW;
if (iz >= d_consts.mx[2] - SW)
return;
int my = d_consts.mx[1] - 2*SW;
if (lo) {
F3_DEV(EX, 0, 0,iz) = 0.;
F3_DEV(EX, 0,-1,iz) = F3_DEV(EX, 0, 1,iz);
F3_DEV(EY, 0,-1,iz) = -F3_DEV(EY, 0, 0,iz);
F3_DEV(EZ, 0, 0,iz) = 0.;
F3_DEV(EZ, 0,-1,iz) = F3_DEV(EZ, 0, 1,iz);
}
if (hi) {
F3_DEV(EX, 0,my ,iz) = 0.;
F3_DEV(EX, 0,my+1,iz) = F3_DEV(EX, 0, my-1,iz);
F3_DEV(EY, 0,my,iz) = -F3_DEV(EY, 0, my-1,iz);
F3_DEV(EZ, 0,my,iz) = 0.;
F3_DEV(EZ, 0,my+1,iz) = F3_DEV(EZ, 0, my-1,iz);
}
}
template<bool lo, bool hi>
__global__ static void
conducting_wall_J_y(real *d_flds)
{
int iz = blockIdx.x * blockDim.x + threadIdx.x - SW;
if (iz >= d_consts.mx[2] - SW)
return;
int my = d_consts.mx[1] - 2*SW;
if (lo) {
F3_DEV(JYI, 0, 0,iz) -= F3_DEV(JYI, 0,-1,iz);
F3_DEV(JYI, 0,-1,iz) = 0.;
}
if (hi) {
F3_DEV(JYI, 0,my-1,iz) -= F3_DEV(JYI, 0,my,iz);
F3_DEV(JYI, 0,my ,iz) = 0.;
}
// FIXME, JXI/JZI?
}
template<bool lo, bool hi>
static void
cuda_conducting_wall_H_y(int p, struct psc_fields *pf)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
cuda_bnd_set_constants(NULL, pf);
int dimGrid = (ppsc->patch[p].ldims[2] + 2*SW + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z;
hipLaunchKernelGGL(( conducting_wall_H_y<lo, hi>) , dim3(dimGrid), dim3(BLOCKSIZE_Z), 0, 0, pfc->d_flds);
cuda_sync_if_enabled();
}
template<bool lo, bool hi>
static void
cuda_conducting_wall_E_y(int p, struct psc_fields *pf)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
cuda_bnd_set_constants(NULL, pf);
int dimGrid = (ppsc->patch[p].ldims[2] + 2*SW + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z;
hipLaunchKernelGGL(( conducting_wall_E_y<lo, hi>) , dim3(dimGrid), dim3(BLOCKSIZE_Z), 0, 0, pfc->d_flds);
cuda_sync_if_enabled();
}
template<bool lo, bool hi>
static void
cuda_conducting_wall_J_y(int p, struct psc_fields *pf)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
cuda_bnd_set_constants(NULL, pf);
int dimGrid = (ppsc->patch[p].ldims[2] + 2*SW + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z;
hipLaunchKernelGGL(( conducting_wall_J_y<lo, hi>) , dim3(dimGrid), dim3(BLOCKSIZE_Z), 0, 0, pfc->d_flds);
cuda_sync_if_enabled();
}
EXTERN_C void
cuda_conducting_wall_H_lo_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_H_y<true, false>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_H_hi_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_H_y<false, true>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_H_lo_hi_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_H_y<true, true>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_E_lo_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_E_y<true, false>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_E_hi_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_E_y<false, true>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_E_lo_hi_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_E_y<true, true>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_J_lo_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_J_y<true, false>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_J_hi_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_J_y<false, true>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_J_lo_hi_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_J_y<true, true>(p, pf);
}
| 1bfcbc06c0fb7695ba7ee296c5d80fc52a02a2d4.cu |
#include "psc_cuda.h"
#define BLOCKSIZE_X 1
#define BLOCKSIZE_Y 4
#define BLOCKSIZE_Z 4
#define PFX(x) cuda_bnd_##x
#include "constants.c"
#define SW (2) // FIXME
// OPT lots of optimization opportunity in the single-proc/patch ones,
// but they may not be that important for real production
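// The fill_ghosts_periodic_* kernels below copy the SW-wide ghost layers from the
// periodically wrapped side of the patch, one thread per cell of the extended y/z
// box; the add_ghosts_periodic_* kernels fold the wrapped ghost contributions back
// into the corresponding interior cells.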
__global__ static void
fill_ghosts_periodic_yz(real *d_flds, int mb, int me)
{
int iy = blockIdx.x * blockDim.x + threadIdx.x;
int iz = blockIdx.y * blockDim.y + threadIdx.y;
if (!(iy < d_consts.mx[1] && iz < d_consts.mx[2]))
return;
bool inside = true;
int jy = iy, jz = iz;
if (jy < SW ) { jy += d_consts.mx[1] - 2*SW; inside = false; }
if (jy >= d_consts.mx[1] - SW) { jy -= d_consts.mx[1] - 2*SW; inside = false; }
if (jz < SW ) { jz += d_consts.mx[2] - 2*SW; inside = false; }
if (jz >= d_consts.mx[2] - SW) { jz -= d_consts.mx[2] - 2*SW; inside = false; }
if (inside)
return;
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy-SW,iz-SW) = F3_DEV(m, 0,jy-SW,jz-SW);
}
}
EXTERN_C void
cuda_fill_ghosts_periodic_yz(int p, struct psc_fields *pf, int mb, int me)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
cuda_bnd_set_constants(NULL, pf);
struct psc_patch *patch = &ppsc->patch[p];
int dimBlock[2] = { BLOCKSIZE_Y, BLOCKSIZE_Z };
int dimGrid[2] = { (patch->ldims[1] + 2*SW + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(patch->ldims[2] + 2*SW + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
RUN_KERNEL(dimGrid, dimBlock,
fill_ghosts_periodic_yz, (pfc->d_flds, mb, me));
}
__global__ static void
fill_ghosts_periodic_z(real *d_flds, int mb, int me)
{
int iy = blockIdx.x * blockDim.x + threadIdx.x;
int iz = blockIdx.y * blockDim.y + threadIdx.y;
if (!(iy < d_consts.mx[1] && iz < d_consts.mx[2]))
return;
bool inside = true;
int jy = iy, jz = iz;
if (jz < SW ) { jz += d_consts.mx[2] - 2*SW; inside = false; }
if (jz >= d_consts.mx[2] - SW) { jz -= d_consts.mx[2] - 2*SW; inside = false; }
if (inside)
return;
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy-SW,iz-SW) = F3_DEV(m, 0,jy-SW,jz-SW);
}
}
EXTERN_C void
cuda_fill_ghosts_periodic_z(int p, struct psc_fields *pf, int mb, int me)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
cuda_bnd_set_constants(NULL, pf);
struct psc_patch *patch = &ppsc->patch[p];
int dimBlock[2] = { BLOCKSIZE_Y, BLOCKSIZE_Z };
int dimGrid[2] = { (patch->ldims[1] + 2*SW + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(patch->ldims[2] + 2*SW + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
RUN_KERNEL(dimGrid, dimBlock,
fill_ghosts_periodic_z, (pfc->d_flds, mb, me));
}
__global__ static void
add_ghosts_periodic_yz(real *d_flds, int mb, int me)
{
int iy = blockIdx.x * blockDim.x + threadIdx.x;
int iz = blockIdx.y * blockDim.y + threadIdx.y;
if (!(iy < d_consts.mx[1] - 2*SW && iz < d_consts.mx[2] - 2*SW))
return;
if (iy < SW) {
int jy = iy + (d_consts.mx[1] - 2*SW);
int jz = iz;
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
if (iz < SW) {
jz = iz + (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
if (iz >= d_consts.mx[2] - 3*SW) {
jz = iz - (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
}
if (iy >= d_consts.mx[1] - 3*SW) {
int jy = iy - (d_consts.mx[1] - 2*SW);
int jz = iz;
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
if (iz < SW) {
jz = iz + (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
if (iz >= d_consts.mx[2] - 3*SW) {
jz = iz - (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
}
if (iz < SW) {
int jy = iy, jz = iz + (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
if (iz >= d_consts.mx[2] - 3*SW) {
int jy = iy, jz = iz - (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
}
EXTERN_C void
cuda_add_ghosts_periodic_yz(int p, struct psc_fields *pf, int mb, int me)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
cuda_bnd_set_constants(NULL, pf);
struct psc_patch *patch = &ppsc->patch[p];
int dimBlock[2] = { BLOCKSIZE_Y, BLOCKSIZE_Z };
int dimGrid[2] = { (patch->ldims[1] + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(patch->ldims[2] + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
RUN_KERNEL(dimGrid, dimBlock,
add_ghosts_periodic_yz, (pfc->d_flds, mb, me));
}
__global__ static void
add_ghosts_periodic_z(real *d_flds, int mb, int me)
{
int iy = blockIdx.x * blockDim.x + threadIdx.x;
int iz = blockIdx.y * blockDim.y + threadIdx.y;
if (!(iy < d_consts.mx[1] - 2*SW && iz < d_consts.mx[2] - 2*SW))
return;
if (iz < SW) {
int jy = iy, jz = iz + (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
if (iz >= d_consts.mx[2] - 3*SW) {
int jy = iy, jz = iz - (d_consts.mx[2] - 2*SW);
for (int m = mb; m < me; m++) {
F3_DEV(m, 0,iy,iz) += F3_DEV(m, 0,jy,jz);
}
}
}
EXTERN_C void
cuda_add_ghosts_periodic_z(int p, struct psc_fields *pf, int mb, int me)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
cuda_bnd_set_constants(NULL, pf);
struct psc_patch *patch = &ppsc->patch[p];
int dimBlock[2] = { BLOCKSIZE_Y, BLOCKSIZE_Z };
int dimGrid[2] = { (patch->ldims[1] + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(patch->ldims[2] + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
RUN_KERNEL(dimGrid, dimBlock,
add_ghosts_periodic_z, (pfc->d_flds, mb, me));
}
template<bool lo, bool hi>
__global__ static void
conducting_wall_H_y(real *d_flds)
{
int iz = blockIdx.x * blockDim.x + threadIdx.x - SW;
if (iz >= d_consts.mx[2] - SW)
return;
int my = d_consts.mx[1] - 2*SW;
if (lo) {
F3_DEV(HY, 0,-1,iz) = F3_DEV(HY, 0, 1,iz);
F3_DEV(HX, 0,-1,iz) = -F3_DEV(HX, 0, 0,iz);
F3_DEV(HZ, 0,-1,iz) = -F3_DEV(HZ, 0, 0,iz);
}
if (hi) {
F3_DEV(HY, 0,my+1,iz) = F3_DEV(HY, 0,my-1,iz);
F3_DEV(HX, 0,my ,iz) = -F3_DEV(HX, 0,my-1,iz);
F3_DEV(HZ, 0,my ,iz) = -F3_DEV(HZ, 0,my-1,iz);
}
}
template<bool lo, bool hi>
__global__ static void
conducting_wall_E_y(real *d_flds)
{
int iz = blockIdx.x * blockDim.x + threadIdx.x - SW;
if (iz >= d_consts.mx[2] - SW)
return;
int my = d_consts.mx[1] - 2*SW;
if (lo) {
F3_DEV(EX, 0, 0,iz) = 0.;
F3_DEV(EX, 0,-1,iz) = F3_DEV(EX, 0, 1,iz);
F3_DEV(EY, 0,-1,iz) = -F3_DEV(EY, 0, 0,iz);
F3_DEV(EZ, 0, 0,iz) = 0.;
F3_DEV(EZ, 0,-1,iz) = F3_DEV(EZ, 0, 1,iz);
}
if (hi) {
F3_DEV(EX, 0,my ,iz) = 0.;
F3_DEV(EX, 0,my+1,iz) = F3_DEV(EX, 0, my-1,iz);
F3_DEV(EY, 0,my,iz) = -F3_DEV(EY, 0, my-1,iz);
F3_DEV(EZ, 0,my,iz) = 0.;
F3_DEV(EZ, 0,my+1,iz) = F3_DEV(EZ, 0, my-1,iz);
}
}
template<bool lo, bool hi>
__global__ static void
conducting_wall_J_y(real *d_flds)
{
int iz = blockIdx.x * blockDim.x + threadIdx.x - SW;
if (iz >= d_consts.mx[2] - SW)
return;
int my = d_consts.mx[1] - 2*SW;
if (lo) {
F3_DEV(JYI, 0, 0,iz) -= F3_DEV(JYI, 0,-1,iz);
F3_DEV(JYI, 0,-1,iz) = 0.;
}
if (hi) {
F3_DEV(JYI, 0,my-1,iz) -= F3_DEV(JYI, 0,my,iz);
F3_DEV(JYI, 0,my ,iz) = 0.;
}
// FIXME, JXI/JZI?
}
template<bool lo, bool hi>
static void
cuda_conducting_wall_H_y(int p, struct psc_fields *pf)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
cuda_bnd_set_constants(NULL, pf);
int dimGrid = (ppsc->patch[p].ldims[2] + 2*SW + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z;
conducting_wall_H_y<lo, hi> <<<dimGrid, BLOCKSIZE_Z>>> (pfc->d_flds);
cuda_sync_if_enabled();
}
template<bool lo, bool hi>
static void
cuda_conducting_wall_E_y(int p, struct psc_fields *pf)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
cuda_bnd_set_constants(NULL, pf);
int dimGrid = (ppsc->patch[p].ldims[2] + 2*SW + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z;
conducting_wall_E_y<lo, hi> <<<dimGrid, BLOCKSIZE_Z>>> (pfc->d_flds);
cuda_sync_if_enabled();
}
template<bool lo, bool hi>
static void
cuda_conducting_wall_J_y(int p, struct psc_fields *pf)
{
struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
cuda_bnd_set_constants(NULL, pf);
int dimGrid = (ppsc->patch[p].ldims[2] + 2*SW + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z;
conducting_wall_J_y<lo, hi> <<<dimGrid, BLOCKSIZE_Z>>> (pfc->d_flds);
cuda_sync_if_enabled();
}
EXTERN_C void
cuda_conducting_wall_H_lo_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_H_y<true, false>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_H_hi_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_H_y<false, true>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_H_lo_hi_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_H_y<true, true>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_E_lo_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_E_y<true, false>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_E_hi_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_E_y<false, true>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_E_lo_hi_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_E_y<true, true>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_J_lo_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_J_y<true, false>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_J_hi_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_J_y<false, true>(p, pf);
}
EXTERN_C void
cuda_conducting_wall_J_lo_hi_y(int p, struct psc_fields *pf)
{
cuda_conducting_wall_J_y<true, true>(p, pf);
}
|
4d304bdd375fd08808f4b64f9c77ae57cf13eded.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_medianfilter.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
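// Benchmark harness: for every matrix size in matrices_ and every launch
// configuration in blocks_, the problem size is rounded up to a multiple of the
// block, the kernel is warmed up 10 times, and 1000 launches are timed and printed
// as [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].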
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const element *signal = NULL;
hipMalloc((void **)&signal, XSIZE*YSIZE); // cast needed because signal is declared const
element *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((_medianfilter), dim3(gridBlock),dim3(threadBlock), 0, 0, signal,result);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((_medianfilter), dim3(gridBlock),dim3(threadBlock), 0, 0, signal,result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((_medianfilter), dim3(gridBlock),dim3(threadBlock), 0, 0, signal,result);
}
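// Note: no hipDeviceSynchronize() separates the timed loop from the end timestamp,
// so the measurement largely reflects launch/queueing overhead rather than the
// completed execution of all 1000 launches.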
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4d304bdd375fd08808f4b64f9c77ae57cf13eded.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_medianfilter.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const element *signal = NULL;
cudaMalloc((void **)&signal, XSIZE*YSIZE); // cast needed because signal is declared const
element *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
_medianfilter<<<gridBlock,threadBlock>>>(signal,result);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
_medianfilter<<<gridBlock,threadBlock>>>(signal,result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
_medianfilter<<<gridBlock,threadBlock>>>(signal,result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
23fafe0314d15f678b1ffeb5e16dcc0a2f236db8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//============================================================================
#include<cstdlib>
#include<time.h>
#include<cuda.h>
#include<iostream>
#include<math.h> //Included just to use the Power function
#define BLOCK_SIZE 32
#define TILE_SIZE 32
#define MAX_MASK_WIDTH 10
__constant__ float M[MAX_MASK_WIDTH];
using namespace std;
//====== Function made to print vector =========================================
void printVector(float *A, int length)
{
for (int i=0; i<length; i++)
{
cout<<A[i]<<" | ";
}
cout<<endl;
}
//====== Function made to fill the vector with some given value ================
void fillVector(float *A, float value, int length)
{
for (int i=0; i<length; i++)
{
A[i] = value;
}
}
//====== Compare results =======================================================
void compareVector (float *A, float *B,int n)
{
for (int i=0; i<n; i++ )
{
if (A[i]!=B[i]) // note: exact float comparison; a small tolerance may be preferable
{
cout<<"## Sequential and Parallel results are NOT equal ##"<<endl;
return;
}
}
cout<<"== Sequential and Parallel results are equal =="<<endl;
}
//====== Serial Convolution ====================================================
void serialConvolution(float *input, float *output, float *mask, int mask_length, int length)
{
int start = 0;
float temp = 0.0;
for (int i = 0; i < length; i++)
{
for (int j = 0; j < mask_length; j++)
{
start = i - (mask_length / 2);
if (start + j >= 0 && start + j < length)
temp += input[start + j] * mask[j];
}
output[i] = temp;
temp = 0.0;
}
}
//====== Basic convolution kernel ==============================================
__global__ void convolutionBasicKernel(float *N, float *M, float *P,
int Mask_Width, int Width)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
float Pvalue = 0;
int N_start_point = i - (Mask_Width/2);
for (int j = 0; j < Mask_Width; j++)
{
if (N_start_point + j >= 0 && N_start_point + j < Width)
{
Pvalue += N[N_start_point + j]*M[j];
}
}
P[i] = Pvalue;
}
//====== Convolution kernel using constant memory and caching ==================
__global__ void convolutionKernelConstant(float *N, float *P, int Mask_Width,
int Width)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
float Pvalue = 0;
int N_start_point = i - (Mask_Width/2);
for (int j = 0; j < Mask_Width; j++)
{
if (N_start_point + j >= 0 && N_start_point + j < Width)
{
Pvalue += N[N_start_point + j]*M[j];
}
}
P[i] = Pvalue;
}
//===== Tiled Convolution kernel using shared memory ===========================
__global__ void convolutionKernelShared(float *N, float *P, int Mask_Width,
int Width)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ float N_ds[TILE_SIZE + MAX_MASK_WIDTH - 1];
int n = Mask_Width/2;
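// Halo handling (sketch of the logic below): the last n threads of each block also
// stage the trailing n cells of the previous tile (or 0 outside the signal) into
// the front of N_ds, and the first n threads stage the leading n cells of the next
// tile at the back, so the convolution loop reads only from shared memory.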
int halo_index_left = (blockIdx.x - 1)*blockDim.x + threadIdx.x;
if (threadIdx.x >= blockDim.x - n)
{
N_ds[threadIdx.x - (blockDim.x - n)] =
(halo_index_left < 0) ? 0 : N[halo_index_left];
}
N_ds[n + threadIdx.x] = N[blockIdx.x*blockDim.x + threadIdx.x];
int halo_index_right = (blockIdx.x + 1)*blockDim.x + threadIdx.x;
if (threadIdx.x < n)
{
N_ds[n + blockDim.x + threadIdx.x] =
(halo_index_right >= Width) ? 0 : N[halo_index_right];
}
__syncthreads();
float Pvalue = 0;
for(int j = 0; j < Mask_Width; j++)
{
Pvalue += N_ds[threadIdx.x + j]*M[j];
}
P[i] = Pvalue;
}
//====== A simpler tiled convolution kernel using shared memory and general caching
__global__ void convolutionKernelSharedSimplier(float *N, float *P, int Mask_Width,
int Width)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ float N_ds[TILE_SIZE];
N_ds[threadIdx.x] = N[i];
__syncthreads();
int This_tile_start_point = blockIdx.x * blockDim.x;
int Next_tile_start_point = (blockIdx.x + 1) * blockDim.x;
int N_start_point = i - (Mask_Width/2);
float Pvalue = 0;
for (int j = 0; j < Mask_Width; j ++)
{
int N_index = N_start_point + j;
if (N_index >= 0 && N_index < Width)
{
if ((N_index >= This_tile_start_point)
&& (N_index < Next_tile_start_point))
{
Pvalue += N_ds[threadIdx.x+j-(Mask_Width/2)]*M[j];
} else
{
Pvalue += N[N_index] * M[j];
}
}
}
P[i] = Pvalue;
}
//===== Convolution kernel call ================================================
void convolutionCall (float *input, float *output, float *mask, int mask_length, int length)
{
float *d_input;
float *d_mask;
float *d_output;
float block_size = BLOCK_SIZE;//The compiler doesn't let me cast the variable
hipMalloc(&d_input, length * sizeof(float));
hipMalloc(&d_mask, mask_length * sizeof(float));
hipMalloc(&d_output, length * sizeof(float));
hipMemcpy (d_input, input, length * sizeof (float), hipMemcpyHostToDevice);
hipMemcpy (d_mask, mask, mask_length * sizeof (float), hipMemcpyHostToDevice);
dim3 dimGrid (ceil (length / block_size), 1, 1);
dim3 dimBlock (block_size, 1, 1);
hipLaunchKernelGGL(( convolutionBasicKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_input, d_mask, d_output, mask_length, length);
hipDeviceSynchronize();
hipMemcpy (output, d_output, length * sizeof (float), hipMemcpyDeviceToHost);
hipFree (d_input);
hipFree (d_mask);
hipFree (d_output);
}
//==============================================================================
void convolutionCallConstant (float *input, float *output, float *mask, int mask_length, int length)
{
float *d_input;
float *d_output;
float block_size = BLOCK_SIZE;//The compiler doesn't let me cast the variable
hipMalloc(&d_input, length * sizeof(float));
hipMalloc(&d_output, length * sizeof(float));
hipMemcpy (d_input, input, length * sizeof (float), hipMemcpyHostToDevice);
hipMemcpyToSymbol (M, mask, mask_length * sizeof (float));
dim3 dimGrid (ceil (length / block_size), 1, 1);
dim3 dimBlock (block_size, 1, 1);
hipLaunchKernelGGL(( convolutionKernelConstant), dim3(dimGrid), dim3(dimBlock), 0, 0, d_input,d_output, mask_length, length);
hipDeviceSynchronize();
hipMemcpy (output, d_output, length * sizeof (float), hipMemcpyDeviceToHost);
hipFree (d_input);
hipFree (d_output);
}
//==============================================================================
void convolutionCallWithTilesComplex (float *input, float *output, float *mask, int mask_length, int length)
{
float *d_input;
float *d_output;
float block_size = BLOCK_SIZE;//The compiler doesn't let me cast the variable
hipMalloc(&d_input, length * sizeof(float));
hipMalloc(&d_output, length * sizeof(float));
hipMemcpy (d_input, input, length * sizeof (float), hipMemcpyHostToDevice);
hipMemcpyToSymbol (M, mask, mask_length * sizeof (float));
dim3 dimGrid (ceil (length / block_size), 1, 1);
dim3 dimBlock (block_size, 1, 1);
hipLaunchKernelGGL(( convolutionKernelShared), dim3(dimGrid), dim3(dimBlock), 0, 0, d_input,d_output, mask_length, length);
hipDeviceSynchronize();
hipMemcpy (output, d_output, length * sizeof (float), hipMemcpyDeviceToHost);
hipFree (d_input);
hipFree (d_output);
}
//====== Convolution kernel call tiled version (the simplified one) ============
void convolutionCallWithTiles (float *input, float *output, float *mask, int mask_length, int length)
{
float *d_input;
float *d_output;
float block_size = BLOCK_SIZE;//The compiler doesn't let me cast the variable
hipMalloc(&d_input, length * sizeof(float));
hipMalloc(&d_output, length * sizeof(float));
hipMemcpy (d_input, input, length * sizeof (float), hipMemcpyHostToDevice);
hipMemcpyToSymbol (M, mask, mask_length * sizeof (float));
dim3 dimGrid (ceil (length / block_size), 1, 1);
dim3 dimBlock (block_size, 1, 1);
hipLaunchKernelGGL(( convolutionKernelSharedSimplier), dim3(dimGrid), dim3(dimBlock), 0, 0, d_input,d_output, mask_length, length);
hipDeviceSynchronize();
hipMemcpy (output, d_output, length * sizeof (float), hipMemcpyDeviceToHost);
hipFree (d_input);
hipFree (d_output);
}
//================= MAIN =======================================================
int main ()
{
for(int i=0; i<=29;i++)//to execute the program many times just to get all the test values
{
cout<<"=> EXECUTION #"<<i<<endl;
unsigned int length = pow(2,i);
int mask_length = 5;
int op = 1; //To select which parallel version we want to execute
//1 basic parallel - 2 parallel with constant memory
//3 parallel with shared memory - 4 parallel with shared memory simplified
clock_t start, finish; //Clock variables
double elapsedSecuential, elapsedParallel, elapsedParallelConstant,
elapsedParallelSharedComplex,elapsedParallelSharedTiles, optimization;
float *A = (float *) malloc(length * sizeof(float));
float *mask = (float *) malloc(mask_length * sizeof(float));
float *Cserial = (float *) malloc(length * sizeof(float));
float *Cparallel = (float *) malloc(length * sizeof(float));
float *CparallelWithTiles = (float *) malloc(length * sizeof(float));
float *CparallelConstant = (float *) malloc (length * sizeof(float));
float *CparallelWithTilesComplex = (float *) malloc(length * sizeof(float));
fillVector(A,1.0,length);
fillVector(mask,2.0,mask_length);
fillVector(Cserial,0.0,length);
fillVector(Cparallel,0.0,length);
fillVector(CparallelWithTiles,0.0,length);
fillVector(CparallelConstant,0.0,length);
fillVector(CparallelWithTilesComplex,0.0,length);
//============================================================================
cout<<"Serial result"<<endl;
start = clock();
serialConvolution(A,Cserial,mask,mask_length,length);
finish = clock();
elapsedSecuential = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The Secuential process took: " << elapsedSecuential << " seconds to execute "<< endl;
//printVector(Cserial,length);
cout<<endl;
//============================================================================
switch (op)
{
case 1:
cout<<"==============================================================="<<endl;
cout<<"Parallel result"<<endl;
start = clock();
convolutionCall(A,Cparallel,mask,mask_length,length);
finish = clock();
elapsedParallel = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process took: " << elapsedParallel << " seconds to execute "<< endl;
optimization = elapsedSecuential/elapsedParallel;
cout<< "The acceleration we've got: " << optimization <<endl;
//printVector(Cparallel,length);
compareVector(Cserial,Cparallel,length);
cout<<endl;
break;
case 2:
cout<<"==============================================================="<<endl;
cout<<"Parallel with constant memory"<<endl;
start = clock();
convolutionCallConstant(A,CparallelConstant,mask,mask_length,length);
finish = clock();
elapsedParallelConstant = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process took: " << elapsedParallelConstant << " seconds to execute "<< endl;
optimization = elapsedSecuential/elapsedParallelConstant;
cout<< "The acceleration we've got: " << optimization <<endl;
//printVector(CparallelConstant,length);
compareVector(Cserial,CparallelConstant,length);
cout<<endl;
break;
case 3:
cout<<"==============================================================="<<endl;
cout<<"Parallel with shared memory result"<<endl;
start = clock();
convolutionCallWithTilesComplex(A,CparallelWithTilesComplex,mask,mask_length,length);
finish = clock();
elapsedParallelSharedComplex = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process took: " << elapsedParallelSharedComplex << " seconds to execute "<< endl;
optimization = elapsedSecuential/elapsedParallelSharedComplex;
cout<< "The acceleration we've got: " << optimization <<endl;
//printVector(CparallelWithTilesComplex,length);
compareVector(Cserial,CparallelWithTilesComplex,length);
cout<<endl;
break;
case 4:
cout<<"==============================================================="<<endl;
cout<<"Parallel with shared memory result simplified"<<endl;
start = clock();
convolutionCallWithTiles(A,CparallelWithTiles,mask,mask_length,length);
finish = clock();
elapsedParallelSharedTiles = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process took: " << elapsedParallelSharedTiles << " seconds to execute "<< endl;
optimization = elapsedSecuential/elapsedParallelSharedTiles;
cout<< "The acceleration we've got: " << optimization <<endl;
//printVector(CparallelWithTiles,length);
compareVector(Cserial,CparallelWithTiles,length);
cout<<endl;
break;
}
free(A);
free(mask);
free(Cserial);
free(Cparallel);
free(CparallelWithTiles);
free(CparallelConstant);
free(CparallelWithTilesComplex);
}
}
| 23fafe0314d15f678b1ffeb5e16dcc0a2f236db8.cu | //============================================================================
#include<cstdlib>
#include<time.h>
#include<cuda.h>
#include<iostream>
#include<math.h> //Included just to use the Power function
#define BLOCK_SIZE 32
#define TILE_SIZE 32
#define MAX_MASK_WIDTH 10
__constant__ float M[MAX_MASK_WIDTH];
using namespace std;
//====== Function made to print vector =========================================
void printVector(float *A, int length)
{
for (int i=0; i<length; i++)
{
cout<<A[i]<<" | ";
}
cout<<endl;
}
//====== Function made to fill the vector with some given value ================
void fillVector(float *A, float value, int length)
{
for (int i=0; i<length; i++)
{
A[i] = value;
}
}
//====== Compare results =======================================================
void compareVector (float *A, float *B,int n)
{
for (int i=0; i<n; i++ )
{
if (A[i]!=B[i]) // note: exact float comparison; a small tolerance may be preferable
{
cout<<"## Sequential and Parallel results are NOT equal ##"<<endl;
return;
}
}
cout<<"== Sequential and Parallel results are equal =="<<endl;
}
//====== Serial Convolution ====================================================
void serialConvolution(float *input, float *output, float *mask, int mask_length, int length)
{
int start = 0;
float temp = 0.0;
for (int i = 0; i < length; i++)
{
for (int j = 0; j < mask_length; j++)
{
start = i - (mask_length / 2);
if (start + j >= 0 && start + j < length)
temp += input[start + j] * mask[j];
}
output[i] = temp;
temp = 0.0;
}
}
//====== Basic convolution kernel ==============================================
__global__ void convolutionBasicKernel(float *N, float *M, float *P,
int Mask_Width, int Width)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
float Pvalue = 0;
int N_start_point = i - (Mask_Width/2);
for (int j = 0; j < Mask_Width; j++)
{
if (N_start_point + j >= 0 && N_start_point + j < Width)
{
Pvalue += N[N_start_point + j]*M[j];
}
}
P[i] = Pvalue;
}
//====== Convolution kernel using constant memory and caching ==================
__global__ void convolutionKernelConstant(float *N, float *P, int Mask_Width,
int Width)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
float Pvalue = 0;
int N_start_point = i - (Mask_Width/2);
for (int j = 0; j < Mask_Width; j++)
{
if (N_start_point + j >= 0 && N_start_point + j < Width)
{
Pvalue += N[N_start_point + j]*M[j];
}
}
P[i] = Pvalue;
}
//===== Tiled Convolution kernel using shared memory ===========================
__global__ void convolutionKernelShared(float *N, float *P, int Mask_Width,
int Width)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ float N_ds[TILE_SIZE + MAX_MASK_WIDTH - 1];
int n = Mask_Width/2;
int halo_index_left = (blockIdx.x - 1)*blockDim.x + threadIdx.x;
if (threadIdx.x >= blockDim.x - n)
{
N_ds[threadIdx.x - (blockDim.x - n)] =
(halo_index_left < 0) ? 0 : N[halo_index_left];
}
N_ds[n + threadIdx.x] = N[blockIdx.x*blockDim.x + threadIdx.x];
int halo_index_right = (blockIdx.x + 1)*blockDim.x + threadIdx.x;
if (threadIdx.x < n)
{
N_ds[n + blockDim.x + threadIdx.x] =
(halo_index_right >= Width) ? 0 : N[halo_index_right];
}
__syncthreads();
float Pvalue = 0;
for(int j = 0; j < Mask_Width; j++)
{
Pvalue += N_ds[threadIdx.x + j]*M[j];
}
P[i] = Pvalue;
}
//====== A simpler tiled convolution kernel using shared memory and general caching
__global__ void convolutionKernelSharedSimplier(float *N, float *P, int Mask_Width,
int Width)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ float N_ds[TILE_SIZE];
N_ds[threadIdx.x] = N[i];
__syncthreads();
int This_tile_start_point = blockIdx.x * blockDim.x;
int Next_tile_start_point = (blockIdx.x + 1) * blockDim.x;
int N_start_point = i - (Mask_Width/2);
float Pvalue = 0;
for (int j = 0; j < Mask_Width; j ++)
{
int N_index = N_start_point + j;
if (N_index >= 0 && N_index < Width)
{
if ((N_index >= This_tile_start_point)
&& (N_index < Next_tile_start_point))
{
Pvalue += N_ds[threadIdx.x+j-(Mask_Width/2)]*M[j];
} else
{
Pvalue += N[N_index] * M[j];
}
}
}
P[i] = Pvalue;
}
//===== Convolution kernel call ================================================
void convolutionCall (float *input, float *output, float *mask, int mask_length, int length)
{
float *d_input;
float *d_mask;
float *d_output;
float block_size = BLOCK_SIZE;//The compiler doesn't let me cast the variable
cudaMalloc(&d_input, length * sizeof(float));
cudaMalloc(&d_mask, mask_length * sizeof(float));
cudaMalloc(&d_output, length * sizeof(float));
cudaMemcpy (d_input, input, length * sizeof (float), cudaMemcpyHostToDevice);
cudaMemcpy (d_mask, mask, mask_length * sizeof (float), cudaMemcpyHostToDevice);
dim3 dimGrid (ceil (length / block_size), 1, 1);
dim3 dimBlock (block_size, 1, 1);
convolutionBasicKernel<<<dimGrid, dimBlock>>> (d_input, d_mask, d_output, mask_length, length);
cudaDeviceSynchronize();
cudaMemcpy (output, d_output, length * sizeof (float), cudaMemcpyDeviceToHost);
cudaFree (d_input);
cudaFree (d_mask);
cudaFree (d_output);
}
//==============================================================================
void convolutionCallConstant (float *input, float *output, float *mask, int mask_length, int length)
{
float *d_input;
float *d_output;
float block_size = BLOCK_SIZE;//The compiler doesn't let me cast the variable
cudaMalloc(&d_input, length * sizeof(float));
cudaMalloc(&d_output, length * sizeof(float));
cudaMemcpy (d_input, input, length * sizeof (float), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol (M, mask, mask_length * sizeof (float));
dim3 dimGrid (ceil (length / block_size), 1, 1);
dim3 dimBlock (block_size, 1, 1);
convolutionKernelConstant<<<dimGrid, dimBlock>>> (d_input,d_output, mask_length, length);
cudaDeviceSynchronize();
cudaMemcpy (output, d_output, length * sizeof (float), cudaMemcpyDeviceToHost);
cudaFree (d_input);
cudaFree (d_output);
}
//==============================================================================
void convolutionCallWithTilesComplex (float *input, float *output, float *mask, int mask_length, int length)
{
float *d_input;
float *d_output;
float block_size = BLOCK_SIZE;//The compiler doesn't let me cast the variable
cudaMalloc(&d_input, length * sizeof(float));
cudaMalloc(&d_output, length * sizeof(float));
cudaMemcpy (d_input, input, length * sizeof (float), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol (M, mask, mask_length * sizeof (float));
dim3 dimGrid (ceil (length / block_size), 1, 1);
dim3 dimBlock (block_size, 1, 1);
convolutionKernelShared<<<dimGrid, dimBlock>>> (d_input,d_output, mask_length, length);
cudaDeviceSynchronize();
cudaMemcpy (output, d_output, length * sizeof (float), cudaMemcpyDeviceToHost);
cudaFree (d_input);
cudaFree (d_output);
}
//====== Convolution kernel call tiled version (the simplified one) ============
void convolutionCallWithTiles (float *input, float *output, float *mask, int mask_length, int length)
{
float *d_input;
float *d_output;
float block_size = BLOCK_SIZE;//The compiler doesn't let me cast the variable
cudaMalloc(&d_input, length * sizeof(float));
cudaMalloc(&d_output, length * sizeof(float));
cudaMemcpy (d_input, input, length * sizeof (float), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol (M, mask, mask_length * sizeof (float));
dim3 dimGrid (ceil (length / block_size), 1, 1);
dim3 dimBlock (block_size, 1, 1);
convolutionKernelSharedSimplier<<<dimGrid, dimBlock>>> (d_input,d_output, mask_length, length);
cudaDeviceSynchronize();
cudaMemcpy (output, d_output, length * sizeof (float), cudaMemcpyDeviceToHost);
cudaFree (d_input);
cudaFree (d_output);
}
//================= MAIN =======================================================
int main ()
{
for(int i=0; i<=29;i++)//to execute the program many times just to get all the test values
{
cout<<"=> EXECUTION #"<<i<<endl;
unsigned int length = pow(2,i);
int mask_length = 5;
int op = 1; //To select which parallel version we want to execute
//1 basic parallel - 2 parallel with constant memory
//3 parallel with shared memory - 4 parallel with shared memory simplified
clock_t start, finish; //Clock variables
double elapsedSecuential, elapsedParallel, elapsedParallelConstant,
elapsedParallelSharedComplex,elapsedParallelSharedTiles, optimization;
float *A = (float *) malloc(length * sizeof(float));
float *mask = (float *) malloc(mask_length * sizeof(float));
float *Cserial = (float *) malloc(length * sizeof(float));
float *Cparallel = (float *) malloc(length * sizeof(float));
float *CparallelWithTiles = (float *) malloc(length * sizeof(float));
float *CparallelConstant = (float *) malloc (length * sizeof(float));
float *CparallelWithTilesComplex = (float *) malloc(length * sizeof(float));
fillVector(A,1.0,length);
fillVector(mask,2.0,mask_length);
fillVector(Cserial,0.0,length);
fillVector(Cparallel,0.0,length);
fillVector(CparallelWithTiles,0.0,length);
fillVector(CparallelConstant,0.0,length);
fillVector(CparallelWithTilesComplex,0.0,length);
//============================================================================
cout<<"Serial result"<<endl;
start = clock();
serialConvolution(A,Cserial,mask,mask_length,length);
finish = clock();
elapsedSecuential = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The Secuential process took: " << elapsedSecuential << " seconds to execute "<< endl;
//printVector(Cserial,length);
cout<<endl;
//============================================================================
switch (op)
{
case 1:
cout<<"==============================================================="<<endl;
cout<<"Parallel result"<<endl;
start = clock();
convolutionCall(A,Cparallel,mask,mask_length,length);
finish = clock();
elapsedParallel = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process took: " << elapsedParallel << " seconds to execute "<< endl;
optimization = elapsedSecuential/elapsedParallel;
cout<< "The acceleration we've got: " << optimization <<endl;
//printVector(Cparallel,length);
compareVector(Cserial,Cparallel,length);
cout<<endl;
break;
case 2:
cout<<"==============================================================="<<endl;
cout<<"Parallel with constant memory"<<endl;
start = clock();
convolutionCallConstant(A,CparallelConstant,mask,mask_length,length);
finish = clock();
elapsedParallelConstant = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process took: " << elapsedParallelConstant << " seconds to execute "<< endl;
optimization = elapsedSecuential/elapsedParallelConstant;
cout<< "The acceleration we've got: " << optimization <<endl;
//printVector(CparallelConstant,length);
compareVector(Cserial,CparallelConstant,length);
cout<<endl;
break;
case 3:
cout<<"==============================================================="<<endl;
cout<<"Parallel with shared memory result"<<endl;
start = clock();
convolutionCallWithTilesComplex(A,CparallelWithTilesComplex,mask,mask_length,length);
finish = clock();
elapsedParallelSharedComplex = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process took: " << elapsedParallelSharedComplex << " seconds to execute "<< endl;
optimization = elapsedSecuential/elapsedParallelSharedComplex;
cout<< "The acceleration we've got: " << optimization <<endl;
//printVector(CparallelWithTilesComplex,length);
compareVector(Cserial,CparallelWithTilesComplex,length);
cout<<endl;
break;
case 4:
cout<<"==============================================================="<<endl;
cout<<"Parallel with shared memory result simplified"<<endl;
start = clock();
convolutionCallWithTiles(A,CparallelWithTiles,mask,mask_length,length);
finish = clock();
elapsedParallelSharedTiles = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process took: " << elapsedParallelSharedTiles << " seconds to execute "<< endl;
optimization = elapsedSecuential/elapsedParallelSharedTiles;
cout<< "The acceleration we've got: " << optimization <<endl;
//printVector(CparallelWithTiles,length);
compareVector(Cserial,CparallelWithTiles,length);
cout<<endl;
break;
}
free(A);
free(mask);
free(Cserial);
free(Cparallel);
free(CparallelWithTiles);
free(CparallelConstant);
free(CparallelWithTilesComplex);
}
}
|
22f53ccaac432af0bf4fbff03672f0833ac68428.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Kernels.h"
__global__ void kernel_propagate_sig(nm_float * d_neurones, NetMath::Sigmoid * d_sigmoids,
Network::Dimension::Parameter * param, int *value, int *pitch, int *sig_pitch)
{
int neu_src = threadIdx.x / value[param->cur_layer] + pitch[param->cur_layer],
sig = sig_pitch[param->cur_layer] + threadIdx.x;
d_sigmoids[sig].set(d_neurones[neu_src]);
}
__global__ void kernel_propagate_neu(nm_float * d_neurones, NetMath::Sigmoid * d_sigmoids,
Network::Dimension::Parameter * param, int *value, int *pitch, int *sig_value)
{
int neu = threadIdx.x + pitch[param->cur_layer];
d_neurones[neu] = 0.0;
for (int i = threadIdx.x; i < sig_value[param->cur_layer - 1]; i += value[param->cur_layer - 1])
d_neurones[neu] += d_sigmoids[i]();
}
CUDA_ERROR KernelCallers::check_exec(const std::string &s)
{
hipError_t cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
Log(ERROR, "kernel function ", s, " failed : ", hipGetErrorString(cudaStatus));
return cudaStatus;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
Log(ERROR, "function ", s, " failed to synchronize ", cudaStatus);
}
return cudaStatus;
}
CUDA_ERROR KernelCallers::propagate_sig(nm_float * d_neu, NetMath::Sigmoid * d_sig, uint dimension,
Network::Dimension::Parameter * param, int *value, int *pitch, int *sig_pitch)
{
hipLaunchKernelGGL(( kernel_propagate_sig), dim3(1), dim3(dimension), 1 , 0, d_neu, d_sig, param, value, pitch, sig_pitch);
return check_exec("propagate_sig");
}
CUDA_ERROR KernelCallers::propagate_neu(nm_float * d_neu, NetMath::Sigmoid * d_sig, uint dimension,
Network::Dimension::Parameter * param, int *value, int *pitch, int *sig_value)
{
hipLaunchKernelGGL(( kernel_propagate_neu), dim3(1), dim3(dimension), 1 , 0, d_neu, d_sig, param, value, pitch, sig_value);
return check_exec("propagate_neu");
}
CUDA_ERROR KernelChecker::propagate_sig(Network::Dimension *d_dim, const uint & poolsize)
{
for(int i=0; i<poolsize; i++)
std::cout << i / d_dim->value[d_dim->param.cur_layer] + d_dim->pitch[d_dim->param.cur_layer]
<< " " << d_dim->sig_pitch[d_dim->param.cur_layer] + i << std::endl;
return hipSuccess;
}
| 22f53ccaac432af0bf4fbff03672f0833ac68428.cu | #include "Kernels.h"
__global__ void kernel_propagate_sig(nm_float * d_neurones, NetMath::Sigmoid * d_sigmoids,
Network::Dimension::Parameter * param, int *value, int *pitch, int *sig_pitch)
{
int neu_src = threadIdx.x / value[param->cur_layer] + pitch[param->cur_layer],
sig = sig_pitch[param->cur_layer] + threadIdx.x;
d_sigmoids[sig].set(d_neurones[neu_src]);
}
__global__ void kernel_propagate_neu(nm_float * d_neurones, NetMath::Sigmoid * d_sigmoids,
Network::Dimension::Parameter * param, int *value, int *pitch, int *sig_value)
{
int neu = threadIdx.x + pitch[param->cur_layer];
d_neurones[neu] = 0.0;
for (int i = threadIdx.x; i < sig_value[param->cur_layer - 1]; i += value[param->cur_layer - 1])
d_neurones[neu] += d_sigmoids[i]();
}
CUDA_ERROR KernelCallers::check_exec(const std::string &s)
{
cudaError_t cudaStatus = cudaGetLastError();
if (cudaStatus != CUDA_SUCCESS) {
Log(ERROR, "kernel function ", s, " failed : ", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != CUDA_SUCCESS) {
Log(ERROR, "function ", s, " failed to synchronize ", cudaStatus);
}
return cudaStatus;
}
CUDA_ERROR KernelCallers::propagate_sig(nm_float * d_neu, NetMath::Sigmoid * d_sig, uint dimension,
Network::Dimension::Parameter * param, int *value, int *pitch, int *sig_pitch)
{
kernel_propagate_sig<<<1, dimension, 1 >>>(d_neu, d_sig, param, value, pitch, sig_pitch);
return check_exec("propagate_sig");
}
CUDA_ERROR KernelCallers::propagate_neu(nm_float * d_neu, NetMath::Sigmoid * d_sig, uint dimension,
Network::Dimension::Parameter * param, int *value, int *pitch, int *sig_value)
{
kernel_propagate_neu<<<1, dimension, 1 >>>(d_neu, d_sig, param, value, pitch, sig_value);
return check_exec("propagate_neu");
}
CUDA_ERROR KernelChecker::propagate_sig(Network::Dimension *d_dim, const uint & poolsize)
{
for(int i=0; i<poolsize; i++)
std::cout << i / d_dim->value[d_dim->param.cur_layer] + d_dim->pitch[d_dim->param.cur_layer]
<< " " << d_dim->sig_pitch[d_dim->param.cur_layer] + i << std::endl;
return CUDA_SUCCESS;
}
|
0933b9cc4761babd3a14be57d4a4ed0337b4690b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"common_structs.h"
#include<stdio.h>
#include"imageOperations.h"
#define BLOCK_WIDTH 10
#define BLOCK_HEIGHT 60
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define cudaCheckErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line)
{
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
__global__ void rgb2GreyImage(tUInt_8* input, tUInt_8* output, tUInt_32 InputPitch, tUInt_32 outputPitch, tUInt_32 inputChannels,tUInt_32 outputChannels) {
tUInt_32 idxValX = blockDim.x * blockIdx.x + threadIdx.x;
tUInt_32 idxValY = blockDim.y * blockIdx.y + threadIdx.y;
tUInt_32 inputIdx = idxValY * InputPitch + idxValX * inputChannels;
tUInt_32 outputIdx = idxValY * outputPitch + idxValX * outputChannels;
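// The weights below are the standard BT.601 luma coefficients; the channel order
// is presumably BGR (the 0.2989 "red" weight is applied at offset +2, the 0.1140
// "blue" weight at offset 0), as in OpenCV-style buffers.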
output[outputIdx] = (tUInt_8) (0.2989 * input[inputIdx + 2] + 0.5870 * input[inputIdx + 1] + 0.1140 * input[inputIdx]);
}
extern void rgb2GreyImageFunctionHost(tUInt_8* d_inputBuffer, tUInt_8* outputBuffer, IMAGE_INFO inputImageInfo, IMAGE_INFO outputImageInfo,tUInt_8** d_greyImage) {
size_t sizeInput = inputImageInfo.width * inputImageInfo.height * inputImageInfo.channnels * sizeof(tUInt_8);
size_t sizeOutput = outputImageInfo.width * outputImageInfo.height * outputImageInfo.channnels * sizeof(tUInt_8);
tUInt_32 inputPitch = inputImageInfo.width * inputImageInfo.channnels;
tUInt_32 outputPitch = outputImageInfo.width * outputImageInfo.channnels;
cudaCheckErrors(hipMalloc(d_greyImage, sizeOutput));
dim3 threadsPerBlock(10, 6);
dim3 numBlocks(outputImageInfo.width / threadsPerBlock.x, outputImageInfo.height / threadsPerBlock.y);
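// Note: this integer division assumes width and height are multiples of the 10x6
// block; for other sizes the right/bottom border is simply not processed (the
// kernel has no bounds check).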
hipLaunchKernelGGL(( rgb2GreyImage), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_inputBuffer,*d_greyImage,inputPitch,outputPitch,inputImageInfo.channnels,outputImageInfo.channnels);
cudaCheckErrors(hipMemcpy(outputBuffer,*d_greyImage,sizeOutput,hipMemcpyDeviceToHost));
}
| 0933b9cc4761babd3a14be57d4a4ed0337b4690b.cu | #include"common_structs.h"
#include<stdio.h>
#include"imageOperations.h"
#define BLOCK_WIDTH 10
#define BLOCK_HEIGHT 60
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define cudaCheckErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line)
{
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
__global__ void rgb2GreyImage(tUInt_8* input, tUInt_8* output, tUInt_32 InputPitch, tUInt_32 outputPitch, tUInt_32 inputChannels,tUInt_32 outputChannels) {
tUInt_32 idxValX = blockDim.x * blockIdx.x + threadIdx.x;
tUInt_32 idxValY = blockDim.y * blockIdx.y + threadIdx.y;
tUInt_32 inputIdx = idxValY * InputPitch + idxValX * inputChannels;
tUInt_32 outputIdx = idxValY * outputPitch + idxValX * outputChannels;
output[outputIdx] = (tUInt_8) (0.2989 * input[inputIdx + 2] + 0.5870 * input[inputIdx + 1] + 0.1140 * input[inputIdx]);
}
extern void rgb2GreyImageFunctionHost(tUInt_8* d_inputBuffer, tUInt_8* outputBuffer, IMAGE_INFO inputImageInfo, IMAGE_INFO outputImageInfo,tUInt_8** d_greyImage) {
size_t sizeInput = inputImageInfo.width * inputImageInfo.height * inputImageInfo.channnels * sizeof(tUInt_8);
size_t sizeOutput = outputImageInfo.width * outputImageInfo.height * outputImageInfo.channnels * sizeof(tUInt_8);
tUInt_32 inputPitch = inputImageInfo.width * inputImageInfo.channnels;
tUInt_32 outputPitch = outputImageInfo.width * outputImageInfo.channnels;
cudaCheckErrors(cudaMalloc(d_greyImage, sizeOutput));
dim3 threadsPerBlock(10, 6);
dim3 numBlocks(outputImageInfo.width / threadsPerBlock.x, outputImageInfo.height / threadsPerBlock.y);
rgb2GreyImage<<<numBlocks,threadsPerBlock>>>(d_inputBuffer,*d_greyImage,inputPitch,outputPitch,inputImageInfo.channnels,outputImageInfo.channnels);
cudaCheckErrors(cudaMemcpy(outputBuffer,*d_greyImage,sizeOutput,cudaMemcpyDeviceToHost));
}
|
sssp_standard.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sssp_standard.h"
SSSP_Standard::SSSP_Standard(std::shared_ptr<Graph> graph) : SSSP(std::move(graph)) {
}
std::shared_ptr<Paths> SSSP_Standard::compute(int source_node) {
size_t numNodes = graph->edges.size();
size_t numEdges = graph->destinations.size();
// Sizes
size_t sizeNodes = numNodes * sizeof(pos_t);
size_t sizeEdges = numEdges * sizeof(pos_t);
size_t sizeWeights = numEdges * sizeof(weight_t);
size_t sizeMask = numNodes * sizeof(mask_t);
size_t sizeCost = numNodes * sizeof(weight_t);
// Device memory
pos_t *d_edges = nullptr;
pos_t *d_destinations = nullptr;
weight_t *d_weights = nullptr;
mask_t *d_mask = nullptr;
pos_t *d_previous_node = nullptr;
weight_t *d_cost = nullptr;
// Allocate host memory
auto *mask = new mask_t[numNodes];
auto *previous_nodes = new pos_t[numNodes];
auto *cost = new weight_t[numNodes];
// Allocate d_previous_node and d_cost no matter the mode
M_C(hipMalloc((void **) &d_previous_node, sizeNodes));
M_C(hipMalloc((void **) &d_cost, sizeCost));
M_C(hipMalloc((void **) &d_mask, sizeMask));
M_C(hipMalloc((void **) &d_edges, sizeNodes));
M_C(hipMalloc((void **) &d_destinations, sizeEdges));
M_C(hipMalloc((void **) &d_weights, sizeWeights));
M_C(hipMemcpy(d_edges, graph->edges.data(), sizeNodes, hipMemcpyHostToDevice));
M_C(hipMemcpy(d_destinations, graph->destinations.data(), sizeEdges, hipMemcpyHostToDevice));
M_C(hipMemcpy(d_weights, graph->weights.data(), sizeWeights, hipMemcpyHostToDevice));
alg::fill_parcu(d_mask, numNodes, M_MASK_FALSE);
alg::fill_parcu(d_previous_node, numNodes, M_INVALID_POSITION);
alg::fill_parcu(d_cost, numNodes, std::numeric_limits<weight_t>::max());
alg::set_parcu(d_mask, source_node, M_MASK_TRUE);
alg::set_parcu(d_cost, source_node, 0);
// while we still find true in the mask (Ma not empty)
const mask_t *maskFirst = &mask[0];
const mask_t *maskLast = &mask[numNodes];
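// Termination test: every iteration copies the full mask back to the host and
// scans it; the loop ends once no node was updated in the last relaxation pass.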
do {
int numBlocks = ceil((double) graph->edges.size() / M_BLOCKSIZE);
M_CFUN((hipLaunchKernelGGL(alg::SSSP_Kernel, dim3(numBlocks), dim3(M_BLOCKSIZE), 0, 0, d_edges, d_destinations, d_weights,
d_previous_node, d_mask, d_cost, graph->edges.size(), graph->destinations.size())));
//copy back mask
M_C(hipMemcpy(mask, d_mask, sizeMask, hipMemcpyDeviceToHost));
} while (std::find(maskFirst, maskLast, true) != maskLast);
M_C(hipMemcpy(previous_nodes, d_previous_node, sizeNodes, hipMemcpyDeviceToHost));
M_C(hipMemcpy(cost, d_cost, sizeCost, hipMemcpyDeviceToHost));
std::vector<pos_t> ret_previous_nodes(previous_nodes, previous_nodes + graph->edges.size());
std::vector<weight_t> ret_cost(cost, cost + graph->edges.size());
M_C(hipFree(d_edges));
M_C(hipFree(d_destinations));
M_C(hipFree(d_weights));
M_C(hipFree(d_previous_node));
M_C(hipFree(d_cost));
M_C(hipFree(d_mask));
delete[] mask;
delete[] previous_nodes;
delete[] cost;
std::shared_ptr<Paths> paths = std::make_shared<Paths>(Paths(ret_previous_nodes, ret_cost, source_node, graph));
return paths;
} | sssp_standard.cu | #include "sssp_standard.h"
SSSP_Standard::SSSP_Standard(std::shared_ptr<Graph> graph) : SSSP(std::move(graph)) {
}
std::shared_ptr<Paths> SSSP_Standard::compute(int source_node) {
size_t numNodes = graph->edges.size();
size_t numEdges = graph->destinations.size();
// Sizes
size_t sizeNodes = numNodes * sizeof(pos_t);
size_t sizeEdges = numEdges * sizeof(pos_t);
size_t sizeWeights = numEdges * sizeof(weight_t);
size_t sizeMask = numNodes * sizeof(mask_t);
size_t sizeCost = numNodes * sizeof(weight_t);
// Device memory
pos_t *d_edges = nullptr;
pos_t *d_destinations = nullptr;
weight_t *d_weights = nullptr;
mask_t *d_mask = nullptr;
pos_t *d_previous_node = nullptr;
weight_t *d_cost = nullptr;
// Allocate host memory
auto *mask = new mask_t[numNodes];
auto *previous_nodes = new pos_t[numNodes];
auto *cost = new weight_t[numNodes];
// Allocate d_previous_node and d_cost no matter the mode
M_C(cudaMalloc((void **) &d_previous_node, sizeNodes));
M_C(cudaMalloc((void **) &d_cost, sizeCost));
M_C(cudaMalloc((void **) &d_mask, sizeMask));
M_C(cudaMalloc((void **) &d_edges, sizeNodes));
M_C(cudaMalloc((void **) &d_destinations, sizeEdges));
M_C(cudaMalloc((void **) &d_weights, sizeWeights));
M_C(cudaMemcpy(d_edges, graph->edges.data(), sizeNodes, cudaMemcpyHostToDevice));
M_C(cudaMemcpy(d_destinations, graph->destinations.data(), sizeEdges, cudaMemcpyHostToDevice));
M_C(cudaMemcpy(d_weights, graph->weights.data(), sizeWeights, cudaMemcpyHostToDevice));
alg::fill_parcu(d_mask, numNodes, M_MASK_FALSE);
alg::fill_parcu(d_previous_node, numNodes, M_INVALID_POSITION);
alg::fill_parcu(d_cost, numNodes, std::numeric_limits<weight_t>::max());
alg::set_parcu(d_mask, source_node, M_MASK_TRUE);
alg::set_parcu(d_cost, source_node, 0);
// while we still find true in the mask (Ma not empty)
const mask_t *maskFirst = &mask[0];
const mask_t *maskLast = &mask[numNodes];
do {
int numBlocks = ceil((double) graph->edges.size() / M_BLOCKSIZE);
M_CFUN((alg::SSSP_Kernel<<<numBlocks, M_BLOCKSIZE>>>(d_edges, d_destinations, d_weights,
d_previous_node, d_mask, d_cost, graph->edges.size(), graph->destinations.size())));
//copy back mask
M_C(cudaMemcpy(mask, d_mask, sizeMask, cudaMemcpyDeviceToHost));
} while (std::find(maskFirst, maskLast, true) != maskLast);
M_C(cudaMemcpy(previous_nodes, d_previous_node, sizeNodes, cudaMemcpyDeviceToHost));
M_C(cudaMemcpy(cost, d_cost, sizeCost, cudaMemcpyDeviceToHost));
std::vector<pos_t> ret_previous_nodes(previous_nodes, previous_nodes + graph->edges.size());
std::vector<weight_t> ret_cost(cost, cost + graph->edges.size());
M_C(cudaFree(d_edges));
M_C(cudaFree(d_destinations));
M_C(cudaFree(d_weights));
M_C(cudaFree(d_previous_node));
M_C(cudaFree(d_cost));
M_C(cudaFree(d_mask));
delete[] mask;
delete[] previous_nodes;
delete[] cost;
std::shared_ptr<Paths> paths = std::make_shared<Paths>(Paths(ret_previous_nodes, ret_cost, source_node, graph));
return paths;
} |
bd47104ed43228ce07691e68d73bfd2d6b16a1d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
// Enable to print a bunch of junk during solving
#define DEBUG_PRINT_SOLVER_INFO 0
#include "WarpingSolverParameters.h"
#include "WarpingSolverState.h"
#include "WarpingSolverUtil.h"
#include "WarpingSolverEquations.h"
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include "../../shared/CUDATimer.h"
#ifdef _WIN32
#include <conio.h>
#endif
#ifdef _WIN32
#define EXPORT __declspec(dllexport)
#else
#define EXPORT
#endif
#define WARP_SIZE 32u
#define WARP_MASK (WARP_SIZE-1u)
/////////////////////////////////////////////////////////////////////////
// Eval Residual
/////////////////////////////////////////////////////////////////////////
__global__ void ResetResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x == 0) state.d_sumResidual[0] = 0.0f;
}
__global__ void EvalResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of residuals
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float residual = 0.0f;
if (x < N)
{
residual = evalFDevice(x, input, state, parameters);
}
residual = warpReduce(residual);
unsigned int laneid;
//This command gets the lane ID within the current warp
asm("mov.u32 %0, %%laneid;" : "=r"(laneid));
if (laneid == 0) {
atomicAdd(&state.d_sumResidual[0], residual);
}
}
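// Illustrative note: for the 1-D thread blocks used here, the inline PTX above is equivalent to
//   unsigned int laneid = threadIdx.x & WARP_MASK;
// so exactly one lane per warp adds its warp-reduced partial sum to d_sumResidual.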
float EvalResidual(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
float residual = 0.0f;
const unsigned int N = input.N; // Number of residuals
ResetResidualDevice << < 1, 1, 1 >> >(input, state, parameters);
cudaSafeCall(hipDeviceSynchronize());
//timer.startEvent("EvalResidual");
EvalResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
//timer.endEvent();
cudaSafeCall(hipDeviceSynchronize());
residual = state.getSumResidual();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
return residual;
}
// For the naming scheme of the variables see:
// http://en.wikipedia.org/wiki/Conjugate_gradient_method
// This code is an implementation of their PCG pseudo code
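//
// Illustrative outline (sketch only, using the variable names of this file) of one PCG
// iteration as realised by the kernels below:
//   alpha = rDotzOld / dot(p, A*p)   // PCGStep_Kernel1 accumulates dot(p, A*p) into d_scanAlpha
//   delta = delta + alpha * p        // PCGStep_Kernel2
//   r     = r - alpha * A*p          // PCGStep_Kernel2
//   z     = M^-1 * r                 // PCGStep_Kernel2 applies d_precondioner
//   beta  = dot(z, r) / rDotzOld     // PCGStep_Kernel3; dot(z, r) is accumulated into d_scanBeta
//   p     = z + beta * p             // PCGStep_Kernel3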
__global__ void PCGInit_Kernel0(unsigned int N, SolverState state)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_delta[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_deltaA[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_r[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_rA[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_p[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_pA[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_z[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_zA[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_Ap_X[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_Ap_A[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_precondioner[x] = make_float3(1.0f, 1.0f, 1.0f);
state.d_precondionerA[x] = make_float3(1.0f, 1.0f, 1.0f);
}
}
__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float3 residuumA;
const float3 residuum = evalMinusJTFDevice(x, input, state, parameters, residuumA); // residuum = J^T x -F - A x delta_0 => J^T x -F, since A x x_0 == 0
//state.d_r[x] = residuum; // store for next iteration
//state.d_rA[x] = residuumA; // store for next iteration
atomicAdd(&state.d_r[0].x, residuum.x);
atomicAdd(&state.d_r[0].y, residuum.y);
atomicAdd(&state.d_r[0].z, residuum.z);
atomicAdd(&state.d_rA[0].x, residuumA.x);
atomicAdd(&state.d_rA[0].y, residuumA.y);
atomicAdd(&state.d_rA[0].z, residuumA.z);
const float3 p = state.d_precondioner[0] * residuum; // apply preconditioner M^-1
//state.d_p[x] = p;
const float3 pA = state.d_precondionerA[0] * residuumA; // apply preconditioner M^-1
//state.d_pA[x] = pA;
atomicAdd(&state.d_p[0].x, p.x);
atomicAdd(&state.d_p[0].y, p.y);
atomicAdd(&state.d_p[0].z, p.z);
atomicAdd(&state.d_pA[0].x, pA.x);
atomicAdd(&state.d_pA[0].y, pA.y);
atomicAdd(&state.d_pA[0].z, pA.z);
d = dot(residuum, p) + dot(residuumA, pA); // x-th term of numerator for computing alpha and denominator for computing beta
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGInit_Kernel2(unsigned int N, SolverState state)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_rDotzOld[x] = state.d_scanAlpha[0];
//state.d_delta[x] = make_float3(0.0f, 0.0f, 0.0f);
//state.d_deltaA[x] = make_float3(0.0f, 0.0f, 0.0f);
//
//state.d_r[x] = make_float3(0.0f, 0.0f, 0.0f);
//state.d_rA[x] = make_float3(0.0f, 0.0f, 0.0f);
//
//state.d_p[x] = make_float3(0.0f, 0.0f, 0.0f);
//state.d_pA[x] = make_float3(0.0f, 0.0f, 0.0f);
//
////state.d_z[x] = make_float3(0.0f, 0.0f, 0.0f);
////state.d_zA[x] = make_float3(0.0f, 0.0f, 0.0f);
//
//state.d_Ap_X[x] = make_float3(0.0f, 0.0f, 0.0f);
//state.d_Ap_A[x] = make_float3(0.0f, 0.0f, 0.0f);
}
}
void Initialization(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N;
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const int shmem_size = sizeof(float)*THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
cudaSafeCall(hipMemset(state.d_scanAlpha, 0, sizeof(float)));
//timer.startEvent("PCGInit_Kernel0");
PCGInit_Kernel0 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(1, state);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
//timer.startEvent("PCGInit_Kernel1");
PCGInit_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state, parameters);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
//timer.startEvent("PCGInit_Kernel2");
PCGInit_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(1, state);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
#if DEBUG_PRINT_SOLVER_INFO
float temp;
cudaSafeCall( hipMemcpy(&temp, state.d_scanAlpha, sizeof(float), hipMemcpyDeviceToHost) );
printf("ScanAlpha (Init): %f\n", temp);
#endif
}
/////////////////////////////////////////////////////////////////////////
// PCG Iteration Parts
/////////////////////////////////////////////////////////////////////////
__global__ void PCGStep_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float3 tmpA;
const float3 tmp = applyJTJDevice(x, input, state, parameters, tmpA); // A x p_k => J^T x J x p_k
//if (x < 1)
{
atomicAdd(&state.d_Ap_X[0].x, tmp.x);
atomicAdd(&state.d_Ap_X[0].y, tmp.y);
atomicAdd(&state.d_Ap_X[0].z, tmp.z);
atomicAdd(&state.d_Ap_A[0].x, tmpA.x);
atomicAdd(&state.d_Ap_A[0].y, tmpA.y);
atomicAdd(&state.d_Ap_A[0].z, tmpA.z);
//state.d_Ap_X[x] = tmp; // store for next kernel call
//state.d_Ap_A[x] = tmpA; // store for next kernel call
}
d = dot(state.d_p[0], tmp) + dot(state.d_pA[0], tmpA); // x-th term of denominator of alpha
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d); // sum over x-th terms to compute denominator of alpha inside this block
}
}
__global__ void PCGStep_Kernel2(SolverInput input, SolverState state)
{
const unsigned int N = 1;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const float dotProduct = state.d_scanAlpha[0];
float b = 0.0f;
if (x < N)
{
float alpha = 0.0f;
if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha
state.d_delta[x] = state.d_delta[x] + alpha*state.d_p[x]; // do a descent step
state.d_deltaA[x] = state.d_deltaA[x] + alpha*state.d_pA[x]; // do a descent step
float3 r = state.d_r[x] - alpha*state.d_Ap_X[x]; // update residuum
state.d_r[x] = r; // store for next kernel call
float3 rA = state.d_rA[x] - alpha*state.d_Ap_A[x]; // update residuum
state.d_rA[x] = rA; // store for next kernel call
float3 z = state.d_precondioner[x] * r; // apply preconditioner M^-1
state.d_z[x] = z; // save for next kernel call
float3 zA = state.d_precondionerA[x] * rA; // apply preconditioner M^-1
state.d_zA[x] = zA; // save for next kernel call
b = dot(z, r) + dot(zA, rA); // compute x-th term of the numerator of beta
}
b = warpReduce(b);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanBeta, b); // sum over x-th terms to compute the numerator of beta inside this block
}
}
__global__ void PCGStep_Kernel3(SolverInput input, SolverState state)
{
const unsigned int N = 1;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
const float rDotzNew = state.d_scanBeta[0]; // get new numerator
const float rDotzOld = state.d_rDotzOld[x]; // get old denominator
float beta = 0.0f;
if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta
state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration
state.d_p[x] = state.d_z[x] + beta*state.d_p[x]; // update descent direction
state.d_pA[x] = state.d_zA[x] + beta*state.d_pA[x]; // update descent direction
}
}
void PCGIteration(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N; // Number of block variables
// Do PCG step
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const int blocksPerGridNew = (1 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const int shmem_size = sizeof(float)*THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
cudaSafeCall(hipMemset(state.d_scanAlpha, 0, sizeof(float)));
//timer.startEvent("PCGStep_Kernel1");
PCGStep_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state, parameters);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
cudaSafeCall(hipMemset(state.d_scanBeta, 0, sizeof(float)));
//timer.startEvent("PCGStep_Kernel2");
PCGStep_Kernel2 << <blocksPerGridNew, THREADS_PER_BLOCK, shmem_size >> >(input, state);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
//timer.startEvent("PCGStep_Kernel3");
PCGStep_Kernel3 << <blocksPerGridNew, THREADS_PER_BLOCK, shmem_size >> >(input, state);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
#if DEBUG_PRINT_SOLVER_INFO
float temp;
cudaSafeCall( hipMemcpy(&temp, state.d_scanAlpha, sizeof(float), hipMemcpyDeviceToHost) );
printf("ScanAlpha (Step): %f\n", temp);
cudaSafeCall( hipMemcpy(&temp, state.d_scanBeta, sizeof(float), hipMemcpyDeviceToHost) );
printf("ScanBeta (Step): %f\n", temp);
#endif
}
/////////////////////////////////////////////////////////////////////////
// Apply Update
/////////////////////////////////////////////////////////////////////////
__global__ void ApplyLinearUpdateDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < 1) {
state.d_translation[0] = state.d_translation[0] + state.d_delta[0];
state.d_angles[0] = state.d_angles[0] + state.d_deltaA[0];
}
}
void ApplyLinearUpdate(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = 1; // This is different to all sparse solvers !!!
//timer.startEvent("ApplyLinearUpdateDevice");
ApplyLinearUpdateDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
//timer.endEvent();
cudaSafeCall(hipDeviceSynchronize()); // Hm
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
}
/////////////////////////////////////////////////////////////////////////
// Transform Mesh
/////////////////////////////////////////////////////////////////////////
__global__ void ApplyTransformMeshDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
float3x3 R = evalRMat(state.d_angles[0]);
state.d_mesh[x] = R*state.d_mesh[x] + state.d_translation[0];
}
}
void TransformMesh(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N; // This is different to all sparse solvers !!! number of vertices
//timer.startEvent("Transform Mesh");
ApplyTransformMeshDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
//timer.endEvent();
cudaSafeCall(hipDeviceSynchronize()); // Hm
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
}
////////////////////////////////////////////////////////////////////
// Main GN Solver Loop
////////////////////////////////////////////////////////////////////
extern "C" double ProcrustesSolveGNStub(SolverInput& input, SolverState& state, SolverParameters& parameters, SolverPerformanceSummary& stats)
{
CUDATimer timer;
timer.startEvent("Total");
for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++)
{
timer.startEvent("Nonlinear Iteration");
timer.startEvent("Nonlinear Setup");
float residual = EvalResidual(input, state, parameters, timer);
printf("%i: cost: %f\n", nIter, residual);
Initialization(input, state, parameters, timer);
timer.endEvent();
timer.startEvent("Linear Solve");
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++) {
PCGIteration(input, state, parameters, timer);
}
timer.endEvent();
timer.startEvent("Nonlinear Finish");
ApplyLinearUpdate(input, state, parameters, timer);
timer.nextIteration();
timer.endEvent();
timer.endEvent();
}
float residual = EvalResidual(input, state, parameters, timer);
printf("final cost: %f\n", residual);
timer.endEvent();
timer.evaluate(stats);
return (double)residual;
}
| bd47104ed43228ce07691e68d73bfd2d6b16a1d0.cu | #include <iostream>
// Enabled to print a bunch of junk during solving
#define DEBUG_PRINT_SOLVER_INFO 0
#include "WarpingSolverParameters.h"
#include "WarpingSolverState.h"
#include "WarpingSolverUtil.h"
#include "WarpingSolverEquations.h"
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include "../../shared/CUDATimer.h"
#ifdef _WIN32
#include <conio.h>
#endif
#ifdef _WIN32
#define EXPORT __declspec(dllexport)
#else
#define EXPORT
#endif
#define WARP_SIZE 32u
#define WARP_MASK (WARP_SIZE-1u)
/////////////////////////////////////////////////////////////////////////
// Eval Residual
/////////////////////////////////////////////////////////////////////////
__global__ void ResetResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x == 0) state.d_sumResidual[0] = 0.0f;
}
__global__ void EvalResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of residuals
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float residual = 0.0f;
if (x < N)
{
residual = evalFDevice(x, input, state, parameters);
}
residual = warpReduce(residual);
unsigned int laneid;
//This command gets the lane ID within the current warp
asm("mov.u32 %0, %%laneid;" : "=r"(laneid));
if (laneid == 0) {
atomicAdd(&state.d_sumResidual[0], residual);
}
}
float EvalResidual(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
float residual = 0.0f;
const unsigned int N = input.N; // Number of residuals
ResetResidualDevice << < 1, 1, 1 >> >(input, state, parameters);
cudaSafeCall(cudaDeviceSynchronize());
//timer.startEvent("EvalResidual");
EvalResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
//timer.endEvent();
cudaSafeCall(cudaDeviceSynchronize());
residual = state.getSumResidual();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
return residual;
}
// For the naming scheme of the variables see:
// http://en.wikipedia.org/wiki/Conjugate_gradient_method
// This code is an implementation of their PCG pseudo code
__global__ void PCGInit_Kernel0(unsigned int N, SolverState state)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_delta[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_deltaA[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_r[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_rA[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_p[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_pA[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_z[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_zA[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_Ap_X[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_Ap_A[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_precondioner[x] = make_float3(1.0f, 1.0f, 1.0f);
state.d_precondionerA[x] = make_float3(1.0f, 1.0f, 1.0f);
}
}
__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float3 residuumA;
const float3 residuum = evalMinusJTFDevice(x, input, state, parameters, residuumA); // residuum = J^T x -F - A x delta_0 => J^T x -F, since A x x_0 == 0
//state.d_r[x] = residuum; // store for next iteration
//state.d_rA[x] = residuumA; // store for next iteration
atomicAdd(&state.d_r[0].x, residuum.x);
atomicAdd(&state.d_r[0].y, residuum.y);
atomicAdd(&state.d_r[0].z, residuum.z);
atomicAdd(&state.d_rA[0].x, residuumA.x);
atomicAdd(&state.d_rA[0].y, residuumA.y);
atomicAdd(&state.d_rA[0].z, residuumA.z);
const float3 p = state.d_precondioner[0] * residuum; // apply preconditioner M^-1
//state.d_p[x] = p;
const float3 pA = state.d_precondionerA[0] * residuumA; // apply preconditioner M^-1
//state.d_pA[x] = pA;
atomicAdd(&state.d_p[0].x, p.x);
atomicAdd(&state.d_p[0].y, p.y);
atomicAdd(&state.d_p[0].z, p.z);
atomicAdd(&state.d_pA[0].x, pA.x);
atomicAdd(&state.d_pA[0].y, pA.y);
atomicAdd(&state.d_pA[0].z, pA.z);
d = dot(residuum, p) + dot(residuumA, pA); // x-th term of numerator for computing alpha and denominator for computing beta
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGInit_Kernel2(unsigned int N, SolverState state)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_rDotzOld[x] = state.d_scanAlpha[0];
//state.d_delta[x] = make_float3(0.0f, 0.0f, 0.0f);
//state.d_deltaA[x] = make_float3(0.0f, 0.0f, 0.0f);
//
//state.d_r[x] = make_float3(0.0f, 0.0f, 0.0f);
//state.d_rA[x] = make_float3(0.0f, 0.0f, 0.0f);
//
//state.d_p[x] = make_float3(0.0f, 0.0f, 0.0f);
//state.d_pA[x] = make_float3(0.0f, 0.0f, 0.0f);
//
////state.d_z[x] = make_float3(0.0f, 0.0f, 0.0f);
////state.d_zA[x] = make_float3(0.0f, 0.0f, 0.0f);
//
//state.d_Ap_X[x] = make_float3(0.0f, 0.0f, 0.0f);
//state.d_Ap_A[x] = make_float3(0.0f, 0.0f, 0.0f);
}
}
void Initialization(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N;
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const int shmem_size = sizeof(float)*THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
cudaSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));
//timer.startEvent("PCGInit_Kernel0");
PCGInit_Kernel0 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(1, state);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
//timer.startEvent("PCGInit_Kernel1");
PCGInit_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state, parameters);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
//timer.startEvent("PCGInit_Kernel2");
PCGInit_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(1, state);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
#if DEBUG_PRINT_SOLVER_INFO
float temp;
cudaSafeCall( cudaMemcpy(&temp, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost) );
printf("ScanAlpha (Init): %f\n", temp);
#endif
}
/////////////////////////////////////////////////////////////////////////
// PCG Iteration Parts
/////////////////////////////////////////////////////////////////////////
__global__ void PCGStep_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float3 tmpA;
const float3 tmp = applyJTJDevice(x, input, state, parameters, tmpA); // A x p_k => J^T x J x p_k
//if (x < 1)
{
atomicAdd(&state.d_Ap_X[0].x, tmp.x);
atomicAdd(&state.d_Ap_X[0].y, tmp.y);
atomicAdd(&state.d_Ap_X[0].z, tmp.z);
atomicAdd(&state.d_Ap_A[0].x, tmpA.x);
atomicAdd(&state.d_Ap_A[0].y, tmpA.y);
atomicAdd(&state.d_Ap_A[0].z, tmpA.z);
//state.d_Ap_X[x] = tmp; // store for next kernel call
//state.d_Ap_A[x] = tmpA; // store for next kernel call
}
d = dot(state.d_p[0], tmp) + dot(state.d_pA[0], tmpA); // x-th term of denominator of alpha
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d); // sum over x-th terms to compute denominator of alpha inside this block
}
}
__global__ void PCGStep_Kernel2(SolverInput input, SolverState state)
{
const unsigned int N = 1;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const float dotProduct = state.d_scanAlpha[0];
float b = 0.0f;
if (x < N)
{
float alpha = 0.0f;
if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha
state.d_delta[x] = state.d_delta[x] + alpha*state.d_p[x]; // do a descent step
state.d_deltaA[x] = state.d_deltaA[x] + alpha*state.d_pA[x]; // do a descent step
float3 r = state.d_r[x] - alpha*state.d_Ap_X[x]; // update residuum
state.d_r[x] = r; // store for next kernel call
float3 rA = state.d_rA[x] - alpha*state.d_Ap_A[x]; // update residuum
state.d_rA[x] = rA; // store for next kernel call
float3 z = state.d_precondioner[x] * r; // apply preconditioner M^-1
state.d_z[x] = z; // save for next kernel call
float3 zA = state.d_precondionerA[x] * rA; // apply preconditioner M^-1
state.d_zA[x] = zA; // save for next kernel call
b = dot(z, r) + dot(zA, rA); // compute x-th term of the numerator of beta
}
b = warpReduce(b);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanBeta, b); // sum over x-th terms to compute the numerator of beta inside this block
}
}
__global__ void PCGStep_Kernel3(SolverInput input, SolverState state)
{
const unsigned int N = 1;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
const float rDotzNew = state.d_scanBeta[0]; // get new numerator
const float rDotzOld = state.d_rDotzOld[x]; // get old denominator
float beta = 0.0f;
if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta
state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration
state.d_p[x] = state.d_z[x] + beta*state.d_p[x]; // update descent direction
state.d_pA[x] = state.d_zA[x] + beta*state.d_pA[x]; // update descent direction
}
}
void PCGIteration(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N; // Number of block variables
// Do PCG step
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const int blocksPerGridNew = (1 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const int shmem_size = sizeof(float)*THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
cudaSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));
//timer.startEvent("PCGStep_Kernel1");
PCGStep_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state, parameters);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
cudaSafeCall(cudaMemset(state.d_scanBeta, 0, sizeof(float)));
//timer.startEvent("PCGStep_Kernel2");
PCGStep_Kernel2 << <blocksPerGridNew, THREADS_PER_BLOCK, shmem_size >> >(input, state);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
//timer.startEvent("PCGStep_Kernel3");
PCGStep_Kernel3 << <blocksPerGridNew, THREADS_PER_BLOCK, shmem_size >> >(input, state);
//timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
#if DEBUG_PRINT_SOLVER_INFO
float temp;
cudaSafeCall( cudaMemcpy(&temp, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost) );
printf("ScanAlpha (Step): %f\n", temp);
cudaSafeCall( cudaMemcpy(&temp, state.d_scanBeta, sizeof(float), cudaMemcpyDeviceToHost) );
printf("ScanBeta (Step): %f\n", temp);
#endif
}
/////////////////////////////////////////////////////////////////////////
// Apply Update
/////////////////////////////////////////////////////////////////////////
__global__ void ApplyLinearUpdateDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < 1) {
state.d_translation[0] = state.d_translation[0] + state.d_delta[0];
state.d_angles[0] = state.d_angles[0] + state.d_deltaA[0];
}
}
void ApplyLinearUpdate(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = 1; // This is different to all sparse solvers !!!
//timer.startEvent("ApplyLinearUpdateDevice");
ApplyLinearUpdateDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
//timer.endEvent();
cudaSafeCall(cudaDeviceSynchronize()); // Hm
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
}
/////////////////////////////////////////////////////////////////////////
// Transform Mesh
/////////////////////////////////////////////////////////////////////////
__global__ void ApplyTransformMeshDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
float3x3 R = evalRMat(state.d_angles[0]);
state.d_mesh[x] = R*state.d_mesh[x] + state.d_translation[0];
}
}
void TransformMesh(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N; // This is different to all sparse solvers !!! number of vertices
//timer.startEvent("Transform Mesh");
ApplyTransformMeshDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
//timer.endEvent();
cudaSafeCall(cudaDeviceSynchronize()); // Hm
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
}
////////////////////////////////////////////////////////////////////
// Main GN Solver Loop
////////////////////////////////////////////////////////////////////
extern "C" double ProcrustesSolveGNStub(SolverInput& input, SolverState& state, SolverParameters& parameters, SolverPerformanceSummary& stats)
{
CUDATimer timer;
timer.startEvent("Total");
for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++)
{
timer.startEvent("Nonlinear Iteration");
timer.startEvent("Nonlinear Setup");
float residual = EvalResidual(input, state, parameters, timer);
printf("%i: cost: %f\n", nIter, residual);
Initialization(input, state, parameters, timer);
timer.endEvent();
timer.startEvent("Linear Solve");
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++) {
PCGIteration(input, state, parameters, timer);
}
timer.endEvent();
timer.startEvent("Nonlinear Finish");
ApplyLinearUpdate(input, state, parameters, timer);
timer.nextIteration();
timer.endEvent();
timer.endEvent();
}
float residual = EvalResidual(input, state, parameters, timer);
printf("final cost: %f\n", residual);
timer.endEvent();
timer.evaluate(stats);
return (double)residual;
}
|
5956706533b1d94a84062a57d363574252e3640d.hip | // !!! This is a file automatically generated by hipify!!!
/*-----------------------------------------------------------
** gaussian.cu -- The program is to solve a linear system Ax = b
** by using Gaussian Elimination. The algorithm on page 101
** ("Foundations of Parallel Programming") is used.
** The sequential version is gaussian.c. This parallel
** implementation converts three independent for() loops
** into three Fans. Use the data file ge_3.dat to verify
** the correctness of the output.
**
** Written by Andreas Kura, 02/15/95
** Modified by Chong-wei Xu, 04/20/95
** Modified by Chris Gregg for CUDA, 07/20/2009
**-----------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "hip/hip_runtime.h"
#include <string.h>
#include <math.h>
#ifdef RD_WG_SIZE_0_0
#define MAXBLOCKSIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define MAXBLOCKSIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define MAXBLOCKSIZE RD_WG_SIZE
#else
#define MAXBLOCKSIZE 512
#endif
//2D defines. Go from specific to general
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_XY RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_XY RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_XY RD_WG_SIZE
#else
#define BLOCK_SIZE_XY 4
#endif
int Size;
float *a, *b, *finalVec;
float *m;
FILE *fp;
void InitProblemOnce(char *filename);
void InitPerRun();
void ForwardSub();
void BackSub();
__global__ void Fan1(float *m, float *a, int Size, int t);
__global__ void Fan2(float *m, float *a, float *b,int Size, int j1, int t);
void InitMat(float *ary, int nrow, int ncol);
void InitAry(float *ary, int ary_size);
void PrintMat(float *ary, int nrow, int ncolumn);
void PrintAry(float *ary, int ary_size);
void PrintDeviceProperties();
void checkCUDAError(const char *msg);
void VerifyResult();
unsigned int totalKernelTime = 0;
// create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06
void
create_matrix(float *m, int size){
int i,j;
float lamda = -0.01;
float coe[2*size-1];
float coe_i =0.0;
for (i=0; i < size; i++)
{
coe_i = 10*exp(lamda*i);
j=size-1+i;
coe[j]=coe_i;
j=size-1-i;
coe[j]=coe_i;
}
for (i=0; i < size; i++) {
for (j=0; j < size; j++) {
m[i*size+j]=coe[size-1-i+j];
}
}
}
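/* Illustrative note: the matrix generated above is symmetric with entries
** a[i][j] = 10 * exp(lamda * |i - j|), lamda = -0.01; e.g. for size = 3 the first
** row is approximately {10.000, 9.900, 9.802}. */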
int main(int argc, char *argv[])
{
printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n", MAXBLOCKSIZE, BLOCK_SIZE_XY, BLOCK_SIZE_XY);
int verbose = 0;
int i, j;
char flag;
if (argc < 2) {
printf("Usage: gaussian -f filename / -s size [-q]\n\n");
printf("-q (quiet) suppresses printing the matrix and result values.\n");
printf("-f (filename) path of input file\n");
printf("-s (size) size of matrix. Create matrix and rhs in this program \n");
printf("The first line of the file contains the dimension of the matrix, n.");
printf("The second line of the file is a newline.\n");
printf("The next n lines contain n tab separated values for the matrix.");
printf("The next line of the file is a newline.\n");
printf("The next line of the file is a 1xn vector with tab separated values.\n");
printf("The next line of the file is a newline. (optional)\n");
printf("The final line of the file is the pre-computed solution. (optional)\n");
printf("Example: matrix4.txt:\n");
printf("4\n");
printf("\n");
printf("-0.6 -0.5 0.7 0.3\n");
printf("-0.3 -0.9 0.3 0.7\n");
printf("-0.4 -0.5 -0.3 -0.8\n");
printf("0.0 -0.1 0.2 0.9\n");
printf("\n");
printf("-0.85 -0.68 0.24 -0.53\n");
printf("\n");
printf("0.7 0.0 -0.4 -0.5\n");
exit(0);
}
//PrintDeviceProperties();
//char filename[100];
//sprintf(filename,"matrices/matrix%d.txt",size);
for(i=1;i<argc;i++) {
if (argv[i][0]=='-') {// flag
flag = argv[i][1];
switch (flag) {
case 's': // platform
i++;
Size = atoi(argv[i]);
printf("Create matrix internally in parse, size = %d \n", Size);
a = (float *) malloc(Size * Size * sizeof(float));
create_matrix(a, Size);
b = (float *) malloc(Size * sizeof(float));
for (j =0; j< Size; j++)
b[j]=1.0;
m = (float *) malloc(Size * Size * sizeof(float));
break;
case 'f': // platform
i++;
printf("Read file from %s \n", argv[i]);
InitProblemOnce(argv[i]);
break;
case 'q': // quiet
verbose = 0;
break;
}
}
}
//InitProblemOnce(filename);
InitPerRun();
//begin timing
struct timeval time_start;
gettimeofday(&time_start, NULL);
// run kernels
ForwardSub();
//end timing
struct timeval time_end;
gettimeofday(&time_end, NULL);
unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
if (verbose) {
printf("Matrix m is: \n");
PrintMat(m, Size, Size);
printf("Matrix a is: \n");
PrintMat(a, Size, Size);
printf("Array b is: \n");
PrintAry(b, Size);
}
BackSub();
if (verbose) {
printf("The final solution is: \n");
PrintAry(finalVec,Size);
}
printf("\nTime total (including memory transfers)\t%f sec\n", time_total * 1e-6);
printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6);
VerifyResult();
/*printf("%d,%d\n",size,time_total);
fprintf(stderr,"%d,%d\n",size,time_total);*/
free(m);
free(a);
free(b);
}
/*------------------------------------------------------
** PrintDeviceProperties
**-----------------------------------------------------
*/
void PrintDeviceProperties(){
hipDeviceProp_t deviceProp;
int nDevCount = 0;
hipGetDeviceCount( &nDevCount );
printf( "Total Device found: %d", nDevCount );
for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx )
{
memset( &deviceProp, 0, sizeof(deviceProp));
if( hipSuccess == hipGetDeviceProperties(&deviceProp, nDeviceIdx))
{
printf( "\nDevice Name \t\t - %s ", deviceProp.name );
printf( "\n**************************************");
printf( "\nTotal Global Memory\t\t\t - %lu KB", deviceProp.totalGlobalMem/1024 );
printf( "\nShared memory available per block \t - %lu KB", deviceProp.sharedMemPerBlock/1024 );
printf( "\nNumber of registers per thread block \t - %d", deviceProp.regsPerBlock );
printf( "\nWarp size in threads \t\t\t - %d", deviceProp.warpSize );
printf( "\nMemory Pitch \t\t\t\t - %zu bytes", deviceProp.memPitch );
printf( "\nMaximum threads per block \t\t - %d", deviceProp.maxThreadsPerBlock );
printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2] );
printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2] );
printf( "\nTotal constant memory \t\t\t - %zu bytes", deviceProp.totalConstMem );
printf( "\nCUDA ver \t\t\t\t - %d.%d", deviceProp.major, deviceProp.minor );
printf( "\nClock rate \t\t\t\t - %d KHz", deviceProp.clockRate );
printf( "\nTexture Alignment \t\t\t - %zu bytes", deviceProp.textureAlignment );
printf( "\nDevice Overlap \t\t\t\t - %s", deviceProp. deviceOverlap?"Allowed":"Not Allowed" );
printf( "\nNumber of Multi processors \t\t - %d\n\n", deviceProp.multiProcessorCount );
}
else
printf( "\n%s", hipGetErrorString(hipGetLastError()));
}
}
/*------------------------------------------------------
** InitProblemOnce -- Initialize all of the matrices and
** vectors by opening a data file specified by the user.
**
** We use the dynamically allocated arrays *a, *b, and *m
** as the memory storage.
**------------------------------------------------------
*/
void InitProblemOnce(char *filename)
{
//char *filename = argv[1];
//printf("Enter the data file name: ");
//scanf("%s", filename);
//printf("The file name is: %s\n", filename);
fp = fopen(filename, "r");
fscanf(fp, "%d", &Size);
a = (float *) malloc(Size * Size * sizeof(float));
InitMat(a, Size, Size);
//printf("The input matrix a is:\n");
//PrintMat(a, Size, Size);
b = (float *) malloc(Size * sizeof(float));
InitAry(b, Size);
//printf("The input array b is:\n");
//PrintAry(b, Size);
m = (float *) malloc(Size * Size * sizeof(float));
}
/*------------------------------------------------------
** InitPerRun() -- Initialize the contents of the
** multiplier matrix **m
**------------------------------------------------------
*/
void InitPerRun()
{
int i;
for (i=0; i<Size*Size; i++)
*(m+i) = 0.0;
}
/*-------------------------------------------------------
** Fan1() -- Calculate multiplier matrix
** Pay attention to the index. Index i gives the range,
** which starts from 0 to range-1. The real values of
** the index should be adjusted and related to the value
** of t, which is defined in ForwardSub().
**-------------------------------------------------------
*/
__global__ void Fan1(float *m_cuda, float *a_cuda, int Size, int t)
{
//if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) printf(".");
//printf("blockIDx.x:%d,threadIdx.x:%d,Size:%d,t:%d,Size-1-t:%d\n",blockIdx.x,threadIdx.x,Size,t,Size-1-t);
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
*(m_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) = *(a_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) / *(a_cuda+Size*t+t);
}
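/* Illustrative sketch (not part of the original source): Fan1 parallelizes the sequential
** multiplier loop
**   for (int i = t + 1; i < Size; i++)
**       m[i*Size + t] = a[i*Size + t] / a[t*Size + t];
** with one thread per row i = t + 1 + blockIdx.x*blockDim.x + threadIdx.x.
*/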
/*-------------------------------------------------------
** Fan2() -- Modify the matrix A into LUD
**-------------------------------------------------------
*/
__global__ void Fan2(float *m_cuda, float *a_cuda, float *b_cuda,int Size, int j1, int t)
{
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return;
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)];
//a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t];
if(yidx == 0){
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
//printf("xidx:%d,yidx:%d\n",xidx,yidx);
b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t];
}
}
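/* Illustrative sketch (not part of the original source): Fan2 corresponds to the sequential
** elimination update
**   for (int i = t + 1; i < Size; i++) {
**       for (int j = t; j < Size; j++)
**           a[i*Size + j] -= m[i*Size + t] * a[t*Size + j];
**       b[i] -= m[i*Size + t] * b[t];
**   }
** mapped onto a 2-D thread grid (xidx selects the row, yidx the column).
*/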
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination.
**------------------------------------------------------
*/
void ForwardSub()
{
int t;
float *m_cuda,*a_cuda,*b_cuda;
// allocate memory on GPU
hipMalloc((void **) &m_cuda, Size * Size * sizeof(float));
hipMalloc((void **) &a_cuda, Size * Size * sizeof(float));
hipMalloc((void **) &b_cuda, Size * sizeof(float));
// copy memory to GPU
hipMemcpy(m_cuda, m, Size * Size * sizeof(float),hipMemcpyHostToDevice );
hipMemcpy(a_cuda, a, Size * Size * sizeof(float),hipMemcpyHostToDevice );
hipMemcpy(b_cuda, b, Size * sizeof(float),hipMemcpyHostToDevice );
int block_size,grid_size;
block_size = MAXBLOCKSIZE;
grid_size = (Size/block_size) + (!(Size%block_size)? 0:1);
//printf("1d grid size: %d\n",grid_size);
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
//dim3 dimGrid( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) );
int blockSize2d, gridSize2d;
blockSize2d = BLOCK_SIZE_XY;
gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d?0:1));
dim3 dimBlockXY(blockSize2d,blockSize2d);
dim3 dimGridXY(gridSize2d,gridSize2d);
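// Note (illustrative restatement only): both grid sizes above are ceiling divisions, i.e.
//   grid_size  == (Size + block_size  - 1) / block_size
//   gridSize2d == (Size + blockSize2d - 1) / blockSize2d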
// begin timing kernels
struct timeval time_start;
gettimeofday(&time_start, NULL);
for (t=0; t<(Size-1); t++) {
hipLaunchKernelGGL(( Fan1), dim3(dimGrid),dim3(dimBlock), 0, 0, m_cuda,a_cuda,Size,t);
hipDeviceSynchronize();
hipLaunchKernelGGL(( Fan2), dim3(dimGridXY),dim3(dimBlockXY), 0, 0, m_cuda,a_cuda,b_cuda,Size,Size-t,t);
hipDeviceSynchronize();
checkCUDAError("Fan2");
}
// end timing kernels
struct timeval time_end;
gettimeofday(&time_end, NULL);
totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
// copy memory back to CPU
hipMemcpy(m, m_cuda, Size * Size * sizeof(float),hipMemcpyDeviceToHost );
hipMemcpy(a, a_cuda, Size * Size * sizeof(float),hipMemcpyDeviceToHost );
hipMemcpy(b, b_cuda, Size * sizeof(float),hipMemcpyDeviceToHost );
hipFree(m_cuda);
hipFree(a_cuda);
hipFree(b_cuda);
}
/*------------------------------------------------------
** BackSub() -- Backward substitution
**------------------------------------------------------
*/
void BackSub()
{
// create a new vector to hold the final answer
finalVec = (float *) malloc(Size * sizeof(float));
// solve "bottom up"
int i,j;
for(i=0;i<Size;i++){
finalVec[Size-i-1]=b[Size-i-1];
for(j=0;j<i;j++)
{
finalVec[Size-i-1]-=*(a+Size*(Size-i-1)+(Size-j-1)) * finalVec[Size-j-1];
}
finalVec[Size-i-1]=finalVec[Size-i-1]/ *(a+Size*(Size-i-1)+(Size-i-1));
}
}
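/* Illustrative restatement: BackSub evaluates, for k = Size-1 down to 0,
**   finalVec[k] = ( b[k] - sum_{j = k+1}^{Size-1} a[k][j] * finalVec[j] ) / a[k][k]
*/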
void InitMat(float *ary, int nrow, int ncol)
{
int i, j;
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
fscanf(fp, "%f", ary+Size*i+j);
}
}
}
/*------------------------------------------------------
** PrintMat() -- Print the contents of the matrix
**------------------------------------------------------
*/
void PrintMat(float *ary, int nrow, int ncol)
{
int i, j;
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
printf("%8.2f ", *(ary+Size*i+j));
}
printf("\n");
}
printf("\n");
}
/*------------------------------------------------------
** InitAry() -- Initialize the array (vector) by reading
** data from the data file
**------------------------------------------------------
*/
void InitAry(float *ary, int ary_size)
{
int i;
for (i=0; i<ary_size; i++) {
fscanf(fp, "%f", &ary[i]);
}
}
/*------------------------------------------------------
** PrintAry() -- Print the contents of the array (vector)
**------------------------------------------------------
*/
void PrintAry(float *ary, int ary_size)
{
int i;
for (i=0; i<ary_size; i++) {
printf("%.2f ", ary[i]);
}
printf("\n\n");
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
void VerifyResult()
{
int i,j;
float tmp_out = 0;
for(i=0;i<Size;i++)
{
for(j=0,tmp_out=0;j<Size;j++)
tmp_out += ( *(a + Size*i + j) * finalVec[j] );
if( fabs(tmp_out - b[i]) > 0.01)
{
printf("Test Failed\n");
printf("out[%d]: %f; b[%d]:%f; diff:%f\n",i, tmp_out, i, b[i], b[i]-tmp_out);
return;
}
}
printf("Test Pass\n");
return;
}
| 5956706533b1d94a84062a57d363574252e3640d.cu | /*-----------------------------------------------------------
** gaussian.cu -- The program is to solve a linear system Ax = b
** by using Gaussian Elimination. The algorithm on page 101
** ("Foundations of Parallel Programming") is used.
** The sequential version is gaussian.c. This parallel
** implementation converts three independent for() loops
** into three Fans. Use the data file ge_3.dat to verify
** the correctness of the output.
**
** Written by Andreas Kura, 02/15/95
** Modified by Chong-wei Xu, 04/20/95
** Modified by Chris Gregg for CUDA, 07/20/2009
**-----------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "cuda.h"
#include <string.h>
#include <math.h>
#ifdef RD_WG_SIZE_0_0
#define MAXBLOCKSIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define MAXBLOCKSIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define MAXBLOCKSIZE RD_WG_SIZE
#else
#define MAXBLOCKSIZE 512
#endif
//2D defines. Go from specific to general
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_XY RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_XY RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_XY RD_WG_SIZE
#else
#define BLOCK_SIZE_XY 4
#endif
int Size;
float *a, *b, *finalVec;
float *m;
FILE *fp;
void InitProblemOnce(char *filename);
void InitPerRun();
void ForwardSub();
void BackSub();
__global__ void Fan1(float *m, float *a, int Size, int t);
__global__ void Fan2(float *m, float *a, float *b,int Size, int j1, int t);
void InitMat(float *ary, int nrow, int ncol);
void InitAry(float *ary, int ary_size);
void PrintMat(float *ary, int nrow, int ncolumn);
void PrintAry(float *ary, int ary_size);
void PrintDeviceProperties();
void checkCUDAError(const char *msg);
void VerifyResult();
unsigned int totalKernelTime = 0;
// create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06
void
create_matrix(float *m, int size){
int i,j;
float lamda = -0.01;
float coe[2*size-1];
float coe_i =0.0;
for (i=0; i < size; i++)
{
coe_i = 10*exp(lamda*i);
j=size-1+i;
coe[j]=coe_i;
j=size-1-i;
coe[j]=coe_i;
}
for (i=0; i < size; i++) {
for (j=0; j < size; j++) {
m[i*size+j]=coe[size-1-i+j];
}
}
}
int main(int argc, char *argv[])
{
printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n", MAXBLOCKSIZE, BLOCK_SIZE_XY, BLOCK_SIZE_XY);
int verbose = 0;
int i, j;
char flag;
if (argc < 2) {
printf("Usage: gaussian -f filename / -s size [-q]\n\n");
printf("-q (quiet) suppresses printing the matrix and result values.\n");
printf("-f (filename) path of input file\n");
printf("-s (size) size of matrix. Create matrix and rhs in this program \n");
printf("The first line of the file contains the dimension of the matrix, n.");
printf("The second line of the file is a newline.\n");
printf("The next n lines contain n tab separated values for the matrix.");
printf("The next line of the file is a newline.\n");
printf("The next line of the file is a 1xn vector with tab separated values.\n");
printf("The next line of the file is a newline. (optional)\n");
printf("The final line of the file is the pre-computed solution. (optional)\n");
printf("Example: matrix4.txt:\n");
printf("4\n");
printf("\n");
printf("-0.6 -0.5 0.7 0.3\n");
printf("-0.3 -0.9 0.3 0.7\n");
printf("-0.4 -0.5 -0.3 -0.8\n");
printf("0.0 -0.1 0.2 0.9\n");
printf("\n");
printf("-0.85 -0.68 0.24 -0.53\n");
printf("\n");
printf("0.7 0.0 -0.4 -0.5\n");
exit(0);
}
//PrintDeviceProperties();
//char filename[100];
//sprintf(filename,"matrices/matrix%d.txt",size);
for(i=1;i<argc;i++) {
if (argv[i][0]=='-') {// flag
flag = argv[i][1];
switch (flag) {
case 's': // platform
i++;
Size = atoi(argv[i]);
printf("Create matrix internally in parse, size = %d \n", Size);
a = (float *) malloc(Size * Size * sizeof(float));
create_matrix(a, Size);
b = (float *) malloc(Size * sizeof(float));
for (j =0; j< Size; j++)
b[j]=1.0;
m = (float *) malloc(Size * Size * sizeof(float));
break;
case 'f': // platform
i++;
printf("Read file from %s \n", argv[i]);
InitProblemOnce(argv[i]);
break;
case 'q': // quiet
verbose = 0;
break;
}
}
}
//InitProblemOnce(filename);
InitPerRun();
//begin timing
struct timeval time_start;
gettimeofday(&time_start, NULL);
// run kernels
ForwardSub();
//end timing
struct timeval time_end;
gettimeofday(&time_end, NULL);
unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
if (verbose) {
printf("Matrix m is: \n");
PrintMat(m, Size, Size);
printf("Matrix a is: \n");
PrintMat(a, Size, Size);
printf("Array b is: \n");
PrintAry(b, Size);
}
BackSub();
if (verbose) {
printf("The final solution is: \n");
PrintAry(finalVec,Size);
}
printf("\nTime total (including memory transfers)\t%f sec\n", time_total * 1e-6);
printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6);
VerifyResult();
/*printf("%d,%d\n",size,time_total);
fprintf(stderr,"%d,%d\n",size,time_total);*/
free(m);
free(a);
free(b);
}
/*------------------------------------------------------
** PrintDeviceProperties
**-----------------------------------------------------
*/
void PrintDeviceProperties(){
cudaDeviceProp deviceProp;
int nDevCount = 0;
cudaGetDeviceCount( &nDevCount );
printf( "Total Device found: %d", nDevCount );
for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx )
{
memset( &deviceProp, 0, sizeof(deviceProp));
if( cudaSuccess == cudaGetDeviceProperties(&deviceProp, nDeviceIdx))
{
printf( "\nDevice Name \t\t - %s ", deviceProp.name );
printf( "\n**************************************");
printf( "\nTotal Global Memory\t\t\t - %lu KB", deviceProp.totalGlobalMem/1024 );
printf( "\nShared memory available per block \t - %lu KB", deviceProp.sharedMemPerBlock/1024 );
printf( "\nNumber of registers per thread block \t - %d", deviceProp.regsPerBlock );
printf( "\nWarp size in threads \t\t\t - %d", deviceProp.warpSize );
printf( "\nMemory Pitch \t\t\t\t - %zu bytes", deviceProp.memPitch );
printf( "\nMaximum threads per block \t\t - %d", deviceProp.maxThreadsPerBlock );
printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2] );
printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2] );
printf( "\nTotal constant memory \t\t\t - %zu bytes", deviceProp.totalConstMem );
printf( "\nCUDA ver \t\t\t\t - %d.%d", deviceProp.major, deviceProp.minor );
printf( "\nClock rate \t\t\t\t - %d KHz", deviceProp.clockRate );
printf( "\nTexture Alignment \t\t\t - %zu bytes", deviceProp.textureAlignment );
printf( "\nDevice Overlap \t\t\t\t - %s", deviceProp. deviceOverlap?"Allowed":"Not Allowed" );
printf( "\nNumber of Multi processors \t\t - %d\n\n", deviceProp.multiProcessorCount );
}
else
printf( "\n%s", cudaGetErrorString(cudaGetLastError()));
}
}
/*------------------------------------------------------
** InitProblemOnce -- Initialize all of the matrices and
** vectors by opening a data file specified by the user.
**
** We use the dynamically allocated arrays *a, *b, and *m
** as the memory storage.
**------------------------------------------------------
*/
void InitProblemOnce(char *filename)
{
//char *filename = argv[1];
//printf("Enter the data file name: ");
//scanf("%s", filename);
//printf("The file name is: %s\n", filename);
fp = fopen(filename, "r");
fscanf(fp, "%d", &Size);
a = (float *) malloc(Size * Size * sizeof(float));
InitMat(a, Size, Size);
//printf("The input matrix a is:\n");
//PrintMat(a, Size, Size);
b = (float *) malloc(Size * sizeof(float));
InitAry(b, Size);
//printf("The input array b is:\n");
//PrintAry(b, Size);
m = (float *) malloc(Size * Size * sizeof(float));
}
/*------------------------------------------------------
** InitPerRun() -- Initialize the contents of the
** multiplier matrix **m
**------------------------------------------------------
*/
void InitPerRun()
{
int i;
for (i=0; i<Size*Size; i++)
*(m+i) = 0.0;
}
/*-------------------------------------------------------
** Fan1() -- Calculate multiplier matrix
** Pay attention to the index. Index i gives the range,
** which starts from 0 to range-1. The real values of
** the index should be adjusted and related to the value
** of t, which is defined in ForwardSub().
**-------------------------------------------------------
*/
__global__ void Fan1(float *m_cuda, float *a_cuda, int Size, int t)
{
//if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) printf(".");
//printf("blockIDx.x:%d,threadIdx.x:%d,Size:%d,t:%d,Size-1-t:%d\n",blockIdx.x,threadIdx.x,Size,t,Size-1-t);
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
*(m_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) = *(a_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) / *(a_cuda+Size*t+t);
}
/*-------------------------------------------------------
** Fan2() -- Modify the matrix A into LUD
**-------------------------------------------------------
*/
__global__ void Fan2(float *m_cuda, float *a_cuda, float *b_cuda,int Size, int j1, int t)
{
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return;
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)];
//a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t];
if(yidx == 0){
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
//printf("xidx:%d,yidx:%d\n",xidx,yidx);
b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t];
}
}
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination.
**------------------------------------------------------
*/
void ForwardSub()
{
int t;
float *m_cuda,*a_cuda,*b_cuda;
// allocate memory on GPU
cudaMalloc((void **) &m_cuda, Size * Size * sizeof(float));
cudaMalloc((void **) &a_cuda, Size * Size * sizeof(float));
cudaMalloc((void **) &b_cuda, Size * sizeof(float));
// copy memory to GPU
cudaMemcpy(m_cuda, m, Size * Size * sizeof(float),cudaMemcpyHostToDevice );
cudaMemcpy(a_cuda, a, Size * Size * sizeof(float),cudaMemcpyHostToDevice );
cudaMemcpy(b_cuda, b, Size * sizeof(float),cudaMemcpyHostToDevice );
int block_size,grid_size;
block_size = MAXBLOCKSIZE;
grid_size = (Size/block_size) + (!(Size%block_size)? 0:1);
//printf("1d grid size: %d\n",grid_size);
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
//dim3 dimGrid( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) );
int blockSize2d, gridSize2d;
blockSize2d = BLOCK_SIZE_XY;
gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d?0:1));
dim3 dimBlockXY(blockSize2d,blockSize2d);
dim3 dimGridXY(gridSize2d,gridSize2d);
// begin timing kernels
struct timeval time_start;
gettimeofday(&time_start, NULL);
for (t=0; t<(Size-1); t++) {
Fan1<<<dimGrid,dimBlock>>>(m_cuda,a_cuda,Size,t);
cudaThreadSynchronize();
Fan2<<<dimGridXY,dimBlockXY>>>(m_cuda,a_cuda,b_cuda,Size,Size-t,t);
cudaThreadSynchronize();
checkCUDAError("Fan2");
}
// end timing kernels
struct timeval time_end;
gettimeofday(&time_end, NULL);
totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
// copy memory back to CPU
cudaMemcpy(m, m_cuda, Size * Size * sizeof(float),cudaMemcpyDeviceToHost );
cudaMemcpy(a, a_cuda, Size * Size * sizeof(float),cudaMemcpyDeviceToHost );
cudaMemcpy(b, b_cuda, Size * sizeof(float),cudaMemcpyDeviceToHost );
cudaFree(m_cuda);
cudaFree(a_cuda);
cudaFree(b_cuda);
}
/*------------------------------------------------------
** BackSub() -- Backward substitution
**------------------------------------------------------
*/
void BackSub()
{
// create a new vector to hold the final answer
finalVec = (float *) malloc(Size * sizeof(float));
// solve "bottom up"
int i,j;
for(i=0;i<Size;i++){
finalVec[Size-i-1]=b[Size-i-1];
for(j=0;j<i;j++)
{
finalVec[Size-i-1]-=*(a+Size*(Size-i-1)+(Size-j-1)) * finalVec[Size-j-1];
}
finalVec[Size-i-1]=finalVec[Size-i-1]/ *(a+Size*(Size-i-1)+(Size-i-1));
}
}
void InitMat(float *ary, int nrow, int ncol)
{
int i, j;
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
fscanf(fp, "%f", ary+Size*i+j);
}
}
}
/*------------------------------------------------------
** PrintMat() -- Print the contents of the matrix
**------------------------------------------------------
*/
void PrintMat(float *ary, int nrow, int ncol)
{
int i, j;
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
printf("%8.2f ", *(ary+Size*i+j));
}
printf("\n");
}
printf("\n");
}
/*------------------------------------------------------
** InitAry() -- Initialize the array (vector) by reading
** data from the data file
**------------------------------------------------------
*/
void InitAry(float *ary, int ary_size)
{
int i;
for (i=0; i<ary_size; i++) {
fscanf(fp, "%f", &ary[i]);
}
}
/*------------------------------------------------------
** PrintAry() -- Print the contents of the array (vector)
**------------------------------------------------------
*/
void PrintAry(float *ary, int ary_size)
{
int i;
for (i=0; i<ary_size; i++) {
printf("%.2f ", ary[i]);
}
printf("\n\n");
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
void VerifyResult()
{
int i,j;
float tmp_out = 0;
for(i=0;i<Size;i++)
{
for(j=0,tmp_out=0;j<Size;j++)
tmp_out += ( *(a + Size*i + j) * finalVec[j] );
if( fabs(tmp_out - b[i]) > 0.01)
{
printf("Test Failed\n");
printf("out[%d]: %f; b[%d]:%f; diff:%f\n",i, tmp_out, i, b[i], b[i]-tmp_out);
return;
}
}
printf("Test Pass\n");
return;
}
|
ebce284c867f9a9846e8bb192f09de20f89bab80.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_array_dot_product_r8__.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t tsize = XSIZE*YSIZE;
// each buffer holds tsize = XSIZE*YSIZE doubles, so size the allocations in bytes accordingly
const double *arr1 = NULL;
hipMalloc(&arr1, XSIZE*YSIZE*sizeof(double));
const double *arr2 = NULL;
hipMalloc(&arr2, XSIZE*YSIZE*sizeof(double));
volatile double *dprod = NULL;
hipMalloc(&dprod, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(gpu_array_dot_product_r8__, dim3(gridBlock), dim3(threadBlock), 0, 0, tsize, arr1, arr2, dprod);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(gpu_array_dot_product_r8__, dim3(gridBlock), dim3(threadBlock), 0, 0, tsize, arr1, arr2, dprod);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(gpu_array_dot_product_r8__, dim3(gridBlock), dim3(threadBlock), 0, 0, tsize, arr1, arr2, dprod);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ebce284c867f9a9846e8bb192f09de20f89bab80.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_array_dot_product_r8__.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t tsize = XSIZE*YSIZE;
// each buffer holds tsize = XSIZE*YSIZE doubles, so size the allocations in bytes accordingly
const double *arr1 = NULL;
cudaMalloc(&arr1, XSIZE*YSIZE*sizeof(double));
const double *arr2 = NULL;
cudaMalloc(&arr2, XSIZE*YSIZE*sizeof(double));
volatile double *dprod = NULL;
cudaMalloc(&dprod, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gpu_array_dot_product_r8__<<<gridBlock,threadBlock>>>(tsize,arr1,arr2,dprod);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gpu_array_dot_product_r8__<<<gridBlock,threadBlock>>>(tsize,arr1,arr2,dprod);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gpu_array_dot_product_r8__<<<gridBlock,threadBlock>>>(tsize,arr1,arr2,dprod);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
42c52cd3234d752363d7eba1fa5b4de6ee10c4a8.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdlib.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"
// setting the number of trials in the monte carlo simulation:
#ifndef NUMTRIALS
#define NUMTRIALS 32768
#endif
#ifndef BLOCKSIZE
#define BLOCKSIZE 16 // number of threads per block
#endif
#define NUMBLOCKS ( NUMTRIALS / BLOCKSIZE )
// ranges for the random numbers:
const float XCMIN = 0.0;
const float XCMAX = 2.0;
const float YCMIN = 0.0;
const float YCMAX = 2.0;
const float RMIN = 0.5;
const float RMAX = 2.0;
// function prototypes:
float Ranf( float, float );
int Ranf( int, int );
void TimeOfDaySeed( );
__global__ void MonteCarlo( float *Xcs, float *Ycs, float *Rs, int *Hits )
{
unsigned int wgNumber = blockIdx.x;
unsigned int wgDimension = blockDim.x;
unsigned int threadNum = threadIdx.x;
unsigned int gid = wgNumber*wgDimension + threadNum;
// all the monte carlo stuff goes in here
// if we make it all the way through, then Hits[gid] = 1
// randomize the location and radius of the circle:
float xc = Xcs[gid];
float yc = Ycs[gid];
float r = Rs[gid];
float tn = tanf( (float)( (M_PI/180.) * 30. ) );
Hits[gid] = 0;
// solve for the intersection using the quadratic formula:
float a = 1. + tn*tn;
float b = -2.*( xc + yc*tn );
float c = xc*xc + yc*yc - r*r;
float d = b*b - 4.*a*c;
// cascading if-statements:
// if you used "continue;" in project #1, change to this style because,
// if there is no for-loop, then there is nowhere to continue to
if( d >= 0.)
{
// hits the circle:
// get the first intersection:
d = sqrt( d );
float t1 = (-b + d ) / ( 2.*a ); // time to intersect the circle
float t2 = (-b - d ) / ( 2.*a ); // time to intersect the circle
float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection
if(tmin >= 0.)
{
// where does it intersect the circle?
float xcir = tmin;
float ycir = tmin*tn;
// get the unitized normal vector at the point of intersection:
float nx = xcir - xc;
float ny = ycir - yc;
float n = sqrt( nx*nx + ny*ny );
nx /= n; // unit vector
ny /= n; // unit vector
// get the unitized incoming vector:
float inx = xcir - 0.;
float iny = ycir - 0.;
float in = sqrt( inx*inx + iny*iny );
inx /= in; // unit vector
iny /= in; // unit vector
// get the outgoing (bounced) vector:
float dot = inx*nx + iny*ny;
float outy = iny - 2.*ny*dot; // angle of reflection = angle of incidence
// find out if it hits the infinite plate:
float t = ( 0. - ycir ) / outy;
if( t >= 0. )
{
Hits[gid] = 1;
}
}
}
}
// main program:
int
main( int argc, char* argv[ ] )
{
TimeOfDaySeed( );
int dev = findCudaDevice(argc, (const char **)argv);
// allocate host memory:
float *hXcs = new float[NUMTRIALS];
float *hYcs = new float[NUMTRIALS];
float * hRs = new float[NUMTRIALS];
int *hHits = new int[NUMTRIALS];
// fill the random-value arrays:
for( int n = 0; n < NUMTRIALS; n++ )
{
hXcs[n] = Ranf( XCMIN, XCMAX );
hYcs[n] = Ranf( YCMIN, YCMAX );
hRs[n] = Ranf( RMIN, RMAX );
}
// allocate device memory:
float *dXcs, *dYcs, *dRs;
int *dHits;
dim3 dimsXcs( NUMTRIALS, 1, 1 );
dim3 dimsYcs( NUMTRIALS, 1, 1 );
dim3 dimsRs( NUMTRIALS, 1, 1 );
dim3 dimsHits( NUMTRIALS, 1, 1 );
hipError_t status;
status = hipMalloc( (void **)(&dXcs), NUMTRIALS*sizeof(float) );
checkCudaErrors( status );
status = hipMalloc( (void **)(&dYcs), NUMTRIALS*sizeof(float) );
checkCudaErrors( status );
status = hipMalloc( (void **)(&dRs), NUMTRIALS*sizeof(float) );
checkCudaErrors( status );
status = hipMalloc( (void **)(&dHits), NUMTRIALS *sizeof(int) );
checkCudaErrors( status );
// copy host memory to the device:
status = hipMemcpy( dXcs, hXcs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice );
checkCudaErrors( status );
status = hipMemcpy( dYcs, hYcs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice );
checkCudaErrors( status );
status = hipMemcpy( dRs, hRs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice );
checkCudaErrors( status );
// setup the execution parameters:
dim3 threads(BLOCKSIZE, 1, 1 );
dim3 grid(NUMBLOCKS, 1, 1 );
// create and start timer
hipDeviceSynchronize( );
// allocate CUDA events that we'll use for timing:
hipEvent_t start, stop;
status = hipEventCreate( &start );
checkCudaErrors( status );
status = hipEventCreate( &stop );
checkCudaErrors( status );
// record the start event:
status = hipEventRecord( start, NULL );
checkCudaErrors( status );
// execute the kernel:
hipLaunchKernelGGL(( MonteCarlo), dim3(grid), dim3(threads) , 0, 0, dXcs, dYcs, dRs, dHits );
// record the stop event:
status = hipEventRecord( stop, NULL );
checkCudaErrors( status );
// wait for the stop event to complete:
status = hipEventSynchronize( stop );
checkCudaErrors( status );
float msecTotal = 0.0f;
status = hipEventElapsedTime( &msecTotal, start, stop );
checkCudaErrors( status );
// compute and print the performance
double secondsTotal = 0.001 * (double)msecTotal;
double trialsPerSecond = (float)NUMTRIALS / secondsTotal;
double megaTrialsPerSecond = trialsPerSecond / 1000000.;
//fprintf( stderr, "Number of Trials = %10d, MegaTrials/Second = %10.4lf\n", NUMTRIALS, megaTrialsPerSecond );
FILE *ptr = fopen("data.txt", "a+");
fprintf(ptr, "%lf", megaTrialsPerSecond);
fclose(ptr);
// copy result from the device to the host:
status = hipMemcpy( hHits, dHits, NUMTRIALS *sizeof(int), hipMemcpyDeviceToHost );
checkCudaErrors( status );
hipDeviceSynchronize( );
// compute the probability:
//int numHits = 0;
//for(int i = 0; i < NUMTRIALS; i++ )
//{
// numHits += hHits[i];
//}
//float probability = 100.f * (float)numHits / (float)NUMTRIALS;
//fprintf(stderr, "\nProbability = %6.3f %%\n", probability );
// clean up memory:
delete [ ] hXcs;
delete [ ] hYcs;
delete [ ] hRs;
delete [ ] hHits;
status = hipFree( dXcs );
status = hipFree( dYcs );
status = hipFree( dRs );
status = hipFree( dHits );
checkCudaErrors( status );
return 0;
}
float
Ranf( float low, float high )
{
float r = (float) rand(); // 0 - RAND_MAX
float t = r / (float) RAND_MAX; // 0. - 1.
return low + t * ( high - low );
}
int
Ranf( int ilow, int ihigh )
{
float low = (float)ilow;
float high = ceil( (float)ihigh );
return (int) Ranf(low,high);
}
void
TimeOfDaySeed( )
{
struct tm y2k = { 0 };
y2k.tm_hour = 0; y2k.tm_min = 0; y2k.tm_sec = 0;
y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1;
time_t timer;
time( &timer );
double seconds = difftime( timer, mktime(&y2k) );
unsigned int seed = (unsigned int)( 1000.*seconds ); // milliseconds
srand( seed );
}
| 42c52cd3234d752363d7eba1fa5b4de6ee10c4a8.cu | // System includes
#include <stdio.h>
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"
// setting the number of trials in the monte carlo simulation:
#ifndef NUMTRIALS
#define NUMTRIALS 32768
#endif
#ifndef BLOCKSIZE
#define BLOCKSIZE 16 // number of threads per block
#endif
#define NUMBLOCKS ( NUMTRIALS / BLOCKSIZE )
// ranges for the random numbers:
const float XCMIN = 0.0;
const float XCMAX = 2.0;
const float YCMIN = 0.0;
const float YCMAX = 2.0;
const float RMIN = 0.5;
const float RMAX = 2.0;
// function prototypes:
float Ranf( float, float );
int Ranf( int, int );
void TimeOfDaySeed( );
__global__ void MonteCarlo( float *Xcs, float *Ycs, float *Rs, int *Hits )
{
unsigned int wgNumber = blockIdx.x;
unsigned int wgDimension = blockDim.x;
unsigned int threadNum = threadIdx.x;
unsigned int gid = wgNumber*wgDimension + threadNum;
// all the monte carlo stuff goes in here
// if we make it all the way through, then Hits[gid] = 1
// randomize the location and radius of the circle:
float xc = Xcs[gid];
float yc = Ycs[gid];
float r = Rs[gid];
float tn = tanf( (float)( (M_PI/180.) * 30. ) );
Hits[gid] = 0;
// solve for the intersection using the quadratic formula:
float a = 1. + tn*tn;
float b = -2.*( xc + yc*tn );
float c = xc*xc + yc*yc - r*r;
float d = b*b - 4.*a*c;
// cascading if-statements:
// if you used "continue;" in project #1, change to this style because,
// if there is no for-loop, then there is nowhere to continue to
if( d >= 0.)
{
// hits the circle:
// get the first intersection:
d = sqrt( d );
float t1 = (-b + d ) / ( 2.*a ); // time to intersect the circle
float t2 = (-b - d ) / ( 2.*a ); // time to intersect the circle
float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection
if(tmin >= 0.)
{
// where does it intersect the circle?
float xcir = tmin;
float ycir = tmin*tn;
// get the unitized normal vector at the point of intersection:
float nx = xcir - xc;
float ny = ycir - yc;
float n = sqrt( nx*nx + ny*ny );
nx /= n; // unit vector
ny /= n; // unit vector
// get the unitized incoming vector:
float inx = xcir - 0.;
float iny = ycir - 0.;
float in = sqrt( inx*inx + iny*iny );
inx /= in; // unit vector
iny /= in; // unit vector
// get the outgoing (bounced) vector:
float dot = inx*nx + iny*ny;
float outy = iny - 2.*ny*dot; // angle of reflection = angle of incidence
// find out if it hits the infinite plate:
float t = ( 0. - ycir ) / outy;
if( t >= 0. )
{
Hits[gid] = 1;
}
}
}
}
// main program:
int
main( int argc, char* argv[ ] )
{
TimeOfDaySeed( );
int dev = findCudaDevice(argc, (const char **)argv);
// allocate host memory:
float *hXcs = new float[NUMTRIALS];
float *hYcs = new float[NUMTRIALS];
float * hRs = new float[NUMTRIALS];
int *hHits = new int[NUMTRIALS];
// fill the random-value arrays:
for( int n = 0; n < NUMTRIALS; n++ )
{
hXcs[n] = Ranf( XCMIN, XCMAX );
hYcs[n] = Ranf( YCMIN, YCMAX );
hRs[n] = Ranf( RMIN, RMAX );
}
// allocate device memory:
float *dXcs, *dYcs, *dRs;
int *dHits;
dim3 dimsXcs( NUMTRIALS, 1, 1 );
dim3 dimsYcs( NUMTRIALS, 1, 1 );
dim3 dimsRs( NUMTRIALS, 1, 1 );
dim3 dimsHits( NUMTRIALS, 1, 1 );
cudaError_t status;
status = cudaMalloc( (void **)(&dXcs), NUMTRIALS*sizeof(float) );
checkCudaErrors( status );
status = cudaMalloc( (void **)(&dYcs), NUMTRIALS*sizeof(float) );
checkCudaErrors( status );
status = cudaMalloc( (void **)(&dRs), NUMTRIALS*sizeof(float) );
checkCudaErrors( status );
status = cudaMalloc( (void **)(&dHits), NUMTRIALS *sizeof(int) );
checkCudaErrors( status );
// copy host memory to the device:
status = cudaMemcpy( dXcs, hXcs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice );
checkCudaErrors( status );
status = cudaMemcpy( dYcs, hYcs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice );
checkCudaErrors( status );
status = cudaMemcpy( dRs, hRs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice );
checkCudaErrors( status );
// setup the execution parameters:
dim3 threads(BLOCKSIZE, 1, 1 );
dim3 grid(NUMBLOCKS, 1, 1 );
// create and start timer
cudaDeviceSynchronize( );
// allocate CUDA events that we'll use for timing:
cudaEvent_t start, stop;
status = cudaEventCreate( &start );
checkCudaErrors( status );
status = cudaEventCreate( &stop );
checkCudaErrors( status );
// record the start event:
status = cudaEventRecord( start, NULL );
checkCudaErrors( status );
// execute the kernel:
MonteCarlo<<< grid, threads >>>( dXcs, dYcs, dRs, dHits );
// record the stop event:
status = cudaEventRecord( stop, NULL );
checkCudaErrors( status );
// wait for the stop event to complete:
status = cudaEventSynchronize( stop );
checkCudaErrors( status );
float msecTotal = 0.0f;
status = cudaEventElapsedTime( &msecTotal, start, stop );
checkCudaErrors( status );
// compute and print the performance
double secondsTotal = 0.001 * (double)msecTotal;
double trialsPerSecond = (float)NUMTRIALS / secondsTotal;
double megaTrialsPerSecond = trialsPerSecond / 1000000.;
//fprintf( stderr, "Number of Trials = %10d, MegaTrials/Second = %10.4lf\n", NUMTRIALS, megaTrialsPerSecond );
FILE *ptr = fopen("data.txt", "a+");
fprintf(ptr, "%lf", megaTrialsPerSecond);
fclose(ptr);
// copy result from the device to the host:
status = cudaMemcpy( hHits, dHits, NUMTRIALS *sizeof(int), cudaMemcpyDeviceToHost );
checkCudaErrors( status );
cudaDeviceSynchronize( );
// compute the probability:
//int numHits = 0;
//for(int i = 0; i < NUMTRIALS; i++ )
//{
// numHits += hHits[i];
//}
//float probability = 100.f * (float)numHits / (float)NUMTRIALS;
//fprintf(stderr, "\nProbability = %6.3f %%\n", probability );
// clean up memory:
delete [ ] hXcs;
delete [ ] hYcs;
delete [ ] hRs;
delete [ ] hHits;
status = cudaFree( dXcs );
status = cudaFree( dYcs );
status = cudaFree( dRs );
status = cudaFree( dHits );
checkCudaErrors( status );
return 0;
}
float
Ranf( float low, float high )
{
float r = (float) rand(); // 0 - RAND_MAX
float t = r / (float) RAND_MAX; // 0. - 1.
return low + t * ( high - low );
}
int
Ranf( int ilow, int ihigh )
{
float low = (float)ilow;
float high = ceil( (float)ihigh );
return (int) Ranf(low,high);
}
void
TimeOfDaySeed( )
{
struct tm y2k = { 0 };
y2k.tm_hour = 0; y2k.tm_min = 0; y2k.tm_sec = 0;
y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1;
time_t timer;
time( &timer );
double seconds = difftime( timer, mktime(&y2k) );
unsigned int seed = (unsigned int)( 1000.*seconds ); // milliseconds
srand( seed );
}
|
822e6a4e3fe27f452ef5375bf3dc4d920591918d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
//#include <math.h>
#include "hip/hip_runtime.h"
#include <hip/hip_runtime.h>
#include <cusolverDn.h>
#include <rocblas.h>
#include <hipfft.h>
#include "Utilities.cuh"
#define DEBUG
#define PI_R 3.14159265358979323846f
/*******************/
/* iDivUp FUNCTION */
/*******************/
//extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
__host__ __device__ int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/********************/
/* CUDA ERROR CHECK */
/********************/
// --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) { exit(code); }
}
}
extern "C" void gpuErrchk(hipError_t ans) { gpuAssert((ans), __FILE__, __LINE__); }
/**************************/
/* CUSOLVE ERROR CHECKING */
/**************************/
static const char *_cusolverGetErrorEnum(cusolverStatus_t error)
{
switch (error)
{
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
inline void __cusolveSafeCall(cusolverStatus_t err, const char *file, const int line)
{
if (CUSOLVER_STATUS_SUCCESS != err) {
fprintf(stderr, "CUSOLVE error in file '%s', line %d, error: %s \nterminating!\n", __FILE__, __LINE__, \
_cusolverGetErrorEnum(err)); \
assert(0); \
}
}
extern "C" void cusolveSafeCall(cusolverStatus_t err) { __cusolveSafeCall(err, __FILE__, __LINE__); }
/*************************/
/* CUBLAS ERROR CHECKING */
/*************************/
static const char *_cublasGetErrorEnum(hipblasStatus_t error)
{
switch (error)
{
case HIPBLAS_STATUS_SUCCESS:
return "HIPBLAS_STATUS_SUCCESS";
case HIPBLAS_STATUS_NOT_INITIALIZED:
return "HIPBLAS_STATUS_NOT_INITIALIZED";
case HIPBLAS_STATUS_ALLOC_FAILED:
return "HIPBLAS_STATUS_ALLOC_FAILED";
case HIPBLAS_STATUS_INVALID_VALUE:
return "HIPBLAS_STATUS_INVALID_VALUE";
case HIPBLAS_STATUS_ARCH_MISMATCH:
return "HIPBLAS_STATUS_ARCH_MISMATCH";
case HIPBLAS_STATUS_MAPPING_ERROR:
return "HIPBLAS_STATUS_MAPPING_ERROR";
case HIPBLAS_STATUS_EXECUTION_FAILED:
return "HIPBLAS_STATUS_EXECUTION_FAILED";
case HIPBLAS_STATUS_INTERNAL_ERROR:
return "HIPBLAS_STATUS_INTERNAL_ERROR";
case HIPBLAS_STATUS_NOT_SUPPORTED:
return "HIPBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
inline void __cublasSafeCall(hipblasStatus_t err, const char *file, const int line)
{
if (HIPBLAS_STATUS_SUCCESS != err) {
fprintf(stderr, "CUBLAS error in file '%s', line %d, error: %s\nterminating!\n", __FILE__, __LINE__, \
_cublasGetErrorEnum(err)); \
assert(0); \
}
}
extern "C" void cublasSafeCall(hipblasStatus_t err) { __cublasSafeCall(err, __FILE__, __LINE__); }
/************************/
/* CUFFT ERROR CHECKING */
/************************/
// See http://stackoverflow.com/questions/16267149/cufft-error-handling
static const char *_cudaGetErrorEnum(hipfftResult error)
{
switch (error)
{
case HIPFFT_SUCCESS:
return "HIPFFT_SUCCESS - The cuFFT operation was successful";
case HIPFFT_INVALID_PLAN:
return "HIPFFT_INVALID_PLAN - cuFFT was passed an invalid plan handle";
case HIPFFT_ALLOC_FAILED:
return "HIPFFT_ALLOC_FAILED - cuFFT failed to allocate GPU or CPU memory";
case HIPFFT_INVALID_TYPE:
return "HIPFFT_INVALID_TYPE - No longer used";
case HIPFFT_INVALID_VALUE:
return "HIPFFT_INVALID_VALUE - User specified an invalid pointer or parameter";
case HIPFFT_INTERNAL_ERROR:
return "HIPFFT_INTERNAL_ERROR - Driver or internal cuFFT library error";
case HIPFFT_EXEC_FAILED:
return "HIPFFT_EXEC_FAILED - Failed to execute an FFT on the GPU";
case HIPFFT_SETUP_FAILED:
return "HIPFFT_SETUP_FAILED - The cuFFT library failed to initialize";
case HIPFFT_INVALID_SIZE:
return "HIPFFT_INVALID_SIZE - User specified an invalid transform size";
case HIPFFT_UNALIGNED_DATA:
return "HIPFFT_UNALIGNED_DATA - No longer used";
case HIPFFT_INCOMPLETE_PARAMETER_LIST:
return "HIPFFT_INCOMPLETE_PARAMETER_LIST - Missing parameters in call";
case HIPFFT_INVALID_DEVICE:
return "HIPFFT_INVALID_DEVICE - Execution of a plan was on different GPU than plan creation";
case HIPFFT_PARSE_ERROR:
return "HIPFFT_PARSE_ERROR - Internal plan database error";
case HIPFFT_NO_WORKSPACE:
return "HIPFFT_NO_WORKSPACE - No workspace has been provided prior to plan execution";
case HIPFFT_NOT_IMPLEMENTED:
return "HIPFFT_NOT_IMPLEMENTED - Function does not implement functionality for parameters given";
case HIPFFT_LICENSE_ERROR:
return "HIPFFT_LICENSE_ERROR - Used in previous versions";
case HIPFFT_NOT_SUPPORTED:
return "HIPFFT_NOT_SUPPORTED - Operation is not supported for parameters given";
}
return "<unknown>";
}
// --- CUFFTSAFECALL
inline void cufftAssert(hipfftResult err, const char *file, const int line, bool abort = true)
{
if (HIPFFT_SUCCESS != err) {
fprintf(stderr, "CUFFTassert: Error nr. %d - %s %s %d\n", err, _cudaGetErrorEnum(err), __FILE__, __LINE__);
if (abort) exit(err);
}
}
extern "C" void cufftSafeCall(hipfftResult err) { cufftAssert(err, __FILE__, __LINE__); }
/***************************/
/* CUSPARSE ERROR CHECKING */
/***************************/
static const char *_cusparseGetErrorEnum(hipsparseStatus_t error)
{
switch (error)
{
case HIPSPARSE_STATUS_SUCCESS:
return "HIPSPARSE_STATUS_SUCCESS";
case HIPSPARSE_STATUS_NOT_INITIALIZED:
return "HIPSPARSE_STATUS_NOT_INITIALIZED";
case HIPSPARSE_STATUS_ALLOC_FAILED:
return "HIPSPARSE_STATUS_ALLOC_FAILED";
case HIPSPARSE_STATUS_INVALID_VALUE:
return "HIPSPARSE_STATUS_INVALID_VALUE";
case HIPSPARSE_STATUS_ARCH_MISMATCH:
return "HIPSPARSE_STATUS_ARCH_MISMATCH";
case HIPSPARSE_STATUS_MAPPING_ERROR:
return "HIPSPARSE_STATUS_MAPPING_ERROR";
case HIPSPARSE_STATUS_EXECUTION_FAILED:
return "HIPSPARSE_STATUS_EXECUTION_FAILED";
case HIPSPARSE_STATUS_INTERNAL_ERROR:
return "HIPSPARSE_STATUS_INTERNAL_ERROR";
case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case HIPSPARSE_STATUS_ZERO_PIVOT:
return "HIPSPARSE_STATUS_ZERO_PIVOT";
}
return "<unknown>";
}
inline void __cusparseSafeCall(hipsparseStatus_t err, const char *file, const int line)
{
if (HIPSPARSE_STATUS_SUCCESS != err) {
fprintf(stderr, "CUSPARSE error in file '%s', line %d, error %s\nterminating!\n", __FILE__, __LINE__, \
_cusparseGetErrorEnum(err)); \
assert(0); \
}
}
extern "C" void cusparseSafeCall(hipsparseStatus_t err) { __cusparseSafeCall(err, __FILE__, __LINE__); }
/************************/
/* REVERSE ARRAY KERNEL */
/************************/
#define BLOCKSIZE_REVERSE 256
// --- Credit to http://www.drdobbs.com/parallel/cuda-supercomputing-for-the-masses-part/208801731?pgno=2
template <class T>
__global__ void reverseArrayKernel(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a)
{
// --- Credit to the simpleTemplates CUDA sample
SharedMemory<T> smem;
T* s_data = smem.getPointer();
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int id = threadIdx.x;
const int offset = blockDim.x * (blockIdx.x + 1);
// --- Load one element per thread from device memory and store it *in reversed order* into shared memory
if (tid < N) s_data[BLOCKSIZE_REVERSE - (id + 1)] = a * d_in[tid];
// --- Block until all threads in the block have written their data to shared memory
__syncthreads();
// --- Write the data from shared memory in forward order
if ((N - offset + id) >= 0) d_out[N - offset + id] = s_data[threadIdx.x];
}
/************************/
/* REVERSE ARRAY KERNEL */
/************************/
template <class T>
void reverseArray(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a) {
reverseArrayKernel << <iDivUp(N, BLOCKSIZE_REVERSE), BLOCKSIZE_REVERSE, BLOCKSIZE_REVERSE * sizeof(T) >> >(d_in, d_out, N, a);
#ifdef DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void reverseArray<float>(const float * __restrict__, float * __restrict__, const int, const float);
template void reverseArray<double>(const double * __restrict__, double * __restrict__, const int, const double);
/********************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION KERNEL */
/********************************************************/
#define BLOCKSIZE_CART2POL 256
template <class T>
__global__ void Cartesian2PolarKernel(const T * __restrict__ d_x, const T * __restrict__ d_y, T * __restrict__ d_rho, T * __restrict__ d_theta,
const int N, const T a) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
d_rho[tid] = a * hypot(d_x[tid], d_y[tid]);
d_theta[tid] = atan2(d_y[tid], d_x[tid]);
}
}
/*******************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - GPU */
/*******************************************************/
//template <class T>
//thrust::pair<T *,T *> Cartesian2Polar(const T * __restrict__ d_x, const T * __restrict__ d_y, const int N, const T a) {
//
// T *d_rho; gpuErrchk(hipMalloc((void**)&d_rho, N * sizeof(T)));
// T *d_theta; gpuErrchk(hipMalloc((void**)&d_theta, N * sizeof(T)));
//
// Cartesian2PolarKernel<<<iDivUp(N, BLOCKSIZE_CART2POL), BLOCKSIZE_CART2POL>>>(d_x, d_y, d_rho, d_theta, N, a);
//#ifdef DEBUG
// gpuErrchk(hipPeekAtLastError());
// gpuErrchk(hipDeviceSynchronize());
//#endif
//
// return thrust::make_pair(d_rho, d_theta);
//}
//
//template thrust::pair<float *, float *> Cartesian2Polar<float> (const float *, const float *, const int, const float);
//template thrust::pair<double *, double *> Cartesian2Polar<double> (const double *, const double *, const int, const double);
/*******************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - CPU */
/*******************************************************/
//template <class T>
//thrust::pair<T *,T *> h_Cartesian2Polar(const T * __restrict__ h_x, const T * __restrict__ h_y, const int N, const T a) {
//
// T *h_rho = (T *)malloc(N * sizeof(T));
// T *h_theta = (T *)malloc(N * sizeof(T));
//
// for (int i = 0; i < N; i++) {
// h_rho[i] = a * hypot(h_x[i], h_y[i]);
// h_theta[i] = atan2(h_y[i], h_x[i]);
// }
//
// return thrust::make_pair(h_rho, h_theta);
//}
//
//template thrust::pair<float *, float *> h_Cartesian2Polar<float> (const float *, const float *, const int, const float);
//template thrust::pair<double *, double *> h_Cartesian2Polar<double> (const double *, const double *, const int, const double);
/*******************************/
/* COMPUTE L2 NORM OF A VECTOR */
/*******************************/
template<class T>
T h_l2_norm(T *v1, T *v2, const int N) {
T norm = (T)0;
for (int i = 0; i < N; ++i)
{
T d = v1[i] - v2[i];
norm = norm + d * d;
}
return sqrt(norm);
}
template float h_l2_norm<float>(float *, float *, const int);
template double h_l2_norm<double>(double *, double *, const int);
/*******************************/
/* LINEAR COMBINATION FUNCTION */
/*******************************/
void linearCombination(const float * __restrict__ d_coeff, const float * __restrict__ d_basis_functions_real, float * __restrict__ d_linear_combination,
const int N_basis_functions, const int N_sampling_points, const hipblasHandle_t handle) {
float alpha = 1.f;
float beta = 0.f;
cublasSafeCall(hipblasSgemv(handle, HIPBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points,
d_coeff, 1, &beta, d_linear_combination, 1));
}
void linearCombination(const double * __restrict__ d_coeff, const double * __restrict__ d_basis_functions_real, double * __restrict__ d_linear_combination,
const int N_basis_functions, const int N_sampling_points, const hipblasHandle_t handle) {
double alpha = 1.;
double beta = 0.;
cublasSafeCall(hipblasDgemv(handle, HIPBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points,
d_coeff, 1, &beta, d_linear_combination, 1));
}
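/*
 * Note on the two linearCombination() overloads above: both compute
 *   d_linear_combination = d_basis_functions_real * d_coeff
 * i.e. a dense (N_sampling_points x N_basis_functions) matrix, stored
 * column-major with the basis functions as its columns, times the
 * coefficient vector, via hipblasSgemv / hipblasDgemv with alpha = 1 and
 * beta = 0.
 */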
/******************************/
/* ADD A CONSTANT TO A VECTOR */
/******************************/
#define BLOCKSIZE_VECTORADDCONSTANT 256
template<class T>
__global__ void vectorAddConstantKernel(T * __restrict__ d_in, const T scalar, const int N) {
const int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N) d_in[tid] += scalar;
}
template<class T>
void vectorAddConstant(T * __restrict__ d_in, const T scalar, const int N) {
vectorAddConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORADDCONSTANT), BLOCKSIZE_VECTORADDCONSTANT >> >(d_in, scalar, N);
}
template void vectorAddConstant<float>(float * __restrict__, const float, const int);
template void vectorAddConstant<double>(double * __restrict__, const double, const int);
/*****************************************/
/* MULTIPLY A VECTOR BY A CONSTANT - GPU */
/*****************************************/
#define BLOCKSIZE_VECTORMULCONSTANT 256
template<class T>
__global__ void vectorMulConstantKernel(T * __restrict__ d_in, const T scalar, const int N) {
const int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N) d_in[tid] *= scalar;
}
template<class T>
void vectorMulConstant(T * __restrict__ d_in, const T scalar, const int N) {
vectorMulConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORMULCONSTANT), BLOCKSIZE_VECTORMULCONSTANT >> >(d_in, scalar, N);
}
template void vectorMulConstant<float>(float * __restrict__, const float, const int);
template void vectorMulConstant<double>(double * __restrict__, const double, const int);
/*****************************************/
/* MULTIPLY A VECTOR BY A CONSTANT - CPU */
/*****************************************/
template<class T>
void h_vectorMulConstant(T * __restrict__ h_in, const T scalar, const int N) {
for (int i = 0; i < N; i++) h_in[i] *= scalar;
}
template void h_vectorMulConstant<float>(float * __restrict__, const float, const int);
template void h_vectorMulConstant<double>(double * __restrict__, const double, const int);
/*****************************************************/
/* FUSED MULTIPLY ADD OPERATIONS FOR HOST AND DEVICE */
/*****************************************************/
template<class T>
__host__ __device__ T fma2(T x, T y, T z) { return x * y + z; }
template float fma2<float >(float, float, float);
template double fma2<double>(double, double, double);
/*******************/
/* MODULO FUNCTION */
/*******************/
__device__ int modulo(int val, int _mod)
{
int P;
if (val > 0) { (!(_mod & (_mod - 1)) ? P = val&(_mod - 1) : P = val % (_mod)); return P; }
else
{
(!(_mod & (_mod - 1)) ? P = (-val)&(_mod - 1) : P = (-val) % (_mod));
if (P > 0) return _mod - P;
else return 0;
}
}
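/*
 * Note on modulo() above: it returns val mod _mod mapped into [0, _mod),
 * unlike the C '%' operator, which keeps the sign of val. When _mod is a
 * power of two the remainder is taken with a bitwise AND instead of '%'.
 * For example:
 *   modulo( 11, 8) == 3
 *   modulo( -3, 8) == 5   whereas (-3) % 8 == -3 in C
 */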
/***************************************/
/* ATOMIC ADDITION FUNCTION ON DOUBLES */
/***************************************/
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
register unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
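/*
 * Note on the atomicAdd(double*, double) fallback above: devices with
 * compute capability < 6.0 have no native double-precision atomicAdd, so
 * the update is emulated with a compare-and-swap loop. Each iteration
 * reinterprets the 64-bit value, adds val, and retries via atomicCAS until
 * no other thread has modified the address in between. atomicMin(float*)
 * below uses the same CAS pattern.
 */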
/*********************************/
/* ATOMIC MIN FUNCTION ON FLOATS */
/*********************************/
__device__ float atomicMin(float* address, float val)
{
int* address_as_i = (int*)address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
/*********************/
/* DEGREE TO RADIANS */
/*********************/
double deg2rad(double deg) { return deg*PI_R / 180; }
/*********************/
/* CUDA MEMORY USAGE */
/*********************/
void cudaMemoryUsage() {
size_t free_byte;
size_t total_byte;
gpuErrchk(hipMemGetInfo(&free_byte, &total_byte));
double free_db = (double)free_byte;
double total_db = (double)total_byte;
double used_db = total_db - free_db;
printf("GPU memory: used = %f, free = %f MB, total available = %f MB\n", used_db / 1024.0 / 1024.0, free_db / 1024.0 / 1024.0, total_db / 1024.0 / 1024.0);
} | 822e6a4e3fe27f452ef5375bf3dc4d920591918d.cu | #include <stdio.h>
#include <assert.h>
//#include <math.h>
#include "cuda_runtime.h"
#include <cuda.h>
#include <cusolverDn.h>
#include <cublas_v2.h>
#include <cufft.h>
#include "Utilities.cuh"
#define DEBUG
#define PI_R 3.14159265358979323846f
/*******************/
/* iDivUp FUNCTION */
/*******************/
//extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
__host__ __device__ int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/********************/
/* CUDA ERROR CHECK */
/********************/
// --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) { exit(code); }
}
}
extern "C" void gpuErrchk(cudaError_t ans) { gpuAssert((ans), __FILE__, __LINE__); }
/**************************/
/* CUSOLVE ERROR CHECKING */
/**************************/
static const char *_cusolverGetErrorEnum(cusolverStatus_t error)
{
switch (error)
{
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
inline void __cusolveSafeCall(cusolverStatus_t err, const char *file, const int line)
{
if (CUSOLVER_STATUS_SUCCESS != err) {
fprintf(stderr, "CUSOLVE error in file '%s', line %d, error: %s \nterminating!\n", __FILE__, __LINE__, \
_cusolverGetErrorEnum(err)); \
assert(0); \
}
}
extern "C" void cusolveSafeCall(cusolverStatus_t err) { __cusolveSafeCall(err, __FILE__, __LINE__); }
/*************************/
/* CUBLAS ERROR CHECKING */
/*************************/
static const char *_cublasGetErrorEnum(cublasStatus_t error)
{
switch (error)
{
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
inline void __cublasSafeCall(cublasStatus_t err, const char *file, const int line)
{
if (CUBLAS_STATUS_SUCCESS != err) {
fprintf(stderr, "CUBLAS error in file '%s', line %d, error: %s\nterminating!\n", __FILE__, __LINE__, \
_cublasGetErrorEnum(err)); \
assert(0); \
}
}
extern "C" void cublasSafeCall(cublasStatus_t err) { __cublasSafeCall(err, __FILE__, __LINE__); }
/************************/
/* CUFFT ERROR CHECKING */
/************************/
// See http://stackoverflow.com/questions/16267149/cufft-error-handling
static const char *_cudaGetErrorEnum(cufftResult error)
{
switch (error)
{
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS - The cuFFT operation was successful";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN - cuFFT was passed an invalid plan handle";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED - cuFFT failed to allocate GPU or CPU memory";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE - No longer used";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE - User specified an invalid pointer or parameter";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR - Driver or internal cuFFT library error";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED - Failed to execute an FFT on the GPU";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED - The cuFFT library failed to initialize";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE - User specified an invalid transform size";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA - No longer used";
case CUFFT_INCOMPLETE_PARAMETER_LIST:
return "CUFFT_INCOMPLETE_PARAMETER_LIST - Missing parameters in call";
case CUFFT_INVALID_DEVICE:
return "CUFFT_INVALID_DEVICE - Execution of a plan was on different GPU than plan creation";
case CUFFT_PARSE_ERROR:
return "CUFFT_PARSE_ERROR - Internal plan database error";
case CUFFT_NO_WORKSPACE:
return "CUFFT_NO_WORKSPACE - No workspace has been provided prior to plan execution";
case CUFFT_NOT_IMPLEMENTED:
return "CUFFT_NOT_IMPLEMENTED - Function does not implement functionality for parameters given";
case CUFFT_LICENSE_ERROR:
return "CUFFT_LICENSE_ERROR - Used in previous versions";
case CUFFT_NOT_SUPPORTED:
return "CUFFT_NOT_SUPPORTED - Operation is not supported for parameters given";
}
return "<unknown>";
}
// --- CUFFTSAFECALL
inline void cufftAssert(cufftResult err, const char *file, const int line, bool abort = true)
{
if (CUFFT_SUCCESS != err) {
fprintf(stderr, "CUFFTassert: Error nr. %d - %s %s %d\n", err, _cudaGetErrorEnum(err), __FILE__, __LINE__);
if (abort) exit(err);
}
}
extern "C" void cufftSafeCall(cufftResult err) { cufftAssert(err, __FILE__, __LINE__); }
/***************************/
/* CUSPARSE ERROR CHECKING */
/***************************/
static const char *_cusparseGetErrorEnum(cusparseStatus_t error)
{
switch (error)
{
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSPARSE_STATUS_ZERO_PIVOT:
return "CUSPARSE_STATUS_ZERO_PIVOT";
}
return "<unknown>";
}
inline void __cusparseSafeCall(cusparseStatus_t err, const char *file, const int line)
{
if (CUSPARSE_STATUS_SUCCESS != err) {
fprintf(stderr, "CUSPARSE error in file '%s', line %d, error %s\nterminating!\n", __FILE__, __LINE__, \
_cusparseGetErrorEnum(err)); \
assert(0); \
}
}
extern "C" void cusparseSafeCall(cusparseStatus_t err) { __cusparseSafeCall(err, __FILE__, __LINE__); }
/************************/
/* REVERSE ARRAY KERNEL */
/************************/
#define BLOCKSIZE_REVERSE 256
// --- Credit to http://www.drdobbs.com/parallel/cuda-supercomputing-for-the-masses-part/208801731?pgno=2
template <class T>
__global__ void reverseArrayKernel(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a)
{
// --- Credit to the simpleTemplates CUDA sample
SharedMemory<T> smem;
T* s_data = smem.getPointer();
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int id = threadIdx.x;
const int offset = blockDim.x * (blockIdx.x + 1);
// --- Load one element per thread from device memory and store it *in reversed order* into shared memory
if (tid < N) s_data[BLOCKSIZE_REVERSE - (id + 1)] = a * d_in[tid];
// --- Block until all threads in the block have written their data to shared memory
__syncthreads();
// --- Write the data from shared memory in forward order
if ((N - offset + id) >= 0) d_out[N - offset + id] = s_data[threadIdx.x];
}
/************************/
/* REVERSE ARRAY KERNEL */
/************************/
template <class T>
void reverseArray(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a) {
reverseArrayKernel << <iDivUp(N, BLOCKSIZE_REVERSE), BLOCKSIZE_REVERSE, BLOCKSIZE_REVERSE * sizeof(T) >> >(d_in, d_out, N, a);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template void reverseArray<float>(const float * __restrict__, float * __restrict__, const int, const float);
template void reverseArray<double>(const double * __restrict__, double * __restrict__, const int, const double);
/********************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION KERNEL */
/********************************************************/
#define BLOCKSIZE_CART2POL 256
template <class T>
__global__ void Cartesian2PolarKernel(const T * __restrict__ d_x, const T * __restrict__ d_y, T * __restrict__ d_rho, T * __restrict__ d_theta,
const int N, const T a) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
d_rho[tid] = a * hypot(d_x[tid], d_y[tid]);
d_theta[tid] = atan2(d_y[tid], d_x[tid]);
}
}
/*******************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - GPU */
/*******************************************************/
//template <class T>
//thrust::pair<T *,T *> Cartesian2Polar(const T * __restrict__ d_x, const T * __restrict__ d_y, const int N, const T a) {
//
// T *d_rho; gpuErrchk(cudaMalloc((void**)&d_rho, N * sizeof(T)));
// T *d_theta; gpuErrchk(cudaMalloc((void**)&d_theta, N * sizeof(T)));
//
// Cartesian2PolarKernel<<<iDivUp(N, BLOCKSIZE_CART2POL), BLOCKSIZE_CART2POL>>>(d_x, d_y, d_rho, d_theta, N, a);
//#ifdef DEBUG
// gpuErrchk(cudaPeekAtLastError());
// gpuErrchk(cudaDeviceSynchronize());
//#endif
//
// return thrust::make_pair(d_rho, d_theta);
//}
//
//template thrust::pair<float *, float *> Cartesian2Polar<float> (const float *, const float *, const int, const float);
//template thrust::pair<double *, double *> Cartesian2Polar<double> (const double *, const double *, const int, const double);
/*******************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - CPU */
/*******************************************************/
//template <class T>
//thrust::pair<T *,T *> h_Cartesian2Polar(const T * __restrict__ h_x, const T * __restrict__ h_y, const int N, const T a) {
//
// T *h_rho = (T *)malloc(N * sizeof(T));
// T *h_theta = (T *)malloc(N * sizeof(T));
//
// for (int i = 0; i < N; i++) {
// h_rho[i] = a * hypot(h_x[i], h_y[i]);
// h_theta[i] = atan2(h_y[i], h_x[i]);
// }
//
// return thrust::make_pair(h_rho, h_theta);
//}
//
//template thrust::pair<float *, float *> h_Cartesian2Polar<float> (const float *, const float *, const int, const float);
//template thrust::pair<double *, double *> h_Cartesian2Polar<double> (const double *, const double *, const int, const double);
/*******************************/
/* COMPUTE L2 NORM OF A VECTOR */
/*******************************/
template<class T>
T h_l2_norm(T *v1, T *v2, const int N) {
T norm = (T)0;
for (int i = 0; i < N; ++i)
{
T d = v1[i] - v2[i];
norm = norm + d * d;
}
return sqrt(norm);
}
template float h_l2_norm<float>(float *, float *, const int);
template double h_l2_norm<double>(double *, double *, const int);
/*******************************/
/* LINEAR COMBINATION FUNCTION */
/*******************************/
void linearCombination(const float * __restrict__ d_coeff, const float * __restrict__ d_basis_functions_real, float * __restrict__ d_linear_combination,
const int N_basis_functions, const int N_sampling_points, const cublasHandle_t handle) {
float alpha = 1.f;
float beta = 0.f;
cublasSafeCall(cublasSgemv(handle, CUBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points,
d_coeff, 1, &beta, d_linear_combination, 1));
}
void linearCombination(const double * __restrict__ d_coeff, const double * __restrict__ d_basis_functions_real, double * __restrict__ d_linear_combination,
const int N_basis_functions, const int N_sampling_points, const cublasHandle_t handle) {
double alpha = 1.;
double beta = 0.;
cublasSafeCall(cublasDgemv(handle, CUBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points,
d_coeff, 1, &beta, d_linear_combination, 1));
}
/******************************/
/* ADD A CONSTANT TO A VECTOR */
/******************************/
#define BLOCKSIZE_VECTORADDCONSTANT 256
template<class T>
__global__ void vectorAddConstantKernel(T * __restrict__ d_in, const T scalar, const int N) {
const int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N) d_in[tid] += scalar;
}
template<class T>
void vectorAddConstant(T * __restrict__ d_in, const T scalar, const int N) {
vectorAddConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORADDCONSTANT), BLOCKSIZE_VECTORADDCONSTANT >> >(d_in, scalar, N);
}
template void vectorAddConstant<float>(float * __restrict__, const float, const int);
template void vectorAddConstant<double>(double * __restrict__, const double, const int);
/*****************************************/
/* MULTIPLY A VECTOR BY A CONSTANT - GPU */
/*****************************************/
#define BLOCKSIZE_VECTORMULCONSTANT 256
template<class T>
__global__ void vectorMulConstantKernel(T * __restrict__ d_in, const T scalar, const int N) {
const int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N) d_in[tid] *= scalar;
}
template<class T>
void vectorMulConstant(T * __restrict__ d_in, const T scalar, const int N) {
vectorMulConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORMULCONSTANT), BLOCKSIZE_VECTORMULCONSTANT >> >(d_in, scalar, N);
}
template void vectorMulConstant<float>(float * __restrict__, const float, const int);
template void vectorMulConstant<double>(double * __restrict__, const double, const int);
/*****************************************/
/* MULTIPLY A VECTOR BY A CONSTANT - CPU */
/*****************************************/
template<class T>
void h_vectorMulConstant(T * __restrict__ h_in, const T scalar, const int N) {
for (int i = 0; i < N; i++) h_in[i] *= scalar;
}
template void h_vectorMulConstant<float>(float * __restrict__, const float, const int);
template void h_vectorMulConstant<double>(double * __restrict__, const double, const int);
/*****************************************************/
/* FUSED MULTIPLY ADD OPERATIONS FOR HOST AND DEVICE */
/*****************************************************/
template<class T>
__host__ __device__ T fma2(T x, T y, T z) { return x * y + z; }
template float fma2<float >(float, float, float);
template double fma2<double>(double, double, double);
/*******************/
/* MODULO FUNCTION */
/*******************/
__device__ int modulo(int val, int _mod)
{
int P;
if (val > 0) { (!(_mod & (_mod - 1)) ? P = val&(_mod - 1) : P = val % (_mod)); return P; }
else
{
(!(_mod & (_mod - 1)) ? P = (-val)&(_mod - 1) : P = (-val) % (_mod));
if (P > 0) return _mod - P;
else return 0;
}
}
/***************************************/
/* ATOMIC ADDITION FUNCTION ON DOUBLES */
/***************************************/
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
register unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
/*********************************/
/* ATOMIC MIN FUNCTION ON FLOATS */
/*********************************/
__device__ float atomicMin(float* address, float val)
{
int* address_as_i = (int*)address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
/*********************/
/* DEGREE TO RADIANS */
/*********************/
double deg2rad(double deg) { return deg*PI_R / 180; }
/*********************/
/* CUDA MEMORY USAGE */
/*********************/
void cudaMemoryUsage() {
size_t free_byte;
size_t total_byte;
gpuErrchk(cudaMemGetInfo(&free_byte, &total_byte));
double free_db = (double)free_byte;
double total_db = (double)total_byte;
double used_db = total_db - free_db;
printf("GPU memory: used = %f, free = %f MB, total available = %f MB\n", used_db / 1024.0 / 1024.0, free_db / 1024.0 / 1024.0, total_db / 1024.0 / 1024.0);
} |
MatKernel.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/reverse.h>
#include <thrust/reduce.h>
#include <thrust/merge.h>
#include <thrust/fill.h>
#if __CUDA_ARCH__ > 200
#define MAXXGRID 2147483647
#else
#define MAXXGRID 65535
#endif
__device__ float op_add(float a, float b) {return a+b;}
__device__ float op_sub(float a, float b) {return a-b;}
__device__ float op_mul(float a, float b) {return a*b;}
__device__ float op_div(float a, float b) {return a/b;}
__device__ float op_gt(float a, float b) {return (a > b) ? 1.0f : 0;}
__device__ float op_lt(float a, float b) {return (a < b) ? 1.0f : 0;}
__device__ float op_eq(float a, float b) {return (a == b) ? 1.0f : 0;}
__device__ float op_ge(float a, float b) {return (a >= b) ? 1.0f : 0;}
__device__ float op_le(float a, float b) {return (a <= b) ? 1.0f : 0;}
__device__ float op_ne(float a, float b) {return (a != b) ? 1.0f : 0;}
__device__ float op_max(float a, float b) {return max(a,b);}
__device__ float op_min(float a, float b) {return min(a,b);}
__device__ float op_atan2(float a, float b) {return atan2f(a, b);}
__device__ float op_pow(float a, float b) {return powf(a, b);}
__device__ int iop_add(int a, int b) {return a+b;}
__device__ int iop_sub(int a, int b) {return a-b;}
__device__ int iop_mul(int a, int b) {return a*b;}
__device__ int iop_div(int a, int b) {return a/b;}
__device__ int iop_gt(int a, int b) {return (a > b) ? 1 : 0;}
__device__ int iop_lt(int a, int b) {return (a < b) ? 1 : 0;}
__device__ int iop_eq(int a, int b) {return (a == b) ? 1 : 0;}
__device__ int iop_ge(int a, int b) {return (a >= b) ? 1 : 0;}
__device__ int iop_le(int a, int b) {return (a <= b) ? 1 : 0;}
__device__ int iop_ne(int a, int b) {return (a != b) ? 1 : 0;}
__device__ int iop_max(int a, int b) {return max(a,b);}
__device__ int iop_min(int a, int b) {return min(a,b);}
__device__ long long lop_add(long long a, long long b) {return a+b;}
__device__ long long lop_sub(long long a, long long b) {return a-b;}
__device__ long long lop_mul(long long a, long long b) {return a*b;}
__device__ long long lop_div(long long a, long long b) {return a/b;}
__device__ long long lop_gt(long long a, long long b) {return (a > b) ? 1 : 0;}
__device__ long long lop_lt(long long a, long long b) {return (a < b) ? 1 : 0;}
__device__ long long lop_eq(long long a, long long b) {return (a == b) ? 1 : 0;}
__device__ long long lop_ge(long long a, long long b) {return (a >= b) ? 1 : 0;}
__device__ long long lop_le(long long a, long long b) {return (a <= b) ? 1 : 0;}
__device__ long long lop_ne(long long a, long long b) {return (a != b) ? 1 : 0;}
__device__ long long lop_max(long long a, long long b) {return max(a,b);}
__device__ long long lop_min(long long a, long long b) {return min(a,b);}
typedef float (*optype)(float,float);
typedef int (*ioptype)(int,int);
typedef long long (*loptype)(long long,long long);
// Check reducevec if these ever get changed.
__device__ const optype operators[] = {
op_add,
op_sub,
op_mul,
op_div,
op_gt,
op_lt,
op_eq,
op_ge,
op_le,
op_ne,
op_max,
op_min,
op_atan2,
op_pow};
__device__ const ioptype ioperators[] = {
iop_add,
iop_sub,
iop_mul,
iop_div,
iop_gt,
iop_lt,
iop_eq,
iop_ge,
iop_le,
iop_ne,
iop_max,
iop_min};
__device__ const loptype loperators[] = {
lop_add,
lop_sub,
lop_mul,
lop_div,
lop_gt,
lop_lt,
lop_eq,
lop_ge,
lop_le,
lop_ne,
lop_max,
lop_min};
__device__ float fn_abs(float a) {return abs(a);}
__device__ float fn_exp(float a) {return expf(a);}
__device__ float fn_log(float a) {return logf(a);}
__device__ float fn_expm1(float a) {return expm1f(a);}
__device__ float fn_sqrt(float a) {return sqrtf(a);}
__device__ float fn_ln(float a) {return logf(a);}
__device__ float fn_log10(float a) {return log10f(a);}
__device__ float fn_log1p(float a) {return log1pf(a);}
__device__ float fn_cos(float a) {return cosf(a);}
__device__ float fn_sin(float a) {return sinf(a);}
__device__ float fn_tan(float a) {return tanf(a);}
__device__ float fn_cosh(float a) {return coshf(a);}
__device__ float fn_sinh(float a) {return sinhf(a);}
__device__ float fn_tanh(float a) {return tanhf(a);}
__device__ float fn_acos(float a) {return acosf(a);}
__device__ float fn_asin(float a) {return asinf(a);}
__device__ float fn_atan(float a) {return atanf(a);}
__device__ float fn_acosh(float a) {return acoshf(a);}
__device__ float fn_asinh(float a) {return asinhf(a);}
__device__ float fn_atanh(float a) {return atanhf(a);}
__device__ float fn_erf(float a) {return erff(a);}
__device__ float fn_erfinv(float a) {return erfinvf(a);}
__device__ float fn_erfc(float a) {return erfcf(a);}
__device__ float fn_erfcinv(float a) {return erfcinvf(a);}
__device__ float fn_gammaln(float a) {return lgammaf(a);}
__device__ float fn_gamma(float a) {return tgammaf(a);}
__device__ float fn_ceil(float a) {return ceilf(a);}
__device__ float fn_floor(float a) {return floorf(a);}
__device__ float fn_round(float a) {return roundf(a);}
__device__ float fn_trunc(float a) {return truncf(a);}
__device__ float fn_sign(float a) {return (a>0) ? 1.0f : ((a<0) ? -1.0f : 0);}
__device__ float fn_j0(float a) {return j0f(a);}
__device__ float fn_j1(float a) {return j1f(a);}
//__device__ float fn_jn(float a) {return jnf(a);}
__device__ float fn_y0(float a) {return y0f(a);}
__device__ float fn_y1(float a) {return y1f(a);}
//__device__ float fn_yn(float a) {return ynf(a);}
__device__ float fn_exppsi(float a) {return (a<1.0f) ? 0.5f*a*a : a-0.5f;}
__device__ float fn_atan2(float a, float b) {return atan2f(a, b);}
__device__ float fn_pow(float a, float b) {return powf(a, b);}
typedef float (*fntype)(float);
__device__ const fntype fctns[35] = {
fn_abs,
fn_exp,
fn_expm1,
fn_sqrt,
fn_ln,
fn_log10,
fn_log1p,
fn_cos,
fn_sin,
fn_tan,
fn_cosh,
fn_sinh,
fn_tanh,
fn_acos,
fn_asin,
fn_atan,
fn_acosh,
fn_asinh,
fn_atanh,
fn_erf,
fn_erfinv,
fn_erfc,
fn_erfcinv,
fn_gammaln,
fn_gamma,
fn_ceil,
fn_floor,
fn_round,
fn_trunc,
fn_sign,
fn_j0,
fn_j1,
fn_y0,
fn_y1,
fn_exppsi};
__device__ const optype fctns2[2] = {
fn_atan2,
fn_pow};
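// Apply the unary function fctns[opn] elementwise, B[i] = fn(A[i]), using a grid-stride loop
// over a (possibly 2-D) grid of blocks.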
__global__ void __apply_gfun(float *A, float *B, int N, int opn) {
fntype fn = fctns[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = fn(A[i]);
}
}
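// Pick a launch configuration that covers N elements: block and thread counts are doubled in turn
// (blocks first up to 16, then threads up to 1024, then blocks again), and the resulting block
// count is folded into a 2-D grid to stay under the 65536 limit per grid dimension.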
void setsizes(int N, dim3 *gridp, int *nthreadsp) {
int nblocks = 1;
int nthreads = 1;
while (nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < 1024) {
nthreads = 2*nthreads;
} else {
nblocks = 2*nblocks;
}
}
gridp->y = 1 + (nblocks-1)/65536;
gridp->x = 1 + (nblocks-1)/gridp->y;
gridp->z = 1;
*nthreadsp = nthreads;
}
int apply_gfun(float *A, float *B, int N, int opn) {
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
hipLaunchKernelGGL(( __apply_gfun), dim3(griddims),dim3(nthreads), 0, 0, A, B, N, opn);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
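// Example (sketch): apply expf elementwise to N floats already resident on the device.
// d_in and d_out are assumed to be valid device pointers; opn = 1 selects fn_exp in fctns[].
//   apply_gfun(d_in, d_out, N, 1);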
__global__ void __toFloat(int *A, float *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (float)(A[i]);
}
}
__global__ void __longToFloat(long long *A, float *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (float)(A[i]);
}
}
__global__ void __floatToLong(float *A, long long *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
    B[i] = (long long)(A[i]);
}
}
__global__ void __toInt(float *A, int *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (int)(A[i]);
}
}
int toFloat(int *A, float *B, int N) {
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
hipLaunchKernelGGL(( __toFloat), dim3(griddims),dim3(nthreads), 0, 0, A, B, N);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int longToFloat(long long *A, float *B, int N) {
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
hipLaunchKernelGGL(( __longToFloat), dim3(griddims),dim3(nthreads), 0, 0, A, B, N);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int floatToLong(float *A, long long *B, int N) {
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
hipLaunchKernelGGL(( __floatToLong), dim3(griddims),dim3(nthreads), 0, 0, A, B, N);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int toInt(float *A, int *B, int N) {
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
hipLaunchKernelGGL(( __toInt), dim3(griddims),dim3(nthreads), 0, 0, A, B, N);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
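// Scatter a sparse matrix given in coordinate form (row indices ir, column indices ic, values data)
// into the dense column-major array od. Positions not covered by the nnz triples are left untouched,
// so od should be cleared beforehand if a full dense copy is wanted.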
__global__ void __full(int *ir, int *ic, float *data, float *od, int nrows, int ncols, int nnz) {
int i, row, col;
float v;
int id = threadIdx.x + blockIdx.x * blockDim.x;
for (i = id; i < nnz; i += blockDim.x * gridDim.x) {
v = data[i];
row = ir[i];
col = ic[i];
od[row + col * nrows] = v;
}
}
int full(int *ir, int *ic, float *data, float *od, int nrows, int ncols, int nnz) {
int nblocks = min(32, 1+(nnz-1)/32);
int nthreads = min(1+(nnz-1)/nblocks, 1024);
hipLaunchKernelGGL(( __full), dim3(nblocks),dim3(nthreads), 0, 0, ir, ic, data, od, nrows, ncols, nnz);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __initSeq(int *A, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = i % nrows;
}
}
int initSeq(int *A, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __initSeq), dim3(griddims),dim3(nthreads), 0, 0, A, nrows, ncols);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __apply_gfun2(float *A, float *B, float *C, int N, int opn) {
optype fn = fctns2[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = fn(A[i], B[i]);
}
}
int apply_gfun2(float *A, float *B, float *C, int N, int opn) {
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
hipLaunchKernelGGL(( __apply_gfun2), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, N, opn);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
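// Elementwise binary operators C = op(A,B) with broadcasting. __apply_full handles equal shapes;
// the *_col/*_row variants broadcast a column or row vector from the right (B) or left (A) operand;
// the *_val variants broadcast a single scalar read from B[0] or A[0].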
__global__ void __apply_full(float *A, float *B, float *C, int N, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i]);
}
}
__global__ void __apply_right_col(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i % nrows]);
}
}
__global__ void __apply_right_row(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i / nrows]);
}
}
__global__ void __apply_left_col(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i % nrows],B[i]);
}
}
__global__ void __apply_left_row(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i / nrows],B[i]);
}
}
__global__ void __apply_right_val(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
float val = B[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],val);
}
}
__global__ void __apply_left_val(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
float val = A[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(val,B[i]);
}
}
__global__ void __set_val(float *A, float val, int length) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < length; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = val;
}
}
__global__ void __set_lval(long long *A, long long val, int length) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < length; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = val;
}
}
int set_val(float *A, float val, int length) {
int nthreads;
dim3 griddims;
setsizes(length, &griddims, &nthreads);
hipLaunchKernelGGL(( __set_val), dim3(griddims),dim3(nthreads), 0, 0, A, val, length);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int set_ival(float *A, int val, int length) {
int nthreads;
dim3 griddims;
setsizes(length, &griddims, &nthreads);
hipLaunchKernelGGL(( __set_val), dim3(griddims),dim3(nthreads), 0, 0, A, *((float *)&val), length);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int set_lval(long long *A, long long val, int length) {
int nthreads;
dim3 griddims;
setsizes(length, &griddims, &nthreads);
hipLaunchKernelGGL(( __set_lval), dim3(griddims),dim3(nthreads), 0, 0, A, val, length);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
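// Dispatch on operand shapes: full elementwise when A and B match, row/column broadcast when one
// operand is a vector, and scalar broadcast when one operand is 1x1. Shape combinations not listed
// below fall through without launching a kernel.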
int apply_binop(float *A, int Anrows, int Ancols,
float *B, int Bnrows, int Bncols, float *C, int opn) {
int N = max(Anrows, Bnrows)*max(Ancols, Bncols);
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
if (Anrows == Bnrows && Ancols == Bncols) {
hipLaunchKernelGGL(( __apply_full), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, N, opn);
} else if (Anrows == Bnrows && Bncols == 1) {
hipLaunchKernelGGL(( __apply_right_col), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Ancols == Bncols && Bnrows == 1) {
hipLaunchKernelGGL(( __apply_right_row), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Anrows == Bnrows && Ancols == 1) {
hipLaunchKernelGGL(( __apply_left_col), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
} else if (Ancols == Bncols && Anrows == 1) {
hipLaunchKernelGGL(( __apply_left_row), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
} else if (Bnrows == 1 && Bncols == 1) {
hipLaunchKernelGGL(( __apply_right_val), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Anrows == 1 && Ancols == 1) {
hipLaunchKernelGGL(( __apply_left_val), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
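// Example (sketch): C = A + B for two nrows x ncols device matrices.
// opn = 0 selects op_add in the operators[] table above; d_A, d_B, d_C are assumed device pointers.
//   apply_binop(d_A, nrows, ncols, d_B, nrows, ncols, d_C, 0);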
__global__ void __sdoprow(int nrows, int ncols, int nnz, float *A, int *Aic, float *B, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) {
int col = Aic[i];
float oldA = A[i];
A[i] = op(oldA,B[col]);
}
}
__global__ void __sdopcol(int nrows, int ncols, int nnz, float *A, int *Air, float *B, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) {
int row = Air[i];
float oldA = A[i];
A[i] = op(oldA,B[row]);
}
}
__global__ void __sdopval(int nnz, float *A, float *B, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
float bval = B[0];
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) {
float oldA = A[i];
A[i] = op(oldA,bval);
}
}
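// sdoprow/sdopcol update the nonzeros of a sparse matrix in place: each value is combined (via
// operators[opn]) with the entry of B for its column or row respectively; when B has length 1 the
// scalar variant __sdopval is used instead.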
int sdoprow(int nrows, int ncols, int nnz, float *A, int *Aic,
float *B, int len, int opn) {
int nthreads;
dim3 griddims;
setsizes(nnz, &griddims, &nthreads);
if (len > 1) {
hipLaunchKernelGGL(( __sdoprow), dim3(griddims),dim3(nthreads), 0, 0, nrows, ncols, nnz, A, Aic, B, opn);
} else {
hipLaunchKernelGGL(( __sdopval), dim3(griddims),dim3(nthreads), 0, 0, nnz, A, B, opn);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int sdopcol(int nrows, int ncols, int nnz, float *A, int *Air,
float *B, int len, int opn) {
int nthreads;
dim3 griddims;
setsizes(nnz, &griddims, &nthreads);
if (len > 1) {
hipLaunchKernelGGL(( __sdopcol), dim3(griddims),dim3(nthreads), 0, 0, nrows, ncols, nnz, A, Air, B, opn);
} else {
hipLaunchKernelGGL(( __sdopval), dim3(griddims),dim3(nthreads), 0, 0, nnz, A, B, opn);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __apply_full_int(int *A, int *B, int *C, int N, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i]);
}
}
__global__ void __apply_right_col_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i % nrows]);
}
}
__global__ void __apply_right_row_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i / nrows]);
}
}
__global__ void __apply_left_col_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i % nrows],B[i]);
}
}
__global__ void __apply_left_row_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i / nrows],B[i]);
}
}
__global__ void __apply_right_val_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int val = B[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],val);
}
}
__global__ void __apply_left_val_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int val = A[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(val,B[i]);
}
}
__global__ void __apply_full_long(long long *A, long long *B, long long *C, int N, int opn) {
loptype op = loperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i]);
}
}
__global__ void __apply_right_col_long(long long *A, long long *B, long long *C, int nrows, int ncols, int opn) {
loptype op = loperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i % nrows]);
}
}
__global__ void __apply_right_row_long(long long *A, long long *B, long long *C, int nrows, int ncols, int opn) {
loptype op = loperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i / nrows]);
}
}
__global__ void __apply_left_col_long(long long *A, long long *B, long long *C, int nrows, int ncols, int opn) {
loptype op = loperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i % nrows],B[i]);
}
}
__global__ void __apply_left_row_long(long long *A, long long *B, long long *C, int nrows, int ncols, int opn) {
loptype op = loperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i / nrows],B[i]);
}
}
__global__ void __apply_right_val_long(long long *A, long long *B, long long *C, int nrows, int ncols, int opn) {
loptype op = loperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  long long val = B[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],val);
}
}
__global__ void __apply_left_val_long(long long *A, long long *B, long long *C, int nrows, int ncols, int opn) {
loptype op = loperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  long long val = A[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(val,B[i]);
}
}
int apply_biniop(int *A, int Anrows, int Ancols,
int *B, int Bnrows, int Bncols,
int *C, int opn) {
int N = max(Anrows, Bnrows)*max(Ancols, Bncols);
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
if (Anrows == Bnrows && Ancols == Bncols) {
hipLaunchKernelGGL(( __apply_full_int), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, N, opn);
} else if (Anrows == Bnrows && Bncols == 1) {
hipLaunchKernelGGL(( __apply_right_col_int), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Ancols == Bncols && Bnrows == 1) {
hipLaunchKernelGGL(( __apply_right_row_int), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Anrows == Bnrows && Ancols == 1) {
hipLaunchKernelGGL(( __apply_left_col_int), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
} else if (Ancols == Bncols && Anrows == 1) {
hipLaunchKernelGGL(( __apply_left_row_int), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
} else if (Bnrows == 1 && Bncols == 1) {
hipLaunchKernelGGL(( __apply_right_val_int), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Anrows == 1 && Ancols == 1) {
hipLaunchKernelGGL(( __apply_left_val_int), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int apply_binlop(long long *A, int Anrows, int Ancols,
long long *B, int Bnrows, int Bncols,
long long *C, int opn) {
int N = max(Anrows, Bnrows)*max(Ancols, Bncols);
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
if (Anrows == Bnrows && Ancols == Bncols) {
hipLaunchKernelGGL(( __apply_full_long), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, N, opn);
} else if (Anrows == Bnrows && Bncols == 1) {
hipLaunchKernelGGL(( __apply_right_col_long), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Ancols == Bncols && Bnrows == 1) {
hipLaunchKernelGGL(( __apply_right_row_long), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Anrows == Bnrows && Ancols == 1) {
hipLaunchKernelGGL(( __apply_left_col_long), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
} else if (Ancols == Bncols && Anrows == 1) {
hipLaunchKernelGGL(( __apply_left_row_long), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
} else if (Bnrows == 1 && Bncols == 1) {
hipLaunchKernelGGL(( __apply_right_val_long), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Anrows == 1 && Ancols == 1) {
hipLaunchKernelGGL(( __apply_left_val_long), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
// Implement B[I,J] = A
// indexed copy: version with one block per column
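// Suffix convention for the generated kernels: 'n' means an explicit index array is used for that
// dimension (I for rows, J for columns), 'x' means the identity mapping (a NULL index argument),
// and a trailing 'l' marks the long long variants.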
#define COPYTOINDS2DA(DFNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __copyToInds2D##DFNAME(ETYPE *A, int lda, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[IEXPR + icol * ldb] = A[i + iblock * lda]; \
} \
} \
}
COPYTOINDS2DA(nn,I[i],J[iblock],float)
COPYTOINDS2DA(xn,i,J[iblock],float)
COPYTOINDS2DA(nx,I[i],iblock,float)
COPYTOINDS2DA(xx,i,iblock,float)
COPYTOINDS2DA(nnl,I[i],J[iblock],long long)
COPYTOINDS2DA(xnl,i,J[iblock],long long)
COPYTOINDS2DA(nxl,I[i],iblock,long long)
COPYTOINDS2DA(xxl,i,iblock,long long)
// Implement B[I,J] = A
// indexed copy: version with one thread per element
#define COPYTOINDS2DB(DFNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __copyToInds2DB##DFNAME(ETYPE *A, int lda, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[IEXPR + JEXPR * ldb] = A[irow + icol * lda]; \
} \
}
COPYTOINDS2DB(nn,I[irow],J[icol],float)
COPYTOINDS2DB(xn,irow,J[icol],float)
COPYTOINDS2DB(nx,I[irow],icol,float)
COPYTOINDS2DB(xx,irow,icol,float)
COPYTOINDS2DB(nnl,I[irow],J[icol],long long)
COPYTOINDS2DB(xnl,irow,J[icol],long long)
COPYTOINDS2DB(nxl,I[irow],icol,long long)
COPYTOINDS2DB(xxl,irow,icol,long long)
// Implement B[I,J] = A
int copyToInds2D(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = min(len, max(32, min(1024, nrows)));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2Dxx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2Dxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2Dnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2Dnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2DBxx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2DBxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2DBnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2DBnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int copyToInds2DLong(long long *A, int lda, long long *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = min(len, max(32, min(1024, nrows)));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2Dxxl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2Dxnl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2Dnxl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2Dnnl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2DBxxl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2DBxnl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2DBnxl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2DBnnl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
// Implement B = A[I,J]
// indexed copy: version with one block per column
#define COPYFROMINDS2DA(FNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __copyFromInds2D##FNAME(ETYPE *A, int lda, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[i + iblock * ldb] = A[IEXPR + icol * lda]; \
} \
} \
}
COPYFROMINDS2DA(nn,I[i],J[iblock],float)
COPYFROMINDS2DA(xn,i,J[iblock],float)
COPYFROMINDS2DA(nx,I[i],iblock,float)
COPYFROMINDS2DA(xx,i,iblock,float)
COPYFROMINDS2DA(nnl,I[i],J[iblock],long long)
COPYFROMINDS2DA(xnl,i,J[iblock],long long)
COPYFROMINDS2DA(nxl,I[i],iblock,long long)
COPYFROMINDS2DA(xxl,i,iblock,long long)
// Implement B = A[I,J]
// indexed copy: version with one thread per element
#define COPYFROMINDS2DB(FNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __copyFromInds2DB##FNAME(ETYPE *A, int lda, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[irow + icol * ldb] = A[IEXPR + JEXPR * lda]; \
} \
}
COPYFROMINDS2DB(nn,I[irow],J[icol],float)
COPYFROMINDS2DB(xn,irow,J[icol],float)
COPYFROMINDS2DB(nx,I[irow],icol,float)
COPYFROMINDS2DB(xx,irow,icol,float)
COPYFROMINDS2DB(nnl,I[irow],J[icol],long long)
COPYFROMINDS2DB(xnl,irow,J[icol],long long)
COPYFROMINDS2DB(nxl,I[irow],icol,long long)
COPYFROMINDS2DB(xxl,irow,icol,long long)
// Implement B = A[I,J]
int copyFromInds2D(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = min(len, max(32, min(1024, nrows)));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2Dxx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2Dxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2Dnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2Dnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2DBxx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2DBxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2DBnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2DBnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int copyFromInds2DLong(long long *A, int lda, long long *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = min(len, max(32, min(1024, nrows)));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2Dxxl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2Dxnl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2Dnxl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2Dnnl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2DBxxl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2DBxnl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2DBnxl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2DBnnl), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
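// Dense-sparse product C += A * B, where B is sparse in coordinate form (Bdata, Bir, Bic).
// The nnz entries are split evenly across blocks; per-row partial sums are kept in registers and
// flushed with atomicAdd whenever the column index changes. __dsmultx is the variant used for
// narrow A (nrows < 128), assigning one strip of nonzeros per (threadIdx.y, blockIdx.x) pair.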
__global__ void __dsmult(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
float sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[i + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[i + nrows * Bic[j]], sum);
sum = 0;
}
}
}
}
__global__ void __dsmultx(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
float sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[threadIdx.x + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[threadIdx.x + nrows * Bic[j]], sum);
sum = 0;
}
}
}
int dsmult(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
hipLaunchKernelGGL(( __dsmultx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
hipLaunchKernelGGL(( __dsmult), dim3(nblocks),dim3(nthreads), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int dsmult_tune(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C, int nblocks, int nthreads) {
hipLaunchKernelGGL(( __dsmult), dim3(nblocks),dim3(nthreads), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int dsmultx_tune(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C, int nblocks, int nthreadsx, int nthreadsy) {
dim3 threadDim(nthreadsx, nthreadsy, 1);
hipLaunchKernelGGL(( __dsmultx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __dsmultT(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
float aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[i + nrows * Bic[j]];
}
atomicAdd(&C[i + nrows * Bir[j]], aval * Bdata[j]);
}
}
}
__global__ void __dsmultTx(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
float aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[threadIdx.x + nrows * Bic[j]];
}
atomicAdd(&C[threadIdx.x + nrows * Bir[j]], aval * Bdata[j]);
}
}
int dsmultT(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
hipLaunchKernelGGL(( __dsmultTx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
hipLaunchKernelGGL(( __dsmultT), dim3(nblocks),dim3(nthreads), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __spsum1(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Aic[i]], P[i]);
}
}
__global__ void __spsum2(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Air[i]], P[i]);
}
}
int spsum(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B, int n) {
int nthreads = min(128, nnz);
int nblks = min(65536, max(1, (nnz-1) / 128));
if (n == 1) {
hipLaunchKernelGGL(( __spsum1), dim3(nblks),dim3(nthreads), 0, 0, nrows, ncols, nnz, Air, Aic, P, B);
} else {
hipLaunchKernelGGL(( __spsum2), dim3(nblks),dim3(nthreads), 0, 0, nrows, ncols, nnz, Air, Aic, P, B);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
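// dds computes "sampled" dot products: for each sparse position j given by (Cir[j], Cic[j]),
// P[j] accumulates the dot product of column Cir[j] of A with column Cic[j] of B. __dds0 is the
// variant driven by column pointers Cjc. Forward declarations precede the architecture-dependent
// definitions below.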
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P);
__global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cic, float *P);
__global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn);
__global__ void __reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr);
#define DDS_BLKY 32
#if __CUDA_ARCH__ > 200
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
float sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
for (int i = 1; i < blockDim.x; i *= 2) {
float tmp = __shfl_down(sum, i);
if (threadIdx.x + i < blockDim.x) sum = sum + tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&P[j], sum);
}
}
}
__global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cjc, float *P) {
__shared__ float merge[32];
int jstart = ((long long)blockIdx.x) * ncols / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int aoff, boff;
float user, prod, sum, bsum;
for (int j0 = jstart; j0 < jend ; j0++) {
boff = nrows * j0;
user = B[tid + boff];
for (int j = Cjc[j0]; j < Cjc[j0+1]; j++) {
aoff = nrows * Cir[j];
prod = A[tid + aoff] * user;
sum = prod + __shfl_down(prod, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
bsum = __shfl(sum, 0);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
merge[threadIdx.x] = bsum;
}
__syncthreads();
if (threadIdx.y == 0) {
sum = merge[threadIdx.x];
sum = sum + __shfl_down(sum, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
if (threadIdx.x == 0) {
P[j] = sum;
}
}
}
}
}
#else
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
__shared__ float parts[32*DDS_BLKY];
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
float sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
parts[tid] = sum;
for (int i = 1; i < blockDim.x * blockDim.y; i *= 2) {
__syncthreads();
if (i + tid < blockDim.x * blockDim.y) {
parts[tid] = parts[tid] + parts[i + tid];
}
}
__syncthreads();
if (tid == 0) {
P[j] = parts[0];
}
__syncthreads();
}
}
__global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cjc, float *P) {}
#endif
int dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
dim3 blockDims(min(32,nrows), min(DDS_BLKY, 1+(nrows-1)/64), 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,nnz/128));
hipLaunchKernelGGL(( __dds), dim3(nblocks),dim3(blockDims), 0, 0, nrows, nnz, A, B, Cir, Cic, P);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cic, float *P) {
dim3 blockDims(32, 32, 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,ncols/64));
hipLaunchKernelGGL(( __dds0), dim3(nblocks),dim3(blockDims), 0, 0, nrows, ncols, A, B, Cir, Cic, P);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
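// Column-wise reduction B[icol] = op-reduction of A[:, icol]. On newer architectures the
// per-column value is combined across a warp with __shfl_down; the fallback path uses a padded
// shared-memory tile instead.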
#if __CUDA_ARCH__ > 200
__global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn) {
optype op = operators[opn];
int basecol = threadIdx.y + blockDim.y * blockIdx.x;
for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) {
float v = A[threadIdx.x + icol * nrows];
for (int i = threadIdx.x + blockDim.x; i < nrows; i += blockDim.x) {
v = op(v, A[i + icol * nrows]);
}
for (int i = 1; i < blockDim.x; i *= 2) {
v = op(v, __shfl_down(v, i));
}
if (threadIdx.x == 0) {
B[icol] = v;
}
}
}
#else
__global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn) {
__shared__ float parts[32][33];
optype op = operators[opn];
  for (int icol = threadIdx.y + blockIdx.x * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) {
float v = A[threadIdx.x + icol * nrows];
for (int irow = threadIdx.x + blockDim.x; irow < nrows; irow += blockDim.x) {
v = op(v, A[irow + icol * nrows]);
}
parts[threadIdx.x][threadIdx.y] = v;
for (int i = 1; i < blockDim.x; i *= 2) {
if (i + threadIdx.x < blockDim.x) {
parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]);
}
}
if (threadIdx.x == 0) {
B[icol] = parts[0][threadIdx.y];
}
__syncthreads();
}
}
#endif
template<typename T>
void reducevec(int n, T *A, T *B, int opn) {
thrust::device_ptr<T> pa(A);
thrust::device_ptr<T> pb(B);
T v;
switch (opn) {
case 0 : // sum
v = thrust::reduce(pa, pa + n);
thrust::fill(pb, pb + 1, v);
break;
case 10 : // max
v = thrust::reduce(pa, pa + n, std::numeric_limits<T>::min(), thrust::maximum<T>());
thrust::fill(pb, pb + 1, v);
break;
case 11: // min
v = thrust::reduce(pa, pa + n, std::numeric_limits<T>::max(), thrust::minimum<T>());
thrust::fill(pb, pb + 1, v);
break;
}
}
int reduce1op(int nrows, int ncols, float *A, float *B, int opn) {
if (ncols == 1) {
reducevec<float>(nrows, A, B, opn);
} else {
int blkx = min(32, nrows);
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
hipLaunchKernelGGL(( __reduce1op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, opn);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
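// Example (sketch): column sums of an nrows x ncols device matrix into a length-ncols vector.
// opn = 0 selects op_add in operators[]; d_A and d_colsums are assumed device pointers.
//   reduce1op(nrows, ncols, d_A, d_colsums, 0);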
#if __CUDA_ARCH__ > 200
__global__ void __reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) {
optype opbf = operators[opb];
optype oprf = operators[opr];
int basecol = threadIdx.y + blockDim.y * blockIdx.x;
for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) {
float v = 0;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
v = oprf(v, opbf(A[i + icol * nrows], B[i + icol * nrows]));
}
for (int i = 1; i < blockDim.x; i *= 2) {
v = oprf(v, __shfl_down(v, i));
}
if (threadIdx.x == 0) {
C[icol] = v;
}
}
}
#else
__global__ void __reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) {
__shared__ float parts[32][33];
optype opbf = operators[opb];
optype oprf = operators[opr];
  for (int icol = threadIdx.y + blockIdx.x * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) {
float v = 0;
for (int irow = threadIdx.x; irow < nrows; irow += blockDim.x) {
v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows]));
}
parts[threadIdx.x][threadIdx.y] = v;
for (int i = 1; i < blockDim.x; i *= 2) {
if (i + threadIdx.x < blockDim.x) {
parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]);
}
}
if (threadIdx.x == 0) {
C[icol] = parts[0][threadIdx.y];
}
__syncthreads();
}
}
#endif
int reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) {
int blkx = min(32, nrows);
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
hipLaunchKernelGGL(( __reducebin1op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, C, opb, opr);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
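// Tiled matrix transpose: a BLOCKDIM x BLOCKDIM tile is staged in shared memory (padded to
// BLOCKDIM+1 columns to avoid bank conflicts) so that both the reads from 'in' and the writes to
// 'out' are coalesced.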
#define BLOCKDIM 32
__global__ void __transpose(float *in, int instride, float *out, int outstride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x+xb + y*instride];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
}
int transpose(float *in, int instride, float *out, int outstride, int nrows, int ncols) {
int gridx = min(32, 1+(nrows-1)/256);
int gridy = min(32, 1+(ncols-1)/256);
const dim3 griddims(gridx, gridy, 1);
const dim3 blockdims(BLOCKDIM,16,1);
hipError_t err;
int dev = -1;
hipGetDevice(&dev);
hipLaunchKernelGGL(( __transpose), dim3(griddims),dim3(blockdims), 0, 0, in, instride, out, outstride, nrows, ncols);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
    fprintf(stderr, "cuda error device %d in transpose of %dx%d matrix\n", dev, nrows, ncols);
return err;
}
return 0;
}
__global__ void __reduce2op(int nrows, int ncols, float *A, float *B, int opn) {
__shared__ float parts[32][33];
optype op = operators[opn];
int baserow = threadIdx.x + blockDim.x * blockIdx.x;
for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) {
float v = A[irow + threadIdx.y * nrows];
for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) {
v = op(v, A[irow + icol * nrows]);
}
parts[threadIdx.x][threadIdx.y] = v;
__syncthreads();
float newv = 0;
for (int i = 1; i < blockDim.y; i *= 2) {
if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y];
__syncthreads();
if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], newv);
__syncthreads();
}
if (threadIdx.y == 0) {
B[irow] = parts[threadIdx.x][0];
}
__syncthreads();
}
}
int reduce2op(int nrows, int ncols, float *A, float *B, int opn) {
if (nrows == 1) {
reducevec<float>(ncols, A, B, opn);
} else {
int blkx = min(32, nrows);
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
hipLaunchKernelGGL(( __reduce2op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, opn);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __reducebin2op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) {
__shared__ float parts[32][33];
optype opbf = operators[opb];
optype oprf = operators[opr];
int baserow = threadIdx.x + blockDim.x * blockIdx.x;
for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) {
float v = opbf(A[irow + threadIdx.y * nrows], B[irow + threadIdx.y * nrows]);
for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) {
v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows]));
}
parts[threadIdx.x][threadIdx.y] = v;
__syncthreads();
float newv = 0;
for (int i = 1; i < blockDim.y; i *= 2) {
if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y];
__syncthreads();
if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], newv);
__syncthreads();
}
if (threadIdx.y == 0) {
C[irow] = parts[threadIdx.x][0];
}
__syncthreads();
}
}
int reducebin2op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) {
int blkx = min(32, nrows);
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
hipLaunchKernelGGL(( __reducebin2op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, C, opb, opr);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
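// embedmat packs each float into the low 32 bits of a long long (the bits of negative values are
// remapped so that integer comparison preserves float ordering) and the 1-based column index
// (embedmat2d) or a caller-supplied integer (embedmat) into the high 32 bits; extractmat reverses
// the transform.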
__global__ void __embedmat2d(float *a, long long *b, int nrows, int ncols) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) {
float v = a[i];
int vi = *((int *)&v);
if (vi & signbit) {
vi = -(vi & mag);
}
b[i] = (long long)vi + (((long long)(i/nrows+1))<<32);
}
}
__global__ void __embedmat(float *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
float v = a[i];
int vi = *((int *)&v);
if (vi & signbit) {
vi = -(vi & mag);
}
c[i] = (long long)vi + (((long long)b[i])<<32);
}
}
int embedmat2d(float *a, long long *b, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __embedmat2d), dim3(griddims),dim3(nthreads), 0, 0, a, b, nrows, ncols);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int embedmat(float *a, int *b, long long *c, int n) {
int nthreads;
dim3 griddims;
setsizes(n, &griddims, &nthreads);
hipLaunchKernelGGL(( __embedmat), dim3(griddims),dim3(nthreads), 0, 0, a, b, c, n);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __extractmat2d(float *a, long long *b, int nrows, int ncols) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&b[i]);
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((float *)&vi);
}
}
__global__ void __extractmat(float *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&c[i]);
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((float *)&vi);
b[i] = *(((int *)&c[i])+1);
}
}
int extractmat2d(float *a, long long *b, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __extractmat2d), dim3(griddims),dim3(nthreads), 0, 0, a, b, nrows, ncols);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int extractmat(float *a, int *b, long long *c, int n) {
int nthreads;
dim3 griddims;
setsizes(n, &griddims, &nthreads);
hipLaunchKernelGGL(( __extractmat), dim3(griddims),dim3(nthreads), 0, 0, a, b, c, n);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int fsort2d(float *pkeys, unsigned int *pvals, int nrows, int ncols, int asc) {
for (int i = 0; i < ncols; i++) {
thrust::device_ptr<float> keys(pkeys+i*nrows);
thrust::device_ptr<unsigned int> vals(pvals+i*nrows);
if (asc > 0) {
thrust::sort_by_key(keys, keys + nrows, vals);
} else {
thrust::sort_by_key(keys, keys + nrows, vals, thrust::greater<float>());
}
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int isort(int *pkeys, int N, int asc) {
thrust::device_ptr<int> keys(pkeys);
if (asc > 0) {
thrust::sort(keys, keys + N);
} else {
thrust::sort(keys, keys + N, thrust::greater<int>());
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int fsort(float *pkeys, int N, int asc) {
thrust::device_ptr<float> keys(pkeys);
if (asc > 0) {
thrust::sort(keys, keys + N);
} else {
    thrust::sort(keys, keys + N, thrust::greater<float>());
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int isortk(int *pkeys, unsigned int *pvals, int N, int asc) {
thrust::device_ptr<int> keys(pkeys);
thrust::device_ptr<unsigned int> vals(pvals);
if (asc > 0) {
thrust::sort_by_key(keys, keys + N, vals);
} else {
thrust::sort_by_key(keys, keys + N, vals, thrust::greater<int>());
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int fsorts(float *pkeys, unsigned int *pvals, int *jc, int m, int asc) {
for (int i = 0; i < m; i++) {
thrust::device_ptr<float> keys(pkeys + jc[i]);
thrust::device_ptr<unsigned int> vals(pvals + jc[i]);
int b = jc[i+1] - jc[i];
if (asc > 0) {
thrust::sort_by_key(keys, keys + b, vals);
} else {
thrust::sort_by_key(keys, keys + b, vals, thrust::greater<float>());
}
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int dsortk(double *pkeys, unsigned int *pvals, int N, int asc) {
thrust::device_ptr<double> keys(pkeys);
thrust::device_ptr<unsigned int> vals(pvals);
if (asc > 0) {
thrust::sort_by_key(keys, keys + N, vals);
} else {
thrust::sort_by_key(keys, keys + N, vals, thrust::greater<double>());
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int lsortk(long long *pkeys, unsigned int *pvals, int N, int asc) {
thrust::device_ptr<long long> keys(pkeys);
thrust::device_ptr<unsigned int> vals(pvals);
if (asc > 0) {
thrust::sort_by_key(keys, keys + N, vals);
} else {
thrust::sort_by_key(keys, keys + N, vals, thrust::greater<long long>());
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int lsort(long long *pkeys, int N, int asc) {
thrust::device_ptr<long long> keys(pkeys);
if (asc > 0) {
thrust::sort(keys, keys + N);
} else {
thrust::sort(keys, keys + N, thrust::greater<long long>());
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
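// Composite-key sorts: 4-int (lllint) and 3-int (i3struct) keys are compared lexicographically,
// field x first, by the comparators below and sorted with thrust::sort / thrust::sort_by_key.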
typedef struct lll {
int x;
int y;
int z;
int w;
} lllint;
struct cmp_lllint_key_asc
{
__host__ __device__ inline bool operator()(const lllint &lhs, const lllint &rhs) const
{
if (lhs.x < rhs.x) return true;
if (lhs.x > rhs.x) return false;
if (lhs.y < rhs.y) return true;
if (lhs.y > rhs.y) return false;
if (lhs.z < rhs.z) return true;
if (lhs.z > rhs.z) return false;
return (lhs.w < rhs.w);
}
};
struct cmp_lllint_key_desc
{
__host__ __device__ inline bool operator()(const lllint &lhs, const lllint &rhs) const
{
if (lhs.x > rhs.x) return true;
if (lhs.x < rhs.x) return false;
if (lhs.y > rhs.y) return true;
if (lhs.y < rhs.y) return false;
if (lhs.z > rhs.z) return true;
if (lhs.z < rhs.z) return false;
return (lhs.w > rhs.w);
}
};
int i4sort(int *pkeys0, int N, int asc) {
lllint *pkeys = (lllint *)pkeys0;
thrust::device_ptr<lllint> keys(pkeys);
if (asc > 0) {
thrust::sort(keys, keys + N, cmp_lllint_key_asc());
} else {
thrust::sort(keys, keys + N, cmp_lllint_key_desc());
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
typedef struct i3 {
int x;
int y;
int z;
} i3struct;
struct cmp_i3struct_key_asc
{
__host__ __device__ inline bool operator()(const i3struct &lhs, const i3struct &rhs) const
{
if (lhs.x < rhs.x) return true;
if (lhs.x > rhs.x) return false;
if (lhs.y < rhs.y) return true;
if (lhs.y > rhs.y) return false;
return (lhs.z < rhs.z);
}
};
struct cmp_i3struct_key_desc
{
__host__ __device__ inline bool operator()(const i3struct &lhs, const i3struct &rhs) const
{
if (lhs.x > rhs.x) return true;
if (lhs.x < rhs.x) return false;
if (lhs.y > rhs.y) return true;
if (lhs.y < rhs.y) return false;
return (lhs.z > rhs.z);
}
};
int i3sortk(int *pkeys0, unsigned int *pvals, int N, int asc) {
i3struct *pkeys = (i3struct *)pkeys0;
thrust::device_ptr<i3struct> keys(pkeys);
thrust::device_ptr<unsigned int> vals(pvals);
if (asc > 0) {
thrust::sort_by_key(keys, keys + N, vals, cmp_i3struct_key_asc());
} else {
thrust::sort_by_key(keys, keys + N, vals, cmp_i3struct_key_desc());
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
// This path may break. If so look for radixsort_api.h in /usr/local/cuda/include
// and fix the path below.
using namespace thrust::system::cuda::detail::detail::b40c_thrust;
int fsortsizex(int N) {
RadixSortingEnactor<float,unsigned int> sorter(N);
return sorter.SpineElements();
}
int lsortsizex(int N) {
RadixSortingEnactor<long long,unsigned int> sorter(N);
return sorter.SpineElements();
}
int fsort2dx(float *pkeys, unsigned int *pvals, float *tkeys, unsigned int *tvals,
int *ispine, bool * bflags, int nrows, int ncols, int asc) {
int i;
  hipError_t err = hipSuccess;
RadixSortingEnactor<float,unsigned int> sorter(nrows);
RadixSortStorage<float,unsigned int> storage;
storage.d_spine = ispine;
storage.d_from_alt_storage = bflags;
storage.using_alternate_storage = false;
for (i = 0; i < ncols; i++) {
storage.d_keys = pkeys+i*nrows;
storage.d_values = pvals+i*nrows;
storage.d_alt_keys = tkeys;
storage.d_alt_values = tvals;
if (asc == 0) {
thrust::device_ptr<float> keys(storage.d_keys);
thrust::device_ptr<unsigned int> vals(storage.d_values);
thrust::reverse(keys, keys+nrows);
thrust::reverse(vals, vals+nrows);
}
hipDeviceSynchronize();
sorter.EnactSort(storage);
hipDeviceSynchronize();
err = hipGetLastError();
    if (err != hipSuccess) return err;
if (asc == 0) {
thrust::device_ptr<float> keys(storage.d_keys);
thrust::device_ptr<unsigned int> vals(storage.d_values);
thrust::reverse(keys, keys+nrows);
thrust::reverse(vals, vals+nrows);
}
hipDeviceSynchronize();
if (storage.d_keys == tkeys) {
hipMemcpy(pkeys+i*nrows, tkeys, nrows*sizeof(float), hipMemcpyDeviceToDevice);
}
if (storage.d_values == tvals) {
hipMemcpy(pvals+i*nrows, tvals, nrows*sizeof(unsigned int), hipMemcpyDeviceToDevice);
}
}
return err;
}
int lsortx(long long *pkeys, unsigned int *pvals, long long *tkeys, unsigned int *tvals, int *ispine, bool * bflags, int N, int asc) {
RadixSortingEnactor<long long,unsigned int> sorter(N);
RadixSortStorage<long long,unsigned int> storage;
storage.d_keys = pkeys;
storage.d_values = pvals;
storage.d_alt_keys = tkeys;
storage.d_alt_values = tvals;
storage.d_spine = ispine;
storage.d_from_alt_storage = bflags;
if (asc == 0) {
thrust::device_ptr<long long> keys(storage.d_keys);
thrust::device_ptr<unsigned int> vals(storage.d_values);
thrust::reverse(keys, keys+N);
thrust::reverse(vals, vals+N);
}
hipDeviceSynchronize();
sorter.EnactSort(storage);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (asc == 0) {
thrust::device_ptr<long long> keys(storage.d_keys);
thrust::device_ptr<unsigned int> vals(storage.d_values);
thrust::reverse(keys, keys+N);
thrust::reverse(vals, vals+N);
}
return err;
}
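// __stratify bins the values of a[] into 32 strata. ss[] holds the 31 split points laid out as an
// implicit binary search tree (the five conditional steps below descend that tree); values are
// buffered per stratum in shared memory and flushed to b[] in groups of 32, with bi[] tracking the
// write offset of each stratum.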
__global__ void __stratify(float *strata, int n, float *a, float *b, unsigned int *bi, int stride) {
__shared__ float ss[32];
__shared__ unsigned int ibin[32];
__shared__ unsigned int ebin[32];
__shared__ unsigned int todo[32];
__shared__ float bins[64][33];
__shared__ unsigned int topush;
int tid = threadIdx.x;
ss[tid] = strata[tid];
ibin[tid] = 0;
for (int i = 0; i < n; i += blockDim.x * gridDim.x) {
int ii = i + tid + blockDim.x * blockIdx.x;
if (tid == 0) topush = 0;
if (ii < n) {
float v = a[ii];
int j = 1;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = j - 32;
int k = atomicInc(&ibin[j], 256);
bins[k][j] = v;
if (k == 31) {
k = atomicInc(&topush, 1024);
todo[k] = j;
}
}
if (ibin[tid] >= 32) {
ebin[tid] = atomicAdd(&bi[tid], 32);
ibin[tid] = ibin[tid] - 32;
}
for (int k = 0; k < topush; k++) {
int j = todo[k];
b[j*stride + ebin[j] + tid] = bins[ibin[j] + tid][j];
}
}
ebin[tid] = atomicAdd(&bi[tid], ibin[tid]);
for (int j = 0; j < 32; j++) {
if (tid < ibin[j]) {
b[j*stride + ebin[j] + tid] = bins[tid][j];
}
}
}
int stratify(float *strata, int n, float *a, float *b, unsigned int *bi, int stride) {
hipLaunchKernelGGL(( __stratify), dim3(40),dim3(32), 0, 0, strata, n, a, b, bi, stride);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
#define SNDVALS 256
#define SNDGRPS 4
#define SNTHREADS 1024
#define SBIGBLK (4*1024)
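// Count, for each SBIGBLK-sized chunk of a, how many elements fall into each of the SNDVALS = 256
// strata defined by the threshold tree in strata (an 8-level comparison search over ss[]). Each block
// handles a contiguous slice of a; per-chunk counts are accumulated in shared memory by SNDGRPS groups
// of threads and written to bi in consecutive 256-entry histograms, one per chunk.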
__global__ void __stratifycounts(float *strata, int n, float *a, unsigned int *bi) {
__shared__ unsigned int ic[SNDVALS][SNDGRPS];
__shared__ float ss[SNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK);
int tid = threadIdx.x + threadIdx.y * blockDim.x;
if (threadIdx.y == 0) {
ss[threadIdx.x] = strata[threadIdx.x];
}
for (int i = istart; i < iend; i += SBIGBLK) {
__syncthreads();
if (threadIdx.y < SNDGRPS) {
ic[threadIdx.x][threadIdx.y] = 0;
}
__syncthreads();
for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) {
float v = a[k];
int j = 0;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = j - SNDVALS + 1;
atomicInc(&ic[j][threadIdx.y], 65536*32767);
}
__syncthreads();
if (threadIdx.y == 0) {
bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3];
}
bibase += SNDVALS;
}
}
int stratifycounts(float *strata, int n, float *a, unsigned int *bi) {
const dim3 blockdims(SNDVALS, SNTHREADS/SNDVALS, 1);
const dim3 griddims(8,1,1);
hipLaunchKernelGGL(( __stratifycounts), dim3(griddims),dim3(blockdims), 0, 0, strata, n, a, bi);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
#define RNDVALS 256
#define RNTHREADS 256
#define RNDBITS 8
#define RBIGBLK (4*1024)
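// Histogram one byte of the float bit pattern: for each RBIGBLK-sized chunk of a, count how many
// elements take each of the 256 possible values in byte `digit`, writing one 256-entry histogram per
// chunk to bi.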
__global__ void __radixcounts(float *a, int n, int digit, unsigned int *bi) {
__shared__ unsigned int ic[RNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int tid = threadIdx.x;
int bibase = RNDVALS * (blockIdx.x + istart / RBIGBLK);
for (int i = istart; i < iend; i += RBIGBLK) {
__syncthreads();
ic[threadIdx.x] = 0;
__syncthreads();
for (int j = i + tid; j < min(iend, i+tid+RBIGBLK); j += RNTHREADS) {
float v = a[j];
unsigned char *cv = (unsigned char *)&v;
atomicInc(&ic[cv[digit]], 65536*32767);
}
__syncthreads();
bi[bibase + threadIdx.x] = ic[threadIdx.x];
bibase += RNDVALS;
}
}
int radixcounts(float *a, int n, int digit, unsigned int *bi) {
const dim3 blockdims(RNTHREADS,1,1);
const dim3 griddims(32,1,1);
hipLaunchKernelGGL(( __radixcounts), dim3(griddims),dim3(blockdims), 0, 0, a, n, digit, bi);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
#if __CUDA_ARCH__ > 200
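// GENDISTS generates an all-pairs kernel that accumulates reduce_k DFUNC(A(i,k), B(j,k)) into C(i,j)
// (C is both read and written). It uses a register-tiled scheme: each warp owns a 32x32 tile of C,
// each thread keeps its row's 32 accumulators in registers R00..R31, and for every dimension k the B
// values are rotated around the warp with __shfl so each lane visits all 32 columns. The staggered
// initial yi (starting at threadIdx.x) keeps the register index aligned with the rotating vb.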
#define GENDISTS(DFNAME,DFUNC) \
__global__ void DFNAME(float *A, int lda, float *B, int ldb, float *C, \
int ldc, int d, int nrows, int ncols, float p) { \
int xblk = blockDim.x * (threadIdx.y + blockIdx.y * blockDim.y); \
int yblk = blockDim.x * (threadIdx.z + blockIdx.z * blockDim.z); \
float va, vb, vc; \
float R00, R01, R02, R03, R04, R05, R06, R07, R08, R09, R10, R11, R12, R13, R14, R15, \
R16, R17, R18, R19, R20, R21, R22, R23, R24, R25, R26, R27, R28, R29, R30, R31; \
int xi = threadIdx.x + xblk; \
int yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {R00 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R01 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R02 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R03 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R04 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R05 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R06 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R07 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R08 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R09 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R10 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R11 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R12 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R13 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R14 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R15 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R16 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R17 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R18 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R19 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R20 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R21 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R22 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R23 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R24 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R25 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R26 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R27 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R28 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R29 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R30 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R31 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
} \
yi = threadIdx.x + yblk; \
int nbr = (threadIdx.x + 1) % blockDim.x; \
for (int i = 0; i < d; i++) { \
va = (xi < nrows) ? A[xi + i * lda] : 0; \
vb = (yi < ncols) ? B[yi + i * ldb] : 0; \
vc=R00; DFUNC; R00=vc; vb=__shfl(vb, nbr); vc=R01; DFUNC; R01=vc; vb=__shfl(vb, nbr); \
vc=R02; DFUNC; R02=vc; vb=__shfl(vb, nbr); vc=R03; DFUNC; R03=vc; vb=__shfl(vb, nbr); \
vc=R04; DFUNC; R04=vc; vb=__shfl(vb, nbr); vc=R05; DFUNC; R05=vc; vb=__shfl(vb, nbr); \
vc=R06; DFUNC; R06=vc; vb=__shfl(vb, nbr); vc=R07; DFUNC; R07=vc; vb=__shfl(vb, nbr); \
vc=R08; DFUNC; R08=vc; vb=__shfl(vb, nbr); vc=R09; DFUNC; R09=vc; vb=__shfl(vb, nbr); \
vc=R10; DFUNC; R10=vc; vb=__shfl(vb, nbr); vc=R11; DFUNC; R11=vc; vb=__shfl(vb, nbr); \
vc=R12; DFUNC; R12=vc; vb=__shfl(vb, nbr); vc=R13; DFUNC; R13=vc; vb=__shfl(vb, nbr); \
vc=R14; DFUNC; R14=vc; vb=__shfl(vb, nbr); vc=R15; DFUNC; R15=vc; vb=__shfl(vb, nbr); \
vc=R16; DFUNC; R16=vc; vb=__shfl(vb, nbr); vc=R17; DFUNC; R17=vc; vb=__shfl(vb, nbr); \
vc=R18; DFUNC; R18=vc; vb=__shfl(vb, nbr); vc=R19; DFUNC; R19=vc; vb=__shfl(vb, nbr); \
vc=R20; DFUNC; R20=vc; vb=__shfl(vb, nbr); vc=R21; DFUNC; R21=vc; vb=__shfl(vb, nbr); \
vc=R22; DFUNC; R22=vc; vb=__shfl(vb, nbr); vc=R23; DFUNC; R23=vc; vb=__shfl(vb, nbr); \
vc=R24; DFUNC; R24=vc; vb=__shfl(vb, nbr); vc=R25; DFUNC; R25=vc; vb=__shfl(vb, nbr); \
vc=R26; DFUNC; R26=vc; vb=__shfl(vb, nbr); vc=R27; DFUNC; R27=vc; vb=__shfl(vb, nbr); \
vc=R28; DFUNC; R28=vc; vb=__shfl(vb, nbr); vc=R29; DFUNC; R29=vc; vb=__shfl(vb, nbr); \
vc=R30; DFUNC; R30=vc; vb=__shfl(vb, nbr); vc=R31; DFUNC; R31=vc; vb=__shfl(vb, nbr); \
} \
yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R00;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R01;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R02;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R03;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R04;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R05;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R06;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R07;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R08;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R09;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R10;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R11;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R12;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R13;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R14;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R15;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R16;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R17;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R18;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R19;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R20;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R21;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R22;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R23;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R24;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R25;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R26;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R27;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R28;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R29;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R30;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R31;} yi = (yi+1) % blockDim.x; \
} \
}
GENDISTS(__l1dist,vc+=abs(va-vb))
GENDISTS(__l2dist,vc+=(va-vb)*(va-vb))
GENDISTS(__minkowskidist,vc+=pow(abs(va-vb),p))
GENDISTS(__linfdist,vc=max(vc,abs(va-vb)))
GENDISTS(__msum,vc=max(vc,va+vb))
#else
__global__ void __l1dist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, Lidist not supported on arch <= 200\n");
}
__global__ void __l2dist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, L2dist not supported on arch <= 200\n");
}
__global__ void __minkowskidist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, Minkowski distance not supported on arch <= 200\n");
}
__global__ void __linfdist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, Max-abs distance not supported on arch <= 200\n");
}
__global__ void __msum(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, Max-sum multiply not supported on arch <= 200\n");
}
#endif
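// Dispatch on the exponent p: p == 0 computes the max-abs (L-infinity) distance, p == 1 the L1
// distance, p == 2 the squared L2 distance, and any other p the Minkowski sum |a-b|^p; no final 1/p
// root is applied and the results are accumulated into C. Illustrative call (sizes hypothetical):
// dists(dA, 100, dB, 100, dC, 100, 20, 100, 100, 2.0f);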
int dists(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
// hipSetDevice(ithread);
if (p == 0.0f) {
hipLaunchKernelGGL(( __linfdist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 1.0f) {
hipLaunchKernelGGL(( __l1dist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 2.0f) {
hipLaunchKernelGGL(( __l2dist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else {
hipLaunchKernelGGL(( __minkowskidist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int maxsumx(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
hipLaunchKernelGGL(( __msum), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, 0);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
#if __CUDA_ARCH__ > 200
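// Segmented cumulative sum along columns: jc[0..m] holds segment boundaries (row indices), and for
// every column and segment the kernel writes the running inclusive sum of `in` to `out`. Each 32 x ny
// block scans 32*ny elements at a time: an intra-warp inclusive scan with __shfl_up, a cross-warp
// carry propagated through the shared tots[] array, and `sum` carrying the total across iterations.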
template<class T>
__global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {
__shared__ T tots[32];
int start, end, ij;
int bid = blockIdx.y + blockIdx.z * gridDim.y; // column index
T sum, tsum, tmp, ttot, ttot0;
if (bid < ncols) {
for (ij = blockIdx.x; ij < m; ij += gridDim.x) {
start = jc[ij] + bid * nrows;
end = jc[ij+1] + bid * nrows;
sum = 0;
for (int i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) {
tsum = in[i];
tmp = __shfl_up(tsum, 1);
if (threadIdx.x >= 1) tsum += tmp;
tmp = __shfl_up(tsum, 2);
if (threadIdx.x >= 2) tsum += tmp;
tmp = __shfl_up(tsum, 4);
if (threadIdx.x >= 4) tsum += tmp;
tmp = __shfl_up(tsum, 8);
if (threadIdx.x >= 8) tsum += tmp;
tmp = __shfl_up(tsum, 16);
if (threadIdx.x >= 16) tsum += tmp;
ttot = __shfl(tsum, min(end-start-1, 31));
ttot0 = ttot;
__syncthreads();
if (threadIdx.x == threadIdx.y) {
tots[threadIdx.y] = ttot;
}
__syncthreads();
for (int k = 1; k < blockDim.y; k *= 2) {
if (threadIdx.y >= k) {
if (threadIdx.x == threadIdx.y - k) {
ttot += tots[threadIdx.x];
}
}
__syncthreads();
if (threadIdx.y >= k) {
ttot = __shfl(ttot, threadIdx.y - k);
if (threadIdx.x == threadIdx.y) {
tots[threadIdx.y] = ttot;
}
}
__syncthreads();
}
out[i] = sum + tsum + ttot - ttot0;
if (threadIdx.x == blockDim.y - 1) {
ttot = tots[threadIdx.x];
}
__syncthreads();
ttot = __shfl(ttot, blockDim.y - 1);
sum += ttot;
}
}
}
}
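// Segmented argmax/argmin per column: for each segment [jc[ij], jc[ij+1]) of each column, find the
// extreme value and its row index. dir != 0 selects max, dir == 0 selects min, and maxminv is the
// starting sentinel (e.g. -3e38f for max). Reduction uses __shfl_up within a warp, then goes across
// warps through the shared maxv/maxi arrays; results land in out/outi[ij + m * column].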
template<class T>
__global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T maxminv, int dir) {
__shared__ T maxv[32];
__shared__ int maxi[32];
T vmax, vtmp;
int imax, itmp, i, k, start, end, ij;
int bid = blockIdx.y + blockIdx.z * gridDim.y;
if (bid < ncols) {
for (ij = blockIdx.x; ij < m; ij += gridDim.x) {
vmax = maxminv;
imax = -1;
start = jc[ij];
end = jc[ij+1];
for (i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) {
vtmp = in[i + nrows * bid];
itmp = i;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
for (k = 1; k < blockDim.x; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
vmax = __shfl(vmax, blockDim.x - 1);
imax = __shfl(imax, blockDim.x - 1);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
maxv[threadIdx.y] = vmax;
maxi[threadIdx.y] = imax;
}
__syncthreads();
if (threadIdx.y == 0) {
vmax = maxv[threadIdx.x];
imax = maxi[threadIdx.x];
}
__syncthreads();
if (threadIdx.y == 0) {
for (k = 1; k < blockDim.y; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
if (threadIdx.x == blockDim.y - 1) {
out[ij + m * bid] = vmax;
outi[ij + m * bid] = imax;
}
}
}
}
}
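// Column-wise (unsegmented) argmax/argmin: one block per column, same warp and shared-memory
// reduction pattern as __maxming; out[bid]/outi[bid] receive the extreme value and its row index.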
template<class T>
__global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T maxminv, int dir) {
__shared__ T maxv[32];
__shared__ int maxi[32];
T vmax, vtmp;
int imax, itmp, i, k;
int bid = blockIdx.x + blockIdx.y * gridDim.x;
if (bid < ncols) {
vmax = maxminv;
imax = -1;
for (i = threadIdx.x + threadIdx.y * blockDim.x; i < nrows; i += blockDim.x * blockDim.y) {
vtmp = in[i + nrows * bid];
itmp = i;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
for (k = 1; k < blockDim.x; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
vmax = __shfl(vmax, blockDim.x - 1);
imax = __shfl(imax, blockDim.x - 1);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
maxv[threadIdx.y] = vmax;
maxi[threadIdx.y] = imax;
}
__syncthreads();
if (threadIdx.y == 0) {
vmax = maxv[threadIdx.x];
imax = maxi[threadIdx.x];
}
__syncthreads();
if (threadIdx.y == 0) {
for (k = 1; k < blockDim.y; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
if (threadIdx.x == blockDim.y - 1) {
out[bid] = vmax;
outi[bid] = imax;
}
}
__syncthreads();
}
}
// Not very fast for wide matrices
template<class T>
__global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {
T vmax, vtmp;
int imax, itmp, i, j;
for (i = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * blockIdx.x); i < nrows; i += blockDim.x * blockDim.y * gridDim.x) {
if (ncols > 0) {
vmax = in[i];
imax = 0;
for (j = 1; j < ncols; j++) {
vtmp = in[i + nrows * j];
itmp = j;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
out[i] = vmax;
outi[i] = imax;
}
}
}
#else
template<class T>
__global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {}
template<class T>
__global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) {}
template<class T>
__global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) {}
template<class T>
__global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {}
#endif
void setinds(int ncols, int &nc1, int &nc2) {
if (ncols < 65536) {
nc1 = ncols;
nc2 = 1;
} else {
nc1 = (int)sqrt((double)ncols);
nc2 = 1 + (ncols-1)/nc1;
}
}
template<class T>
int cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {
int nc1, nc2;
setinds(ncols, nc1, nc2);
dim3 grid(min(64, m), nc1, nc2);
int ny = min(32, 1+nrows/m/32);
dim3 tblock(32, ny, 1);
hipLaunchKernelGGL(( __cumsumg<T>), dim3(grid),dim3(tblock), 0, 0, in, out, jc, nrows, ncols, m);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int cumsumgf(float *in, float *out, int *jc, int nrows, int ncols, int m) {
return cumsumg<float>(in, out, jc, nrows, ncols, m);
}
int cumsumgi(int *in, int *out, int *jc, int nrows, int ncols, int m) {
return cumsumg<int>(in, out, jc, nrows, ncols, m);
}
template<class T>
int maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) {
int nc1, nc2;
setinds(ncols, nc1, nc2);
dim3 grid(min(64, m), nc1, nc2);
int ny = min(32, 1+nrows/m/32);
dim3 tblock(32, ny, 1);
hipLaunchKernelGGL(( __maxming<T>), dim3(grid),dim3(tblock), 0, 0, in, out, outi, jc, nrows, ncols, m, minv, dir);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
// JFC: problem here ncols a non-multiple of 16, and nrows < 32.
template<class T>
int maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) {
int nc1, nc2;
setinds(ncols, nc1, nc2);
dim3 grid(nc1, nc2, 1);
int ny = min(32, 1+nrows/32);
dim3 tblock(32, ny, 1);
hipLaunchKernelGGL(( __maxmini_cols<T>), dim3(grid),dim3(tblock), 0, 0, in, out, outi, nrows, ncols, minv, dir);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
template<class T>
int maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {
int nb = min(32,1+nrows/32);
dim3 grid(nb,1,1);
int ny = min(32, 1+nrows/nb/32);
dim3 tblock(32, ny, 1);
hipLaunchKernelGGL(( __maxmini_rows<T>), dim3(grid),dim3(tblock), 0, 0, in, out, outi, nrows, ncols, dir);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
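// Host wrappers below: the last two arguments of maxming/maxmini_cols are the starting sentinel
// (-3e38f ~ -FLT_MAX, 0x80000000 the INT_MIN bit pattern, LLONG_MIN, ...) and the direction flag
// (1 = max, 0 = min). maxif/minif and friends interpret dir as the reduction dimension:
// 1 reduces each column over rows, 2 reduces each row over columns.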
int maxgf(float *in, float *out, int *outi, int *jc, int nrows, int ncols, int m) {
return maxming<float>(in, out, outi, jc, nrows, ncols, m, -3e38f, 1);
}
int maxgi(int *in, int *out, int *outi, int *jc, int nrows, int ncols, int m) {
return maxming<int>(in, out, outi, jc, nrows, ncols, m, 0x80000000, 1);
}
int mingf(float *in, float *out, int *outi, int *jc, int nrows, int ncols, int m) {
return maxming<float>(in, out, outi, jc, nrows, ncols, m, 3e38f, 0);
}
int mingi(int *in, int *out, int *outi, int *jc, int nrows, int ncols, int m) {
return maxming<int>(in, out, outi, jc, nrows, ncols, m, 0x7fffffff, 0);
}
int maxif(float *in, float *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<float>(in, out, outi, nrows, ncols, -3e38f, 1);
} else if (dir == 2) {
return maxmini_rows<float>(in, out, outi, nrows, ncols, 1);
} else {
return -1;
}
}
int maxii(int *in, int *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<int>(in, out, outi, nrows, ncols, 0x80000000, 1);
} else if (dir == 2) {
return maxmini_rows<int>(in, out, outi, nrows, ncols, 1);
} else {
return -1;
}
}
int maxil(long long *in, long long *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<long long>(in, out, outi, nrows, ncols, LLONG_MIN, 1);
} else if (dir == 2) {
return maxmini_rows<long long>(in, out, outi, nrows, ncols, 1);
} else {
return -1;
}
}
int minif(float *in, float *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<float>(in, out, outi, nrows, ncols, 3e38f, 0);
} else if (dir == 2) {
return maxmini_rows<float>(in, out, outi, nrows, ncols, 0);
} else {
return -1;
}
}
int minii(int *in, int *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<int>(in, out, outi, nrows, ncols, 0x7fffffff, 0);
} else if (dir == 2) {
return maxmini_rows<int>(in, out, outi, nrows, ncols, 0);
} else {
return -1;
}
}
int minil(long long *in, long long *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<long long>(in, out, outi, nrows, ncols, LLONG_MAX, 0);
} else if (dir == 2) {
return maxmini_rows<long long>(in, out, outi, nrows, ncols, 0);
} else {
return -1;
}
}
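// Dense matrix-vector products on a column-major nrows x ncols matrix a: __dmv computes c += a * b
// with one atomicAdd per row accumulator, while __dmvt computes c += a^T * b, using a __shfl_down
// warp reduction when built for arch > 200 and a plain per-element atomicAdd otherwise.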
__global__ void __dmv(float *a, int nrows, int ncols, float *b, float *c) {
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
float accum = 0.0f;
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
accum += a[tx+nrows*ty] * b[ty];
}
atomicAdd(&c[tx], accum);
}
}
#if __CUDA_ARCH__ > 200
__global__ void __dmvt(float *a, int nrows, int ncols, float *b, float *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
float accum = 0.0f;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
for (int i = 1; i < blockDim.x; i *= 2) {
float tmp = __shfl_down(accum, i);
if (threadIdx.x + i < blockDim.x) accum += tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&c[ty], accum);
}
}
}
#else
__global__ void __dmvt(float *a, int nrows, int ncols, float *b, float *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
float accum = 0.0f;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
atomicAdd(&c[ty], accum);
}
}
#endif
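// Non-transposed matvec with a strided-layout trick: the caller chooses tstep as a multiple of nrows,
// so as a thread walks a[tx] with stride tstep it stays on a single row (tx % nrows is invariant);
// the per-thread partial sum is flushed with a single atomicAdd at the end.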
__global__ void __dmv0(float *a, int nrows, int ncols, int tstep, float *b, float *c) {
float accum = 0.0f;
int tx = threadIdx.x + blockDim.x * blockIdx.x;
if (tx < tstep) {
for (; tx < nrows*ncols; tx += tstep) {
int icol = tx / nrows;
accum += a[tx] * b[icol];
}
int irow = tx % nrows;
atomicAdd(&c[irow], accum);
}
}
int dmv(float *a, int nrows, int ncols, float *b, float *c, int trans) {
if (trans == 1) {
int ntx = min(32, nrows);
int nty = min(32, ncols);
int nbx = min(256, 1 + nrows/ntx/8);
int nby = min(256, 1 + ncols/nty/2);
dim3 blockdims(ntx,nty,1);
dim3 griddims(nbx,nby,1);
hipLaunchKernelGGL(( __dmvt), dim3(griddims),dim3(blockdims), 0, 0, a, nrows, ncols, b, c);
} else {
int ntx = min(1024, nrows*ncols);
int nbx = max(1+(nrows-1)/ntx, nrows*ncols/ntx/32);
int tstep = (ntx*nbx/nrows)*nrows;
hipLaunchKernelGGL(( __dmv0), dim3(nbx),dim3(ntx), 0, 0, a, nrows, ncols, tstep, b, c);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
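// ACCUM_KERNEL generates scatter-add kernel/wrapper pairs computing S[II + nrows * IJ] += IV for
// every combination of vector/scalar row indices I, column indices J and values V (the II/IJ/IV
// arguments expand to either X[i] or X); each block handles a 32-aligned slice of the m updates.
// Illustrative use (arguments hypothetical): accum(I, J, 1.0f, S, m, nrows) counts occurrences of
// each (I[i], J[i]) pair in S.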
#define ACCUM_KERNEL(TI,TJ,TV,TS,II,IJ,IV) \
__global__ void __accum(TI, TJ, TV, TS, int m, int nrows) { \
int istart = ((int)(((long long)blockIdx.x) * m / gridDim.x)); \
int iend = ((int)(((long long)blockIdx.x + 1) * m / gridDim.x)); \
istart = (istart / 32) * 32; \
if (blockIdx.x != gridDim.x - 1) { \
iend = (iend / 32) * 32; \
} \
for (int i = istart + threadIdx.x; i < iend; i+= blockDim.x) { \
atomicAdd(&S[II + nrows * IJ], IV); \
} \
} \
int accum(TI, TJ, TV, TS, int m, int nrows) { \
int nthreads = min(512, m); \
int nblocks = max(1, min(65535, m/nthreads/8)); \
hipLaunchKernelGGL(( __accum), dim3(nblocks),dim3(nthreads), 0, 0, I,J,V,S,m,nrows); \
hipDeviceSynchronize(); \
hipError_t err = hipGetLastError(); \
return err; \
}
ACCUM_KERNEL(int*I, int*J, float*V, float*S, I[i], J[i], V[i])
ACCUM_KERNEL(int*I, int J, float*V, float*S, I[i], J, V[i])
ACCUM_KERNEL(int I, int*J, float*V, float*S, I, J[i], V[i])
ACCUM_KERNEL(int*I, int*J, float V, float*S, I[i], J[i], V)
ACCUM_KERNEL(int*I, int J, float V, float*S, I[i], J, V)
ACCUM_KERNEL(int I, int*J, float V, float*S, I, J[i], V)
ACCUM_KERNEL(int*I, int*J, int*V, int*S, I[i], J[i], V[i])
ACCUM_KERNEL(int*I, int J, int*V, int*S, I[i], J, V[i])
ACCUM_KERNEL(int I, int*J, int*V, int*S, I, J[i], V[i])
ACCUM_KERNEL(int*I, int*J, int V, int*S, I[i], J[i], V)
ACCUM_KERNEL(int*I, int J, int V, int*S, I[i], J, V)
ACCUM_KERNEL(int I, int*J, int V, int*S, I, J[i], V)
ACCUM_KERNEL(int*I, int*J, unsigned long long*V, unsigned long long*S, I[i], J[i], V[i])
ACCUM_KERNEL(int*I, int J, unsigned long long*V, unsigned long long*S, I[i], J, V[i])
ACCUM_KERNEL(int I, int*J, unsigned long long*V, unsigned long long*S, I, J[i], V[i])
ACCUM_KERNEL(int*I, int*J, unsigned long long V, unsigned long long*S, I[i], J[i], V)
ACCUM_KERNEL(int*I, int J, unsigned long long V, unsigned long long*S, I[i], J, V)
ACCUM_KERNEL(int I, int*J, unsigned long long V, unsigned long long*S, I, J[i], V)
const int INBLOCK = 4;
// copy and transpose columns of the input matrix into the output matrix. nrows refers to the input matrix
// (and so is ncols for the output). ncols is the length of the iptrs array, which will be the number of
// rows of the output matrix. iptrs specifies the columns of the input array to copy.
// outstride is stride of the output matrix
__global__ void __icopy_transpose(int *iptrs, float *in, float *out, int outstride, int nrows, int ncols) {
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x + xb + iptrs[y]*nrows];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
}
int icopy_transpose(int *iptrs, float *in, float *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
hipError_t err;
hipLaunchKernelGGL(( __icopy_transpose), dim3(griddims),dim3(blockdims), 0, 0, iptrs, in, out, stride, nrows, ncols);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {fprintf(stderr, "cuda error in icopy_transpose"); return err;}
return 0;
}
// copy and transpose the input matrix into columns of the output matrix. nrows, ncols refer to output matrix
__global__ void __ocopy_transpose(int *optrs, float *in, float *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
out[optrs[y]*nrows + threadIdx.x + xb] = tile[threadIdx.x][y-yb];
}
}
__syncthreads();
}
}
}
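// The _add and _min variants below are identical to __ocopy_transpose except that the output is
// merged with atomicAdd, respectively atomicMin on the float bit pattern reinterpreted as int
// (note that this integer ordering matches float ordering only for non-negative values).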
__global__ void __ocopy_transpose_add(int *optrs, float *in, float *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
atomicAdd(&out[optrs[y]*nrows + threadIdx.x + xb], tile[threadIdx.x][y-yb]);
}
}
__syncthreads();
}
}
}
__global__ void __ocopy_transpose_min(int *optrs, float *in, float *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
atomicMin((int *)&out[optrs[y]*nrows + threadIdx.x + xb], *(int *)(&tile[threadIdx.x][y-yb]));
}
}
__syncthreads();
}
}
}
int ocopy_transpose_add(int *optrs, float *in, float *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
hipError_t err;
hipLaunchKernelGGL(( __ocopy_transpose_add), dim3(griddims),dim3(blockdims), 0, 0, optrs, in, out, stride, nrows, ncols);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {fprintf(stderr, "cuda error in ocopy_transpose_add"); return err;}
return 0;
}
int ocopy_transpose(int *optrs, float *in, float *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
hipError_t err;
hipLaunchKernelGGL(( __ocopy_transpose), dim3(griddims),dim3(blockdims), 0, 0, optrs, in, out, stride, nrows, ncols);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {fprintf(stderr, "cuda error in ocopy_transpose"); return err;}
return 0;
}
int ocopy_transpose_min(int *optrs, float *in, float *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
hipError_t err;
hipLaunchKernelGGL(( __ocopy_transpose_min), dim3(griddims),dim3(blockdims), 0, 0, optrs, in, out, stride, nrows, ncols);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {fprintf(stderr, "cuda error in ocopy_transpose_min"); return err;}
return 0;
}
#ifdef TEST
int main(int argc, char **argv) {
int m=8, n=8, opn = 0;
float *dA, *dB, *dC, *A, *B, *C;
if (argc > 1) {
sscanf(argv[1], "%d", &opn);
if (argc > 2) {
sscanf(argv[2], "%d", &m);
if (argc > 3) {
sscanf(argv[3], "%d", &n);
}
}
}
A = (float *)malloc(m*n*sizeof(float));
B = (float *)malloc(m*n*sizeof(float));
C = (float *)malloc(m*n*sizeof(float));
hipMalloc((void**)&dA, m*n*sizeof(float));
hipMalloc((void**)&dB, m*n*sizeof(float));
hipMalloc((void**)&dC, m*n*sizeof(float));
for (int i = 0; i < m*n; i++) {
A[i] = 1.0f;
B[i] = 2.0f;
}
hipMemcpy(dA, A, m*n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dB, B, m*n*sizeof(float), hipMemcpyHostToDevice);
printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
MatKernel(dA, m, n, dB, m, n, dC, opn);
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "CUDA error %d", err);
exit(1);
}
hipMemcpy(C, dC, m*n*sizeof(float), hipMemcpyDeviceToHost);
printf("C %f %f %f %f\n", C[0], C[1], C[2], C[3]);
printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
if (dA != NULL) hipFree(dA);
if (dB != NULL) hipFree(dB);
if (dC != NULL) hipFree(dC);
if (C != NULL) free(C);
}
#endif
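// Poisson samples: each thread owns a hiprandState_t seeded by __randinit (fixed seed 1234,
// subsequence = thread id) and draws B[i] ~ Poisson(A[i]) with hiprand_poisson in a grid-stride loop.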
__global__ void __poissonrnd(int n, float *A, int *B, hiprandState_t *rstates) {
int id = threadIdx.x + blockDim.x * blockIdx.x;
int nthreads = blockDim.x * gridDim.x;
hiprandState_t rstate = rstates[id];
for (int i = id; i < n; i += nthreads) {
int cr = hiprand_poisson(&rstate, A[i]);
B[i] = cr;
}
}
__global__ void __randinit(hiprandState_t *rstates) {
int id = threadIdx.x + blockDim.x * blockIdx.x;
hiprand_init(1234, id, 0, &rstates[id]);
}
int poissonrnd(int n, float *A, int *B, int nthreads) {
int nblocks = min(1024, max(1,nthreads/1024));
int nth = min(n, 1024);
hiprandState_t *rstates;
int err;
err = hipMalloc((void **)&rstates, nblocks * nth * sizeof(hiprandState_t));
if (err > 0) {
fprintf(stderr, "Error in hipMalloc %d", err);
return err;
}
hipDeviceSynchronize();
hipLaunchKernelGGL(( __randinit), dim3(nblocks),dim3(nth), 0, 0, rstates);
hipDeviceSynchronize();
hipLaunchKernelGGL(( __poissonrnd), dim3(nblocks),dim3(nth), 0, 0, n, A, B, rstates);
hipDeviceSynchronize();
hipFree(rstates);
err = hipGetLastError();
return err;
}
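// Binomial samples Out[j] ~ Binomial(C[j], A[j]) via a rejection sampler: candidates are drawn from a
// four-part envelope (two half-normal pieces near the mode plus two exponential tails, with areas
// a1..a4) and accepted against a log-gamma ratio test; the loop gives up after 100 attempts. This
// appears to follow a BTPE-style scheme and assumes n*p is reasonably large.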
__global__ void __binornd(int nvals, float *A, int *C, int *Out, hiprandState_t *rstates) {
int jstart = ((long long)blockIdx.x) * nvals / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nvals / gridDim.x;
int id = threadIdx.x + blockDim.x * threadIdx.y;
int ib = id + blockIdx.x * blockDim.x * blockDim.y;
hiprandState_t rstate;
float X, Y, V;
if (ib < nvals) {
rstate = rstates[ib];
}
const float pi = 3.1415926f;
for (int j = jstart + id; j < jend; j += blockDim.x * blockDim.y) {
int n = C[j];
float p = A[j];
float np = n * p;
float pvar = np * (1 - p);
float delta1 = max(1.0f, floor(sqrt(pvar * log(128 * np / (81 * pi * (1-p))))));
float delta2 = max(1.0f, floor(sqrt(pvar * log(128 * n * (1-p) / (pi * p)))));
float sigma1 = sqrt(pvar)*(1+delta1/(4*np));
float sigma2 = sqrt(pvar)*(1+delta2/(4*n*(1-p)));
float c = 2 * delta1 / np;
float a1 = 0.5f * exp(c) * sigma1 * sqrt(2*pi);
float a2 = 0.5f * sigma2 * sqrt(2*pi);
float a3 = exp(delta1/(n*(1-p)) - delta1*delta1/(2*sigma1*sigma1))*2*sigma1*sigma1/delta1;
float a4 = exp(-delta2*delta2/(2*sigma2*sigma2))*2*sigma2*sigma2/delta2;
float s = a1 + a2 + a3 + a4;
int i = 0;
while (i < 100) { // Give up eventually
i += 1;
float U = s * hiprand_uniform(&rstate);
float E1 = - log(hiprand_uniform(&rstate)); // safe since hiprand_uniform wont return 0
if (U <= a1 + a2) {
float N = hiprand_normal(&rstate);
if (U <= a1) {
Y = sigma1 * abs(N);
if (Y >= delta1) continue;
X = floor(Y);
V = - E1 - N * N/2 + c;
} else {
Y = sigma2 * abs(N);
if (Y >= delta2) continue;
X = floor(-Y);
V = - E1 - N * N/2;
}
} else {
float E2 = - log(hiprand_uniform(&rstate));
if (U <= a1 + a2 + a3) {
Y = delta1 + 2*sigma1*sigma1*E1/delta1;
X = floor(Y);
V = - E2 - delta1*Y/(2*sigma1*sigma1) + delta1/(n*(1-p));
} else {
Y = delta2 + 2*sigma2*sigma2*E1/delta2;
X = floor(-Y);
V = - E2 - delta2*Y/(2*sigma2*sigma2);
}
}
if (X < - np || X > n * (1-p)) continue;
if (V > lgamma(np + X) - lgamma(np)) continue;
break;
}
Out[j] = (int)X;
}
}
int binornd(int nvals, float *A, int *C, int *Out) {
int nthreads = min(nvals, 1024);
int nblocks = min(128, 1 + (nvals-1)/nthreads);
hiprandState_t *rstates;
int err = hipMalloc((void **)&rstates, nthreads * nblocks * sizeof(hiprandState_t));
if (err > 0) {
fprintf(stderr, "Error in hipMalloc %d", err);
return err;
}
hipDeviceSynchronize();
hipLaunchKernelGGL(( __randinit), dim3(nblocks),dim3(nthreads), 0, 0, rstates);
hipDeviceSynchronize();
hipLaunchKernelGGL(( __binornd), dim3(nblocks),dim3(nthreads), 0, 0, nvals, A, C, Out, rstates);
hipDeviceSynchronize();
hipFree(rstates);
err = hipGetLastError();
return err;
}
int collectLVec(long long *pakeys, unsigned int *pavals, long long *pokeys, unsigned int *povals, int n) {
thrust::device_ptr<long long> akeys(pakeys);
thrust::device_ptr<long long> okeys(pokeys);
thrust::device_ptr<unsigned int> avals(pavals);
thrust::device_ptr<unsigned int> ovals(povals);
thrust::pair<thrust::device_ptr<long long>, thrust::device_ptr<unsigned int> > new_end;
new_end = thrust::reduce_by_key(akeys, akeys + n, avals, okeys, ovals);
int len = new_end.first - okeys;
return len;
}
int mergeLVecs(long long *pakeys, unsigned int *pavals, long long *pbkeys, unsigned int *pbvals, long long *pokeys, unsigned int *povals, int n1, int n2) {
thrust::device_ptr<long long> akeys(pakeys);
thrust::device_ptr<long long> bkeys(pbkeys);
thrust::device_ptr<long long> okeys(pokeys);
thrust::device_ptr<unsigned int> avals(pavals);
thrust::device_ptr<unsigned int> bvals(pbvals);
thrust::device_ptr<unsigned int> ovals(povals);
thrust::merge_by_key(akeys, akeys+n1, bkeys, bkeys+n2, avals, bvals, okeys, ovals);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
| MatKernel.cu | #include <cuda_runtime.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/reverse.h>
#include <thrust/reduce.h>
#include <thrust/merge.h>
#include <thrust/fill.h>
#if __CUDA_ARCH__ > 200
#define MAXXGRID 2147483647
#else
#define MAXXGRID 65535
#endif
__device__ float op_add(float a, float b) {return a+b;}
__device__ float op_sub(float a, float b) {return a-b;}
__device__ float op_mul(float a, float b) {return a*b;}
__device__ float op_div(float a, float b) {return a/b;}
__device__ float op_gt(float a, float b) {return (a > b) ? 1.0f : 0;}
__device__ float op_lt(float a, float b) {return (a < b) ? 1.0f : 0;}
__device__ float op_eq(float a, float b) {return (a == b) ? 1.0f : 0;}
__device__ float op_ge(float a, float b) {return (a >= b) ? 1.0f : 0;}
__device__ float op_le(float a, float b) {return (a <= b) ? 1.0f : 0;}
__device__ float op_ne(float a, float b) {return (a != b) ? 1.0f : 0;}
__device__ float op_max(float a, float b) {return max(a,b);}
__device__ float op_min(float a, float b) {return min(a,b);}
__device__ float op_atan2(float a, float b) {return atan2f(a, b);}
__device__ float op_pow(float a, float b) {return powf(a, b);}
__device__ int iop_add(int a, int b) {return a+b;}
__device__ int iop_sub(int a, int b) {return a-b;}
__device__ int iop_mul(int a, int b) {return a*b;}
__device__ int iop_div(int a, int b) {return a/b;}
__device__ int iop_gt(int a, int b) {return (a > b) ? 1 : 0;}
__device__ int iop_lt(int a, int b) {return (a < b) ? 1 : 0;}
__device__ int iop_eq(int a, int b) {return (a == b) ? 1 : 0;}
__device__ int iop_ge(int a, int b) {return (a >= b) ? 1 : 0;}
__device__ int iop_le(int a, int b) {return (a <= b) ? 1 : 0;}
__device__ int iop_ne(int a, int b) {return (a != b) ? 1 : 0;}
__device__ int iop_max(int a, int b) {return max(a,b);}
__device__ int iop_min(int a, int b) {return min(a,b);}
__device__ long long lop_add(long long a, long long b) {return a+b;}
__device__ long long lop_sub(long long a, long long b) {return a-b;}
__device__ long long lop_mul(long long a, long long b) {return a*b;}
__device__ long long lop_div(long long a, long long b) {return a/b;}
__device__ long long lop_gt(long long a, long long b) {return (a > b) ? 1 : 0;}
__device__ long long lop_lt(long long a, long long b) {return (a < b) ? 1 : 0;}
__device__ long long lop_eq(long long a, long long b) {return (a == b) ? 1 : 0;}
__device__ long long lop_ge(long long a, long long b) {return (a >= b) ? 1 : 0;}
__device__ long long lop_le(long long a, long long b) {return (a <= b) ? 1 : 0;}
__device__ long long lop_ne(long long a, long long b) {return (a != b) ? 1 : 0;}
__device__ long long lop_max(long long a, long long b) {return max(a,b);}
__device__ long long lop_min(long long a, long long b) {return min(a,b);}
typedef float (*optype)(float,float);
typedef int (*ioptype)(int,int);
typedef long long (*loptype)(long long,long long);
// Check reducevec if these ever get changed.
__device__ const optype operators[] = {
op_add,
op_sub,
op_mul,
op_div,
op_gt,
op_lt,
op_eq,
op_ge,
op_le,
op_ne,
op_max,
op_min,
op_atan2,
op_pow};
__device__ const ioptype ioperators[] = {
iop_add,
iop_sub,
iop_mul,
iop_div,
iop_gt,
iop_lt,
iop_eq,
iop_ge,
iop_le,
iop_ne,
iop_max,
iop_min};
__device__ const loptype loperators[] = {
lop_add,
lop_sub,
lop_mul,
lop_div,
lop_gt,
lop_lt,
lop_eq,
lop_ge,
lop_le,
lop_ne,
lop_max,
lop_min};
__device__ float fn_abs(float a) {return abs(a);}
__device__ float fn_exp(float a) {return expf(a);}
__device__ float fn_log(float a) {return logf(a);}
__device__ float fn_expm1(float a) {return expm1f(a);}
__device__ float fn_sqrt(float a) {return sqrtf(a);}
__device__ float fn_ln(float a) {return logf(a);}
__device__ float fn_log10(float a) {return log10f(a);}
__device__ float fn_log1p(float a) {return log1pf(a);}
__device__ float fn_cos(float a) {return cosf(a);}
__device__ float fn_sin(float a) {return sinf(a);}
__device__ float fn_tan(float a) {return tanf(a);}
__device__ float fn_cosh(float a) {return coshf(a);}
__device__ float fn_sinh(float a) {return sinhf(a);}
__device__ float fn_tanh(float a) {return tanhf(a);}
__device__ float fn_acos(float a) {return acosf(a);}
__device__ float fn_asin(float a) {return asinf(a);}
__device__ float fn_atan(float a) {return atanf(a);}
__device__ float fn_acosh(float a) {return acoshf(a);}
__device__ float fn_asinh(float a) {return asinhf(a);}
__device__ float fn_atanh(float a) {return atanhf(a);}
__device__ float fn_erf(float a) {return erff(a);}
__device__ float fn_erfinv(float a) {return erfinvf(a);}
__device__ float fn_erfc(float a) {return erfcf(a);}
__device__ float fn_erfcinv(float a) {return erfcinvf(a);}
__device__ float fn_gammaln(float a) {return lgammaf(a);}
__device__ float fn_gamma(float a) {return tgammaf(a);}
__device__ float fn_ceil(float a) {return ceilf(a);}
__device__ float fn_floor(float a) {return floorf(a);}
__device__ float fn_round(float a) {return roundf(a);}
__device__ float fn_trunc(float a) {return truncf(a);}
__device__ float fn_sign(float a) {return (a>0) ? 1.0f : ((a<0) ? -1.0f : 0);}
__device__ float fn_j0(float a) {return j0f(a);}
__device__ float fn_j1(float a) {return j1f(a);}
//__device__ float fn_jn(float a) {return jnf(a);}
__device__ float fn_y0(float a) {return y0f(a);}
__device__ float fn_y1(float a) {return y1f(a);}
//__device__ float fn_yn(float a) {return ynf(a);}
__device__ float fn_exppsi(float a) {return (a<1.0f) ? 0.5f*a*a : a-0.5f;}
__device__ float fn_atan2(float a, float b) {return atan2f(a, b);}
__device__ float fn_pow(float a, float b) {return powf(a, b);}
typedef float (*fntype)(float);
__device__ const fntype fctns[35] = {
fn_abs,
fn_exp,
fn_expm1,
fn_sqrt,
fn_ln,
fn_log10,
fn_log1p,
fn_cos,
fn_sin,
fn_tan,
fn_cosh,
fn_sinh,
fn_tanh,
fn_acos,
fn_asin,
fn_atan,
fn_acosh,
fn_asinh,
fn_atanh,
fn_erf,
fn_erfinv,
fn_erfc,
fn_erfcinv,
fn_gammaln,
fn_gamma,
fn_ceil,
fn_floor,
fn_round,
fn_trunc,
fn_sign,
fn_j0,
fn_j1,
fn_y0,
fn_y1,
fn_exppsi};
__device__ const optype fctns2[2] = {
fn_atan2,
fn_pow};
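// Elementwise application of the opn-th unary function from the fctns table, using a grid-stride
// loop; the two-argument variant __apply_gfun2 further below indexes fctns2 the same way.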
__global__ void __apply_gfun(float *A, float *B, int N, int opn) {
fntype fn = fctns[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = fn(A[i]);
}
}
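// Choose a launch shape covering N elements: grow the block count to 16, then the thread count to
// 1024, then keep doubling blocks until nblocks*nthreads >= N; the block count is then split across
// grid.x and grid.y so neither dimension exceeds the 65536 limit.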
void setsizes(int N, dim3 *gridp, int *nthreadsp) {
int nblocks = 1;
int nthreads = 1;
while (nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < 1024) {
nthreads = 2*nthreads;
} else {
nblocks = 2*nblocks;
}
}
gridp->y = 1 + (nblocks-1)/65536;
gridp->x = 1 + (nblocks-1)/gridp->y;
gridp->z = 1;
*nthreadsp = nthreads;
}
int apply_gfun(float *A, float *B, int N, int opn) {
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
__apply_gfun<<<griddims,nthreads>>>(A, B, N, opn);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __toFloat(int *A, float *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (float)(A[i]);
}
}
__global__ void __longToFloat(long long *A, float *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (float)(A[i]);
}
}
__global__ void __floatToLong(float *A, long long *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (long long)(A[i]);
}
}
__global__ void __toInt(float *A, int *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (int)(A[i]);
}
}
int toFloat(int *A, float *B, int N) {
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
__toFloat<<<griddims,nthreads>>>(A, B, N);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int longToFloat(long long *A, float *B, int N) {
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
__longToFloat<<<griddims,nthreads>>>(A, B, N);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int floatToLong(float *A, long long *B, int N) {
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
__floatToLong<<<griddims,nthreads>>>(A, B, N);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int toInt(float *A, int *B, int N) {
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
__toInt<<<griddims,nthreads>>>(A, B, N);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __full(int *ir, int *ic, float *data, float *od, int nrows, int ncols, int nnz) {
int i, row, col;
float v;
int id = threadIdx.x + blockIdx.x * blockDim.x;
for (i = id; i < nnz; i += blockDim.x * gridDim.x) {
v = data[i];
row = ir[i];
col = ic[i];
od[row + col * nrows] = v;
}
}
int full(int *ir, int *ic, float *data, float *od, int nrows, int ncols, int nnz) {
int nblocks = min(32, 1+(nnz-1)/32);
int nthreads = min(1+(nnz-1)/nblocks, 1024);
__full<<<nblocks,nthreads>>>(ir, ic, data, od, nrows, ncols, nnz);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __initSeq(int *A, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = i % nrows;
}
}
int initSeq(int *A, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
__initSeq<<<griddims,nthreads>>>(A, nrows, ncols);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __apply_gfun2(float *A, float *B, float *C, int N, int opn) {
optype fn = fctns2[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = fn(A[i], B[i]);
}
}
int apply_gfun2(float *A, float *B, float *C, int N, int opn) {
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
__apply_gfun2<<<griddims,nthreads>>>(A, B, C, N, opn);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __apply_full(float *A, float *B, float *C, int N, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i]);
}
}
__global__ void __apply_right_col(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i % nrows]);
}
}
__global__ void __apply_right_row(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i / nrows]);
}
}
__global__ void __apply_left_col(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i % nrows],B[i]);
}
}
__global__ void __apply_left_row(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i / nrows],B[i]);
}
}
__global__ void __apply_right_val(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
float val = B[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],val);
}
}
__global__ void __apply_left_val(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
float val = A[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(val,B[i]);
}
}
__global__ void __set_val(float *A, float val, int length) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < length; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = val;
}
}
__global__ void __set_lval(long long *A, long long val, int length) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < length; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = val;
}
}
int set_val(float *A, float val, int length) {
int nthreads;
dim3 griddims;
setsizes(length, &griddims, &nthreads);
__set_val<<<griddims,nthreads>>>(A, val, length);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int set_ival(float *A, int val, int length) {
int nthreads;
dim3 griddims;
setsizes(length, &griddims, &nthreads);
__set_val<<<griddims,nthreads>>>(A, *((float *)&val), length);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int set_lval(long long *A, long long val, int length) {
int nthreads;
dim3 griddims;
setsizes(length, &griddims, &nthreads);
__set_lval<<<griddims,nthreads>>>(A, val, length);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
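// Elementwise binary op with broadcasting: full elementwise when the shapes match, column or row
// broadcasting when one operand is a single column/row, and scalar broadcast when one operand is
// 1x1; there is no fallback branch, so incompatible shapes launch nothing. Illustrative call
// (sizes hypothetical): apply_binop(dA, 100, 10, dB, 100, 1, dC, 0) adds column vector dB to every
// column of dA (op 0 = add).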
int apply_binop(float *A, int Anrows, int Ancols,
float *B, int Bnrows, int Bncols, float *C, int opn) {
int N = max(Anrows, Bnrows)*max(Ancols, Bncols);
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
if (Anrows == Bnrows && Ancols == Bncols) {
__apply_full<<<griddims,nthreads>>>(A, B, C, N, opn);
} else if (Anrows == Bnrows && Bncols == 1) {
__apply_right_col<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn);
} else if (Ancols == Bncols && Bnrows == 1) {
__apply_right_row<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn);
} else if (Anrows == Bnrows && Ancols == 1) {
__apply_left_col<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
} else if (Ancols == Bncols && Anrows == 1) {
__apply_left_row<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
} else if (Bnrows == 1 && Bncols == 1) {
__apply_right_val<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn);
} else if (Anrows == 1 && Ancols == 1) {
__apply_left_val<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
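// In-place ops on sparse (coordinate-form) values: __sdoprow combines each stored value A[i] with
// B[column of i], __sdopcol with B[row of i], and __sdopval with the scalar B[0]; the host wrappers
// take the scalar path when len == 1.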
__global__ void __sdoprow(int nrows, int ncols, int nnz, float *A, int *Aic, float *B, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) {
int col = Aic[i];
float oldA = A[i];
A[i] = op(oldA,B[col]);
}
}
__global__ void __sdopcol(int nrows, int ncols, int nnz, float *A, int *Air, float *B, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) {
int row = Air[i];
float oldA = A[i];
A[i] = op(oldA,B[row]);
}
}
__global__ void __sdopval(int nnz, float *A, float *B, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
float bval = B[0];
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) {
float oldA = A[i];
A[i] = op(oldA,bval);
}
}
int sdoprow(int nrows, int ncols, int nnz, float *A, int *Aic,
float *B, int len, int opn) {
int nthreads;
dim3 griddims;
setsizes(nnz, &griddims, &nthreads);
if (len > 1) {
__sdoprow<<<griddims,nthreads>>>(nrows, ncols, nnz, A, Aic, B, opn);
} else {
__sdopval<<<griddims,nthreads>>>(nnz, A, B, opn);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int sdopcol(int nrows, int ncols, int nnz, float *A, int *Air,
float *B, int len, int opn) {
int nthreads;
dim3 griddims;
setsizes(nnz, &griddims, &nthreads);
if (len > 1) {
__sdopcol<<<griddims,nthreads>>>(nrows, ncols, nnz, A, Air, B, opn);
} else {
__sdopval<<<griddims,nthreads>>>(nnz, A, B, opn);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __apply_full_int(int *A, int *B, int *C, int N, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i]);
}
}
__global__ void __apply_right_col_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i % nrows]);
}
}
__global__ void __apply_right_row_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i / nrows]);
}
}
__global__ void __apply_left_col_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i % nrows],B[i]);
}
}
__global__ void __apply_left_row_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i / nrows],B[i]);
}
}
__global__ void __apply_right_val_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int val = B[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],val);
}
}
__global__ void __apply_left_val_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int val = A[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(val,B[i]);
}
}
__global__ void __apply_full_long(long long *A, long long *B, long long *C, int N, int opn) {
loptype op = loperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i]);
}
}
__global__ void __apply_right_col_long(long long *A, long long *B, long long *C, int nrows, int ncols, int opn) {
loptype op = loperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i % nrows]);
}
}
__global__ void __apply_right_row_long(long long *A, long long *B, long long *C, int nrows, int ncols, int opn) {
loptype op = loperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i / nrows]);
}
}
__global__ void __apply_left_col_long(long long *A, long long *B, long long *C, int nrows, int ncols, int opn) {
loptype op = loperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i % nrows],B[i]);
}
}
__global__ void __apply_left_row_long(long long *A, long long *B, long long *C, int nrows, int ncols, int opn) {
loptype op = loperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i / nrows],B[i]);
}
}
__global__ void __apply_right_val_long(long long *A, long long *B, long long *C, int nrows, int ncols, int opn) {
loptype op = loperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  long long val = B[0];  // keep the full 64-bit scalar; an int here would truncate the broadcast value
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],val);
}
}
__global__ void __apply_left_val_long(long long *A, long long *B, long long *C, int nrows, int ncols, int opn) {
loptype op = loperators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  long long val = A[0];  // keep the full 64-bit scalar; an int here would truncate the broadcast value
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(val,B[i]);
}
}
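// Host-side dispatchers for the elementwise kernels above. The shapes of A and B
// select the full, column-broadcast, row-broadcast or scalar-broadcast variant;
// if no case matches, nothing is launched and the last CUDA error is returned
// unchanged.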
int apply_biniop(int *A, int Anrows, int Ancols,
int *B, int Bnrows, int Bncols,
int *C, int opn) {
int N = max(Anrows, Bnrows)*max(Ancols, Bncols);
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
if (Anrows == Bnrows && Ancols == Bncols) {
__apply_full_int<<<griddims,nthreads>>>(A, B, C, N, opn);
} else if (Anrows == Bnrows && Bncols == 1) {
__apply_right_col_int<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn);
} else if (Ancols == Bncols && Bnrows == 1) {
__apply_right_row_int<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn);
} else if (Anrows == Bnrows && Ancols == 1) {
__apply_left_col_int<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
} else if (Ancols == Bncols && Anrows == 1) {
__apply_left_row_int<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
} else if (Bnrows == 1 && Bncols == 1) {
__apply_right_val_int<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn);
} else if (Anrows == 1 && Ancols == 1) {
__apply_left_val_int<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int apply_binlop(long long *A, int Anrows, int Ancols,
long long *B, int Bnrows, int Bncols,
long long *C, int opn) {
int N = max(Anrows, Bnrows)*max(Ancols, Bncols);
int nthreads;
dim3 griddims;
setsizes(N, &griddims, &nthreads);
if (Anrows == Bnrows && Ancols == Bncols) {
__apply_full_long<<<griddims,nthreads>>>(A, B, C, N, opn);
} else if (Anrows == Bnrows && Bncols == 1) {
__apply_right_col_long<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn);
} else if (Ancols == Bncols && Bnrows == 1) {
__apply_right_row_long<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn);
} else if (Anrows == Bnrows && Ancols == 1) {
__apply_left_col_long<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
} else if (Ancols == Bncols && Anrows == 1) {
__apply_left_row_long<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
} else if (Bnrows == 1 && Bncols == 1) {
__apply_right_val_long<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn);
} else if (Anrows == 1 && Ancols == 1) {
__apply_left_val_long<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
// Implement B[I,J] = A
// indexed copy: version with one block per column
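// Suffix convention for the generated kernels: 'n' means the corresponding index
// array (I or J) is used, 'x' means the identity index is used instead, and a
// trailing 'l' marks the long long element type.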
#define COPYTOINDS2DA(DFNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __copyToInds2D##DFNAME(ETYPE *A, int lda, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[IEXPR + icol * ldb] = A[i + iblock * lda]; \
} \
} \
}
COPYTOINDS2DA(nn,I[i],J[iblock],float)
COPYTOINDS2DA(xn,i,J[iblock],float)
COPYTOINDS2DA(nx,I[i],iblock,float)
COPYTOINDS2DA(xx,i,iblock,float)
COPYTOINDS2DA(nnl,I[i],J[iblock],long long)
COPYTOINDS2DA(xnl,i,J[iblock],long long)
COPYTOINDS2DA(nxl,I[i],iblock,long long)
COPYTOINDS2DA(xxl,i,iblock,long long)
// Implement B[I,J] = A
// indexed copy: version with one thread per element
#define COPYTOINDS2DB(DFNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __copyToInds2DB##DFNAME(ETYPE *A, int lda, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[IEXPR + JEXPR * ldb] = A[irow + icol * lda]; \
} \
}
COPYTOINDS2DB(nn,I[irow],J[icol],float)
COPYTOINDS2DB(xn,irow,J[icol],float)
COPYTOINDS2DB(nx,I[irow],icol,float)
COPYTOINDS2DB(xx,irow,icol,float)
COPYTOINDS2DB(nnl,I[irow],J[icol],long long)
COPYTOINDS2DB(xnl,irow,J[icol],long long)
COPYTOINDS2DB(nxl,I[irow],icol,long long)
COPYTOINDS2DB(xxl,irow,icol,long long)
// Implement B[I,J] = A
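// Host-side dispatcher: uses the one-block-per-column kernels when the grid can
// cover every column directly, and falls back to the one-thread-per-element
// kernels otherwise. NULL index arrays select the identity ('x') variants.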
int copyToInds2D(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = min(len, max(32, min(1024, nrows)));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
__copyToInds2Dxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2Dxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyToInds2Dnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2Dnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
__copyToInds2DBxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2DBxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyToInds2DBnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2DBnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int copyToInds2DLong(long long *A, int lda, long long *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = min(len, max(32, min(1024, nrows)));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
__copyToInds2Dxxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2Dxnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyToInds2Dnxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2Dnnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
__copyToInds2DBxxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2DBxnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyToInds2DBnxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2DBnnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
// Implement B = A[I,J]
// indexed copy: version with one block per column
#define COPYFROMINDS2DA(FNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __copyFromInds2D##FNAME(ETYPE *A, int lda, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[i + iblock * ldb] = A[IEXPR + icol * lda]; \
} \
} \
}
COPYFROMINDS2DA(nn,I[i],J[iblock],float)
COPYFROMINDS2DA(xn,i,J[iblock],float)
COPYFROMINDS2DA(nx,I[i],iblock,float)
COPYFROMINDS2DA(xx,i,iblock,float)
COPYFROMINDS2DA(nnl,I[i],J[iblock],long long)
COPYFROMINDS2DA(xnl,i,J[iblock],long long)
COPYFROMINDS2DA(nxl,I[i],iblock,long long)
COPYFROMINDS2DA(xxl,i,iblock,long long)
// Implement B = A[I,J]
// indexed copy: version with one thread per element
#define COPYFROMINDS2DB(FNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __copyFromInds2DB##FNAME(ETYPE *A, int lda, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[irow + icol * ldb] = A[IEXPR + JEXPR * lda]; \
} \
}
COPYFROMINDS2DB(nn,I[irow],J[icol],float)
COPYFROMINDS2DB(xn,irow,J[icol],float)
COPYFROMINDS2DB(nx,I[irow],icol,float)
COPYFROMINDS2DB(xx,irow,icol,float)
COPYFROMINDS2DB(nnl,I[irow],J[icol],long long)
COPYFROMINDS2DB(xnl,irow,J[icol],long long)
COPYFROMINDS2DB(nxl,I[irow],icol,long long)
COPYFROMINDS2DB(xxl,irow,icol,long long)
// Implement B = A[I,J]
int copyFromInds2D(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = min(len, max(32, min(1024, nrows)));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
__copyFromInds2Dxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2Dxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyFromInds2Dnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2Dnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
__copyFromInds2DBxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2DBxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyFromInds2DBnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2DBnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int copyFromInds2DLong(long long *A, int lda, long long *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = min(len, max(32, min(1024, nrows)));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
__copyFromInds2Dxxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2Dxnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyFromInds2Dnxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2Dnnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
__copyFromInds2DBxxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2DBxnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyFromInds2DBnxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2DBnnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
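// Dense * sparse product C = A * B, with B given as coordinate triples
// (Bdata, Bir, Bic) whose entries are expected to be grouped by column. Each
// block processes a contiguous slice of nonzeros; partial row sums are flushed
// into C with atomicAdd whenever the column index changes. __dsmultx is the
// variant used by dsmult() when nrows < 128: it is launched with
// blockDim.x = nrows so each thread owns one output row.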
__global__ void __dsmult(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
float sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[i + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[i + nrows * Bic[j]], sum);
sum = 0;
}
}
}
}
__global__ void __dsmultx(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
float sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[threadIdx.x + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[threadIdx.x + nrows * Bic[j]], sum);
sum = 0;
}
}
}
int dsmult(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
__dsmultx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
__dsmult<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int dsmult_tune(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C, int nblocks, int nthreads) {
__dsmult<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int dsmultx_tune(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C, int nblocks, int nthreadsx, int nthreadsy) {
dim3 threadDim(nthreadsx, nthreadsy, 1);
__dsmultx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
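// Same layout as __dsmult, but computes C = A * B^T: the column of A is selected
// by each nonzero's column index and the product is scattered into the output
// column given by its row index, again via atomicAdd.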
__global__ void __dsmultT(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
float aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[i + nrows * Bic[j]];
}
atomicAdd(&C[i + nrows * Bir[j]], aval * Bdata[j]);
}
}
}
__global__ void __dsmultTx(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
float aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[threadIdx.x + nrows * Bic[j]];
}
atomicAdd(&C[threadIdx.x + nrows * Bir[j]], aval * Bdata[j]);
}
}
int dsmultT(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
__dsmultTx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
__dsmultT<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
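// Sums the values of a sparse matrix in coordinate form into B, keyed either by
// column index (__spsum1) or by row index (__spsum2); spsum() selects the
// variant with its last argument.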
__global__ void __spsum1(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Aic[i]], P[i]);
}
}
__global__ void __spsum2(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Air[i]], P[i]);
}
}
int spsum(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B, int n) {
int nthreads = min(128, nnz);
int nblks = min(65536, max(1, (nnz-1) / 128));
if (n == 1) {
__spsum1<<<nblks,nthreads>>>(nrows, ncols, nnz, Air, Aic, P, B);
} else {
__spsum2<<<nblks,nthreads>>>(nrows, ncols, nnz, Air, Aic, P, B);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
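// __dds / __dds0 evaluate dense-dense dot products only at the nonzero positions
// of a sparse pattern: P[j] = dot(A(:,Cir[j]), B(:,Cic[j])). __dds takes the
// pattern in coordinate form, __dds0 in compressed-column form (Cjc). The
// reductions use warp shuffles when __CUDA_ARCH__ > 200 and shared memory
// otherwise.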
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P);
__global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cic, float *P);
__global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn);
__global__ void __reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr);
#define DDS_BLKY 32
#if __CUDA_ARCH__ > 200
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
float sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
for (int i = 1; i < blockDim.x; i *= 2) {
float tmp = __shfl_down(sum, i);
if (threadIdx.x + i < blockDim.x) sum = sum + tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&P[j], sum);
}
}
}
__global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cjc, float *P) {
__shared__ float merge[32];
int jstart = ((long long)blockIdx.x) * ncols / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int aoff, boff;
float user, prod, sum, bsum;
for (int j0 = jstart; j0 < jend ; j0++) {
boff = nrows * j0;
user = B[tid + boff];
for (int j = Cjc[j0]; j < Cjc[j0+1]; j++) {
aoff = nrows * Cir[j];
prod = A[tid + aoff] * user;
sum = prod + __shfl_down(prod, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
bsum = __shfl(sum, 0);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
merge[threadIdx.x] = bsum;
}
__syncthreads();
if (threadIdx.y == 0) {
sum = merge[threadIdx.x];
sum = sum + __shfl_down(sum, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
if (threadIdx.x == 0) {
P[j] = sum;
}
}
}
}
}
#else
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
__shared__ float parts[32*DDS_BLKY];
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
float sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
parts[tid] = sum;
for (int i = 1; i < blockDim.x * blockDim.y; i *= 2) {
__syncthreads();
if (i + tid < blockDim.x * blockDim.y) {
parts[tid] = parts[tid] + parts[i + tid];
}
}
__syncthreads();
if (tid == 0) {
P[j] = parts[0];
}
__syncthreads();
}
}
__global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cjc, float *P) {}
#endif
int dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
dim3 blockDims(min(32,nrows), min(DDS_BLKY, 1+(nrows-1)/64), 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,nnz/128));
__dds<<<nblocks,blockDims>>>(nrows, nnz, A, B, Cir, Cic, P);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cic, float *P) {
dim3 blockDims(32, 32, 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,ncols/64));
__dds0<<<nblocks,blockDims>>>(nrows, ncols, A, B, Cir, Cic, P);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
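// Column-wise reductions. __reduce1op reduces each column of A with the operator
// selected from the device-side operators table, writing one value per column to
// B; __reducebin1op first combines A and B elementwise with opb and then reduces
// the result with opr into C. Warp-shuffle reductions are used when
// __CUDA_ARCH__ > 200, a padded shared-memory tile otherwise.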
#if __CUDA_ARCH__ > 200
__global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn) {
optype op = operators[opn];
int basecol = threadIdx.y + blockDim.y * blockIdx.x;
for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) {
float v = A[threadIdx.x + icol * nrows];
for (int i = threadIdx.x + blockDim.x; i < nrows; i += blockDim.x) {
v = op(v, A[i + icol * nrows]);
}
for (int i = 1; i < blockDim.x; i *= 2) {
v = op(v, __shfl_down(v, i));
}
if (threadIdx.x == 0) {
B[icol] = v;
}
}
}
#else
__global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn) {
__shared__ float parts[32][33];
optype op = operators[opn];
for (int icol = threadIdx.y + blockIdx.y * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) {
float v = A[threadIdx.x + icol * nrows];
for (int irow = threadIdx.x + blockDim.x; irow < nrows; irow += blockDim.x) {
v = op(v, A[irow + icol * nrows]);
}
parts[threadIdx.x][threadIdx.y] = v;
for (int i = 1; i < blockDim.x; i *= 2) {
if (i + threadIdx.x < blockDim.x) {
parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]);
}
}
if (threadIdx.x == 0) {
B[icol] = parts[0][threadIdx.y];
}
__syncthreads();
}
}
#endif
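// Whole-vector reduction via Thrust, used by reduce1op/reduce2op when the output
// degenerates to a single value. The opn codes handled here are 0 = sum,
// 10 = max, 11 = min.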
template<typename T>
void reducevec(int n, T *A, T *B, int opn) {
thrust::device_ptr<T> pa(A);
thrust::device_ptr<T> pb(B);
T v;
switch (opn) {
case 0 : // sum
v = thrust::reduce(pa, pa + n);
thrust::fill(pb, pb + 1, v);
break;
case 10 : // max
    v = thrust::reduce(pa, pa + n, std::numeric_limits<T>::lowest(), thrust::maximum<T>());  // lowest(), not min(): min() is the smallest positive value for floating types (lowest() needs C++11)
thrust::fill(pb, pb + 1, v);
break;
case 11: // min
v = thrust::reduce(pa, pa + n, std::numeric_limits<T>::max(), thrust::minimum<T>());
thrust::fill(pb, pb + 1, v);
break;
}
}
int reduce1op(int nrows, int ncols, float *A, float *B, int opn) {
if (ncols == 1) {
reducevec<float>(nrows, A, B, opn);
} else {
int blkx = min(32, nrows);
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
__reduce1op<<<nblks,blkdims>>>(nrows, ncols, A, B, opn);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
#if __CUDA_ARCH__ > 200
__global__ void __reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) {
optype opbf = operators[opb];
optype oprf = operators[opr];
int basecol = threadIdx.y + blockDim.y * blockIdx.x;
for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) {
float v = 0;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
v = oprf(v, opbf(A[i + icol * nrows], B[i + icol * nrows]));
}
for (int i = 1; i < blockDim.x; i *= 2) {
v = oprf(v, __shfl_down(v, i));
}
if (threadIdx.x == 0) {
C[icol] = v;
}
}
}
#else
__global__ void __reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) {
__shared__ float parts[32][33];
optype opbf = operators[opb];
optype oprf = operators[opr];
for (int icol = threadIdx.y + blockIdx.y * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) {
float v = 0;
for (int irow = threadIdx.x; irow < nrows; irow += blockDim.x) {
v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows]));
}
parts[threadIdx.x][threadIdx.y] = v;
for (int i = 1; i < blockDim.x; i *= 2) {
if (i + threadIdx.x < blockDim.x) {
parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]);
}
}
if (threadIdx.x == 0) {
C[icol] = parts[0][threadIdx.y];
}
__syncthreads();
}
}
#endif
int reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) {
int blkx = min(32, nrows);
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
__reducebin1op<<<nblks,blkdims>>>(nrows, ncols, A, B, C, opb, opr);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
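// Tiled matrix transpose. A BLOCKDIM x BLOCKDIM shared-memory tile (padded by one
// column to avoid bank conflicts) is filled from the input and written back
// transposed; the outer loops stride the tile across the whole matrix.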
#define BLOCKDIM 32
__global__ void __transpose(float *in, int instride, float *out, int outstride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x+xb + y*instride];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
}
int transpose(float *in, int instride, float *out, int outstride, int nrows, int ncols) {
int gridx = min(32, 1+(nrows-1)/256);
int gridy = min(32, 1+(ncols-1)/256);
const dim3 griddims(gridx, gridy, 1);
const dim3 blockdims(BLOCKDIM,16,1);
cudaError_t err;
int dev = -1;
cudaGetDevice(&dev);
__transpose<<<griddims,blockdims>>>(in, instride, out, outstride, nrows, ncols);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
    fprintf(stderr, "cuda error device %d in transpose of %dx%d matrix\n", dev, nrows, ncols);
return err;
}
return 0;
}
__global__ void __reduce2op(int nrows, int ncols, float *A, float *B, int opn) {
__shared__ float parts[32][33];
optype op = operators[opn];
int baserow = threadIdx.x + blockDim.x * blockIdx.x;
for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) {
float v = A[irow + threadIdx.y * nrows];
for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) {
v = op(v, A[irow + icol * nrows]);
}
parts[threadIdx.x][threadIdx.y] = v;
__syncthreads();
float newv = 0;
for (int i = 1; i < blockDim.y; i *= 2) {
if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y];
__syncthreads();
if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], newv);
__syncthreads();
}
if (threadIdx.y == 0) {
B[irow] = parts[threadIdx.x][0];
}
__syncthreads();
}
}
int reduce2op(int nrows, int ncols, float *A, float *B, int opn) {
if (nrows == 1) {
reducevec<float>(ncols, A, B, opn);
} else {
int blkx = min(32, nrows);
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
__reduce2op<<<nblks,blkdims>>>(nrows, ncols, A, B, opn);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __reducebin2op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) {
__shared__ float parts[32][33];
optype opbf = operators[opb];
optype oprf = operators[opr];
int baserow = threadIdx.x + blockDim.x * blockIdx.x;
for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) {
float v = opbf(A[irow + threadIdx.y * nrows], B[irow + threadIdx.y * nrows]);
for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) {
v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows]));
}
parts[threadIdx.x][threadIdx.y] = v;
__syncthreads();
float newv = 0;
for (int i = 1; i < blockDim.y; i *= 2) {
if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y];
__syncthreads();
if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], newv);
__syncthreads();
}
if (threadIdx.y == 0) {
C[irow] = parts[threadIdx.x][0];
}
__syncthreads();
}
}
int reducebin2op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) {
int blkx = min(32, nrows);
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
__reducebin2op<<<nblks,blkdims>>>(nrows, ncols, A, B, C, opb, opr);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
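// __embedmat2d packs each float of an nrows x ncols matrix into a long long whose
// low 32 bits are the float's bit pattern remapped so that signed integer
// comparison orders the same way as float comparison (negative values become the
// negated magnitude), and whose high 32 bits hold the 1-based column index, so
// sorting the packed keys sorts within columns. __embedmat takes the high word
// from an explicit int array b instead; __extractmat2d / __extractmat undo the
// value transformation (and __extractmat also recovers the high-word indices
// into b).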
__global__ void __embedmat2d(float *a, long long *b, int nrows, int ncols) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) {
float v = a[i];
int vi = *((int *)&v);
if (vi & signbit) {
vi = -(vi & mag);
}
b[i] = (long long)vi + (((long long)(i/nrows+1))<<32);
}
}
__global__ void __embedmat(float *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
float v = a[i];
int vi = *((int *)&v);
if (vi & signbit) {
vi = -(vi & mag);
}
c[i] = (long long)vi + (((long long)b[i])<<32);
}
}
int embedmat2d(float *a, long long *b, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
__embedmat2d<<<griddims,nthreads>>>(a, b, nrows, ncols);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int embedmat(float *a, int *b, long long *c, int n) {
int nthreads;
dim3 griddims;
setsizes(n, &griddims, &nthreads);
__embedmat<<<griddims,nthreads>>>(a, b, c, n);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __extractmat2d(float *a, long long *b, int nrows, int ncols) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&b[i]);
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((float *)&vi);
}
}
__global__ void __extractmat(float *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&c[i]);
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((float *)&vi);
b[i] = *(((int *)&c[i])+1);
}
}
int extractmat2d(float *a, long long *b, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizes(nrows*ncols, &griddims, &nthreads);
__extractmat2d<<<griddims,nthreads>>>(a, b, nrows, ncols);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int extractmat(float *a, int *b, long long *c, int n) {
int nthreads;
dim3 griddims;
setsizes(n, &griddims, &nthreads);
__extractmat<<<griddims,nthreads>>>(a, b, c, n);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
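// Thrust-based sort wrappers. fsort2d sorts each column of a dense matrix
// together with its value array; the remaining routines sort flat key (and
// optionally value) arrays for float, int, long long and double keys, ascending
// when asc > 0 and descending otherwise.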
int fsort2d(float *pkeys, unsigned int *pvals, int nrows, int ncols, int asc) {
for (int i = 0; i < ncols; i++) {
thrust::device_ptr<float> keys(pkeys+i*nrows);
thrust::device_ptr<unsigned int> vals(pvals+i*nrows);
if (asc > 0) {
thrust::sort_by_key(keys, keys + nrows, vals);
} else {
thrust::sort_by_key(keys, keys + nrows, vals, thrust::greater<float>());
}
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int isort(int *pkeys, int N, int asc) {
thrust::device_ptr<int> keys(pkeys);
if (asc > 0) {
thrust::sort(keys, keys + N);
} else {
thrust::sort(keys, keys + N, thrust::greater<int>());
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int fsort(float *pkeys, int N, int asc) {
thrust::device_ptr<float> keys(pkeys);
if (asc > 0) {
thrust::sort(keys, keys + N);
} else {
    thrust::sort(keys, keys + N, thrust::greater<float>());
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int isortk(int *pkeys, unsigned int *pvals, int N, int asc) {
thrust::device_ptr<int> keys(pkeys);
thrust::device_ptr<unsigned int> vals(pvals);
if (asc > 0) {
thrust::sort_by_key(keys, keys + N, vals);
} else {
thrust::sort_by_key(keys, keys + N, vals, thrust::greater<int>());
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int fsorts(float *pkeys, unsigned int *pvals, int *jc, int m, int asc) {
for (int i = 0; i < m; i++) {
thrust::device_ptr<float> keys(pkeys + jc[i]);
thrust::device_ptr<unsigned int> vals(pvals + jc[i]);
int b = jc[i+1] - jc[i];
if (asc > 0) {
thrust::sort_by_key(keys, keys + b, vals);
} else {
thrust::sort_by_key(keys, keys + b, vals, thrust::greater<float>());
}
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int dsortk(double *pkeys, unsigned int *pvals, int N, int asc) {
thrust::device_ptr<double> keys(pkeys);
thrust::device_ptr<unsigned int> vals(pvals);
if (asc > 0) {
thrust::sort_by_key(keys, keys + N, vals);
} else {
thrust::sort_by_key(keys, keys + N, vals, thrust::greater<double>());
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int lsortk(long long *pkeys, unsigned int *pvals, int N, int asc) {
thrust::device_ptr<long long> keys(pkeys);
thrust::device_ptr<unsigned int> vals(pvals);
if (asc > 0) {
thrust::sort_by_key(keys, keys + N, vals);
} else {
thrust::sort_by_key(keys, keys + N, vals, thrust::greater<long long>());
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int lsort(long long *pkeys, int N, int asc) {
thrust::device_ptr<long long> keys(pkeys);
if (asc > 0) {
thrust::sort(keys, keys + N);
} else {
thrust::sort(keys, keys + N, thrust::greater<long long>());
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
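// i4sort reinterprets the key array as packed 4-int structs and sorts them
// lexicographically on (x, y, z, w); i3sortk does the same for 3-int keys with an
// attached value array.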
typedef struct lll {
int x;
int y;
int z;
int w;
} lllint;
struct cmp_lllint_key_asc
{
__host__ __device__ inline bool operator()(const lllint &lhs, const lllint &rhs) const
{
if (lhs.x < rhs.x) return true;
if (lhs.x > rhs.x) return false;
if (lhs.y < rhs.y) return true;
if (lhs.y > rhs.y) return false;
if (lhs.z < rhs.z) return true;
if (lhs.z > rhs.z) return false;
return (lhs.w < rhs.w);
}
};
struct cmp_lllint_key_desc
{
__host__ __device__ inline bool operator()(const lllint &lhs, const lllint &rhs) const
{
if (lhs.x > rhs.x) return true;
if (lhs.x < rhs.x) return false;
if (lhs.y > rhs.y) return true;
if (lhs.y < rhs.y) return false;
if (lhs.z > rhs.z) return true;
if (lhs.z < rhs.z) return false;
return (lhs.w > rhs.w);
}
};
int i4sort(int *pkeys0, int N, int asc) {
lllint *pkeys = (lllint *)pkeys0;
thrust::device_ptr<lllint> keys(pkeys);
if (asc > 0) {
thrust::sort(keys, keys + N, cmp_lllint_key_asc());
} else {
thrust::sort(keys, keys + N, cmp_lllint_key_desc());
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
typedef struct i3 {
int x;
int y;
int z;
} i3struct;
struct cmp_i3struct_key_asc
{
__host__ __device__ inline bool operator()(const i3struct &lhs, const i3struct &rhs) const
{
if (lhs.x < rhs.x) return true;
if (lhs.x > rhs.x) return false;
if (lhs.y < rhs.y) return true;
if (lhs.y > rhs.y) return false;
return (lhs.z < rhs.z);
}
};
struct cmp_i3struct_key_desc
{
__host__ __device__ inline bool operator()(const i3struct &lhs, const i3struct &rhs) const
{
if (lhs.x > rhs.x) return true;
if (lhs.x < rhs.x) return false;
if (lhs.y > rhs.y) return true;
if (lhs.y < rhs.y) return false;
return (lhs.z > rhs.z);
}
};
int i3sortk(int *pkeys0, unsigned int *pvals, int N, int asc) {
i3struct *pkeys = (i3struct *)pkeys0;
thrust::device_ptr<i3struct> keys(pkeys);
thrust::device_ptr<unsigned int> vals(pvals);
if (asc > 0) {
thrust::sort_by_key(keys, keys + N, vals, cmp_i3struct_key_asc());
} else {
thrust::sort_by_key(keys, keys + N, vals, cmp_i3struct_key_desc());
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
// This path may break. If so look for radixsort_api.h in /usr/local/cuda/include
// and fix the path below.
using namespace thrust::system::cuda::detail::detail::b40c_thrust;
int fsortsizex(int N) {
RadixSortingEnactor<float,unsigned int> sorter(N);
return sorter.SpineElements();
}
int lsortsizex(int N) {
RadixSortingEnactor<long long,unsigned int> sorter(N);
return sorter.SpineElements();
}
int fsort2dx(float *pkeys, unsigned int *pvals, float *tkeys, unsigned int *tvals,
int *ispine, bool * bflags, int nrows, int ncols, int asc) {
int i;
  cudaError_t err = cudaSuccess;  // returned unchanged if ncols == 0
RadixSortingEnactor<float,unsigned int> sorter(nrows);
RadixSortStorage<float,unsigned int> storage;
storage.d_spine = ispine;
storage.d_from_alt_storage = bflags;
storage.using_alternate_storage = false;
for (i = 0; i < ncols; i++) {
storage.d_keys = pkeys+i*nrows;
storage.d_values = pvals+i*nrows;
storage.d_alt_keys = tkeys;
storage.d_alt_values = tvals;
if (asc == 0) {
thrust::device_ptr<float> keys(storage.d_keys);
thrust::device_ptr<unsigned int> vals(storage.d_values);
thrust::reverse(keys, keys+nrows);
thrust::reverse(vals, vals+nrows);
}
cudaDeviceSynchronize();
sorter.EnactSort(storage);
cudaDeviceSynchronize();
err = cudaGetLastError();
    if (err != cudaSuccess) return err;
if (asc == 0) {
thrust::device_ptr<float> keys(storage.d_keys);
thrust::device_ptr<unsigned int> vals(storage.d_values);
thrust::reverse(keys, keys+nrows);
thrust::reverse(vals, vals+nrows);
}
cudaDeviceSynchronize();
if (storage.d_keys == tkeys) {
cudaMemcpy(pkeys+i*nrows, tkeys, nrows*sizeof(float), cudaMemcpyDeviceToDevice);
}
if (storage.d_values == tvals) {
cudaMemcpy(pvals+i*nrows, tvals, nrows*sizeof(unsigned int), cudaMemcpyDeviceToDevice);
}
}
return err;
}
int lsortx(long long *pkeys, unsigned int *pvals, long long *tkeys, unsigned int *tvals, int *ispine, bool * bflags, int N, int asc) {
RadixSortingEnactor<long long,unsigned int> sorter(N);
RadixSortStorage<long long,unsigned int> storage;
storage.d_keys = pkeys;
storage.d_values = pvals;
storage.d_alt_keys = tkeys;
storage.d_alt_values = tvals;
storage.d_spine = ispine;
storage.d_from_alt_storage = bflags;
if (asc == 0) {
thrust::device_ptr<long long> keys(storage.d_keys);
thrust::device_ptr<unsigned int> vals(storage.d_values);
thrust::reverse(keys, keys+N);
thrust::reverse(vals, vals+N);
}
cudaDeviceSynchronize();
sorter.EnactSort(storage);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (asc == 0) {
thrust::device_ptr<long long> keys(storage.d_keys);
thrust::device_ptr<unsigned int> vals(storage.d_values);
thrust::reverse(keys, keys+N);
thrust::reverse(vals, vals+N);
}
return err;
}
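// __stratify bins the input into 32 strata using a 5-level binary search over the
// boundary values in strata[], buffering up to 32 elements per stratum in shared
// memory and flushing full buffers to b at offsets reserved with atomicAdd on the
// per-stratum counters in bi.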
__global__ void __stratify(float *strata, int n, float *a, float *b, unsigned int *bi, int stride) {
__shared__ float ss[32];
__shared__ unsigned int ibin[32];
__shared__ unsigned int ebin[32];
__shared__ unsigned int todo[32];
__shared__ float bins[64][33];
__shared__ unsigned int topush;
int tid = threadIdx.x;
ss[tid] = strata[tid];
ibin[tid] = 0;
for (int i = 0; i < n; i += blockDim.x * gridDim.x) {
int ii = i + tid + blockDim.x * blockIdx.x;
if (tid == 0) topush = 0;
if (ii < n) {
float v = a[ii];
int j = 1;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = j - 32;
int k = atomicInc(&ibin[j], 256);
bins[k][j] = v;
if (k == 31) {
k = atomicInc(&topush, 1024);
todo[k] = j;
}
}
if (ibin[tid] >= 32) {
ebin[tid] = atomicAdd(&bi[tid], 32);
ibin[tid] = ibin[tid] - 32;
}
for (int k = 0; k < topush; k++) {
int j = todo[k];
b[j*stride + ebin[j] + tid] = bins[ibin[j] + tid][j];
}
}
ebin[tid] = atomicAdd(&bi[tid], ibin[tid]);
for (int j = 0; j < 32; j++) {
if (tid < ibin[j]) {
b[j*stride + ebin[j] + tid] = bins[tid][j];
}
}
}
int stratify(float *strata, int n, float *a, float *b, unsigned int *bi, int stride) {
__stratify<<<40,32>>>(strata, n, a, b, bi, stride);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
#define SNDVALS 256
#define SNDGRPS 4
#define SNTHREADS 1024
#define SBIGBLK (4*1024)
__global__ void __stratifycounts(float *strata, int n, float *a, unsigned int *bi) {
__shared__ unsigned int ic[SNDVALS][SNDGRPS];
__shared__ float ss[SNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK);
int tid = threadIdx.x + threadIdx.y * blockDim.x;
if (threadIdx.y == 0) {
ss[threadIdx.x] = strata[threadIdx.x];
}
for (int i = istart; i < iend; i += SBIGBLK) {
__syncthreads();
if (threadIdx.y < SNDGRPS) {
ic[threadIdx.x][threadIdx.y] = 0;
}
__syncthreads();
for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) {
float v = a[k];
int j = 0;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = j - SNDVALS + 1;
atomicInc(&ic[j][threadIdx.y], 65536*32767);
}
__syncthreads();
if (threadIdx.y == 0) {
bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3];
}
bibase += SNDVALS;
}
}
int stratifycounts(float *strata, int n, float *a, unsigned int *bi) {
const dim3 blockdims(SNDVALS, SNTHREADS/SNDVALS, 1);
const dim3 griddims(8,1,1);
__stratifycounts<<<griddims,blockdims>>>(strata, n, a, bi);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
#define RNDVALS 256
#define RNTHREADS 256
#define RNDBITS 8
#define RBIGBLK (4*1024)
__global__ void __radixcounts(float *a, int n, int digit, unsigned int *bi) {
__shared__ unsigned int ic[RNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int tid = threadIdx.x;
int bibase = RNDVALS * (blockIdx.x + istart / RBIGBLK);
for (int i = istart; i < iend; i += RBIGBLK) {
__syncthreads();
ic[threadIdx.x] = 0;
__syncthreads();
for (int j = i + tid; j < min(iend, i+tid+RBIGBLK); j += RNTHREADS) {
float v = a[j];
unsigned char *cv = (unsigned char *)&v;
atomicInc(&ic[cv[digit]], 65536*32767);
}
__syncthreads();
bi[bibase + threadIdx.x] = ic[threadIdx.x];
bibase += RNDVALS;
}
}
int radixcounts(float *a, int n, int digit, unsigned int *bi) {
const dim3 blockdims(RNTHREADS,1,1);
const dim3 griddims(32,1,1);
__radixcounts<<<griddims,blockdims>>>(a, n, digit, bi);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
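// GENDISTS generates all-pairs distance kernels that accumulate
// C(i,j) += f(A(i,:), B(j,:)) over the d columns of A and B. Each thread keeps a
// 32-wide strip of C in registers (R00..R31) and B values are rotated around the
// warp with __shfl so every thread visits every column of its tile. Instantiated
// below for L1, L2, Minkowski, L-infinity and max-plus ("msum") accumulators;
// when __CUDA_ARCH__ <= 200 the kernels are stubs that only print a warning.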
#if __CUDA_ARCH__ > 200
#define GENDISTS(DFNAME,DFUNC) \
__global__ void DFNAME(float *A, int lda, float *B, int ldb, float *C, \
int ldc, int d, int nrows, int ncols, float p) { \
int xblk = blockDim.x * (threadIdx.y + blockIdx.y * blockDim.y); \
int yblk = blockDim.x * (threadIdx.z + blockIdx.z * blockDim.z); \
float va, vb, vc; \
float R00, R01, R02, R03, R04, R05, R06, R07, R08, R09, R10, R11, R12, R13, R14, R15, \
R16, R17, R18, R19, R20, R21, R22, R23, R24, R25, R26, R27, R28, R29, R30, R31; \
int xi = threadIdx.x + xblk; \
int yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {R00 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R01 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R02 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R03 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R04 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R05 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R06 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R07 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R08 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R09 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R10 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R11 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R12 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R13 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R14 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R15 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R16 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R17 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R18 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R19 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R20 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R21 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R22 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R23 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R24 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R25 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R26 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R27 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R28 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R29 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R30 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R31 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
} \
yi = threadIdx.x + yblk; \
int nbr = (threadIdx.x + 1) % blockDim.x; \
for (int i = 0; i < d; i++) { \
va = (xi < nrows) ? A[xi + i * lda] : 0; \
vb = (yi < ncols) ? B[yi + i * ldb] : 0; \
vc=R00; DFUNC; R00=vc; vb=__shfl(vb, nbr); vc=R01; DFUNC; R01=vc; vb=__shfl(vb, nbr); \
vc=R02; DFUNC; R02=vc; vb=__shfl(vb, nbr); vc=R03; DFUNC; R03=vc; vb=__shfl(vb, nbr); \
vc=R04; DFUNC; R04=vc; vb=__shfl(vb, nbr); vc=R05; DFUNC; R05=vc; vb=__shfl(vb, nbr); \
vc=R06; DFUNC; R06=vc; vb=__shfl(vb, nbr); vc=R07; DFUNC; R07=vc; vb=__shfl(vb, nbr); \
vc=R08; DFUNC; R08=vc; vb=__shfl(vb, nbr); vc=R09; DFUNC; R09=vc; vb=__shfl(vb, nbr); \
vc=R10; DFUNC; R10=vc; vb=__shfl(vb, nbr); vc=R11; DFUNC; R11=vc; vb=__shfl(vb, nbr); \
vc=R12; DFUNC; R12=vc; vb=__shfl(vb, nbr); vc=R13; DFUNC; R13=vc; vb=__shfl(vb, nbr); \
vc=R14; DFUNC; R14=vc; vb=__shfl(vb, nbr); vc=R15; DFUNC; R15=vc; vb=__shfl(vb, nbr); \
vc=R16; DFUNC; R16=vc; vb=__shfl(vb, nbr); vc=R17; DFUNC; R17=vc; vb=__shfl(vb, nbr); \
vc=R18; DFUNC; R18=vc; vb=__shfl(vb, nbr); vc=R19; DFUNC; R19=vc; vb=__shfl(vb, nbr); \
vc=R20; DFUNC; R20=vc; vb=__shfl(vb, nbr); vc=R21; DFUNC; R21=vc; vb=__shfl(vb, nbr); \
vc=R22; DFUNC; R22=vc; vb=__shfl(vb, nbr); vc=R23; DFUNC; R23=vc; vb=__shfl(vb, nbr); \
vc=R24; DFUNC; R24=vc; vb=__shfl(vb, nbr); vc=R25; DFUNC; R25=vc; vb=__shfl(vb, nbr); \
vc=R26; DFUNC; R26=vc; vb=__shfl(vb, nbr); vc=R27; DFUNC; R27=vc; vb=__shfl(vb, nbr); \
vc=R28; DFUNC; R28=vc; vb=__shfl(vb, nbr); vc=R29; DFUNC; R29=vc; vb=__shfl(vb, nbr); \
vc=R30; DFUNC; R30=vc; vb=__shfl(vb, nbr); vc=R31; DFUNC; R31=vc; vb=__shfl(vb, nbr); \
} \
yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R00;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R01;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R02;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R03;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R04;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R05;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R06;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R07;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R08;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R09;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R10;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R11;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R12;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R13;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R14;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R15;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R16;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R17;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R18;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R19;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R20;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R21;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R22;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R23;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R24;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R25;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R26;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R27;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R28;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R29;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R30;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R31;} yi = (yi+1) % blockDim.x; \
} \
}
GENDISTS(__l1dist,vc+=abs(va-vb))
GENDISTS(__l2dist,vc+=(va-vb)*(va-vb))
GENDISTS(__minkowskidist,vc+=pow(abs(va-vb),p))
GENDISTS(__linfdist,vc=max(vc,abs(va-vb)))
GENDISTS(__msum,vc=max(vc,va+vb))
#else
__global__ void __l1dist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, Lidist not supported on arch <= 200\n");
}
__global__ void __l2dist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, L2dist not supported on arch <= 200\n");
}
__global__ void __minkowskidist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, Minkowski distance not supported on arch <= 200\n");
}
__global__ void __linfdist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, Max-abs distance not supported on arch <= 200\n");
}
__global__ void __msum(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, Max-sum multiply not supported on arch <= 200\n");
}
#endif
int dists(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
// cudaSetDevice(ithread);
if (p == 0.0f) {
__linfdist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 1.0f) {
__l1dist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 2.0f) {
__l2dist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else {
__minkowskidist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int maxsumx(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
__msum<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, 0);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
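// Segmented primitives over row ranges delimited by the jc pointer array:
// __cumsumg computes a cumulative sum within each segment of each column, and
// __maxming finds the max (dir = 1) or min (dir = 0) and its index per segment.
// __maxmini_cols / __maxmini_rows are the unsegmented per-column / per-row
// variants. All of them rely on warp shuffles and are compiled to empty stubs
// when __CUDA_ARCH__ <= 200.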
#if __CUDA_ARCH__ > 200
template<class T>
__global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {
__shared__ T tots[32];
int start, end, ij;
  int bid = blockIdx.y + blockIdx.z * gridDim.y; // column index (gridDim.y, matching __maxming below)
T sum, tsum, tmp, ttot, ttot0;
if (bid < ncols) {
for (ij = blockIdx.x; ij < m; ij += gridDim.x) {
start = jc[ij] + bid * nrows;
end = jc[ij+1] + bid * nrows;
sum = 0;
for (int i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) {
tsum = in[i];
tmp = __shfl_up(tsum, 1);
if (threadIdx.x >= 1) tsum += tmp;
tmp = __shfl_up(tsum, 2);
if (threadIdx.x >= 2) tsum += tmp;
tmp = __shfl_up(tsum, 4);
if (threadIdx.x >= 4) tsum += tmp;
tmp = __shfl_up(tsum, 8);
if (threadIdx.x >= 8) tsum += tmp;
tmp = __shfl_up(tsum, 16);
if (threadIdx.x >= 16) tsum += tmp;
ttot = __shfl(tsum, min(end-start-1, 31));
ttot0 = ttot;
__syncthreads();
if (threadIdx.x == threadIdx.y) {
tots[threadIdx.y] = ttot;
}
__syncthreads();
for (int k = 1; k < blockDim.y; k *= 2) {
if (threadIdx.y >= k) {
if (threadIdx.x == threadIdx.y - k) {
ttot += tots[threadIdx.x];
}
}
__syncthreads();
if (threadIdx.y >= k) {
ttot = __shfl(ttot, threadIdx.y - k);
if (threadIdx.x == threadIdx.y) {
tots[threadIdx.y] = ttot;
}
}
__syncthreads();
}
out[i] = sum + tsum + ttot - ttot0;
if (threadIdx.x == blockDim.y - 1) {
ttot = tots[threadIdx.x];
}
__syncthreads();
ttot = __shfl(ttot, blockDim.y - 1);
sum += ttot;
}
}
}
}
template<class T>
__global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T maxminv, int dir) {
__shared__ T maxv[32];
__shared__ int maxi[32];
T vmax, vtmp;
int imax, itmp, i, k, start, end, ij;
int bid = blockIdx.y + blockIdx.z * gridDim.y;
if (bid < ncols) {
for (ij = blockIdx.x; ij < m; ij += gridDim.x) {
vmax = maxminv;
imax = -1;
start = jc[ij];
end = jc[ij+1];
for (i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) {
vtmp = in[i + nrows * bid];
itmp = i;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
for (k = 1; k < blockDim.x; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
vmax = __shfl(vmax, blockDim.x - 1);
imax = __shfl(imax, blockDim.x - 1);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
maxv[threadIdx.y] = vmax;
maxi[threadIdx.y] = imax;
}
__syncthreads();
if (threadIdx.y == 0) {
vmax = maxv[threadIdx.x];
imax = maxi[threadIdx.x];
}
__syncthreads();
if (threadIdx.y == 0) {
for (k = 1; k < blockDim.y; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
if (threadIdx.x == blockDim.y - 1) {
out[ij + m * bid] = vmax;
outi[ij + m * bid] = imax;
}
}
}
}
}
template<class T>
__global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T maxminv, int dir) {
__shared__ T maxv[32];
__shared__ int maxi[32];
T vmax, vtmp;
int imax, itmp, i, k;
int bid = blockIdx.x + blockIdx.y * gridDim.x;
if (bid < ncols) {
vmax = maxminv;
imax = -1;
for (i = threadIdx.x + threadIdx.y * blockDim.x; i < nrows; i += blockDim.x * blockDim.y) {
vtmp = in[i + nrows * bid];
itmp = i;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
for (k = 1; k < blockDim.x; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
vmax = __shfl(vmax, blockDim.x - 1);
imax = __shfl(imax, blockDim.x - 1);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
maxv[threadIdx.y] = vmax;
maxi[threadIdx.y] = imax;
}
__syncthreads();
if (threadIdx.y == 0) {
vmax = maxv[threadIdx.x];
imax = maxi[threadIdx.x];
}
__syncthreads();
if (threadIdx.y == 0) {
for (k = 1; k < blockDim.y; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
if (threadIdx.x == blockDim.y - 1) {
out[bid] = vmax;
outi[bid] = imax;
}
}
__syncthreads();
}
}
// Not very fast for wide matrices
template<class T>
__global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {
T vmax, vtmp;
int imax, itmp, i, j;
for (i = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * blockIdx.x); i < nrows; i += blockDim.x * blockDim.y * gridDim.x) {
if (ncols > 0) {
vmax = in[i];
imax = 0;
for (j = 1; j < ncols; j++) {
vtmp = in[i + nrows * j];
itmp = j;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
out[i] = vmax;
outi[i] = imax;
}
}
}
#else
template<class T>
__global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {}
template<class T>
__global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) {}
template<class T>
__global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) {}
template<class T>
__global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {}
#endif
void setinds(int ncols, int &nc1, int &nc2) {
if (ncols < 65536) {
nc1 = ncols;
nc2 = 1;
} else {
nc1 = (int)sqrt((double)ncols);
nc2 = 1 + (ncols-1)/nc1;
}
}
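// Added illustration (not part of the original source): setinds() factors a
// column count into two grid dimensions so that each stays under the 65536
// CUDA grid-size limit while still covering every column. A minimal sketch of
// the intended invariant, assuming setinds is only used for launch sizing:
static inline bool setinds_covers(int ncols) {
  int nc1, nc2;
  setinds(ncols, nc1, nc2);
  // e.g. ncols = 100000 gives nc1 = 316, nc2 = 317, and 316 * 317 >= 100000
  return nc1 < 65536 && nc2 < 65536 && ((long long)nc1 * nc2 >= ncols);
}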
template<class T>
int cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {
int nc1, nc2;
setinds(ncols, nc1, nc2);
dim3 grid(min(64, m), nc1, nc2);
int ny = min(32, 1+nrows/m/32);
dim3 tblock(32, ny, 1);
__cumsumg<T><<<grid,tblock>>>(in, out, jc, nrows, ncols, m);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int cumsumgf(float *in, float *out, int *jc, int nrows, int ncols, int m) {
return cumsumg<float>(in, out, jc, nrows, ncols, m);
}
int cumsumgi(int *in, int *out, int *jc, int nrows, int ncols, int m) {
return cumsumg<int>(in, out, jc, nrows, ncols, m);
}
template<class T>
int maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) {
int nc1, nc2;
setinds(ncols, nc1, nc2);
dim3 grid(min(64, m), nc1, nc2);
int ny = min(32, 1+nrows/m/32);
dim3 tblock(32, ny, 1);
__maxming<T><<<grid,tblock>>>(in, out, outi, jc, nrows, ncols, m, minv, dir);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
// JFC: problem here when ncols is not a multiple of 16 and nrows < 32.
template<class T>
int maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) {
int nc1, nc2;
setinds(ncols, nc1, nc2);
dim3 grid(nc1, nc2, 1);
int ny = min(32, 1+nrows/32);
dim3 tblock(32, ny, 1);
__maxmini_cols<T><<<grid,tblock>>>(in, out, outi, nrows, ncols, minv, dir);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
template<class T>
int maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {
int nb = min(32,1+nrows/32);
dim3 grid(nb,1,1);
int ny = min(32, 1+nrows/nb/32);
dim3 tblock(32, ny, 1);
__maxmini_rows<T><<<grid,tblock>>>(in, out, outi, nrows, ncols, dir);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int maxgf(float *in, float *out, int *outi, int *jc, int nrows, int ncols, int m) {
return maxming<float>(in, out, outi, jc, nrows, ncols, m, -3e38f, 1);
}
int maxgi(int *in, int *out, int *outi, int *jc, int nrows, int ncols, int m) {
return maxming<int>(in, out, outi, jc, nrows, ncols, m, 0x80000000, 1);
}
int mingf(float *in, float *out, int *outi, int *jc, int nrows, int ncols, int m) {
return maxming<float>(in, out, outi, jc, nrows, ncols, m, 3e38f, 0);
}
int mingi(int *in, int *out, int *outi, int *jc, int nrows, int ncols, int m) {
return maxming<int>(in, out, outi, jc, nrows, ncols, m, 0x7fffffff, 0);
}
int maxif(float *in, float *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<float>(in, out, outi, nrows, ncols, -3e38f, 1);
} else if (dir == 2) {
return maxmini_rows<float>(in, out, outi, nrows, ncols, 1);
} else {
return -1;
}
}
int maxii(int *in, int *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<int>(in, out, outi, nrows, ncols, 0x80000000, 1);
} else if (dir == 2) {
return maxmini_rows<int>(in, out, outi, nrows, ncols, 1);
} else {
return -1;
}
}
int maxil(long long *in, long long *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<long long>(in, out, outi, nrows, ncols, LLONG_MIN, 1);
} else if (dir == 2) {
return maxmini_rows<long long>(in, out, outi, nrows, ncols, 1);
} else {
return -1;
}
}
int minif(float *in, float *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<float>(in, out, outi, nrows, ncols, 3e38f, 0);
} else if (dir == 2) {
return maxmini_rows<float>(in, out, outi, nrows, ncols, 0);
} else {
return -1;
}
}
int minii(int *in, int *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<int>(in, out, outi, nrows, ncols, 0x7fffffff, 0);
} else if (dir == 2) {
return maxmini_rows<int>(in, out, outi, nrows, ncols, 0);
} else {
return -1;
}
}
int minil(long long *in, long long *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<long long>(in, out, outi, nrows, ncols, LLONG_MAX, 0);
} else if (dir == 2) {
return maxmini_rows<long long>(in, out, outi, nrows, ncols, 0);
} else {
return -1;
}
}
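// Added usage sketch (not in the original source): for the maxif/minif family,
// dir == 1 reduces down each column (one value and argmax/argmin per column),
// dir == 2 reduces along each row (one per row). Buffer names below are
// hypothetical caller-allocated device pointers.
static int example_column_max(float *d_in, float *d_val, int *d_idx, int nrows, int ncols) {
  return maxif(d_in, d_val, d_idx, nrows, ncols, 1); // column-wise max and argmax
}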
__global__ void __dmv(float *a, int nrows, int ncols, float *b, float *c) {
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
float accum = 0.0f;
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
accum += a[tx+nrows*ty] * b[ty];
}
atomicAdd(&c[tx], accum);
}
}
#if __CUDA_ARCH__ > 200
__global__ void __dmvt(float *a, int nrows, int ncols, float *b, float *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
float accum = 0.0f;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
for (int i = 1; i < blockDim.x; i *= 2) {
float tmp = __shfl_down(accum, i);
if (threadIdx.x + i < blockDim.x) accum += tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&c[ty], accum);
}
}
}
#else
__global__ void __dmvt(float *a, int nrows, int ncols, float *b, float *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
float accum = 0.0f;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
atomicAdd(&c[ty], accum);
}
}
#endif
__global__ void __dmv0(float *a, int nrows, int ncols, int tstep, float *b, float *c) {
float accum = 0.0f;
int tx = threadIdx.x + blockDim.x * blockIdx.x;
if (tx < tstep) {
for (; tx < nrows*ncols; tx += tstep) {
int icol = tx / nrows;
accum += a[tx] * b[icol];
}
int irow = tx % nrows;
atomicAdd(&c[irow], accum);
}
}
int dmv(float *a, int nrows, int ncols, float *b, float *c, int trans) {
if (trans == 1) {
int ntx = min(32, nrows);
int nty = min(32, ncols);
int nbx = min(256, 1 + nrows/ntx/8);
int nby = min(256, 1 + ncols/nty/2);
dim3 blockdims(ntx,nty,1);
dim3 griddims(nbx,nby,1);
__dmvt<<<griddims,blockdims>>>(a, nrows, ncols, b, c);
} else {
int ntx = min(1024, nrows*ncols);
int nbx = max(1+(nrows-1)/ntx, nrows*ncols/ntx/32);
int tstep = (ntx*nbx/nrows)*nrows;
__dmv0<<<nbx,ntx>>>(a, nrows, ncols, tstep, b, c);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
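// Added usage sketch (not in the original source): dmv accumulates into c via
// atomicAdd, so zero c first for a plain product. trans == 1 gives c = A^T * b
// (b has nrows entries, c has ncols), otherwise c = A * b (b has ncols entries,
// c has nrows). Buffer names are hypothetical device pointers.
static int example_dmv(float *d_A, float *d_b, float *d_c, int nrows, int ncols) {
  return dmv(d_A, nrows, ncols, d_b, d_c, 0); // c (nrows) += A (nrows x ncols) * b (ncols)
}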
#define ACCUM_KERNEL(TI,TJ,TV,TS,II,IJ,IV) \
__global__ void __accum(TI, TJ, TV, TS, int m, int nrows) { \
int istart = ((int)(((long long)blockIdx.x) * m / gridDim.x)); \
int iend = ((int)(((long long)blockIdx.x + 1) * m / gridDim.x)); \
istart = (istart / 32) * 32; \
if (blockIdx.x != gridDim.x - 1) { \
iend = (iend / 32) * 32; \
} \
for (int i = istart + threadIdx.x; i < iend; i+= blockDim.x) { \
atomicAdd(&S[II + nrows * IJ], IV); \
} \
} \
int accum(TI, TJ, TV, TS, int m, int nrows) { \
int nthreads = min(512, m); \
int nblocks = max(1, min(65535, m/nthreads/8)); \
__accum<<<nblocks,nthreads>>>(I,J,V,S,m,nrows); \
cudaDeviceSynchronize(); \
cudaError_t err = cudaGetLastError(); \
return err; \
}
ACCUM_KERNEL(int*I, int*J, float*V, float*S, I[i], J[i], V[i])
ACCUM_KERNEL(int*I, int J, float*V, float*S, I[i], J, V[i])
ACCUM_KERNEL(int I, int*J, float*V, float*S, I, J[i], V[i])
ACCUM_KERNEL(int*I, int*J, float V, float*S, I[i], J[i], V)
ACCUM_KERNEL(int*I, int J, float V, float*S, I[i], J, V)
ACCUM_KERNEL(int I, int*J, float V, float*S, I, J[i], V)
ACCUM_KERNEL(int*I, int*J, int*V, int*S, I[i], J[i], V[i])
ACCUM_KERNEL(int*I, int J, int*V, int*S, I[i], J, V[i])
ACCUM_KERNEL(int I, int*J, int*V, int*S, I, J[i], V[i])
ACCUM_KERNEL(int*I, int*J, int V, int*S, I[i], J[i], V)
ACCUM_KERNEL(int*I, int J, int V, int*S, I[i], J, V)
ACCUM_KERNEL(int I, int*J, int V, int*S, I, J[i], V)
ACCUM_KERNEL(int*I, int*J, unsigned long long*V, unsigned long long*S, I[i], J[i], V[i])
ACCUM_KERNEL(int*I, int J, unsigned long long*V, unsigned long long*S, I[i], J, V[i])
ACCUM_KERNEL(int I, int*J, unsigned long long*V, unsigned long long*S, I, J[i], V[i])
ACCUM_KERNEL(int*I, int*J, unsigned long long V, unsigned long long*S, I[i], J[i], V)
ACCUM_KERNEL(int*I, int J, unsigned long long V, unsigned long long*S, I[i], J, V)
ACCUM_KERNEL(int I, int*J, unsigned long long V, unsigned long long*S, I, J[i], V)
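// Added note (not in the original source): each ACCUM_KERNEL expansion defines
// an accum() overload that scatter-adds m values into S, roughly
// S[I[i] + nrows * J[i]] += V[i], with a scalar I, J or V broadcast when that
// argument is not a pointer. A minimal usage sketch with hypothetical device
// buffers; S accumulates, so initialize it before the call:
static int example_accum(int *d_I, int *d_J, float *d_V, float *d_S, int m, int nrows) {
  return accum(d_I, d_J, d_V, d_S, m, nrows);
}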
const int INBLOCK = 4;
// Copy and transpose columns of the input matrix into the output matrix. nrows refers to the input matrix
// (and is therefore the number of columns of the output). ncols is the length of the iptrs array, which will
// be the number of rows of the output matrix. iptrs specifies the columns of the input array to copy.
// outstride is the stride (leading dimension) of the output matrix.
__global__ void __icopy_transpose(int *iptrs, float *in, float *out, int outstride, int nrows, int ncols) {
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x + xb + iptrs[y]*nrows];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
}
int icopy_transpose(int *iptrs, float *in, float *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__icopy_transpose<<<griddims,blockdims>>>(iptrs, in, out, stride, nrows, ncols);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in icopy_transpose"); return err;}
return 0;
}
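// Added usage sketch (not in the original source): row r of `out` becomes the
// transpose of column iptrs[r] of `in`, so `out` is ncols x nrows with leading
// dimension `stride` (stride >= ncols). Buffer names are hypothetical
// caller-allocated device pointers.
static int example_icopy_transpose(int *d_iptrs, float *d_in, float *d_out, int nrows, int ncols) {
  return icopy_transpose(d_iptrs, d_in, d_out, /* stride = */ ncols, nrows, ncols);
}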
// copy and transpose the input matrix into columns of the output matrix. nrows, ncols refer to output matrix
__global__ void __ocopy_transpose(int *optrs, float *in, float *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
out[optrs[y]*nrows + threadIdx.x + xb] = tile[threadIdx.x][y-yb];
}
}
__syncthreads();
}
}
}
__global__ void __ocopy_transpose_add(int *optrs, float *in, float *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
atomicAdd(&out[optrs[y]*nrows + threadIdx.x + xb], tile[threadIdx.x][y-yb]);
}
}
__syncthreads();
}
}
}
__global__ void __ocopy_transpose_min(int *optrs, float *in, float *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
atomicMin((int *)&out[optrs[y]*nrows + threadIdx.x + xb], *(int *)(&tile[threadIdx.x][y-yb]));
}
}
__syncthreads();
}
}
}
int ocopy_transpose_add(int *optrs, float *in, float *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__ocopy_transpose_add<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose_add"); return err;}
return 0;
}
int ocopy_transpose(int *optrs, float *in, float *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__ocopy_transpose<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose"); return err;}
return 0;
}
int ocopy_transpose_min(int *optrs, float *in, float *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__ocopy_transpose_min<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose_min"); return err;}
return 0;
}
#ifdef TEST
int main(int argc, char **argv) {
int m=8, n=8, opn = 0;
float *dA, *dB, *dC, *A, *B, *C;
if (argc > 1) {
sscanf(argv[1], "%d", &opn);
if (argc > 2) {
sscanf(argv[2], "%d", &m);
if (argc > 3) {
sscanf(argv[3], "%d", &n);
}
}
}
A = (float *)malloc(m*n*sizeof(float));
B = (float *)malloc(m*n*sizeof(float));
C = (float *)malloc(m*n*sizeof(float));
cudaMalloc((void**)&dA, m*n*sizeof(float));
cudaMalloc((void**)&dB, m*n*sizeof(float));
cudaMalloc((void**)&dC, m*n*sizeof(float));
for (int i = 0; i < m*n; i++) {
A[i] = 1.0f;
B[i] = 2.0f;
}
cudaMemcpy(dA, A, m*n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dB, B, m*n*sizeof(float), cudaMemcpyHostToDevice);
printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
MatKernel(dA, m, n, dB, m, n, dC, opn);
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "CUDA error %d", err);
exit(1);
}
cudaMemcpy(C, dC, m*n*sizeof(float), cudaMemcpyDeviceToHost);
printf("C %f %f %f %f\n", C[0], C[1], C[2], C[3]);
printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
if (dA != NULL) cudaFree(dA);
if (dB != NULL) cudaFree(dB);
if (dC != NULL) cudaFree(dC);
if (C != NULL) free(C);
}
#endif
__global__ void __poissonrnd(int n, float *A, int *B, curandState *rstates) {
int id = threadIdx.x + blockDim.x * blockIdx.x;
int nthreads = blockDim.x * gridDim.x;
curandState rstate = rstates[id];
for (int i = id; i < n; i += nthreads) {
int cr = curand_poisson(&rstate, A[i]);
B[i] = cr;
}
}
__global__ void __randinit(curandState *rstates) {
int id = threadIdx.x + blockDim.x * blockIdx.x;
curand_init(1234, id, 0, &rstates[id]);
}
int poissonrnd(int n, float *A, int *B, int nthreads) {
int nblocks = min(1024, max(1,nthreads/1024));
int nth = min(n, 1024);
curandState *rstates;
int err;
err = cudaMalloc(( void **)& rstates , nblocks * nth * sizeof(curandState));
if (err > 0) {
fprintf(stderr, "Error in cudaMalloc %d", err);
return err;
}
cudaDeviceSynchronize();
__randinit<<<nblocks,nth>>>(rstates);
cudaDeviceSynchronize();
__poissonrnd<<<nblocks,nth>>>(n, A, B, rstates);
cudaDeviceSynchronize();
cudaFree(rstates);
err = cudaGetLastError();
return err;
}
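// Added usage sketch (not in the original source): draws one sample per entry,
// B[i] ~ Poisson(A[i]), using per-thread curand states seeded by __randinit.
// Buffer names are hypothetical caller-allocated device pointers.
static int example_poissonrnd(float *d_lambda, int *d_samples, int n) {
  return poissonrnd(n, d_lambda, d_samples, n); // nthreads sized to the problem
}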
__global__ void __binornd(int nvals, float *A, int *C, int *Out, curandState *rstates) {
int jstart = ((long long)blockIdx.x) * nvals / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nvals / gridDim.x;
int id = threadIdx.x + blockDim.x * threadIdx.y;
int ib = id + blockIdx.x * blockDim.x * blockDim.y;
curandState rstate;
float X, Y, V;
if (ib < nvals) {
rstate = rstates[ib];
}
const float pi = 3.1415926f;
for (int j = jstart + id; j < jend; j += blockDim.x * blockDim.y) {
int n = C[j];
float p = A[j];
float np = n * p;
float pvar = np * (1 - p);
float delta1 = max(1.0f, floor(sqrt(pvar * log(128 * np / (81 * pi * (1-p))))));
float delta2 = max(1.0f, floor(sqrt(pvar * log(128 * n * (1-p) / (pi * p)))));
float sigma1 = sqrt(pvar)*(1+delta1/(4*np));
float sigma2 = sqrt(pvar)*(1+delta2/(4*n*(1-p)));
float c = 2 * delta1 / np;
float a1 = 0.5f * exp(c) * sigma1 * sqrt(2*pi);
float a2 = 0.5f * sigma2 * sqrt(2*pi);
float a3 = exp(delta1/(n*(1-p)) - delta1*delta1/(2*sigma1*sigma1))*2*sigma1*sigma1/delta1;
float a4 = exp(-delta2*delta2/(2*sigma2*sigma2))*2*sigma2*sigma2/delta2;
float s = a1 + a2 + a3 + a4;
int i = 0;
while (i < 100) { // Give up eventually
i += 1;
float U = s * curand_uniform(&rstate);
float E1 = - log(curand_uniform(&rstate)); // safe since curand_uniform wont return 0
if (U <= a1 + a2) {
float N = curand_normal(&rstate);
if (U <= a1) {
Y = sigma1 * abs(N);
if (Y >= delta1) continue;
X = floor(Y);
V = - E1 - N * N/2 + c;
} else {
Y = sigma2 * abs(N);
if (Y >= delta2) continue;
X = floor(-Y);
V = - E1 - N * N/2;
}
} else {
float E2 = - log(curand_uniform(&rstate));
if (U <= a1 + a2 + a3) {
Y = delta1 + 2*sigma1*sigma1*E1/delta1;
X = floor(Y);
V = - E2 - delta1*Y/(2*sigma1*sigma1) + delta1/(n*(1-p));
} else {
Y = delta2 + 2*sigma2*sigma2*E1/delta2;
X = floor(-Y);
V = - E2 - delta2*Y/(2*sigma2*sigma2);
}
}
if (X < - np || X > n * (1-p)) continue;
if (V > lgamma(np + X) - lgamma(np)) continue;
break;
}
Out[j] = (int)X;
}
}
int binornd(int nvals, float *A, int *C, int *Out) {
int nthreads = min(nvals, 1024);
int nblocks = min(128, 1 + (nvals-1)/nthreads);
curandState *rstates;
int err = cudaMalloc(( void **)& rstates , nthreads * nblocks * sizeof(curandState));
if (err > 0) {
fprintf(stderr, "Error in cudaMalloc %d", err);
return err;
}
cudaDeviceSynchronize();
__randinit<<<nblocks,nthreads>>>(rstates);
cudaDeviceSynchronize();
__binornd<<<nblocks,nthreads>>>(nvals, A, C, Out, rstates);
cudaDeviceSynchronize();
cudaFree(rstates);
err = cudaGetLastError();
return err;
}
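// Added usage sketch (not in the original source): Out[j] is a binomial sample
// with C[j] trials and success probability A[j], drawn by the
// acceptance-rejection loop above. Buffer names are hypothetical device
// pointers allocated by the caller.
static int example_binornd(float *d_p, int *d_ntrials, int *d_out, int nvals) {
  return binornd(nvals, d_p, d_ntrials, d_out);
}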
int collectLVec(long long *pakeys, unsigned int *pavals, long long *pokeys, unsigned int *povals, int n) {
thrust::device_ptr<long long> akeys(pakeys);
thrust::device_ptr<long long> okeys(pokeys);
thrust::device_ptr<unsigned int> avals(pavals);
thrust::device_ptr<unsigned int> ovals(povals);
thrust::pair<thrust::device_ptr<long long>, thrust::device_ptr<unsigned int> > new_end;
new_end = thrust::reduce_by_key(akeys, akeys + n, avals, okeys, ovals);
int len = new_end.first - okeys;
return len;
}
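// Added note (not in the original source): thrust::reduce_by_key collapses runs
// of equal, adjacent keys and sums their values, so pakeys must already be
// sorted (or at least grouped). For example, keys {2,2,5,5,5,7} with values
// {1,1,1,1,1,1} yield okeys {2,5,7}, ovals {2,3,1}, and a returned length of 3.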
int mergeLVecs(long long *pakeys, unsigned int *pavals, long long *pbkeys, unsigned int *pbvals, long long *pokeys, unsigned int *povals, int n1, int n2) {
thrust::device_ptr<long long> akeys(pakeys);
thrust::device_ptr<long long> bkeys(pbkeys);
thrust::device_ptr<long long> okeys(pokeys);
thrust::device_ptr<unsigned int> avals(pavals);
thrust::device_ptr<unsigned int> bvals(pbvals);
thrust::device_ptr<unsigned int> ovals(povals);
thrust::merge_by_key(akeys, akeys+n1, bkeys, bkeys+n2, avals, bvals, okeys, ovals);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
|
8f084c75545201726743ba39942ccef8c8c6beca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_hip.cuh"
__host__ __device__
double getValue(int N, int M, int row, int col, double* List) {
int index = row * M + col;
return List[index];
}
__host__ __device__
double getRowIndex(int N, int M, int index) {
return (int)(index / M);
}
__host__ __device__
int getColIndex(int N, int M, int index) {
return (int)(index % M);
}
__host__ __device__
void getMulti(int N, int M, int K, int index, double* A, double* B, double* C) {
C[index] = 0.;
int row = getRowIndex(N, K, index);
int col = getColIndex(N, K, index);
for (int i = 0; i < M; i++) {
double a = getValue(N, M, row, i, A);
double b = getValue(M, K, i, col, B);
C[index] += a * b;
}
}
__global__
void Kernel(int N, int M, int K, double* A, double* B, double* C) {
int index = 0;
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < N * K) {
getMulti(N, M, K, id, A, B, C);
}
} | 8f084c75545201726743ba39942ccef8c8c6beca.cu | #include "kernel.cuh"
__host__ __device__
double getValue(int N, int M, int row, int col, double* List) {
int index = row * M + col;
return List[index];
}
__host__ __device__
double getRowIndex(int N, int M, int index) {
return (int)(index / M);
}
__host__ __device__
int getColIndex(int N, int M, int index) {
return (int)(index % M);
}
__host__ __device__
void getMulti(int N, int M, int K, int index, double* A, double* B, double* C) {
C[index] = 0.;
int row = getRowIndex(N, K, index);
int col = getColIndex(N, K, index);
for (int i = 0; i < M; i++) {
double a = getValue(N, M, row, i, A);
double b = getValue(M, K, i, col, B);
C[index] += a * b;
}
}
__global__
void Kernel(int N, int M, int K, double* A, double* B, double* C) {
int index = 0;
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < N * K) {
getMulti(N, M, K, id, A, B, C);
}
} |
c0a8dd0e7d72a17c937348327c2219b3397217db.hip | // !!! This is a file automatically generated by hipify!!!
/*
Implements the various scatter operations on cusp vectors
*/
#include <petscconf.h>
PETSC_CUDA_EXTERN_C_BEGIN
#include <petsc-private/vecimpl.h> /*I "petscvec.h" I*/
#include <../src/vec/vec/impls/dvecimpl.h>
PETSC_CUDA_EXTERN_C_END
#include <../src/vec/vec/impls/seq/seqcusp/cuspvecimpl.h>
#include <hip/hip_runtime.h>
#undef __FUNCT__
#define __FUNCT__ "VecScatterCUSPIndicesCreate_StoS"
PetscErrorCode VecScatterCUSPIndicesCreate_StoS(PetscInt n,PetscInt toFirst,PetscInt fromFirst,PetscInt toStep, PetscInt fromStep,PetscInt *tslots, PetscInt *fslots,PetscCUSPIndices *ci) {
PetscCUSPIndices cci;
VecScatterCUSPIndices_StoS stos_scatter;
hipError_t err = hipSuccess;
hipStream_t stream;
PetscInt *intVecGPU;
int device;
hipDeviceProp_t props;
PetscFunctionBegin;
cci = new struct _p_PetscCUSPIndices;
stos_scatter = new struct _p_VecScatterCUSPIndices_StoS;
/* create the "from" indices */
stos_scatter->fslots = 0;
stos_scatter->fromFirst = 0;
stos_scatter->fromStep = 0;
if (n) {
if (fslots) {
/* allocate GPU memory for the to-slots */
err = hipMalloc((void **)&intVecGPU,n*sizeof(PetscInt));CHKERRCUSP((int)err);
err = hipMemcpy(intVecGPU,fslots,n*sizeof(PetscInt),hipMemcpyHostToDevice);CHKERRCUSP((int)err);
/* assign the pointer to the struct */
stos_scatter->fslots = intVecGPU;
stos_scatter->fromMode = VEC_SCATTER_CUSP_GENERAL;
} else if (fromStep) {
stos_scatter->fromFirst = fromFirst;
stos_scatter->fromStep = fromStep;
stos_scatter->fromMode = VEC_SCATTER_CUSP_STRIDED;
}
}
/* create the "to" indices */
stos_scatter->tslots = 0;
stos_scatter->toFirst = 0;
stos_scatter->toStep = 0;
if (n) {
if (tslots) {
/* allocate GPU memory for the to-slots */
err = hipMalloc((void **)&intVecGPU,n*sizeof(PetscInt));CHKERRCUSP((int)err);
err = hipMemcpy(intVecGPU,tslots,n*sizeof(PetscInt),hipMemcpyHostToDevice);CHKERRCUSP((int)err);
/* assign the pointer to the struct */
stos_scatter->tslots = intVecGPU;
stos_scatter->toMode = VEC_SCATTER_CUSP_GENERAL;
} else if (toStep) {
stos_scatter->toFirst = toFirst;
stos_scatter->toStep = toStep;
stos_scatter->toMode = VEC_SCATTER_CUSP_STRIDED;
}
}
/* allocate the stream variable */
err = hipStreamCreate(&stream);CHKERRCUSP((int)err);
stos_scatter->stream = stream;
/* the number of indices */
stos_scatter->n = n;
/* get the maximum number of coresident thread blocks */
hipGetDevice(&device);
hipGetDeviceProperties(&props, device);
stos_scatter->MAX_CORESIDENT_THREADS = props.maxThreadsPerMultiProcessor;
if (props.major>=3) {
stos_scatter->MAX_BLOCKS = 16*props.multiProcessorCount;
} else {
stos_scatter->MAX_BLOCKS = 8*props.multiProcessorCount;
}
/* assign the indices */
cci->scatter = (VecScatterCUSPIndices_StoS)stos_scatter;
cci->scatterType = VEC_SCATTER_CUSP_STOS;
*ci = cci;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecScatterCUSPIndicesCreate_PtoP"
PetscErrorCode VecScatterCUSPIndicesCreate_PtoP(PetscInt ns,PetscInt *sendIndices,PetscInt nr,PetscInt *recvIndices,PetscCUSPIndices *ci)
{
PetscCUSPIndices cci;
VecScatterCUSPIndices_PtoP ptop_scatter;
PetscFunctionBegin;
cci = new struct _p_PetscCUSPIndices;
ptop_scatter = new struct _p_VecScatterCUSPIndices_PtoP;
/* this calculation assumes that the input indices are sorted */
ptop_scatter->ns = sendIndices[ns-1]-sendIndices[0]+1;
ptop_scatter->sendLowestIndex = sendIndices[0];
ptop_scatter->nr = recvIndices[nr-1]-recvIndices[0]+1;
ptop_scatter->recvLowestIndex = recvIndices[0];
/* assign indices */
cci->scatter = (VecScatterCUSPIndices_PtoP)ptop_scatter;
cci->scatterType = VEC_SCATTER_CUSP_PTOP;
*ci = cci;
PetscFunctionReturn(0);
}
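/* Added note (not part of the original source): because the indices are assumed
   sorted, the scatter only records the contiguous span that covers them. For
   example, sendIndices = {3,5,9} gives ns = 9 - 3 + 1 = 7 and
   sendLowestIndex = 3, i.e. the whole range [3,9] is treated as one block. */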
#undef __FUNCT__
#define __FUNCT__ "VecScatterCUSPIndicesDestroy"
PetscErrorCode VecScatterCUSPIndicesDestroy(PetscCUSPIndices *ci)
{
PetscFunctionBegin;
if (!(*ci)) PetscFunctionReturn(0);
try {
if (ci) {
if ((*ci)->scatterType == VEC_SCATTER_CUSP_PTOP) {
delete (VecScatterCUSPIndices_PtoP)(*ci)->scatter;
(*ci)->scatter = 0;
} else {
hipError_t err = hipSuccess;
VecScatterCUSPIndices_StoS stos_scatter = (VecScatterCUSPIndices_StoS)(*ci)->scatter;
if (stos_scatter->fslots) {
err = hipFree(stos_scatter->fslots);CHKERRCUSP((int)err);
stos_scatter->fslots = 0;
}
/* free the GPU memory for the to-slots */
if (stos_scatter->tslots) {
err = hipFree(stos_scatter->tslots);CHKERRCUSP((int)err);
stos_scatter->tslots = 0;
}
/* free the stream variable */
if (stos_scatter->stream) {
err = hipStreamDestroy(stos_scatter->stream);CHKERRCUSP((int)err);
stos_scatter->stream = 0;
}
delete stos_scatter;
(*ci)->scatter = 0;
}
delete *ci;
*ci = 0;
}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex);
}
PetscFunctionReturn(0);
}
/* Insert operator */
class Insert {
public:
__device__ PetscScalar operator() (PetscScalar a,PetscScalar b) const {
return a;
}
};
/* Add operator */
class Add {
public:
__device__ PetscScalar operator() (PetscScalar a,PetscScalar b) const {
return a+b;
}
};
/* Max operator */
class Max {
public:
__device__ PetscScalar operator() (PetscScalar a,PetscScalar b) const {
#if !defined(PETSC_USE_COMPLEX)
return PetscMax(a,b);
#endif
}
};
/* Sequential general to sequential general GPU kernel */
template<class OPERATOR>
__global__ void VecScatterCUSP_SGtoSG_kernel(PetscInt n,PetscInt *xind,PetscScalar *x,PetscInt *yind,PetscScalar *y,OPERATOR OP) {
const int tidx = blockIdx.x*blockDim.x + threadIdx.x;
const int grid_size = gridDim.x * blockDim.x;
for (int i = tidx; i < n; i += grid_size) {
y[yind[i]] = OP(x[xind[i]],y[yind[i]]);
}
}
/* Sequential general to sequential strided GPU kernel */
template<class OPERATOR>
__global__ void VecScatterCUSP_SGtoSS_kernel(PetscInt n,PetscInt *xind,PetscScalar *x,PetscInt toFirst,PetscInt toStep,PetscScalar *y,OPERATOR OP) {
const int tidx = blockIdx.x*blockDim.x + threadIdx.x;
const int grid_size = gridDim.x * blockDim.x;
for (int i = tidx; i < n; i += grid_size) {
y[toFirst+i*toStep] = OP(x[xind[i]],y[toFirst+i*toStep]);
}
}
/* Sequential strided to sequential strided GPU kernel */
template<class OPERATOR>
__global__ void VecScatterCUSP_SStoSS_kernel(PetscInt n,PetscInt fromFirst,PetscInt fromStep,PetscScalar *x,PetscInt toFirst,PetscInt toStep,PetscScalar *y,OPERATOR OP) {
const int tidx = blockIdx.x*blockDim.x + threadIdx.x;
const int grid_size = gridDim.x * blockDim.x;
for (int i = tidx; i < n; i += grid_size) {
y[toFirst+i*toStep] = OP(x[fromFirst+i*fromStep],y[toFirst+i*toStep]);
}
}
/* Sequential strided to sequential general GPU kernel */
template<class OPERATOR>
__global__ void VecScatterCUSP_SStoSG_kernel(PetscInt n,PetscInt fromFirst,PetscInt fromStep,PetscScalar *x,PetscInt *yind,PetscScalar *y,OPERATOR OP) {
const int tidx = blockIdx.x*blockDim.x + threadIdx.x;
const int grid_size = gridDim.x * blockDim.x;
for (int i = tidx; i < n; i += grid_size) {
y[yind[i]] = OP(x[fromFirst+i*fromStep],y[yind[i]]);
}
}
template<class OPERATOR>
void VecScatterCUSP_StoS_Dispatcher(CUSPARRAY *xarray,CUSPARRAY *yarray,PetscCUSPIndices ci,ScatterMode mode,OPERATOR OP) {
PetscInt nBlocks=0,nThreads=128;
VecScatterCUSPIndices_StoS stos_scatter = (VecScatterCUSPIndices_StoS)ci->scatter;
nBlocks=(int)ceil(((float) stos_scatter->n)/((float) nThreads))+1;
if (nBlocks>stos_scatter->MAX_CORESIDENT_THREADS/nThreads) {
nBlocks = stos_scatter->MAX_CORESIDENT_THREADS/nThreads;
}
dim3 block(nThreads,1,1);
dim3 grid(nBlocks,1,1);
if (mode == SCATTER_FORWARD) {
if (stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL && stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL) {
hipLaunchKernelGGL(( VecScatterCUSP_SGtoSG_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, stos_scatter->n,stos_scatter->fslots,xarray->data().get(),stos_scatter->tslots,yarray->data().get(),OP);
} else if (stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL && stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED) {
hipLaunchKernelGGL(( VecScatterCUSP_SGtoSS_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, stos_scatter->n,stos_scatter->fslots,xarray->data().get(),stos_scatter->toFirst,stos_scatter->toStep,yarray->data().get(),OP);
} else if (stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED) {
hipLaunchKernelGGL(( VecScatterCUSP_SStoSS_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, stos_scatter->n,stos_scatter->fromFirst,stos_scatter->fromStep,xarray->data().get(),stos_scatter->toFirst,stos_scatter->toStep,yarray->data().get(),OP);
} else if (stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL) {
hipLaunchKernelGGL(( VecScatterCUSP_SStoSG_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, stos_scatter->n,stos_scatter->fromFirst,stos_scatter->fromStep,xarray->data().get(),stos_scatter->tslots,yarray->data().get(),OP);
}
} else {
if (stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL && stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL) {
hipLaunchKernelGGL(( VecScatterCUSP_SGtoSG_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, stos_scatter->n,stos_scatter->tslots,xarray->data().get(),stos_scatter->fslots,yarray->data().get(),OP);
} else if (stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL && stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED) {
hipLaunchKernelGGL(( VecScatterCUSP_SGtoSS_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, stos_scatter->n,stos_scatter->tslots,xarray->data().get(),stos_scatter->fromFirst,stos_scatter->fromStep,yarray->data().get(),OP);
} else if (stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED) {
hipLaunchKernelGGL(( VecScatterCUSP_SStoSS_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, stos_scatter->n,stos_scatter->toFirst,stos_scatter->toStep,xarray->data().get(),stos_scatter->fromFirst,stos_scatter->fromStep,yarray->data().get(),OP);
} else if (stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL) {
hipLaunchKernelGGL(( VecScatterCUSP_SStoSG_kernel), dim3(grid),dim3(block),0,stos_scatter->stream, stos_scatter->n,stos_scatter->toFirst,stos_scatter->toStep,xarray->data().get(),stos_scatter->fslots,yarray->data().get(),OP);
}
}
}
#undef __FUNCT__
#define __FUNCT__ "VecScatterCUSP_StoS"
PetscErrorCode VecScatterCUSP_StoS(Vec x,Vec y,PetscCUSPIndices ci,InsertMode addv,ScatterMode mode)
{
PetscErrorCode ierr;
CUSPARRAY *xarray,*yarray;
VecScatterCUSPIndices_StoS stos_scatter = (VecScatterCUSPIndices_StoS)ci->scatter;
hipError_t err = hipSuccess;
PetscFunctionBegin;
ierr = VecCUSPAllocateCheck(x);CHKERRQ(ierr);
ierr = VecCUSPAllocateCheck(y);CHKERRQ(ierr);
ierr = VecCUSPGetArrayRead(x,&xarray);CHKERRQ(ierr);
ierr = VecCUSPGetArrayWrite(y,&yarray);CHKERRQ(ierr);
if (stos_scatter->n) {
if (addv == INSERT_VALUES)
VecScatterCUSP_StoS_Dispatcher(xarray,yarray,ci,mode,Insert());
else if (addv == ADD_VALUES)
VecScatterCUSP_StoS_Dispatcher(xarray,yarray,ci,mode,Add());
#if !defined(PETSC_USE_COMPLEX)
else if (addv == MAX_VALUES)
VecScatterCUSP_StoS_Dispatcher(xarray,yarray,ci,mode,Max());
#endif
else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");
err = hipGetLastError();CHKERRCUSP((int)err);
err = hipStreamSynchronize(stos_scatter->stream);CHKERRCUSP((int)err);
}
ierr = VecCUSPRestoreArrayRead(x,&xarray);CHKERRQ(ierr);
ierr = VecCUSPRestoreArrayWrite(y,&yarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
| c0a8dd0e7d72a17c937348327c2219b3397217db.cu | /*
Implements the various scatter operations on cusp vectors
*/
#include <petscconf.h>
PETSC_CUDA_EXTERN_C_BEGIN
#include <petsc-private/vecimpl.h> /*I "petscvec.h" I*/
#include <../src/vec/vec/impls/dvecimpl.h>
PETSC_CUDA_EXTERN_C_END
#include <../src/vec/vec/impls/seq/seqcusp/cuspvecimpl.h>
#include <cuda_runtime.h>
#undef __FUNCT__
#define __FUNCT__ "VecScatterCUSPIndicesCreate_StoS"
PetscErrorCode VecScatterCUSPIndicesCreate_StoS(PetscInt n,PetscInt toFirst,PetscInt fromFirst,PetscInt toStep, PetscInt fromStep,PetscInt *tslots, PetscInt *fslots,PetscCUSPIndices *ci) {
PetscCUSPIndices cci;
VecScatterCUSPIndices_StoS stos_scatter;
cudaError_t err = cudaSuccess;
cudaStream_t stream;
PetscInt *intVecGPU;
int device;
cudaDeviceProp props;
PetscFunctionBegin;
cci = new struct _p_PetscCUSPIndices;
stos_scatter = new struct _p_VecScatterCUSPIndices_StoS;
/* create the "from" indices */
stos_scatter->fslots = 0;
stos_scatter->fromFirst = 0;
stos_scatter->fromStep = 0;
if (n) {
if (fslots) {
/* allocate GPU memory for the to-slots */
err = cudaMalloc((void **)&intVecGPU,n*sizeof(PetscInt));CHKERRCUSP((int)err);
err = cudaMemcpy(intVecGPU,fslots,n*sizeof(PetscInt),cudaMemcpyHostToDevice);CHKERRCUSP((int)err);
/* assign the pointer to the struct */
stos_scatter->fslots = intVecGPU;
stos_scatter->fromMode = VEC_SCATTER_CUSP_GENERAL;
} else if (fromStep) {
stos_scatter->fromFirst = fromFirst;
stos_scatter->fromStep = fromStep;
stos_scatter->fromMode = VEC_SCATTER_CUSP_STRIDED;
}
}
/* create the "to" indices */
stos_scatter->tslots = 0;
stos_scatter->toFirst = 0;
stos_scatter->toStep = 0;
if (n) {
if (tslots) {
/* allocate GPU memory for the to-slots */
err = cudaMalloc((void **)&intVecGPU,n*sizeof(PetscInt));CHKERRCUSP((int)err);
err = cudaMemcpy(intVecGPU,tslots,n*sizeof(PetscInt),cudaMemcpyHostToDevice);CHKERRCUSP((int)err);
/* assign the pointer to the struct */
stos_scatter->tslots = intVecGPU;
stos_scatter->toMode = VEC_SCATTER_CUSP_GENERAL;
} else if (toStep) {
stos_scatter->toFirst = toFirst;
stos_scatter->toStep = toStep;
stos_scatter->toMode = VEC_SCATTER_CUSP_STRIDED;
}
}
/* allocate the stream variable */
err = cudaStreamCreate(&stream);CHKERRCUSP((int)err);
stos_scatter->stream = stream;
/* the number of indices */
stos_scatter->n = n;
/* get the maximum number of coresident thread blocks */
cudaGetDevice(&device);
cudaGetDeviceProperties(&props, device);
stos_scatter->MAX_CORESIDENT_THREADS = props.maxThreadsPerMultiProcessor;
if (props.major>=3) {
stos_scatter->MAX_BLOCKS = 16*props.multiProcessorCount;
} else {
stos_scatter->MAX_BLOCKS = 8*props.multiProcessorCount;
}
/* assign the indices */
cci->scatter = (VecScatterCUSPIndices_StoS)stos_scatter;
cci->scatterType = VEC_SCATTER_CUSP_STOS;
*ci = cci;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecScatterCUSPIndicesCreate_PtoP"
PetscErrorCode VecScatterCUSPIndicesCreate_PtoP(PetscInt ns,PetscInt *sendIndices,PetscInt nr,PetscInt *recvIndices,PetscCUSPIndices *ci)
{
PetscCUSPIndices cci;
VecScatterCUSPIndices_PtoP ptop_scatter;
PetscFunctionBegin;
cci = new struct _p_PetscCUSPIndices;
ptop_scatter = new struct _p_VecScatterCUSPIndices_PtoP;
/* this calculation assumes that the input indices are sorted */
ptop_scatter->ns = sendIndices[ns-1]-sendIndices[0]+1;
ptop_scatter->sendLowestIndex = sendIndices[0];
ptop_scatter->nr = recvIndices[nr-1]-recvIndices[0]+1;
ptop_scatter->recvLowestIndex = recvIndices[0];
/* assign indices */
cci->scatter = (VecScatterCUSPIndices_PtoP)ptop_scatter;
cci->scatterType = VEC_SCATTER_CUSP_PTOP;
*ci = cci;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "VecScatterCUSPIndicesDestroy"
PetscErrorCode VecScatterCUSPIndicesDestroy(PetscCUSPIndices *ci)
{
PetscFunctionBegin;
if (!(*ci)) PetscFunctionReturn(0);
try {
if (ci) {
if ((*ci)->scatterType == VEC_SCATTER_CUSP_PTOP) {
delete (VecScatterCUSPIndices_PtoP)(*ci)->scatter;
(*ci)->scatter = 0;
} else {
cudaError_t err = cudaSuccess;
VecScatterCUSPIndices_StoS stos_scatter = (VecScatterCUSPIndices_StoS)(*ci)->scatter;
if (stos_scatter->fslots) {
err = cudaFree(stos_scatter->fslots);CHKERRCUSP((int)err);
stos_scatter->fslots = 0;
}
/* free the GPU memory for the to-slots */
if (stos_scatter->tslots) {
err = cudaFree(stos_scatter->tslots);CHKERRCUSP((int)err);
stos_scatter->tslots = 0;
}
/* free the stream variable */
if (stos_scatter->stream) {
err = cudaStreamDestroy(stos_scatter->stream);CHKERRCUSP((int)err);
stos_scatter->stream = 0;
}
delete stos_scatter;
(*ci)->scatter = 0;
}
delete *ci;
*ci = 0;
}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex);
}
PetscFunctionReturn(0);
}
/* Insert operator */
class Insert {
public:
__device__ PetscScalar operator() (PetscScalar a,PetscScalar b) const {
return a;
}
};
/* Add operator */
class Add {
public:
__device__ PetscScalar operator() (PetscScalar a,PetscScalar b) const {
return a+b;
}
};
/* Max operator */
class Max {
public:
__device__ PetscScalar operator() (PetscScalar a,PetscScalar b) const {
#if !defined(PETSC_USE_COMPLEX)
return PetscMax(a,b);
#endif
}
};
/* Sequential general to sequential general GPU kernel */
template<class OPERATOR>
__global__ void VecScatterCUSP_SGtoSG_kernel(PetscInt n,PetscInt *xind,PetscScalar *x,PetscInt *yind,PetscScalar *y,OPERATOR OP) {
const int tidx = blockIdx.x*blockDim.x + threadIdx.x;
const int grid_size = gridDim.x * blockDim.x;
for (int i = tidx; i < n; i += grid_size) {
y[yind[i]] = OP(x[xind[i]],y[yind[i]]);
}
}
/* Sequential general to sequential strided GPU kernel */
template<class OPERATOR>
__global__ void VecScatterCUSP_SGtoSS_kernel(PetscInt n,PetscInt *xind,PetscScalar *x,PetscInt toFirst,PetscInt toStep,PetscScalar *y,OPERATOR OP) {
const int tidx = blockIdx.x*blockDim.x + threadIdx.x;
const int grid_size = gridDim.x * blockDim.x;
for (int i = tidx; i < n; i += grid_size) {
y[toFirst+i*toStep] = OP(x[xind[i]],y[toFirst+i*toStep]);
}
}
/* Sequential strided to sequential strided GPU kernel */
template<class OPERATOR>
__global__ void VecScatterCUSP_SStoSS_kernel(PetscInt n,PetscInt fromFirst,PetscInt fromStep,PetscScalar *x,PetscInt toFirst,PetscInt toStep,PetscScalar *y,OPERATOR OP) {
const int tidx = blockIdx.x*blockDim.x + threadIdx.x;
const int grid_size = gridDim.x * blockDim.x;
for (int i = tidx; i < n; i += grid_size) {
y[toFirst+i*toStep] = OP(x[fromFirst+i*fromStep],y[toFirst+i*toStep]);
}
}
/* Sequential strided to sequential general GPU kernel */
template<class OPERATOR>
__global__ void VecScatterCUSP_SStoSG_kernel(PetscInt n,PetscInt fromFirst,PetscInt fromStep,PetscScalar *x,PetscInt *yind,PetscScalar *y,OPERATOR OP) {
const int tidx = blockIdx.x*blockDim.x + threadIdx.x;
const int grid_size = gridDim.x * blockDim.x;
for (int i = tidx; i < n; i += grid_size) {
y[yind[i]] = OP(x[fromFirst+i*fromStep],y[yind[i]]);
}
}
template<class OPERATOR>
void VecScatterCUSP_StoS_Dispatcher(CUSPARRAY *xarray,CUSPARRAY *yarray,PetscCUSPIndices ci,ScatterMode mode,OPERATOR OP) {
PetscInt nBlocks=0,nThreads=128;
VecScatterCUSPIndices_StoS stos_scatter = (VecScatterCUSPIndices_StoS)ci->scatter;
nBlocks=(int)ceil(((float) stos_scatter->n)/((float) nThreads))+1;
if (nBlocks>stos_scatter->MAX_CORESIDENT_THREADS/nThreads) {
nBlocks = stos_scatter->MAX_CORESIDENT_THREADS/nThreads;
}
dim3 block(nThreads,1,1);
dim3 grid(nBlocks,1,1);
if (mode == SCATTER_FORWARD) {
if (stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL && stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL) {
VecScatterCUSP_SGtoSG_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->fslots,xarray->data().get(),stos_scatter->tslots,yarray->data().get(),OP);
} else if (stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL && stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED) {
VecScatterCUSP_SGtoSS_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->fslots,xarray->data().get(),stos_scatter->toFirst,stos_scatter->toStep,yarray->data().get(),OP);
} else if (stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED) {
VecScatterCUSP_SStoSS_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->fromFirst,stos_scatter->fromStep,xarray->data().get(),stos_scatter->toFirst,stos_scatter->toStep,yarray->data().get(),OP);
} else if (stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL) {
VecScatterCUSP_SStoSG_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->fromFirst,stos_scatter->fromStep,xarray->data().get(),stos_scatter->tslots,yarray->data().get(),OP);
}
} else {
if (stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL && stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL) {
VecScatterCUSP_SGtoSG_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->tslots,xarray->data().get(),stos_scatter->fslots,yarray->data().get(),OP);
} else if (stos_scatter->toMode == VEC_SCATTER_CUSP_GENERAL && stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED) {
VecScatterCUSP_SGtoSS_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->tslots,xarray->data().get(),stos_scatter->fromFirst,stos_scatter->fromStep,yarray->data().get(),OP);
} else if (stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->fromMode == VEC_SCATTER_CUSP_STRIDED) {
VecScatterCUSP_SStoSS_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->toFirst,stos_scatter->toStep,xarray->data().get(),stos_scatter->fromFirst,stos_scatter->fromStep,yarray->data().get(),OP);
} else if (stos_scatter->toMode == VEC_SCATTER_CUSP_STRIDED && stos_scatter->fromMode == VEC_SCATTER_CUSP_GENERAL) {
VecScatterCUSP_SStoSG_kernel<<<grid,block,0,stos_scatter->stream>>>(stos_scatter->n,stos_scatter->toFirst,stos_scatter->toStep,xarray->data().get(),stos_scatter->fslots,yarray->data().get(),OP);
}
}
}
#undef __FUNCT__
#define __FUNCT__ "VecScatterCUSP_StoS"
PetscErrorCode VecScatterCUSP_StoS(Vec x,Vec y,PetscCUSPIndices ci,InsertMode addv,ScatterMode mode)
{
PetscErrorCode ierr;
CUSPARRAY *xarray,*yarray;
VecScatterCUSPIndices_StoS stos_scatter = (VecScatterCUSPIndices_StoS)ci->scatter;
cudaError_t err = cudaSuccess;
PetscFunctionBegin;
ierr = VecCUSPAllocateCheck(x);CHKERRQ(ierr);
ierr = VecCUSPAllocateCheck(y);CHKERRQ(ierr);
ierr = VecCUSPGetArrayRead(x,&xarray);CHKERRQ(ierr);
ierr = VecCUSPGetArrayWrite(y,&yarray);CHKERRQ(ierr);
if (stos_scatter->n) {
if (addv == INSERT_VALUES)
VecScatterCUSP_StoS_Dispatcher(xarray,yarray,ci,mode,Insert());
else if (addv == ADD_VALUES)
VecScatterCUSP_StoS_Dispatcher(xarray,yarray,ci,mode,Add());
#if !defined(PETSC_USE_COMPLEX)
else if (addv == MAX_VALUES)
VecScatterCUSP_StoS_Dispatcher(xarray,yarray,ci,mode,Max());
#endif
else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");
err = cudaGetLastError();CHKERRCUSP((int)err);
err = cudaStreamSynchronize(stos_scatter->stream);CHKERRCUSP((int)err);
}
ierr = VecCUSPRestoreArrayRead(x,&xarray);CHKERRQ(ierr);
ierr = VecCUSPRestoreArrayWrite(y,&yarray);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
|
b433a8598359474b008e58896cf8f9d9f87ba914.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.hip"
#include "support.cu"
int main (int argc, char *argv[])
{
//set standard seed
srand(217);
Timer timer;
hipError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
float *A_h, *B_h, *C_h;
float *A_d, *B_d, *C_d;
size_t A_sz, B_sz, C_sz;
unsigned VecSize;
dim3 dim_grid, dim_block;
if (argc == 1) {
VecSize = 1000;
} else if (argc == 2) {
VecSize = atoi(argv[1]);
}
else {
printf("\nOh no!\nUsage: ./vecAdd <Size>");
exit(0);
}
A_sz = VecSize;
B_sz = VecSize;
C_sz = VecSize;
A_h = (float*) malloc( sizeof(float)*A_sz );
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
B_h = (float*) malloc( sizeof(float)*B_sz );
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
C_h = (float*) malloc( sizeof(float)*C_sz );
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" size Of vector: %u x %u\n ", VecSize);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ---------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
basicVecAdd(A_d, B_d, C_d, VecSize); //In kernel.cu
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, VecSize);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
return 0;
}
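/* Added sketch (not part of the original lab skeleton): the "INSERT CODE HERE"
   sections above are left as the exercise; one plausible completion, reusing
   the same variable names, would look roughly like:
     hipMalloc((void**)&A_d, sizeof(float)*A_sz);
     hipMalloc((void**)&B_d, sizeof(float)*B_sz);
     hipMalloc((void**)&C_d, sizeof(float)*C_sz);
     hipMemcpy(A_d, A_h, sizeof(float)*A_sz, hipMemcpyHostToDevice);
     hipMemcpy(B_d, B_h, sizeof(float)*B_sz, hipMemcpyHostToDevice);
     ...
     hipMemcpy(C_h, C_d, sizeof(float)*C_sz, hipMemcpyDeviceToHost);
     hipFree(A_d); hipFree(B_d); hipFree(C_d);
   These API names follow the HIP runtime used elsewhere in this file. */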
| b433a8598359474b008e58896cf8f9d9f87ba914.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.cu"
#include "support.cu"
int main (int argc, char *argv[])
{
//set standard seed
srand(217);
Timer timer;
cudaError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
float *A_h, *B_h, *C_h;
float *A_d, *B_d, *C_d;
size_t A_sz, B_sz, C_sz;
unsigned VecSize;
dim3 dim_grid, dim_block;
if (argc == 1) {
VecSize = 1000;
} else if (argc == 2) {
VecSize = atoi(argv[1]);
}
else {
printf("\nOh no!\nUsage: ./vecAdd <Size>");
exit(0);
}
A_sz = VecSize;
B_sz = VecSize;
C_sz = VecSize;
A_h = (float*) malloc( sizeof(float)*A_sz );
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
B_h = (float*) malloc( sizeof(float)*B_sz );
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
C_h = (float*) malloc( sizeof(float)*C_sz );
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" size Of vector: %u x %u\n ", VecSize);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ---------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
basicVecAdd(A_d, B_d, C_d, VecSize); //In kernel.cu
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, VecSize);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
return 0;
}
|
f72fe0f23ca4084d249f1d9ba9e67850f3a0f89a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <math.h>
#include "cs_dbg.h"
#include "cs_helper.h"
#include "cs_header.h"
#include "cs_block.h"
#include "cs_perm_mlseq.h"
#include "cs_expand.h"
#include "cs_interpolate.h"
#include "cs_perm_selection.h"
#include "cs_copy_box.h"
#include "cs_motion_detect.h"
#include "cs_motion_detect_v2.h"
#include "cs_edge_detect.h"
#include "cs_edge_detect_v2.h"
#include "cs_analysis.h"
#define CUDA_DBG
int *dp1 = NULL, *dp2 = NULL ;
int *hp1 = NULL, *hp2 = NULL ;
// #define BUF_SIZE ( 1024 * 1024 * 32 )
#define MAGIC_SIZE 33553920
#define BUF_SIZE ( MAGIC_SIZE + 100 )
struct cd {
int tid ;
// gridDim
int gdx ;
int gdy ;
int gdz ;
// blockIdx
int blkx ;
int blky ;
int blkz ;
// blockDim
int dx ;
int dy ;
int dz ;
// threadIdx
int tx ;
int ty ;
int tz ;
int cnt ;
} ;
#define CD_SIZE_E (sizeof( struct cd ) / sizeof(int))
enum {
T2DB_1DG = 1,
T3DB_1DG,
T3DB_2DG,
T3DB_3DG,
T1DB_1DG
} ;
void cuda_test_grid ( struct cd *d_a, int n ) ;
// #define TEST_LEN 128 // AAA 2D blocks ... 1D grid
#define TEST_LEN 576 // BBB 3D blocks ... 1D grid
__global__ void d_cuda_test_grid( struct cd *a, int size, int add, int ttt )
{
int blockid, tid ;
switch ( ttt ) {
case T1DB_1DG :
tid = blockIdx.x * blockDim.x + threadIdx.x ;
break ;
case T2DB_1DG :
tid = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x ; // AAA
break ;
case T3DB_1DG :
tid = blockIdx.x * ( blockDim.x * blockDim.y * blockDim.z ) +
threadIdx.z * ( blockDim.x * blockDim.y ) +
threadIdx.y * blockDim.x + threadIdx.x ; // BBB
break ;
case T3DB_3DG :
// NOTE: gridDim.z is never used in the calculation, but it is used
// by the engine as a size limit ... past this limit, the
// engine stops ...
blockid = blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z *
( gridDim.x * gridDim.y ) ;
tid = blockid * ( blockDim.x * blockDim.y * blockDim.z ) +
threadIdx.z * ( blockDim.x * blockDim.y ) +
threadIdx.y * blockDim.x + threadIdx.x ;
break ;
case T3DB_2DG :
blockid = blockIdx.x + blockIdx.y * gridDim.x ;
tid = blockid * ( blockDim.x * blockDim.y * blockDim.z ) +
threadIdx.z * ( blockDim.x * blockDim.y ) +
threadIdx.y * blockDim.x + threadIdx.x ;
#ifdef CUDA_OBS
tid = blockIdx.x * gridDim.x * ( blockDim.x * blockDim.y * blockDim.z ) +
blockIdx.x * ( blockDim.x * blockDim.y * blockDim.z ) +
threadIdx.z * ( blockDim.x * blockDim.y ) +
threadIdx.y * blockDim.x + threadIdx.x ; // BBB
#endif
break ;
default :
tid = blockIdx.x * ( blockDim.x * blockDim.y * blockDim.z ) +
threadIdx.z * ( blockDim.x * blockDim.y ) +
threadIdx.y * blockDim.x + threadIdx.x ; // BBB
}
while ( tid < size )
{
a[tid].tid = tid ;
a[tid].blkx = blockIdx.x ;
a[tid].blky = blockIdx.y ;
a[tid].blkz = blockIdx.z ;
a[tid].tx = threadIdx.x ;
a[tid].ty = threadIdx.y ;
a[tid].tz = threadIdx.z ;
a[tid].gdx = gridDim.x ;
a[tid].gdy = gridDim.y ;
a[tid].gdz = gridDim.z ;
a[tid].dx = blockDim.x ;
a[tid].dy = blockDim.y ;
a[tid].dz = blockDim.z ;
a[tid].cnt++ ;
tid += size ;
}
}
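// Added worked example (not in the original source), for the T3DB_2DG case
// with blockDim = (4,3,2) and gridDim = (3,2) as launched below:
// blockid = blockIdx.x + blockIdx.y * gridDim.x
// tid = blockid * (4*3*2) + threadIdx.z * (4*3) + threadIdx.y * 4 + threadIdx.x
// e.g. block (2,1), thread (1,2,1): blockid = 2 + 1*3 = 5 and
// tid = 5*24 + 1*12 + 2*4 + 1 = 141; all 6*24 = 144 threads get distinct
// tids in [0,144).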
__global__ void d_cuda_test_inc ( struct cd *a, int size, int add, int ttt )
{
int tid = blockIdx.x * blockDim.x + threadIdx.x ;
while ( tid < size )
{
a[tid].tid = tid ;
a[tid].blkx = blockIdx.x ;
a[tid].blky = blockIdx.y ;
a[tid].blkz = blockIdx.z ;
a[tid].cnt++ ;
tid += add ;
}
}
void
cuda_test_inc ( struct cd *d_a, int n, int ttt )
{
#ifdef CUDA_DBG
fprintf( stderr, "%s: dp %p cnt %d type %d\n", __func__, d_a, n, ttt ) ;
#endif
hipLaunchKernelGGL(( d_cuda_test_inc) , dim3(2), dim3(32) , 0, 0, d_a, n, 64, ttt ) ;
hipDeviceSynchronize() ;
}
void
cuda_test_grid ( struct cd *d_a, int n, int ttt )
{
dim3 threadsPerBlock(8,8,1) ;
dim3 nBlocks( 2, 2 ) ;
#ifdef CUDA_DBG
fprintf( stderr, "%s: dp %p cnt %d type %d\n", __func__, d_a, n, ttt ) ;
#endif
switch ( ttt ) {
case T1DB_1DG :
hipLaunchKernelGGL(( d_cuda_test_grid) , dim3(2), dim3(32) , 0, 0, d_a, n, n, ttt ) ;
break ;
case T2DB_1DG :
threadsPerBlock.x = 8 ;
threadsPerBlock.y = 8 ;
threadsPerBlock.z = 1 ;
hipLaunchKernelGGL(( d_cuda_test_grid) , dim3(2), dim3(threadsPerBlock) , 0, 0, d_a, n, n, ttt ) ;
break ;
case T3DB_1DG:
threadsPerBlock.x = 4 ;
threadsPerBlock.y = 3 ;
threadsPerBlock.z = 2 ;
hipLaunchKernelGGL(( d_cuda_test_grid) , dim3(2), dim3(threadsPerBlock) , 0, 0, d_a, n, n, ttt ) ;
break ;
case T3DB_2DG:
threadsPerBlock.x = 4 ;
threadsPerBlock.y = 3 ;
threadsPerBlock.z = 2 ;
nBlocks.x = 3 ; // ( n / threadsPerBlock.x, n / threadsPerBlock.y ) ;
nBlocks.y = 2 ;
hipLaunchKernelGGL(( d_cuda_test_grid) , dim3(nBlocks), dim3(threadsPerBlock) , 0, 0, d_a, n, n, ttt ) ;
break ;
case T3DB_3DG:
threadsPerBlock.x = 4 ;
threadsPerBlock.y = 3 ;
threadsPerBlock.z = 2 ;
nBlocks.x = 3 ;
nBlocks.y = 2 ;
nBlocks.z = 4 ;
hipLaunchKernelGGL(( d_cuda_test_grid) , dim3(nBlocks), dim3(threadsPerBlock) , 0, 0, d_a, n, n, ttt ) ;
break ;
default :
exit( 33 ) ;
}
hipDeviceSynchronize() ;
}
int main( int ac, char *av[] )
{
int i, k, *dp ;
setbuf( stdout, NULL ) ;
setbuf( stderr, NULL ) ;
i = BUF_SIZE * sizeof ( struct cd ) ;
if (( k = hipMalloc( &dp, i )) != hipSuccess )
{
printf("%s: cube alloc failed %d \n", __func__, k ) ;
exit ( 0 ) ;
}
printf("size %d\n", i ) ;
set_device_mem_i ( dp, TEST_LEN * CD_SIZE_E * sizeof ( int ) + 200, 1 ) ;
dbg_init( 1024 * 1024 ) ;
// test of increments of tid in each thread
cuda_test_inc (( struct cd *) dp, TEST_LEN, T1DB_1DG ) ;
dbg_p_d_data_i_mn ("done", dp, CD_SIZE_E * TEST_LEN, CD_SIZE_E, TEST_LEN, CD_SIZE_E ) ;
#ifdef CUDA_OBS
// test of thread/block/grid ...
cuda_test_grid (( struct cd *) dp, TEST_LEN, T1DB_1DG ) ;
dbg_p_d_data_i_mn ("done", dp, CD_SIZE_E * TEST_LEN, CD_SIZE_E, TEST_LEN, CD_SIZE_E ) ;
#endif
}
| f72fe0f23ca4084d249f1d9ba9e67850f3a0f89a.cu | #include <iostream>
using namespace std;
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <math.h>
#include "cs_dbg.h"
#include "cs_helper.h"
#include "cs_header.h"
#include "cs_block.h"
#include "cs_perm_mlseq.h"
#include "cs_expand.h"
#include "cs_interpolate.h"
#include "cs_perm_selection.h"
#include "cs_copy_box.h"
#include "cs_motion_detect.h"
#include "cs_motion_detect_v2.h"
#include "cs_edge_detect.h"
#include "cs_edge_detect_v2.h"
#include "cs_analysis.h"
#define CUDA_DBG
int *dp1 = NULL, *dp2 = NULL ;
int *hp1 = NULL, *hp2 = NULL ;
// #define BUF_SIZE ( 1024 * 1024 * 32 )
#define MAGIC_SIZE 33553920
#define BUF_SIZE ( MAGIC_SIZE + 100 )
struct cd {
int tid ;
// gridDim
int gdx ;
int gdy ;
int gdz ;
// blockIdx
int blkx ;
int blky ;
int blkz ;
// blockDim
int dx ;
int dy ;
int dz ;
// threadIdx
int tx ;
int ty ;
int tz ;
int cnt ;
} ;
#define CD_SIZE_E (sizeof( struct cd ) / sizeof(int))
enum {
T2DB_1DG = 1,
T3DB_1DG,
T3DB_2DG,
T3DB_3DG,
T1DB_1DG
} ;
void cuda_test_grid ( struct cd *d_a, int n ) ;
// #define TEST_LEN 128 // AAA 2D blocks ... 1D grid
#define TEST_LEN 576 // BBB 3D blocks ... 1D grid
__global__ void d_cuda_test_grid( struct cd *a, int size, int add, int ttt )
{
int blockid, tid ;
switch ( ttt ) {
case T1DB_1DG :
tid = blockIdx.x * blockDim.x + threadIdx.x ;
break ;
case T2DB_1DG :
tid = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x ; // AAA
break ;
case T3DB_1DG :
tid = blockIdx.x * ( blockDim.x * blockDim.y * blockDim.z ) +
threadIdx.z * ( blockDim.x * blockDim.y ) +
threadIdx.y * blockDim.x + threadIdx.x ; // BBB
break ;
case T3DB_3DG :
// NOTE: gridDim.z is never used in the calculation, but it is still used
// by the hardware for size limiting ... past this limit, the
// engine stops ...
blockid = blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z *
( gridDim.x * gridDim.y ) ;
tid = blockid * ( blockDim.x * blockDim.y * blockDim.z ) +
threadIdx.z * ( blockDim.x * blockDim.y ) +
threadIdx.y * blockDim.x + threadIdx.x ;
break ;
case T3DB_2DG :
blockid = blockIdx.x + blockIdx.y * gridDim.x ;
tid = blockid * ( blockDim.x * blockDim.y * blockDim.z ) +
threadIdx.z * ( blockDim.x * blockDim.y ) +
threadIdx.y * blockDim.x + threadIdx.x ;
#ifdef CUDA_OBS
tid = blockIdx.x * gridDim.x * ( blockDim.x * blockDim.y * blockDim.z ) +
blockIdx.x * ( blockDim.x * blockDim.y * blockDim.z ) +
threadIdx.z * ( blockDim.x * blockDim.y ) +
threadIdx.y * blockDim.x + threadIdx.x ; // BBB
#endif
break ;
default :
tid = blockIdx.x * ( blockDim.x * blockDim.y * blockDim.z ) +
threadIdx.z * ( blockDim.x * blockDim.y ) +
threadIdx.y * blockDim.x + threadIdx.x ; // BBB
}
while ( tid < size )
{
a[tid].tid = tid ;
a[tid].blkx = blockIdx.x ;
a[tid].blky = blockIdx.y ;
a[tid].blkz = blockIdx.z ;
a[tid].tx = threadIdx.x ;
a[tid].ty = threadIdx.y ;
a[tid].tz = threadIdx.z ;
a[tid].gdx = gridDim.x ;
a[tid].gdy = gridDim.y ;
a[tid].gdz = gridDim.z ;
a[tid].dx = blockDim.x ;
a[tid].dy = blockDim.y ;
a[tid].dz = blockDim.z ;
a[tid].cnt++ ;
tid += size ;
}
}
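// Worked example of the T3DB_3DG linearisation above, using the launch
// configuration set in cuda_test_grid (gridDim = (3,2,4), blockDim = (4,3,2)):
// for blockIdx = (1,1,0) and threadIdx = (2,1,1)
//   blockid = 1 + 1*3 + 0*(3*2) = 4
//   tid     = 4*(4*3*2) + 1*(4*3) + 1*4 + 2 = 96 + 12 + 4 + 2 = 114
// i.e. both block and thread indices are linearised with x varying fastest.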
__global__ void d_cuda_test_inc ( struct cd *a, int size, int add, int ttt )
{
int tid = blockIdx.x * blockDim.x + threadIdx.x ;
while ( tid < size )
{
a[tid].tid = tid ;
a[tid].blkx = blockIdx.x ;
a[tid].blky = blockIdx.y ;
a[tid].blkz = blockIdx.z ;
a[tid].cnt++ ;
tid += add ;
}
}
void
cuda_test_inc ( struct cd *d_a, int n, int ttt )
{
#ifdef CUDA_DBG
fprintf( stderr, "%s: dp %p cnt %d type %d\n", __func__, d_a, n, ttt ) ;
#endif
d_cuda_test_inc <<< 2, 32 >>> (d_a, n, 64, ttt ) ;
cudaThreadSynchronize() ;
}
void
cuda_test_grid ( struct cd *d_a, int n, int ttt )
{
dim3 threadsPerBlock(8,8,1) ;
dim3 nBlocks( 2, 2 ) ;
#ifdef CUDA_DBG
fprintf( stderr, "%s: dp %p cnt %d type %d\n", __func__, d_a, n, ttt ) ;
#endif
switch ( ttt ) {
case T1DB_1DG :
d_cuda_test_grid <<< 2, 32 >>> (d_a, n, n, ttt ) ;
break ;
case T2DB_1DG :
threadsPerBlock.x = 8 ;
threadsPerBlock.y = 8 ;
threadsPerBlock.z = 1 ;
d_cuda_test_grid <<< 2, threadsPerBlock >>> (d_a, n, n, ttt ) ;
break ;
case T3DB_1DG:
threadsPerBlock.x = 4 ;
threadsPerBlock.y = 3 ;
threadsPerBlock.z = 2 ;
d_cuda_test_grid <<< 2, threadsPerBlock >>> (d_a, n, n, ttt ) ;
break ;
case T3DB_2DG:
threadsPerBlock.x = 4 ;
threadsPerBlock.y = 3 ;
threadsPerBlock.z = 2 ;
nBlocks.x = 3 ; // ( n / threadsPerBlock.x, n / threadsPerBlock.y ) ;
nBlocks.y = 2 ;
d_cuda_test_grid <<< nBlocks, threadsPerBlock >>> (d_a, n, n, ttt ) ;
break ;
case T3DB_3DG:
threadsPerBlock.x = 4 ;
threadsPerBlock.y = 3 ;
threadsPerBlock.z = 2 ;
nBlocks.x = 3 ;
nBlocks.y = 2 ;
nBlocks.z = 4 ;
d_cuda_test_grid <<< nBlocks, threadsPerBlock >>> (d_a, n, n, ttt ) ;
break ;
default :
exit( 33 ) ;
}
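// Summary of the thread counts launched by each case above (blocks x threads per block):
//   T1DB_1DG : 2 x 32 = 64, T2DB_1DG : 2 x 64 = 128, T3DB_1DG : 2 x 24 = 48,
//   T3DB_2DG : 6 x 24 = 144, T3DB_3DG : 24 x 24 = 576 = TEST_LEN.
// d_cuda_test_grid advances tid by the full size, so each thread touches at most
// one element and only the T3DB_3DG case covers every element of the array.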
cudaThreadSynchronize() ;
}
int main( int ac, char *av[] )
{
int i, k, *dp ;
setbuf( stdout, NULL ) ;
setbuf( stderr, NULL ) ;
i = BUF_SIZE * sizeof ( struct cd ) ;
if (( k = cudaMalloc( &dp, i )) != cudaSuccess )
{
printf("%s: cube alloc failed %d \n", __func__, k ) ;
exit ( 0 ) ;
}
printf("size %d\n", i ) ;
set_device_mem_i ( dp, TEST_LEN * CD_SIZE_E * sizeof ( int ) + 200, 1 ) ;
dbg_init( 1024 * 1024 ) ;
// test of increments of tid in each thread
cuda_test_inc (( struct cd *) dp, TEST_LEN, T1DB_1DG ) ;
dbg_p_d_data_i_mn ("done", dp, CD_SIZE_E * TEST_LEN, CD_SIZE_E, TEST_LEN, CD_SIZE_E ) ;
#ifdef CUDA_OBS
// test of thread/block/grid ...
cuda_test_grid (( struct cd *) dp, TEST_LEN, T1DB_1DG ) ;
dbg_p_d_data_i_mn ("done", dp, CD_SIZE_E * TEST_LEN, CD_SIZE_E, TEST_LEN, CD_SIZE_E ) ;
#endif
}
|
61475f18cde26059035fef26d5fcbbc5bc081818.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <memory>
#include <random>
#include "paddle/platform/dynload/hiprand/hiprand.h"
#include "paddle/platform/gpu_info.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
template <typename T>
class GaussianRandomKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
float mean = context.op_.GetAttr<float>("mean");
float std = context.op_.GetAttr<float>("std");
auto* tensor = context.Output<framework::Tensor>(0);
T* data = tensor->mutable_data<T>(context.GetPlace());
int seed = context.op_.GetAttr<int>("seed");
if (seed == 0) {
seed = std::random_device()();
}
hiprandGenerator_t g;
PADDLE_ENFORCE(platform::dynload::hiprandCreateGenerator(
&g, HIPRAND_RNG_PSEUDO_DEFAULT));
PADDLE_ENFORCE(
platform::dynload::hiprandSetPseudoRandomGeneratorSeed(g, seed));
platform::dynload::hiprandGenerateNormal(
g, data, framework::product(tensor->dims()), mean, std);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(gaussian_random, ops::GaussianRandomKernel<float>);
| 61475f18cde26059035fef26d5fcbbc5bc081818.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <memory>
#include <random>
#include "paddle/platform/dynload/curand.h"
#include "paddle/platform/gpu_info.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
template <typename T>
class GaussianRandomKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
float mean = context.op_.GetAttr<float>("mean");
float std = context.op_.GetAttr<float>("std");
auto* tensor = context.Output<framework::Tensor>(0);
T* data = tensor->mutable_data<T>(context.GetPlace());
int seed = context.op_.GetAttr<int>("seed");
if (seed == 0) {
seed = std::random_device()();
}
curandGenerator_t g;
PADDLE_ENFORCE(platform::dynload::curandCreateGenerator(
&g, CURAND_RNG_PSEUDO_DEFAULT));
PADDLE_ENFORCE(
platform::dynload::curandSetPseudoRandomGeneratorSeed(g, seed));
platform::dynload::curandGenerateNormal(
g, data, framework::product(tensor->dims()), mean, std);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(gaussian_random, ops::GaussianRandomKernel<float>);
|
8a901d80eac9890fe6b6a16da48ad3c2f4231ba9.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/*
 * A GPU-based solver for the puzzle-piece (pentomino-style) tiling problem
 */
// System includes
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_cuda.h>
#include <helper_functions.h>
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
////////////////////////////////////////////////////////////////////////////////
/* Records the size and shape of a single puzzle piece */
struct GraphNode
{
char shape[5][5]; //shape of the piece; bool also takes 1 byte, so either char or bool works
int x; //height of the piece
int y; //width of the piece
int fill_x; //x coordinate of the first filled cell in the piece
int fill_y; //y coordinate of the first filled cell in the piece
};
/*
 *Records the variants of each piece:
 *the original plus its rotations and flips;
 *at most 8 variants
 */
struct GraphFormat
{
GraphNode format[8]; //variants of the piece, at most 8
int formatCount; //total number of variants
char c; //piece identifier: 123456789abc
};
/*
 *Stores all the pieces;
 *12 pieces in total
 */
struct GraphAll
{
GraphFormat graph[12]; //the individual pieces
int graphCount; //total number of pieces
};
/*
 *Data structure for the board (matrix) that has to be filled;
 *one instance is assigned to each GPU thread
 */
struct MatrixNode{
char shape[20][20]; //the 12 initial pieces include 3*3 ones, so boards smaller than 3 have no solution; sizes start at 3*20, hence a fixed 20*20 shape
int x, y; //board height and width
bool solution[12][8]; //feasible next placements for this board
bool graphUsed[12]; //usage flags for the 12 pieces
int thisLevelCount; //number of new pieces that can be placed on the current board
int depth; //depth of this level, i.e. how many pieces have already been placed
};
///////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_graph memory to process (in and out)
///////////////////////////////////////////////////////////////////////////////
__global__ void gpuTestSiukwan(GraphAll *const g_graph)
{
// write data to global memory
const unsigned int tid = threadIdx.x;
for (int k = 0; k < g_graph->graph[tid].formatCount; k++)
{
for (int i = 0; i < g_graph->graph[tid].format[k].x; i++)
{
for (int j = 0; j < g_graph->graph[tid].format[k].y; j++)
{
//g_graph->graph[tid].format[k].shape[i][j] += '0';
}
}
}
}
extern "C" bool
SiukwanTest(GraphAll *const g_graph)
{
gpuTestSiukwan << < 1, 12 >> >(g_graph);
return true;
}
/*
Function : getFirstUnfill
Purpose : find the first empty cell (its coordinates) in matrix g_mat
Parameters:
g_mat : the matrix to inspect
*/
__device__ void getFirstUnfill(MatrixNode &g_mat, int &x, int&y)
{
for (int i = 0; i <g_mat.x; i++)
{
for (int j = 0; j < g_mat.y; j++)
{//find the first unfilled position, record it and break out
if (g_mat.shape[i][j] == 0)
{//0 means the cell is not filled yet
x = i;
y = j;
break;//stop as soon as the first one is found
}
}
if (x != -1) break;
}
}
/*
Function : getFirstFill
Purpose : find the first filled cell (its coordinates) in graph
Parameters:
graph : the piece to inspect
*/
__device__ void getFirstFill(GraphNode &graph, int &x, int&y)
{
for (int j = 0; j < graph.x; j++)
{
for (int h = 0; h < graph.y; h++)
{//find the first filled position, record it and break out
if (graph.shape[j][h] != 0)
{//non-zero means the cell is filled
x = j;
y = h;
break;
}
}
if (x != -1) break;
}
}
/*
Function : canFillMatrix
Purpose : check whether the piece can be placed into the matrix
Parameters:
g_mat : the matrix being filled
graph : the piece used to fill it
toFill_x : x coordinate of the first empty cell of the matrix
toFill_y : y coordinate of the first empty cell of the matrix
fill_x : x coordinate of the first non-empty cell of the piece
fill_y : y coordinate of the first non-empty cell of the piece
*/
__device__ bool canFillMatrix(MatrixNode &g_mat, GraphNode &graph, int&toFill_x, int&toFill_y, int&fill_x, int&fill_y)
{//decide whether the piece fits into the matrix, where toFill is the matrix's first empty cell
for (int i = 0; i < graph.x; i++)
{
for (int j = 0; j < graph.y; j++)
{
if (i + toFill_x - fill_x >= g_mat.x ||
j + toFill_y - fill_y >= g_mat.y ||
i + toFill_x - fill_x < 0 ||
j + toFill_y - fill_y < 0
)
{//the piece would go out of bounds, return false
return false;
}
else if (graph.shape[i][j] != 0 && g_mat.shape[i + toFill_x - fill_x][j + toFill_y - fill_y] != 0)
{//the target cell is already occupied, return false
return false;
}
}
}
return true;
}
__global__ void dfsCUDA_LastVersion(MatrixNode *const g_mat, GraphAll *const g_graph, int*solutionSum)
{
// write data to global memory
const unsigned int tid = (blockIdx.x*blockDim.x) + threadIdx.x;
if (tid >= solutionSum[0]) return;
//locate the first unfilled cell
int unfill_x = -1, unfill_y = -1;
getFirstUnfill(g_mat[tid], unfill_x, unfill_y);
//initially only 12 threads are launched, so the thread id is also the piece id
if (g_mat[tid].depth == 0)
{
for (int j = 0; j < g_graph->graph[tid].formatCount; j++)
{
if (canFillMatrix(g_mat[tid], g_graph->graph[tid].format[j], unfill_x, unfill_y, g_graph->graph[tid].format[j].fill_x, g_graph->graph[tid].format[j].fill_y))
{//record that variant j of this piece is feasible here
//increment the answer count for this level
g_mat[tid].thisLevelCount++;
g_mat[tid].solution[tid][j] = true;//mark placement (tid, j) as feasible
}
}
}
else
{
//iterate over all 12 pieces under the current board
for (int i = 0; i < 12; i++)
{
if (g_mat[tid].graphUsed[i])
continue;
for (int j = 0; j < g_graph->graph[i].formatCount; j++)
{
if (canFillMatrix(g_mat[tid], g_graph->graph[i].format[j], unfill_x, unfill_y, g_graph->graph[i].format[j].fill_x, g_graph->graph[i].format[j].fill_y))
{//record that variant j of piece i is feasible here
//increment the answer count for this level
g_mat[tid].thisLevelCount++;
g_mat[tid].solution[i][j] = true;//mark placement (i, j) as feasible
}
}
}
}
//increase the depth
g_mat[tid].depth++;
}
//host-side interface function
extern "C" void runCUDA(int blocks, int threads, MatrixNode *const g_mat, GraphAll *const g_graph, int *solutionSum)
{
dfsCUDA_LastVersion << < blocks, threads >> >(g_mat, g_graph, solutionSum);
}
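// A minimal host-side sketch of how runCUDA could be driven (hypothetical
// driver code, not part of the original file; d_mat and d_graph are assumed
// to be device buffers of MatrixNode / GraphAll initialised elsewhere):
// int active = 1; // number of boards expanded at this level
// int *d_active;
// hipMalloc(&d_active, sizeof(int));
// hipMemcpy(d_active, &active, sizeof(int), hipMemcpyHostToDevice);
// runCUDA((active + 255) / 256, 256, d_mat, d_graph, d_active);
// hipDeviceSynchronize();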
| 8a901d80eac9890fe6b6a16da48ad3c2f4231ba9.cu | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/*
 * A GPU-based solver for the puzzle-piece (pentomino-style) tiling problem
 */
// System includes
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_cuda.h>
#include <helper_functions.h>
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
////////////////////////////////////////////////////////////////////////////////
/* Records the size and shape of a single puzzle piece */
struct GraphNode
{
char shape[5][5]; //shape of the piece; bool also takes 1 byte, so either char or bool works
int x; //height of the piece
int y; //width of the piece
int fill_x; //x coordinate of the first filled cell in the piece
int fill_y; //y coordinate of the first filled cell in the piece
};
/*
 *Records the variants of each piece:
 *the original plus its rotations and flips;
 *at most 8 variants
 */
struct GraphFormat
{
GraphNode format[8]; //variants of the piece, at most 8
int formatCount; //total number of variants
char c; //piece identifier: 123456789abc
};
/*
 *Stores all the pieces;
 *12 pieces in total
 */
struct GraphAll
{
GraphFormat graph[12]; //the individual pieces
int graphCount; //total number of pieces
};
/*
 *Data structure for the board (matrix) that has to be filled;
 *one instance is assigned to each GPU thread
 */
struct MatrixNode{
char shape[20][20]; //the 12 initial pieces include 3*3 ones, so boards smaller than 3 have no solution; sizes start at 3*20, hence a fixed 20*20 shape
int x, y; //board height and width
bool solution[12][8]; //feasible next placements for this board
bool graphUsed[12]; //usage flags for the 12 pieces
int thisLevelCount; //number of new pieces that can be placed on the current board
int depth; //depth of this level, i.e. how many pieces have already been placed
};
///////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_graph memory to process (in and out)
///////////////////////////////////////////////////////////////////////////////
__global__ void gpuTestSiukwan(GraphAll *const g_graph)
{
// write data to global memory
const unsigned int tid = threadIdx.x;
for (int k = 0; k < g_graph->graph[tid].formatCount; k++)
{
for (int i = 0; i < g_graph->graph[tid].format[k].x; i++)
{
for (int j = 0; j < g_graph->graph[tid].format[k].y; j++)
{
//g_graph->graph[tid].format[k].shape[i][j] += '0';
}
}
}
}
extern "C" bool
SiukwanTest(GraphAll *const g_graph)
{
gpuTestSiukwan << < 1, 12 >> >(g_graph);
return true;
}
/*
Function : getFirstUnfill
Purpose : find the first empty cell (its coordinates) in matrix g_mat
Parameters:
g_mat : the matrix to inspect
*/
__device__ void getFirstUnfill(MatrixNode &g_mat, int &x, int&y)
{
for (int i = 0; i <g_mat.x; i++)
{
for (int j = 0; j < g_mat.y; j++)
{//find the first unfilled position, record it and break out
if (g_mat.shape[i][j] == 0)
{//0 means the cell is not filled yet
x = i;
y = j;
break;//stop as soon as the first one is found
}
}
if (x != -1) break;
}
}
/*
Function : getFirstFill
Purpose : find the first filled cell (its coordinates) in graph
Parameters:
graph : the piece to inspect
*/
__device__ void getFirstFill(GraphNode &graph, int &x, int&y)
{
for (int j = 0; j < graph.x; j++)
{
for (int h = 0; h < graph.y; h++)
{//find the first filled position, record it and break out
if (graph.shape[j][h] != 0)
{//non-zero means the cell is filled
x = j;
y = h;
break;
}
}
if (x != -1) break;
}
}
/*
Function : canFillMatrix
Purpose : check whether the piece can be placed into the matrix
Parameters:
g_mat : the matrix being filled
graph : the piece used to fill it
toFill_x : x coordinate of the first empty cell of the matrix
toFill_y : y coordinate of the first empty cell of the matrix
fill_x : x coordinate of the first non-empty cell of the piece
fill_y : y coordinate of the first non-empty cell of the piece
*/
__device__ bool canFillMatrix(MatrixNode &g_mat, GraphNode &graph, int&toFill_x, int&toFill_y, int&fill_x, int&fill_y)
{//decide whether the piece fits into the matrix, where toFill is the matrix's first empty cell
for (int i = 0; i < graph.x; i++)
{
for (int j = 0; j < graph.y; j++)
{
if (i + toFill_x - fill_x >= g_mat.x ||
j + toFill_y - fill_y >= g_mat.y ||
i + toFill_x - fill_x < 0 ||
j + toFill_y - fill_y < 0
)
{//the piece would go out of bounds, return false
return false;
}
else if (graph.shape[i][j] != 0 && g_mat.shape[i + toFill_x - fill_x][j + toFill_y - fill_y] != 0)
{//the target cell is already occupied, return false
return false;
}
}
}
return true;
}
__global__ void dfsCUDA_LastVersion(MatrixNode *const g_mat, GraphAll *const g_graph, int*solutionSum)
{
// write data to global memory
const unsigned int tid = (blockIdx.x*blockDim.x) + threadIdx.x;
if (tid >= solutionSum[0]) return;
//locate the first unfilled cell
int unfill_x = -1, unfill_y = -1;
getFirstUnfill(g_mat[tid], unfill_x, unfill_y);
//initially only 12 threads are launched, so the thread id is also the piece id
if (g_mat[tid].depth == 0)
{
for (int j = 0; j < g_graph->graph[tid].formatCount; j++)
{
if (canFillMatrix(g_mat[tid], g_graph->graph[tid].format[j], unfill_x, unfill_y, g_graph->graph[tid].format[j].fill_x, g_graph->graph[tid].format[j].fill_y))
{//record that variant j of this piece is feasible here
//increment the answer count for this level
g_mat[tid].thisLevelCount++;
g_mat[tid].solution[tid][j] = true;//mark placement (tid, j) as feasible
}
}
}
else
{
//iterate over all 12 pieces under the current board
for (int i = 0; i < 12; i++)
{
if (g_mat[tid].graphUsed[i])
continue;
for (int j = 0; j < g_graph->graph[i].formatCount; j++)
{
if (canFillMatrix(g_mat[tid], g_graph->graph[i].format[j], unfill_x, unfill_y, g_graph->graph[i].format[j].fill_x, g_graph->graph[i].format[j].fill_y))
{//record that variant j of piece i is feasible here
//increment the answer count for this level
g_mat[tid].thisLevelCount++;
g_mat[tid].solution[i][j] = true;//mark placement (i, j) as feasible
}
}
}
}
//increase the depth
g_mat[tid].depth++;
}
//host-side interface function
extern "C" void runCUDA(int blocks, int threads, MatrixNode *const g_mat, GraphAll *const g_graph, int *solutionSum)
{
dfsCUDA_LastVersion << < blocks, threads >> >(g_mat, g_graph, solutionSum);
}
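// A minimal host-side sketch of how runCUDA could be driven (hypothetical
// driver code, not part of the original file; d_mat and d_graph are assumed
// to be device buffers of MatrixNode / GraphAll initialised elsewhere):
// int active = 1; // number of boards expanded at this level
// int *d_active;
// cudaMalloc(&d_active, sizeof(int));
// cudaMemcpy(d_active, &active, sizeof(int), cudaMemcpyHostToDevice);
// runCUDA((active + 255) / 256, 256, d_mat, d_graph, d_active);
// cudaDeviceSynchronize();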
|
87b849eadc5be6c8589d61e52d9c22caf0b29c3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "random_number_generation_tests.cuh"
SCENARIO("[DEVICE] Uniform random number generation", "[d-urng]") {
GIVEN("An appropriate seed") {
hiprandState_t *state;
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&state),
sizeof(hiprandState_t)));
initialise_rng_states(1,
state);
WHEN("The random number generator is called") {
double r;
uniform_prng_launcher(1,
state,
&r);
THEN("The result should be between 0 and 1") {
REQUIRE(r >= 0.);
REQUIRE(r <= 1.);
}
}
// WHEN("We assign the local seed to the global seed") {
// g_rng = rng;
// unif01_Gen *gen;
// char* rng_name = "g_uniform_prng";
// gen = unif01_CreateExternGen01(rng_name,
// g_uniform_prng);
// THEN("We expect to pass small crush") {
// bbattery_SmallCrush(gen);
// bool complete = true;
// REQUIRE(complete);
// }
// unif01_DeleteExternGen01(gen);
// }
hipFree(state);
}
}
SCENARIO("[DEVICE] Normally distributed random number generation", "[d-nrng]") {
GIVEN("An array of appropriate seeds") {
int num_test = 10000;
hiprandState_t *state;
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&state),
num_test*sizeof(hiprandState_t)));
initialise_rng_states(num_test,
state);
WHEN("We generate 10,000 numbers using a mean of 0 and a std of 1") {
double *d_test_values;
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_test_values),
num_test*sizeof(double)));
gaussian_prng(num_test,
state,
d_test_values);
double *test_values;
test_values = reinterpret_cast<double*>(calloc(num_test,
sizeof(double)));
checkCudaErrors(hipMemcpy(test_values,
d_test_values,
num_test*sizeof(double),
hipMemcpyDeviceToHost));
THEN("The result should pass the back-of-the-envelope test") {
double val_mean = mean(test_values,
num_test);
double val_std = std_dev(test_values,
num_test);
double val_max = *std::max_element(test_values,
test_values+num_test);
double val_min = *std::min_element(test_values,
test_values+num_test);
double Z_max = z_score(val_max,
val_mean,
val_std);
double Z_min = z_score(val_min,
val_mean,
val_std);
REQUIRE(Z_max <= 4.);
REQUIRE(Z_min >=-4.);
}
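// Note on the +/-4 bound above: for 10,000 i.i.d. N(0,1) samples the
// leading-order estimate of the expected extreme is sqrt(2*ln(10000)) ~ 4.3,
// so this is only a rough back-of-the-envelope sanity check rather than a
// strict statistical test, and it can occasionally fail by chance.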
hipFree(d_test_values);
free(test_values);
// Also need to implement a more rigorous test
}
hipFree(state);
}
}
| 87b849eadc5be6c8589d61e52d9c22caf0b29c3a.cu | #include "random_number_generation_tests.cuh"
SCENARIO("[DEVICE] Uniform random number generation", "[d-urng]") {
GIVEN("An appropriate seed") {
curandState *state;
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&state),
sizeof(curandState)));
initialise_rng_states(1,
state);
WHEN("The random number generator is called") {
double r;
uniform_prng_launcher(1,
state,
&r);
THEN("The result should be between 0 and 1") {
REQUIRE(r >= 0.);
REQUIRE(r <= 1.);
}
}
// WHEN("We assign the local seed to the global seed") {
// g_rng = rng;
// unif01_Gen *gen;
// char* rng_name = "g_uniform_prng";
// gen = unif01_CreateExternGen01(rng_name,
// g_uniform_prng);
// THEN("We expect to pass small crush") {
// bbattery_SmallCrush(gen);
// bool complete = true;
// REQUIRE(complete);
// }
// unif01_DeleteExternGen01(gen);
// }
cudaFree(state);
}
}
SCENARIO("[DEVICE] Normally distributed random number generation", "[d-nrng]") {
GIVEN("An array of appropriate seeds") {
int num_test = 10000;
curandState *state;
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&state),
num_test*sizeof(curandState)));
initialise_rng_states(num_test,
state);
WHEN("We generate 10,000 numbers using a mean of 0 and a std of 1") {
double *d_test_values;
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_test_values),
num_test*sizeof(double)));
gaussian_prng(num_test,
state,
d_test_values);
double *test_values;
test_values = reinterpret_cast<double*>(calloc(num_test,
sizeof(double)));
checkCudaErrors(cudaMemcpy(test_values,
d_test_values,
num_test*sizeof(double),
cudaMemcpyDeviceToHost));
THEN("The result should pass the back-of-the-envelope test") {
double val_mean = mean(test_values,
num_test);
double val_std = std_dev(test_values,
num_test);
double val_max = *std::max_element(test_values,
test_values+num_test);
double val_min = *std::min_element(test_values,
test_values+num_test);
double Z_max = z_score(val_max,
val_mean,
val_std);
double Z_min = z_score(val_min,
val_mean,
val_std);
REQUIRE(Z_max <= 4.);
REQUIRE(Z_min >=-4.);
}
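// Note on the +/-4 bound above: for 10,000 i.i.d. N(0,1) samples the
// leading-order estimate of the expected extreme is sqrt(2*ln(10000)) ~ 4.3,
// so this is only a rough back-of-the-envelope sanity check rather than a
// strict statistical test, and it can occasionally fail by chance.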
cudaFree(d_test_values);
free(test_values);
// Also need to implement a more rigorous test
}
cudaFree(state);
}
}
|
098e62fcdfcb4630f27b1149d85b50a6659af993.hip | // !!! This is a file automatically generated by hipify!!!
//This code is a modification of L2 cache benchmark from
//"Dissecting the NVIDIA Volta GPU Architecture via Microbenchmarking": https://arxiv.org/pdf/1804.06826.pdf
//This benchmark measures the maximum read bandwidth of L2 cache for 64 bit
//Compile this file using the following command to disable L1 cache:
// nvcc -Xptxas -dlcm=cg -Xptxas -dscm=wt l2_bw.cu
//This code has been tested on the Volta V100 architecture
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define BLOCKS_NUM 160
#define THREADS_NUM 1024 //thread number/block
#define TOTAL_THREADS (BLOCKS_NUM * THREADS_NUM)
#define REPEAT_TIMES 2048
#define WARP_SIZE 32
#define ARRAY_SIZE (TOTAL_THREADS + REPEAT_TIMES*WARP_SIZE) //Array size must not exceed L2 size
#define L2_SIZE 786432 //number of doubles L2 can store
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true){
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*
L2 cache is warmed up by loading posArray and adding sink
Start timing after warming up
Load posArray and add sink to generate read traffic
Repeat the previous step while offsetting posArray by one each iteration
Stop timing and store data
*/
__global__ void l2_bw (uint32_t*startClk, uint32_t*stopClk, double*dsink, double*posArray){
// block and thread index
uint32_t tid = threadIdx.x;
uint32_t bid = blockIdx.x;
uint32_t uid = bid * blockDim.x + tid;
// a register to avoid compiler optimization
double sink = 0;
// warm up l2 cache
for(uint32_t i = uid; i<ARRAY_SIZE; i+=TOTAL_THREADS){
double* ptr = posArray+i;
// every warp loads all data in l2 cache
// use cg modifier to cache the load in L2 and bypass L1
asm volatile("{\t\n"
".reg .f64 data;\n\t"
"ld.global.cg.f64 data, [%1];\n\t"
"add.f64 %0, data, %0;\n\t"
"}" : "+d"(sink) : "l"(ptr) : "memory"
);
}
asm volatile("bar.sync 0;");
// start timing
uint32_t start = 0;
asm volatile("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
// benchmark starts
// load data from l2 cache and accumulate,
for(uint32_t i = 0; i<REPEAT_TIMES; i++){
double* ptr = posArray+(i*WARP_SIZE)+uid;
asm volatile("{\t\n"
".reg .f64 data;\n\t"
"ld.global.cg.f64 data, [%1];\n\t"
"add.f64 %0, data, %0;\n\t"
"}" : "+d"(sink) : "l"(ptr) : "memory"
);
}
asm volatile("bar.sync 0;");
// stop timing
uint32_t stop = 0;
asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// store the result
startClk[bid*THREADS_NUM+tid] = start;
stopClk[bid*THREADS_NUM+tid] = stop;
dsink[bid*THREADS_NUM+tid] = sink;
}
int main(){
uint32_t *startClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
uint32_t *stopClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
double *posArray = (double*) malloc(ARRAY_SIZE*sizeof(double));
double *dsink = (double*) malloc(TOTAL_THREADS*sizeof(double));
double *posArray_g;
double *dsink_g;
uint32_t *startClk_g;
uint32_t *stopClk_g;
for (int i=0; i<ARRAY_SIZE; i++)
posArray[i] = (double)i;
gpuErrchk( hipMalloc(&posArray_g, ARRAY_SIZE*sizeof(double)) );
gpuErrchk( hipMalloc(&dsink_g, TOTAL_THREADS*sizeof(double)) );
gpuErrchk( hipMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
gpuErrchk( hipMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
gpuErrchk( hipMemcpy(posArray_g, posArray, ARRAY_SIZE*sizeof(double), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( l2_bw), dim3(BLOCKS_NUM),dim3(THREADS_NUM), 0, 0, startClk_g, stopClk_g, dsink_g, posArray_g);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint32_t), hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint32_t), hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy(dsink, dsink_g, TOTAL_THREADS*sizeof(double), hipMemcpyDeviceToHost) );
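// Derivation of the figures printed below: each of the TOTAL_THREADS
// (160*1024 = 163840) threads issues REPEAT_TIMES (2048) 8-byte loads in the
// timed loop, i.e. data = 163840 * 2048 * 8 = 2684354560 bytes (~2.5 GiB),
// and the elapsed time is taken from thread 0 of block 0 only, so the
// reported bandwidth is bytes per GPU clock cycle as seen by that thread.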
float bw;
unsigned long long data = (unsigned long long)TOTAL_THREADS*REPEAT_TIMES*8;
bw = (float)(data)/((float)(stopClk[0]-startClk[0]));
printf("L2 bandwidth = %f (byte/cycle)\n", bw);
printf("Total Clk number = %u \n", stopClk[0]-startClk[0]);
return 0;
}
| 098e62fcdfcb4630f27b1149d85b50a6659af993.cu | //This code is a modification of L2 cache benchmark from
//"Dissecting the NVIDIA Volta GPU Architecture via Microbenchmarking": https://arxiv.org/pdf/1804.06826.pdf
//This benchmark measures the maximum read bandwidth of L2 cache for 64 bit
//Compile this file using the following command to disable L1 cache:
// nvcc -Xptxas -dlcm=cg -Xptxas -dscm=wt l2_bw.cu
//This code has been tested on the Volta V100 architecture
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define BLOCKS_NUM 160
#define THREADS_NUM 1024 //thread number/block
#define TOTAL_THREADS (BLOCKS_NUM * THREADS_NUM)
#define REPEAT_TIMES 2048
#define WARP_SIZE 32
#define ARRAY_SIZE (TOTAL_THREADS + REPEAT_TIMES*WARP_SIZE) //Array size must not exceed L2 size
#define L2_SIZE 786432 //number of doubles L2 can store
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*
L2 cache is warmed up by loading posArray and adding sink
Start timing after warming up
Load posArray and add sink to generate read traffic
Repeat the previous step while offsetting posArray by one each iteration
Stop timing and store data
*/
__global__ void l2_bw (uint32_t*startClk, uint32_t*stopClk, double*dsink, double*posArray){
// block and thread index
uint32_t tid = threadIdx.x;
uint32_t bid = blockIdx.x;
uint32_t uid = bid * blockDim.x + tid;
// a register to avoid compiler optimization
double sink = 0;
// warm up l2 cache
for(uint32_t i = uid; i<ARRAY_SIZE; i+=TOTAL_THREADS){
double* ptr = posArray+i;
// every warp loads all data in l2 cache
// use cg modifier to cache the load in L2 and bypass L1
asm volatile("{\t\n"
".reg .f64 data;\n\t"
"ld.global.cg.f64 data, [%1];\n\t"
"add.f64 %0, data, %0;\n\t"
"}" : "+d"(sink) : "l"(ptr) : "memory"
);
}
asm volatile("bar.sync 0;");
// start timing
uint32_t start = 0;
asm volatile("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
// benchmark starts
// load data from l2 cache and accumulate,
for(uint32_t i = 0; i<REPEAT_TIMES; i++){
double* ptr = posArray+(i*WARP_SIZE)+uid;
asm volatile("{\t\n"
".reg .f64 data;\n\t"
"ld.global.cg.f64 data, [%1];\n\t"
"add.f64 %0, data, %0;\n\t"
"}" : "+d"(sink) : "l"(ptr) : "memory"
);
}
asm volatile("bar.sync 0;");
// stop timing
uint32_t stop = 0;
asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// store the result
startClk[bid*THREADS_NUM+tid] = start;
stopClk[bid*THREADS_NUM+tid] = stop;
dsink[bid*THREADS_NUM+tid] = sink;
}
int main(){
uint32_t *startClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
uint32_t *stopClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
double *posArray = (double*) malloc(ARRAY_SIZE*sizeof(double));
double *dsink = (double*) malloc(TOTAL_THREADS*sizeof(double));
double *posArray_g;
double *dsink_g;
uint32_t *startClk_g;
uint32_t *stopClk_g;
for (int i=0; i<ARRAY_SIZE; i++)
posArray[i] = (double)i;
gpuErrchk( cudaMalloc(&posArray_g, ARRAY_SIZE*sizeof(double)) );
gpuErrchk( cudaMalloc(&dsink_g, TOTAL_THREADS*sizeof(double)) );
gpuErrchk( cudaMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
gpuErrchk( cudaMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
gpuErrchk( cudaMemcpy(posArray_g, posArray, ARRAY_SIZE*sizeof(double), cudaMemcpyHostToDevice) );
l2_bw<<<BLOCKS_NUM,THREADS_NUM>>>(startClk_g, stopClk_g, dsink_g, posArray_g);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(dsink, dsink_g, TOTAL_THREADS*sizeof(double), cudaMemcpyDeviceToHost) );
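// Derivation of the figures printed below: each of the TOTAL_THREADS
// (160*1024 = 163840) threads issues REPEAT_TIMES (2048) 8-byte loads in the
// timed loop, i.e. data = 163840 * 2048 * 8 = 2684354560 bytes (~2.5 GiB),
// and the elapsed time is taken from thread 0 of block 0 only, so the
// reported bandwidth is bytes per GPU clock cycle as seen by that thread.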
float bw;
unsigned long long data = (unsigned long long)TOTAL_THREADS*REPEAT_TIMES*8;
bw = (float)(data)/((float)(stopClk[0]-startClk[0]));
printf("L2 bandwidth = %f (byte/cycle)\n", bw);
printf("Total Clk number = %u \n", stopClk[0]-startClk[0]);
return 0;
}
|
8e8da4f4d33f5d12b39ee09c8a4cbf8554a20d4d.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/conv_grad_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/fluid/framework/eigen.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/operators/conv_miopen_helper.h"
#else
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#endif
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/phi/kernels/funcs/padding.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/funcs/batch_norm_utils.h"
#include "paddle/phi/kernels/impl/conv_cudnn_impl.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
template <typename T, typename Context>
void ConvCudnnGradGradKernel(
const Context& ctx,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& out_grad,
const paddle::optional<DenseTensor>& input_grad_grad,
const paddle::optional<DenseTensor>& filter_grad_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_t,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search_t,
DenseTensor* input_grad,
DenseTensor* filter_grad,
DenseTensor* out_grad_grad) {
auto X = &input;
auto W = &filter;
auto dO = &out_grad;
auto ddX = input_grad_grad.get_ptr();
auto ddW = filter_grad_grad.get_ptr();
auto ddO = out_grad_grad;
auto dW = filter_grad;
auto dX = input_grad;
if (ddO) {
ctx.template Alloc<T>(ddO);
phi::funcs::SetConstant<Context, T> set_zero;
set_zero(ctx, ddO, static_cast<T>(0));
}
if (dW) {
ctx.template Alloc<T>(dW);
}
if (dX) {
ctx.template Alloc<T>(dX);
}
// const T* x = X->data<T>();
const T* dy = dO->data<T>();
const T* w = W->data<T>();
const T* ddx = nullptr;
const T* ddw = nullptr;
T *dw, *dx, *ddy;
dw = dx = ddy = nullptr;
T* transformed_dx = nullptr;
std::vector<int> dilations = dilations_t;
bool exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search_t;
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic,
false,
phi::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
std::vector<int> paddings = paddings_t;
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform Tensors to channel first-----------
DenseTensor transformed_X_channel(X->type());
DenseTensor transformed_dO_channel(dO->type());
DenseTensor transformed_ddX_channel(X->type());
DenseTensor transformed_ddO_channel(dO->type());
DenseTensor transformed_dX_channel(X->type());
if (channel_last) {
ResizeToChannelFirst<Context, T>(ctx, X, &transformed_X_channel);
TransToChannelFirst<Context, T>(ctx, X, &transformed_X_channel);
ResizeToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel);
TransToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel);
if (ddX) {
ResizeToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel);
TransToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel);
}
if (ddO) {
ResizeToChannelFirst<Context, T>(ctx, ddO, &transformed_ddO_channel);
}
if (dX) {
ResizeToChannelFirst<Context, T>(ctx, dX, &transformed_dX_channel);
ctx.template Alloc<T>(&transformed_dX_channel);
}
} else {
transformed_X_channel = *X;
transformed_dO_channel = *dO;
if (ddX) {
transformed_ddX_channel = *ddX;
}
if (ddO) {
transformed_ddO_channel.ShareDataWith(*ddO);
}
if (dX) {
transformed_dX_channel.ShareDataWith(*dX);
}
}
auto in_dims = transformed_X_channel.dims();
auto filter_dims = W->dims();
DDim in_data_dims = slice_ddim(in_dims, 2, in_dims.size());
DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(
&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim);
DenseTensor transformed_X(X->type());
DenseTensor transformed_ddX(X->type());
DenseTensor transformed_dX(X->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(X->dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_X_channel.dims()[0];
new_input_shape_vec[1] = transformed_X_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_X_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
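// Example of the asymmetric-padding handling above: with paddings = {1, 2} on
// one spatial dim, padding_diff = 1 and padding_common = 1, so the input is
// enlarged by one row on that dim (padded 0 before / 1 after via input_pad)
// and cuDNN/MIOpen is then called with the symmetric padding_common = 1.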
DDim new_input_shape(make_ddim(new_input_shape_vec));
transformed_X.Resize(new_input_shape);
transformed_ddX.Resize(new_input_shape);
transformed_dX.Resize(new_input_shape);
ctx.template Alloc<T>(&transformed_X);
if (ddX) {
ctx.template Alloc<T>(&transformed_ddX);
}
if (dX) {
ctx.template Alloc<T>(&transformed_dX);
}
// pad for input
const int rank = X->dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
funcs::PadFunction<Context, T, 4>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
funcs::PadFunction<Context, T, 4>(ctx,
input_pad,
transformed_ddX_channel,
pad_value,
&transformed_ddX);
}
} break;
case 5: {
funcs::PadFunction<Context, T, 5>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
funcs::PadFunction<Context, T, 5>(ctx,
input_pad,
transformed_ddX_channel,
pad_value,
&transformed_ddX);
}
} break;
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_X.ShareDataWith(transformed_X_channel);
if (ddX) {
transformed_ddX.ShareDataWith(transformed_ddX_channel);
}
if (dX) {
transformed_dX.ShareDataWith(transformed_dX_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* x = transformed_X.data<T>();
int iwo_group = groups;
int c_group = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
iwo_group = 1;
c_group = groups;
groups = 1;
#endif
auto dtype = paddle::platform::CudnnDataType<T>::type;
auto handle = ctx.cudnn_handle();
paddle::operators::ConvArgs args1{&transformed_ddX,
W,
&transformed_ddO_channel,
strides,
padding_common,
dilations,
dtype};
paddle::operators::ConvArgs args2{&transformed_X,
ddW,
&transformed_ddO_channel,
strides,
padding_common,
dilations,
dtype};
paddle::operators::ConvArgs args3{&transformed_ddX,
dW,
&transformed_dO_channel,
strides,
padding_common,
dilations,
dtype};
paddle::operators::ConvArgs args4{&transformed_dX,
ddW,
&transformed_dO_channel,
strides,
padding_common,
dilations,
dtype};
#ifdef PADDLE_WITH_HIP
paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result1;
paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result2;
paddle::operators::SearchResult<miopenConvBwdDataAlgorithm_t> data_result;
paddle::operators::SearchResult<miopenConvBwdWeightsAlgorithm_t>
filter_result;
#else
paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result1;
paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result2;
paddle::operators::SearchResult<cudnnConvolutionBwdDataAlgo_t> data_result;
paddle::operators::SearchResult<cudnnConvolutionBwdFilterAlgo_t>
filter_result;
#endif
auto layout = paddle::platform::GetCudnnTensorFormat(
paddle::platform::DataLayout::kNCHW);
// ddo = conv(ddI, W) + conv(I, ddW)
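// In addition to ddO above, the second-order terms computed further down are
// (a sketch of the math as implemented by args3/args4, not a full derivation):
//   dW = conv_backward_filter(ddI, dO)  and  dI = conv_backward_data(dO, ddW).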
size_t workspace_size = 0;
T* transformed_ddy_channel = nullptr;
if (ddO) {
ddy = ddO->data<T>();
transformed_ddy_channel = transformed_ddO_channel.data<T>();
if (ddX) {
args1.handle = handle;
args1.idesc.set(transformed_ddX, iwo_group);
args1.wdesc.set(*W, layout, iwo_group);
args1.odesc.set(transformed_ddO_channel, iwo_group);
args1.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search1 =
paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size = search1::GetWorkspaceSize(args1);
fwd_result1.algo = search1::Find<T>(
args1, exhaustive_search, false, workspace_size, ctx);
#else
using search1 =
paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_result1 = search1::Find<T>(args1, exhaustive_search, false, ctx);
workspace_size = search1::GetWorkspaceSize(args1, fwd_result1.algo);
#endif
}
if (ddW) {
ddw = ddW->data<T>();
args2.handle = handle;
args2.idesc.set(transformed_X, iwo_group);
args2.wdesc.set(*ddW, layout, iwo_group);
args2.odesc.set(transformed_ddO_channel, iwo_group);
args2.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search2 =
paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size =
std::max(workspace_size, search2::GetWorkspaceSize(args2));
fwd_result2.algo = search2::Find<T>(
args2, exhaustive_search, false, workspace_size, ctx);
#else
using search2 =
paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_result2 = search2::Find<T>(args2, exhaustive_search, false, ctx);
workspace_size = std::max(
workspace_size, search2::GetWorkspaceSize(args2, fwd_result2.algo));
#endif
}
}
if (dW && ddX) {
dw = dW->data<T>();
args3.handle = handle;
args3.idesc.set(transformed_ddX, iwo_group);
args3.wdesc.set(*dW, layout, iwo_group);
args3.odesc.set(transformed_dO_channel, iwo_group);
args3.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search3 =
paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
workspace_size = std::max(workspace_size, search3::GetWorkspaceSize(args3));
filter_result.algo = search3::Find<T>(
args3, exhaustive_search, deterministic, workspace_size, ctx);
#else
using search3 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_result =
search3::Find<T>(args3, exhaustive_search, deterministic, ctx);
workspace_size = std::max(
workspace_size, search3::GetWorkspaceSize(args3, filter_result.algo));
#endif
}
if (ddW && dX) {
transformed_dx = transformed_dX.data<T>();
args4.handle = handle;
args4.idesc.set(transformed_dX, iwo_group);
args4.wdesc.set(*ddW, layout, iwo_group);
args4.odesc.set(transformed_dO_channel, iwo_group);
args4.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search4 =
paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
workspace_size = std::max(workspace_size, search4::GetWorkspaceSize(args4));
data_result.algo = search4::Find<T>(
args4, exhaustive_search, deterministic, workspace_size, ctx);
#else
using search4 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_result =
search4::Find<T>(args4, exhaustive_search, deterministic, ctx);
workspace_size = std::max(
workspace_size, search4::GetWorkspaceSize(args4, data_result.algo));
#endif
}
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(
transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_dO_channel.dims(),
DataLayout::kNCHW,
&o_n,
&o_c,
&o_d,
&o_h,
&o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = W->numel() / groups;
paddle::operators::ScalingParamType<T> alpha = 1.0f;
paddle::operators::ScalingParamType<T> beta = 0.0f;
// NOTE(zhiqiu): inplace addto is not supportted in double grad yet.
// ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f :
// 0.0f;
// VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto");
auto wkspace_handle = ctx.cudnn_workspace_handle();
if (ddO) {
if (ddX) {
ddx = transformed_ddX.data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionForward(
handle,
&alpha,
args1.idesc.desc(),
ddx,
args1.wdesc.desc(),
w,
args1.cdesc.desc(),
fwd_result1.algo,
&beta,
args1.odesc.desc(),
transformed_ddy_channel,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionForward(
handle,
&alpha,
args1.idesc.desc(),
ddx + i * group_offset_in,
args1.wdesc.desc(),
w + i * group_offset_filter,
args1.cdesc.desc(),
fwd_result1.algo,
workspace_ptr,
workspace_size,
&beta,
args1.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
#endif
}
if (ddW) {
#ifdef PADDLE_WITH_HIP
// MIOPEN ONLY support beta to be 0.0f
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionForward(
handle,
&alpha,
args2.idesc.desc(),
x,
args2.wdesc.desc(),
ddw,
args2.cdesc.desc(),
fwd_result2.algo,
&beta,
args2.odesc.desc(),
transformed_ddy_channel,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionForward(
handle,
&alpha,
args2.idesc.desc(),
x + i * group_offset_in,
args2.wdesc.desc(),
ddw + i * group_offset_filter,
args2.cdesc.desc(),
fwd_result2.algo,
workspace_ptr,
workspace_size,
&alpha,
args2.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
#endif
}
if (channel_last) {
TransToChannelLast<Context, T>(ctx, &transformed_ddO_channel, ddO);
}
}
T* transformed_dy_channel = transformed_dO_channel.data<T>();
if (dW && ddX) {
ddx = transformed_ddX.data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionBackwardWeights(
handle,
&alpha,
args3.odesc.desc(),
transformed_dy_channel,
args3.idesc.desc(),
ddx,
args3.cdesc.desc(),
filter_result.algo,
&beta,
args3.wdesc.desc(),
dw,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionBackwardFilter(
handle,
&alpha,
args3.idesc.desc(),
ddx + i * group_offset_in,
args3.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args3.cdesc.desc(),
filter_result.algo,
workspace_ptr,
workspace_size,
&beta,
args3.wdesc.desc(),
dw + i * group_offset_filter));
},
workspace_size);
}
#endif
}
if (dX && ddW) {
ddw = ddW->data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionBackwardData(
handle,
&alpha,
args4.odesc.desc(),
transformed_dy_channel,
args4.wdesc.desc(),
ddw,
args4.cdesc.desc(),
data_result.algo,
&beta,
args4.idesc.desc(),
transformed_dx,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionBackwardData(
handle,
&alpha,
args4.wdesc.desc(),
ddw + i * group_offset_filter,
args4.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args4.cdesc.desc(),
data_result.algo,
workspace_ptr,
workspace_size,
&beta,
args4.idesc.desc(),
transformed_dx + i * group_offset_in));
},
workspace_size);
}
#endif
if (!is_sys_pad) {
// reverse padded input
std::vector<int> starts(X->dims().size(), 0);
std::vector<int> axes(X->dims().size(), 0);
for (size_t i = 0; i < X->dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
if (X->dims().size() == 4) {
paddle::operators::RemovePaddingSlice<Context, T, 4>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
} else {
paddle::operators::RemovePaddingSlice<Context, T, 5>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
}
}
if (channel_last) {
TransToChannelLast<Context, T>(ctx, &transformed_dX_channel, dX);
}
}
}
template <typename T, typename Context>
void DepthwiseConvCudnnGradGradKernel(
const Context& ctx,
const paddle::optional<DenseTensor>& input_grad_grad,
const paddle::optional<DenseTensor>& filter_grad_grad,
const DenseTensor& out_grad,
const DenseTensor& input,
const DenseTensor& filter,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_t,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search_t,
bool fuse_relu,
DenseTensor* out_grad_grad,
DenseTensor* input_grad,
DenseTensor* filter_grad) {
ConvCudnnGradGradKernel<T>(ctx,
input,
filter,
out_grad,
input_grad_grad,
filter_grad_grad,
strides,
paddings_t,
padding_algorithm,
groups,
dilations_t,
data_format,
use_addto,
workspace_size_MB,
exhaustive_search_t,
input_grad,
filter_grad,
out_grad_grad);
}
template <typename T, typename Context>
void Conv3DCudnnGradGradKernel(
const Context& ctx,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& out_grad,
const paddle::optional<DenseTensor>& input_grad_grad,
const paddle::optional<DenseTensor>& filter_grad_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_t,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search_t,
DenseTensor* input_grad,
DenseTensor* filter_grad,
DenseTensor* out_grad_grad) {
ConvCudnnGradGradKernel<T>(ctx,
input,
filter,
out_grad,
input_grad_grad,
filter_grad_grad,
strides,
paddings_t,
padding_algorithm,
groups,
dilations_t,
data_format,
use_addto,
workspace_size_MB,
exhaustive_search_t,
input_grad,
filter_grad,
out_grad_grad);
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(conv2d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::ConvCudnnGradGradKernel,
float,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(conv3d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3DCudnnGradGradKernel,
float,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
GPU,
ALL_LAYOUT,
phi::DepthwiseConvCudnnGradGradKernel,
float,
phi::dtype::float16) {}
#else
#if CUDNN_VERSION_MIN(8, 1, 0)
PD_REGISTER_KERNEL(conv2d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::ConvCudnnGradGradKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(conv3d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3DCudnnGradGradKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
GPU,
ALL_LAYOUT,
phi::DepthwiseConvCudnnGradGradKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
#else
PD_REGISTER_KERNEL(conv2d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::ConvCudnnGradGradKernel,
float,
double,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(conv3d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3DCudnnGradGradKernel,
float,
double,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
GPU,
ALL_LAYOUT,
phi::DepthwiseConvCudnnGradGradKernel,
float,
double,
phi::dtype::float16) {}
#endif
#endif
| 8e8da4f4d33f5d12b39ee09c8a4cbf8554a20d4d.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/conv_grad_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/fluid/framework/eigen.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/operators/conv_miopen_helper.h"
#else
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#endif
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/phi/kernels/funcs/padding.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/funcs/batch_norm_utils.h"
#include "paddle/phi/kernels/impl/conv_cudnn_impl.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
template <typename T, typename Context>
void ConvCudnnGradGradKernel(
const Context& ctx,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& out_grad,
const paddle::optional<DenseTensor>& input_grad_grad,
const paddle::optional<DenseTensor>& filter_grad_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_t,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search_t,
DenseTensor* input_grad,
DenseTensor* filter_grad,
DenseTensor* out_grad_grad) {
auto X = &input;
auto W = &filter;
auto dO = &out_grad;
auto ddX = input_grad_grad.get_ptr();
auto ddW = filter_grad_grad.get_ptr();
auto ddO = out_grad_grad;
auto dW = filter_grad;
auto dX = input_grad;
if (ddO) {
ctx.template Alloc<T>(ddO);
phi::funcs::SetConstant<Context, T> set_zero;
set_zero(ctx, ddO, static_cast<T>(0));
}
if (dW) {
ctx.template Alloc<T>(dW);
}
if (dX) {
ctx.template Alloc<T>(dX);
}
// const T* x = X->data<T>();
const T* dy = dO->data<T>();
const T* w = W->data<T>();
const T* ddx = nullptr;
const T* ddw = nullptr;
T *dw, *dx, *ddy;
dw = dx = ddy = nullptr;
T* transformed_dx = nullptr;
std::vector<int> dilations = dilations_t;
bool exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search_t;
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic,
false,
phi::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
std::vector<int> paddings = paddings_t;
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform Tensors to channel first-----------
DenseTensor transformed_X_channel(X->type());
DenseTensor transformed_dO_channel(dO->type());
DenseTensor transformed_ddX_channel(X->type());
DenseTensor transformed_ddO_channel(dO->type());
DenseTensor transformed_dX_channel(X->type());
if (channel_last) {
ResizeToChannelFirst<Context, T>(ctx, X, &transformed_X_channel);
TransToChannelFirst<Context, T>(ctx, X, &transformed_X_channel);
ResizeToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel);
TransToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel);
if (ddX) {
ResizeToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel);
TransToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel);
}
if (ddO) {
ResizeToChannelFirst<Context, T>(ctx, ddO, &transformed_ddO_channel);
}
if (dX) {
ResizeToChannelFirst<Context, T>(ctx, dX, &transformed_dX_channel);
ctx.template Alloc<T>(&transformed_dX_channel);
}
} else {
transformed_X_channel = *X;
transformed_dO_channel = *dO;
if (ddX) {
transformed_ddX_channel = *ddX;
}
if (ddO) {
transformed_ddO_channel.ShareDataWith(*ddO);
}
if (dX) {
transformed_dX_channel.ShareDataWith(*dX);
}
}
auto in_dims = transformed_X_channel.dims();
auto filter_dims = W->dims();
DDim in_data_dims = slice_ddim(in_dims, 2, in_dims.size());
DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(
&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim);
DenseTensor transformed_X(X->type());
DenseTensor transformed_ddX(X->type());
DenseTensor transformed_dX(X->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(X->dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_X_channel.dims()[0];
new_input_shape_vec[1] = transformed_X_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_X_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
DDim new_input_shape(make_ddim(new_input_shape_vec));
transformed_X.Resize(new_input_shape);
transformed_ddX.Resize(new_input_shape);
transformed_dX.Resize(new_input_shape);
ctx.template Alloc<T>(&transformed_X);
if (ddX) {
ctx.template Alloc<T>(&transformed_ddX);
}
if (dX) {
ctx.template Alloc<T>(&transformed_dX);
}
// pad for input
const int rank = X->dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
funcs::PadFunction<Context, T, 4>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
funcs::PadFunction<Context, T, 4>(ctx,
input_pad,
transformed_ddX_channel,
pad_value,
&transformed_ddX);
}
} break;
case 5: {
funcs::PadFunction<Context, T, 5>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
funcs::PadFunction<Context, T, 5>(ctx,
input_pad,
transformed_ddX_channel,
pad_value,
&transformed_ddX);
}
} break;
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_X.ShareDataWith(transformed_X_channel);
if (ddX) {
transformed_ddX.ShareDataWith(transformed_ddX_channel);
}
if (dX) {
transformed_dX.ShareDataWith(transformed_dX_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
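// Editor's note (added): worked example of the asymmetric-padding branch above, using
// assumed values not taken from this file: for a 2-D case with paddings = {1, 2, 0, 0}
// (top = 1, bottom = 2), padding_common[0] = min(1, 2) = 1, padding_diff[0] = 1, the H
// dimension of transformed_X grows by 1, and input_pad = {0,0, 0,0, 0,1, 0,0}; the extra
// row is padded explicitly at the bottom while the cuDNN/MIOpen descriptors are set up
// with the symmetric padding of 1.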
const T* x = transformed_X.data<T>();
int iwo_group = groups;
int c_group = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
iwo_group = 1;
c_group = groups;
groups = 1;
#endif
auto dtype = paddle::platform::CudnnDataType<T>::type;
auto handle = ctx.cudnn_handle();
paddle::operators::ConvArgs args1{&transformed_ddX,
W,
&transformed_ddO_channel,
strides,
padding_common,
dilations,
dtype};
paddle::operators::ConvArgs args2{&transformed_X,
ddW,
&transformed_ddO_channel,
strides,
padding_common,
dilations,
dtype};
paddle::operators::ConvArgs args3{&transformed_ddX,
dW,
&transformed_dO_channel,
strides,
padding_common,
dilations,
dtype};
paddle::operators::ConvArgs args4{&transformed_dX,
ddW,
&transformed_dO_channel,
strides,
padding_common,
dilations,
dtype};
#ifdef PADDLE_WITH_HIP
paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result1;
paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result2;
paddle::operators::SearchResult<miopenConvBwdDataAlgorithm_t> data_result;
paddle::operators::SearchResult<miopenConvBwdWeightsAlgorithm_t>
filter_result;
#else
paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result1;
paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result2;
paddle::operators::SearchResult<cudnnConvolutionBwdDataAlgo_t> data_result;
paddle::operators::SearchResult<cudnnConvolutionBwdFilterAlgo_t>
filter_result;
#endif
auto layout = paddle::platform::GetCudnnTensorFormat(
paddle::platform::DataLayout::kNCHW);
// ddo = conv(ddI, W) + conv(I, ddW)
size_t workspace_size = 0;
T* transformed_ddy_channel = nullptr;
if (ddO) {
ddy = ddO->data<T>();
transformed_ddy_channel = transformed_ddO_channel.data<T>();
if (ddX) {
args1.handle = handle;
args1.idesc.set(transformed_ddX, iwo_group);
args1.wdesc.set(*W, layout, iwo_group);
args1.odesc.set(transformed_ddO_channel, iwo_group);
args1.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search1 =
paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size = search1::GetWorkspaceSize(args1);
fwd_result1.algo = search1::Find<T>(
args1, exhaustive_search, false, workspace_size, ctx);
#else
using search1 =
paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_result1 = search1::Find<T>(args1, exhaustive_search, false, ctx);
workspace_size = search1::GetWorkspaceSize(args1, fwd_result1.algo);
#endif
}
if (ddW) {
ddw = ddW->data<T>();
args2.handle = handle;
args2.idesc.set(transformed_X, iwo_group);
args2.wdesc.set(*ddW, layout, iwo_group);
args2.odesc.set(transformed_ddO_channel, iwo_group);
args2.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search2 =
paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size =
std::max(workspace_size, search2::GetWorkspaceSize(args2));
fwd_result2.algo = search2::Find<T>(
args2, exhaustive_search, false, workspace_size, ctx);
#else
using search2 =
paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_result2 = search2::Find<T>(args2, exhaustive_search, false, ctx);
workspace_size = std::max(
workspace_size, search2::GetWorkspaceSize(args2, fwd_result2.algo));
#endif
}
}
if (dW && ddX) {
dw = dW->data<T>();
args3.handle = handle;
args3.idesc.set(transformed_ddX, iwo_group);
args3.wdesc.set(*dW, layout, iwo_group);
args3.odesc.set(transformed_dO_channel, iwo_group);
args3.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search3 =
paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
workspace_size = std::max(workspace_size, search3::GetWorkspaceSize(args3));
filter_result.algo = search3::Find<T>(
args3, exhaustive_search, deterministic, workspace_size, ctx);
#else
using search3 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_result =
search3::Find<T>(args3, exhaustive_search, deterministic, ctx);
workspace_size = std::max(
workspace_size, search3::GetWorkspaceSize(args3, filter_result.algo));
#endif
}
if (ddW && dX) {
transformed_dx = transformed_dX.data<T>();
args4.handle = handle;
args4.idesc.set(transformed_dX, iwo_group);
args4.wdesc.set(*ddW, layout, iwo_group);
args4.odesc.set(transformed_dO_channel, iwo_group);
args4.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search4 =
paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
workspace_size = std::max(workspace_size, search4::GetWorkspaceSize(args4));
data_result.algo = search4::Find<T>(
args4, exhaustive_search, deterministic, workspace_size, ctx);
#else
using search4 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_result =
search4::Find<T>(args4, exhaustive_search, deterministic, ctx);
workspace_size = std::max(
workspace_size, search4::GetWorkspaceSize(args4, data_result.algo));
#endif
}
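// Editor's note (added): reading of the four algorithm searches above, based on how the
// ConvArgs are wired in this file: args1 is the forward pass conv(ddX, W) and args2 the
// forward pass conv(X, ddW), both accumulated into ddO; args3 is the backward-filter pass
// producing dW from (ddX, dO); args4 is the backward-data pass producing dX from (dO, ddW).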
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(
transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_dO_channel.dims(),
DataLayout::kNCHW,
&o_n,
&o_c,
&o_d,
&o_h,
&o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = W->numel() / groups;
paddle::operators::ScalingParamType<T> alpha = 1.0f;
paddle::operators::ScalingParamType<T> beta = 0.0f;
// NOTE(zhiqiu): inplace addto is not supported in double grad yet.
// ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f :
// 0.0f;
// VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto");
auto wkspace_handle = ctx.cudnn_workspace_handle();
if (ddO) {
if (ddX) {
ddx = transformed_ddX.data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionForward(
handle,
&alpha,
args1.idesc.desc(),
ddx,
args1.wdesc.desc(),
w,
args1.cdesc.desc(),
fwd_result1.algo,
&beta,
args1.odesc.desc(),
transformed_ddy_channel,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionForward(
handle,
&alpha,
args1.idesc.desc(),
ddx + i * group_offset_in,
args1.wdesc.desc(),
w + i * group_offset_filter,
args1.cdesc.desc(),
fwd_result1.algo,
workspace_ptr,
workspace_size,
&beta,
args1.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
#endif
}
if (ddW) {
#ifdef PADDLE_WITH_HIP
      // MIOPEN only supports beta equal to 0.0f
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionForward(
handle,
&alpha,
args2.idesc.desc(),
x,
args2.wdesc.desc(),
ddw,
args2.cdesc.desc(),
fwd_result2.algo,
&beta,
args2.odesc.desc(),
transformed_ddy_channel,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionForward(
handle,
&alpha,
args2.idesc.desc(),
x + i * group_offset_in,
args2.wdesc.desc(),
ddw + i * group_offset_filter,
args2.cdesc.desc(),
fwd_result2.algo,
workspace_ptr,
workspace_size,
&alpha,
args2.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
#endif
}
if (channel_last) {
TransToChannelLast<Context, T>(ctx, &transformed_ddO_channel, ddO);
}
}
T* transformed_dy_channel = transformed_dO_channel.data<T>();
if (dW && ddX) {
ddx = transformed_ddX.data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionBackwardWeights(
handle,
&alpha,
args3.odesc.desc(),
transformed_dy_channel,
args3.idesc.desc(),
ddx,
args3.cdesc.desc(),
filter_result.algo,
&beta,
args3.wdesc.desc(),
dw,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionBackwardFilter(
handle,
&alpha,
args3.idesc.desc(),
ddx + i * group_offset_in,
args3.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args3.cdesc.desc(),
filter_result.algo,
workspace_ptr,
workspace_size,
&beta,
args3.wdesc.desc(),
dw + i * group_offset_filter));
},
workspace_size);
}
#endif
}
if (dX && ddW) {
ddw = ddW->data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionBackwardData(
handle,
&alpha,
args4.odesc.desc(),
transformed_dy_channel,
args4.wdesc.desc(),
ddw,
args4.cdesc.desc(),
data_result.algo,
&beta,
args4.idesc.desc(),
transformed_dx,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionBackwardData(
handle,
&alpha,
args4.wdesc.desc(),
ddw + i * group_offset_filter,
args4.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args4.cdesc.desc(),
data_result.algo,
workspace_ptr,
workspace_size,
&beta,
args4.idesc.desc(),
transformed_dx + i * group_offset_in));
},
workspace_size);
}
#endif
if (!is_sys_pad) {
// reverse padded input
std::vector<int> starts(X->dims().size(), 0);
std::vector<int> axes(X->dims().size(), 0);
for (size_t i = 0; i < X->dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
if (X->dims().size() == 4) {
paddle::operators::RemovePaddingSlice<Context, T, 4>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
} else {
paddle::operators::RemovePaddingSlice<Context, T, 5>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
}
}
if (channel_last) {
TransToChannelLast<Context, T>(ctx, &transformed_dX_channel, dX);
}
}
}
template <typename T, typename Context>
void DepthwiseConvCudnnGradGradKernel(
const Context& ctx,
const paddle::optional<DenseTensor>& input_grad_grad,
const paddle::optional<DenseTensor>& filter_grad_grad,
const DenseTensor& out_grad,
const DenseTensor& input,
const DenseTensor& filter,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_t,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search_t,
bool fuse_relu,
DenseTensor* out_grad_grad,
DenseTensor* input_grad,
DenseTensor* filter_grad) {
ConvCudnnGradGradKernel<T>(ctx,
input,
filter,
out_grad,
input_grad_grad,
filter_grad_grad,
strides,
paddings_t,
padding_algorithm,
groups,
dilations_t,
data_format,
use_addto,
workspace_size_MB,
exhaustive_search_t,
input_grad,
filter_grad,
out_grad_grad);
}
template <typename T, typename Context>
void Conv3DCudnnGradGradKernel(
const Context& ctx,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& out_grad,
const paddle::optional<DenseTensor>& input_grad_grad,
const paddle::optional<DenseTensor>& filter_grad_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_t,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search_t,
DenseTensor* input_grad,
DenseTensor* filter_grad,
DenseTensor* out_grad_grad) {
ConvCudnnGradGradKernel<T>(ctx,
input,
filter,
out_grad,
input_grad_grad,
filter_grad_grad,
strides,
paddings_t,
padding_algorithm,
groups,
dilations_t,
data_format,
use_addto,
workspace_size_MB,
exhaustive_search_t,
input_grad,
filter_grad,
out_grad_grad);
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(conv2d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::ConvCudnnGradGradKernel,
float,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(conv3d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3DCudnnGradGradKernel,
float,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
GPU,
ALL_LAYOUT,
phi::DepthwiseConvCudnnGradGradKernel,
float,
phi::dtype::float16) {}
#else
#if CUDNN_VERSION_MIN(8, 1, 0)
PD_REGISTER_KERNEL(conv2d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::ConvCudnnGradGradKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(conv3d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3DCudnnGradGradKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
GPU,
ALL_LAYOUT,
phi::DepthwiseConvCudnnGradGradKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
#else
PD_REGISTER_KERNEL(conv2d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::ConvCudnnGradGradKernel,
float,
double,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(conv3d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3DCudnnGradGradKernel,
float,
double,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
GPU,
ALL_LAYOUT,
phi::DepthwiseConvCudnnGradGradKernel,
float,
double,
phi::dtype::float16) {}
#endif
#endif
|
1684bfc54de2245d7ec33e6fc040f7ce26e54e83.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "shortcut_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
int minw = 1;
int minh = 1;
int minc = 1;
int stride = 2;
int sample = 1;
int batch = 2;
int w1 = 1;
int h1 = 1;
int c1 = 1;
float *add = NULL;
hipMalloc(&add, XSIZE*YSIZE);
int w2 = 1;
int h2 = 1;
int c2 = 1;
float s1 = 1;
float s2 = 1;
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((shortcut_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,minw,minh,minc,stride,sample,batch,w1,h1,c1,add,w2,h2,c2,s1,s2,out);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    hipLaunchKernelGGL((shortcut_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,minw,minh,minc,stride,sample,batch,w1,h1,c1,add,w2,h2,c2,s1,s2,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    hipLaunchKernelGGL((shortcut_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,minw,minh,minc,stride,sample,batch,w1,h1,c1,add,w2,h2,c2,s1,s2,out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1684bfc54de2245d7ec33e6fc040f7ce26e54e83.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "shortcut_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
int minw = 1;
int minh = 1;
int minc = 1;
int stride = 2;
int sample = 1;
int batch = 2;
int w1 = 1;
int h1 = 1;
int c1 = 1;
float *add = NULL;
cudaMalloc(&add, XSIZE*YSIZE);
int w2 = 1;
int h2 = 1;
int c2 = 1;
float s1 = 1;
float s2 = 1;
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
shortcut_kernel<<<gridBlock,threadBlock>>>(size,minw,minh,minc,stride,sample,batch,w1,h1,c1,add,w2,h2,c2,s1,s2,out);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
shortcut_kernel<<<gridBlock,threadBlock>>>(size,minw,minh,minc,stride,sample,batch,w1,h1,c1,add,w2,h2,c2,s1,s2,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
shortcut_kernel<<<gridBlock,threadBlock>>>(size,minw,minh,minc,stride,sample,batch,w1,h1,c1,add,w2,h2,c2,s1,s2,out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
60e1422e13a8882162657a4dcb89b9771fd61e1e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <limits.h>
#include "binary_search.h"
#include <chrono>
#include <iostream>
#define BLOCKDIM 512
#define SEARCH_CHUNK 16
#define BLOCK_CHUNK (BLOCKDIM*SEARCH_CHUNK)
__global__ void search_kernel(const long int *arr,
const long int len, const long int *querys, const long int num_querys, long int *res, bool *flag)
{
int search;
if(*flag == false) {
int tid = threadIdx.x;
__shared__ int s_arr[BLOCK_CHUNK];
/* Since each value is being copied to shared memory, the rest of the
    following commented-out code is unnecessary, since a direct comparison
can be done at the time of copy below. */
// for(int i = 0; i < BLOCKDIM; ++i) {
// int shared_loc = i*SEARCH_CHUNK + tid;
// int global_loc = shared_loc + BLOCK_CHUNK * blockIdx.x;
// if(arr[global_loc] == search) {
// *flag = true;
// *res = global_loc;
// }
// __syncthreads();
// }
/* Copy chunk of array that this entire block of threads will read
from the slower global memory to the faster shared memory. */
for(long int i = 0; i < SEARCH_CHUNK; ++i) {
int shared_loc = tid*SEARCH_CHUNK + i;
int global_loc = shared_loc + BLOCK_CHUNK * blockIdx.x;
/* Make sure to stay within the bounds of the global array,
else assign a dummy value. */
if(global_loc < len) {
s_arr[shared_loc] = arr[global_loc];
}
else {
s_arr[shared_loc] = INT_MAX;
}
}
__syncthreads();
for(long int i = 0; i < num_querys; i++)
{
search = querys[i];
/* For each thread, set the initial search range. */
int L = 0;
int R = SEARCH_CHUNK - 1;
int m = (L + R) / 2;
/* Pointer to the part of the shared array for this thread. */
int *s_ptr = &s_arr[tid*SEARCH_CHUNK];
/* Each thread will search a chunk of the block array.
Many blocks will not find a solution so the search must
be allowed to fail on a per block basis. The loop will
        break (fail) when L > R. */
while(L <= R && *flag == false)
{
if(s_ptr[m] < search) {
L = m + 1;
}
else if(s_ptr[m] > search) {
R = m - 1;
}
else {
*flag = true;
*res = m += tid*SEARCH_CHUNK + BLOCK_CHUNK * blockIdx.x;
}
m = (L + R) / 2;
}
}
}
}
int binary_search(const long int *arr, const long int len, const long int *querys, const long int num_querys)
{
long int *d_arr, *d_querys, *d_res;
bool *d_flag;
size_t arr_size = len * sizeof(long int);
size_t querys_size = num_querys * sizeof(long int);
size_t res_size = sizeof(long int);
size_t flag_size = sizeof(bool);
hipMalloc(&d_arr, arr_size);
hipMalloc(&d_querys, querys_size);
hipMalloc(&d_res, res_size);
hipMalloc(&d_flag, flag_size);
hipMemcpy(d_arr, arr, arr_size, hipMemcpyHostToDevice);
hipMemcpy(d_querys, querys, querys_size, hipMemcpyHostToDevice);
hipMemset(d_flag, 0, flag_size);
/* Set res value to -1, so that if the function returns -1, that
indicates an algorithm failure. */
hipMemset(d_res, -0x1, res_size);
int blockSize = BLOCKDIM;
int gridSize = (len-1)/BLOCK_CHUNK + 1;
auto start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( search_kernel), dim3(gridSize),dim3(blockSize), 0, 0, d_arr, len, d_querys, num_querys ,d_res, d_flag);
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
std::cout << "Kernel Time: " <<
std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count() <<
" ms" << std::endl;
long int res;
hipMemcpy(&res, d_res, res_size, hipMemcpyDeviceToHost);
return res;
}
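/* Editor's note (added): minimal host-side usage sketch for binary_search() above. The macro
   guard, sizes and values below are illustrative assumptions and not part of the original
   program; per the memset above, the function is expected to return an index of a matching
   element, or -1 on failure. */
#ifdef BINARY_SEARCH_EXAMPLE_MAIN
#include <cstdio>
#include <vector>
int main() {
    const long int len = 1 << 20;
    std::vector<long int> arr(len);
    for (long int i = 0; i < len; ++i) arr[i] = 2 * i;   // sorted array of even numbers
    std::vector<long int> querys = {24690};              // 2*12345, known to be present
    int idx = binary_search(arr.data(), len, querys.data(), (long int)querys.size());
    std::printf("query found at index %d\n", idx);
    return 0;
}
#endif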
| 60e1422e13a8882162657a4dcb89b9771fd61e1e.cu | #include <cuda.h>
#include <limits.h>
#include "binary_search.h"
#include <chrono>
#include <iostream>
#define BLOCKDIM 512
#define SEARCH_CHUNK 16
#define BLOCK_CHUNK (BLOCKDIM*SEARCH_CHUNK)
__global__ void search_kernel(const long int *arr,
const long int len, const long int *querys, const long int num_querys, long int *res, bool *flag)
{
int search;
if(*flag == false) {
int tid = threadIdx.x;
__shared__ int s_arr[BLOCK_CHUNK];
/* Since each value is being copied to shared memory, the rest of the
    following commented-out code is unnecessary, since a direct comparison
can be done at the time of copy below. */
// for(int i = 0; i < BLOCKDIM; ++i) {
// int shared_loc = i*SEARCH_CHUNK + tid;
// int global_loc = shared_loc + BLOCK_CHUNK * blockIdx.x;
// if(arr[global_loc] == search) {
// *flag = true;
// *res = global_loc;
// }
// __syncthreads();
// }
/* Copy chunk of array that this entire block of threads will read
from the slower global memory to the faster shared memory. */
for(long int i = 0; i < SEARCH_CHUNK; ++i) {
int shared_loc = tid*SEARCH_CHUNK + i;
int global_loc = shared_loc + BLOCK_CHUNK * blockIdx.x;
/* Make sure to stay within the bounds of the global array,
else assign a dummy value. */
if(global_loc < len) {
s_arr[shared_loc] = arr[global_loc];
}
else {
s_arr[shared_loc] = INT_MAX;
}
}
__syncthreads();
for(long int i = 0; i < num_querys; i++)
{
search = querys[i];
/* For each thread, set the initial search range. */
int L = 0;
int R = SEARCH_CHUNK - 1;
int m = (L + R) / 2;
/* Pointer to the part of the shared array for this thread. */
int *s_ptr = &s_arr[tid*SEARCH_CHUNK];
/* Each thread will search a chunk of the block array.
Many blocks will not find a solution so the search must
be allowed to fail on a per block basis. The loop will
        break (fail) when L > R. */
while(L <= R && *flag == false)
{
if(s_ptr[m] < search) {
L = m + 1;
}
else if(s_ptr[m] > search) {
R = m - 1;
}
else {
*flag = true;
*res = m += tid*SEARCH_CHUNK + BLOCK_CHUNK * blockIdx.x;
}
m = (L + R) / 2;
}
}
}
}
int binary_search(const long int *arr, const long int len, const long int *querys, const long int num_querys)
{
long int *d_arr, *d_querys, *d_res;
bool *d_flag;
size_t arr_size = len * sizeof(long int);
size_t querys_size = num_querys * sizeof(long int);
size_t res_size = sizeof(long int);
size_t flag_size = sizeof(bool);
cudaMalloc(&d_arr, arr_size);
cudaMalloc(&d_querys, querys_size);
cudaMalloc(&d_res, res_size);
cudaMalloc(&d_flag, flag_size);
cudaMemcpy(d_arr, arr, arr_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_querys, querys, querys_size, cudaMemcpyHostToDevice);
cudaMemset(d_flag, 0, flag_size);
/* Set res value to -1, so that if the function returns -1, that
indicates an algorithm failure. */
cudaMemset(d_res, -0x1, res_size);
int blockSize = BLOCKDIM;
int gridSize = (len-1)/BLOCK_CHUNK + 1;
auto start = std::chrono::high_resolution_clock::now();
search_kernel<<<gridSize,blockSize>>>(d_arr, len, d_querys, num_querys ,d_res, d_flag);
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
std::cout << "Kernel Time: " <<
std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count() <<
" ms" << std::endl;
long int res;
cudaMemcpy(&res, d_res, res_size, cudaMemcpyDeviceToHost);
return res;
}
|
6b63f345d5f72fbbbcb89d673abbc69690bd05da.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2022, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/solver/lower_trs_kernels.hpp"
#include <memory>
#include <hip/hip_runtime.h>
#include <hipsparse.h>
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/solver/triangular.hpp>
#include "cuda/base/cusparse_bindings.hpp"
#include "cuda/base/math.hpp"
#include "cuda/base/types.hpp"
#include "cuda/solver/common_trs_kernels.cuh"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The LOWER_TRS solver namespace.
*
* @ingroup lower_trs
*/
namespace lower_trs {
void should_perform_transpose(std::shared_ptr<const CudaExecutor> exec,
bool& do_transpose)
{
should_perform_transpose_kernel(exec, do_transpose);
}
template <typename ValueType, typename IndexType>
void generate(std::shared_ptr<const CudaExecutor> exec,
const matrix::Csr<ValueType, IndexType>* matrix,
std::shared_ptr<solver::SolveStruct>& solve_struct,
bool unit_diag, const solver::trisolve_algorithm algorithm,
const size_type num_rhs)
{
if (algorithm == solver::trisolve_algorithm::sparselib) {
generate_kernel<ValueType, IndexType>(exec, matrix, solve_struct,
num_rhs, false, unit_diag);
}
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_LOWER_TRS_GENERATE_KERNEL);
template <typename ValueType, typename IndexType>
void solve(std::shared_ptr<const CudaExecutor> exec,
const matrix::Csr<ValueType, IndexType>* matrix,
const solver::SolveStruct* solve_struct, bool unit_diag,
const solver::trisolve_algorithm algorithm,
matrix::Dense<ValueType>* trans_b, matrix::Dense<ValueType>* trans_x,
const matrix::Dense<ValueType>* b, matrix::Dense<ValueType>* x)
{
if (algorithm == solver::trisolve_algorithm::sparselib) {
solve_kernel<ValueType, IndexType>(exec, matrix, solve_struct, trans_b,
trans_x, b, x);
} else {
sptrsv_naive_caching<false>(exec, matrix, unit_diag, b, x);
}
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_LOWER_TRS_SOLVE_KERNEL);
} // namespace lower_trs
} // namespace cuda
} // namespace kernels
} // namespace gko
| 6b63f345d5f72fbbbcb89d673abbc69690bd05da.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2022, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/solver/lower_trs_kernels.hpp"
#include <memory>
#include <cuda.h>
#include <cusparse.h>
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/solver/triangular.hpp>
#include "cuda/base/cusparse_bindings.hpp"
#include "cuda/base/math.hpp"
#include "cuda/base/types.hpp"
#include "cuda/solver/common_trs_kernels.cuh"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The LOWER_TRS solver namespace.
*
* @ingroup lower_trs
*/
namespace lower_trs {
void should_perform_transpose(std::shared_ptr<const CudaExecutor> exec,
bool& do_transpose)
{
should_perform_transpose_kernel(exec, do_transpose);
}
template <typename ValueType, typename IndexType>
void generate(std::shared_ptr<const CudaExecutor> exec,
const matrix::Csr<ValueType, IndexType>* matrix,
std::shared_ptr<solver::SolveStruct>& solve_struct,
bool unit_diag, const solver::trisolve_algorithm algorithm,
const size_type num_rhs)
{
if (algorithm == solver::trisolve_algorithm::sparselib) {
generate_kernel<ValueType, IndexType>(exec, matrix, solve_struct,
num_rhs, false, unit_diag);
}
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_LOWER_TRS_GENERATE_KERNEL);
template <typename ValueType, typename IndexType>
void solve(std::shared_ptr<const CudaExecutor> exec,
const matrix::Csr<ValueType, IndexType>* matrix,
const solver::SolveStruct* solve_struct, bool unit_diag,
const solver::trisolve_algorithm algorithm,
matrix::Dense<ValueType>* trans_b, matrix::Dense<ValueType>* trans_x,
const matrix::Dense<ValueType>* b, matrix::Dense<ValueType>* x)
{
if (algorithm == solver::trisolve_algorithm::sparselib) {
solve_kernel<ValueType, IndexType>(exec, matrix, solve_struct, trans_b,
trans_x, b, x);
} else {
sptrsv_naive_caching<false>(exec, matrix, unit_diag, b, x);
}
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_LOWER_TRS_SOLVE_KERNEL);
} // namespace lower_trs
} // namespace cuda
} // namespace kernels
} // namespace gko
|
71769b6333800f508f2e722243af85ac91067642.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// nvcc iso2PCF.cu -o par.out && ./par.out data_5K.dat rand0_5K.dat 5000 30 180
#include <iostream>
#include <fstream>
#include <string.h>
#include <time.h>
#include <math.h>
using namespace std;
//Point with weight value. Structure
struct PointW3D{
float x;
float y;
float z;
float w;
};
struct Node{
int len; // Number of points in the node
PointW3D *elements; // Points in the node
};
void open_files(string name_file, int pts, PointW3D *datos){
    /* Opens the data files. Receives the file location, number of points to read and the array of points where the data is stored */
ifstream file;
string mypathto_files = "../../../fake_DATA/DATOS/";
//This creates the full path to where I have my data files
name_file.insert(0,mypathto_files);
file.open(name_file.c_str(), ios::in | ios::binary); //Tells the program this is a binary file using ios::binary
if (file.fail()){
cout << "Failed to load the file in " << name_file << endl;
exit(1);
}
for ( int c = 0; c < pts; c++) //Reads line by line and stores each c line in the c PointW3D element of the array
{
file >> datos[c].x >> datos[c].y >> datos[c].z >> datos[c].w;
}
file.close();
}
//====================================================================
void save_histogram(string name, int bns, double *histo){
/* This function saves a one dimensional histogram in a file.
Receives the name of the file, number of bins in the histogram and the histogram array
*/
ofstream file2;
file2.open(name.c_str(), ios::out | ios::binary);
if (file2.fail()){
cout << "Failed to save the the histogram in " << name << endl;
exit(1);
}
for (int i = 0; i < bns; i++){
file2 << histo[i] << endl;
}
file2.close();
}
//====================================================================
void save_histogram(string name, int bns, float *histo){
/* This function saves a one dimensional histogram in a file.
Receives the name of the file, number of bins in the histogram and the histogram array
*/
ofstream file2;
file2.open(name.c_str(), ios::out | ios::binary);
if (file2.fail()){
cout << "Failed to save the the histogram in " << name << endl;
exit(1);
}
for (int i = 0; i < bns; i++){
file2 << histo[i] << endl;
}
file2.close();
}
//===================================================================
void add(PointW3D *&array, int &lon, float _x, float _y, float _z, float _w){
/*
    This function manages adding a point to a specific Node. It receives the current array, its length and the point to add,
    replaces the array with a copy that has the new point appended, and increments the length by 1.
    It manages the memory allocation and release of the previous and new arrays.
*/
lon++;
PointW3D *array_aux;
hipMallocManaged(&array_aux, lon*sizeof(PointW3D));
for (int i=0; i<lon-1; i++){
array_aux[i].x = array[i].x;
array_aux[i].y = array[i].y;
array_aux[i].z = array[i].z;
array_aux[i].w = array[i].w;
}
hipFree(array);
array = array_aux;
array[lon-1].x = _x;
array[lon-1].y = _y;
array[lon-1].z = _z;
array[lon-1].w = _w;
}
void make_nodos(Node ***nod, PointW3D *dat, unsigned int partitions, float size_node, unsigned int np){
/*
This function classifies the data in the nodes
Args
nod: Node 3D array where the data will be classified
dat: array of PointW3D data to be classified and stored in the nodes
    partitions: number of nodes in each direction
size_node: dimensions of a single node
np: number of points in the dat array
*/
int row, col, mom;
// First allocate memory as an empty node:
for (row=0; row<partitions; row++){
for (col=0; col<partitions; col++){
for (mom=0; mom<partitions; mom++){
nod[row][col][mom].len = 0;
hipMallocManaged(&nod[row][col][mom].elements, sizeof(PointW3D));
}
}
}
    // Classify the ith element of the data into a node and add that point to the node with the add function:
for (int i=0; i<np; i++){
row = (int)(dat[i].x/size_node);
col = (int)(dat[i].y/size_node);
mom = (int)(dat[i].z/size_node);
add(nod[row][col][mom].elements, nod[row][col][mom].len, dat[i].x, dat[i].y, dat[i].z, dat[i].w);
}
}
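/* Editor's note (added): worked example of the classification above, assuming the defaults
   hard-coded in main() (size_box = 250, np = 5000, alpha = 2.176): size_node = 2.176*250/cbrt(5000)
   ~ 31.8, partitions = ceil(250/31.8) = 8, and a point with x = 100.0 is stored at
   row = (int)(100.0/31.8) = 3 (and likewise for col and mom from y and z). */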
//====================================================================
//============ Kernels Section =======================================
//====================================================================
__device__ void count_distances11(float *XX, PointW3D *elements, int len, float ds, float dd_max, int sum){
/*
    This device function counts the distances between points within one node.
    Args:
    XX: The histogram where the distances are counted in
    elements: Array of PointW3D points inside the node
    len: length of the elements array
ds: number of bins divided by the maximum distance. Used to calculate the bin it should be counted at
dd_max: The maximum distance of interest.
*/
int bin;
float d, v;
float x1,y1,z1,w1,x2,y2,z2,w2;
for (int i=0; i<len-1; ++i){
x1 = elements[i].x;
y1 = elements[i].y;
z1 = elements[i].z;
w1 = elements[i].w;
for (int j=i+1; j<len; ++j){
x2 = elements[j].x;
y2 = elements[j].y;
z2 = elements[j].z;
w2 = elements[j].w;
d = (x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
if (d<=dd_max+1){
bin = (int)(sqrt(d)*ds);
v = 2*w1*w2;
atomicAdd(&XX[bin],sum);
}
}
}
}
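/* Editor's note (added): worked example of the binning above, assuming the sample run parameters
   in the usage line at the top of this file (bn = 30, d_max = 180): ds = bn/d_max = 1/6, so a
   pair separated by 100 (d = 10000) lands in bin = (int)(sqrt(10000)/6) = 16. Note that the
   weight product v = 2*w1*w2 is computed but the histograms currently accumulate the plain
   count `sum` instead. */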
__device__ void count_distances12(float *XX, PointW3D *elements1, int len1, PointW3D *elements2, int len2, float ds, float dd_max, int sum){
/*
    This device function counts the distances between points in two different nodes.
    Args:
    XX: The histogram where the distances are counted in
    elements1: Array of PointW3D points inside the first node
    len1: length of the first elements array
    elements2: Array of PointW3D points inside the second node
    len2: length of the second elements array
ds: number of bins divided by the maximum distance. Used to calculate the bin it should be counted at
dd_max: The maximum distance of interest.
*/
int bin;
float d, v;
float x1,y1,z1,w1,x2,y2,z2,w2;
for (int i=0; i<len1; ++i){
x1 = elements1[i].x;
y1 = elements1[i].y;
z1 = elements1[i].z;
w1 = elements1[i].w;
for (int j=0; j<len2; ++j){
x2 = elements2[j].x;
y2 = elements2[j].y;
z2 = elements2[j].z;
w2 = elements2[j].w;
d = (x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
if (d<=dd_max+1){
bin = (int)(sqrt(d)*ds);
v = 2*w1*w2;
atomicAdd(&XX[bin],sum);
}
}
}
}
__device__ void BPC_loop(float *XX, Node ***nodeD, int row, int col, int mom, int partitions, int did_max, float dd_max, float ds, int sum, float size_box, bool x_border, bool y_border, bool z_border, bool x_upperborder, bool y_upperborder, bool z_upperborder, bool x_lowerborder, bool y_lowerborder, bool z_lowerborder){
/*
    This device function counts the distances between points in the node at (row, col, mom) and the nodes
    that reproduce its neighbourhood across the periodic boundaries of the box.
    Args:
    XX: The histogram where the distances are counted in
    nodeD: Full 3D array of nodes
    row, col, mom: position of the current node in the node grid
    partitions: number of nodes in each direction
    did_max: maximum internodal separation (in node units) that can still contain pairs of interest
    dd_max: The squared maximum distance of interest
    ds: number of bins divided by the maximum distance. Used to calculate the bin it should be counted at
    sum: value added to the histogram bin for each pair found
    size_box: size of the whole box
    *_border, *_upperborder, *_lowerborder: flags indicating which box walls act as the periodic images for this node
*/
int bin, d_node, u, v, w, did_max2=did_max*did_max;
float d, s;
float x1,y1,z1,w1,dx12,dy12,dz12,w2;
int x_from = 10; //((row-did_max)*(row>did_max))*(!x_border) + (partitions-(did_max-row))*(x_lowerborder&&!x_upperborder);
int x_to = partitions-10; //(partitions-1)*((row+did_max>partitions-1 && !x_upperborder)||x_lowerborder) + (row+did_max)*((row+did_max<partitions)&&!x_border) + (!x_lowerborder&&x_upperborder)*(x_from+(did_max-(partitions-1-row)));
int y_from = 10; //((col-did_max)*(col>did_max))*(!y_border) + (partitions-(did_max-col))*(y_lowerborder&&!y_upperborder);
int y_to = partitions-10; // (partitions-1)*((col+did_max>partitions-1 && !y_upperborder)||y_lowerborder) + (col+did_max)*((col+did_max<partitions)&&!y_border) + (!y_lowerborder&&y_upperborder)*(y_from+(did_max-(partitions-1-col)));
int z_from = 10; // ((mom-did_max)*(mom>did_max))*(!z_border) + (partitions-(did_max-mom))*(z_lowerborder&&!z_upperborder);
int z_to = partitions-10; //(partitions-1)*((mom+did_max>partitions-1 && !z_upperborder)||z_lowerborder) + (mom+did_max)*((mom+did_max<partitions)&&!z_border) + (!z_lowerborder&&z_upperborder)*(z_from+(did_max-(partitions-1-mom)));
//If the z direction is not the nearest border the z index it is 0 if mom<did_max or mom-did-max otherwise.
//If both z borders or ONLY the upper z border are the nearest borders the z index starts from 0
//If ONLY the lower z border is the nearest the z index starts from partitions-(did_max-mom)
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//If both borders are the nearest the highest limit is partitions-1
//If the lower border is the nearest the highes limit is partitions-1
//If the upper border is not the nerarest and mom+did_max>partitions the highest limit is partitions-1
//If this is not the border side and mom+did_max< paritions then the highest limit is mom+did_max
//If only the upper border is the nearest border the higher limit is the lower limit + (did_max-(partitions-1-mom))
for (u=x_from; u<=x_to; u++){
for (v=y_from; v<=y_to; v++){
for (w=z_from; w<=z_to; w++){
d_node=(w-mom)*(w-mom) + (v-col)*(v-col) + (u-row)*(u-row);
if (d_node<=did_max2){
for (int i=0; i<nodeD[row][col][mom].len; ++i){
x1 = nodeD[row][col][mom].elements[i].x;
y1 = nodeD[row][col][mom].elements[i].y;
z1 = nodeD[row][col][mom].elements[i].z;
w1 = nodeD[row][col][mom].elements[i].w;
for (int j=0; j<nodeD[u][v][w].len; ++j){
dx12 = fabsf(x1-nodeD[u][v][w].elements[j].x) - size_box*x_border;
dy12 = fabsf(y1-nodeD[u][v][w].elements[j].y) - size_box*y_border;
dz12 = fabsf(z1-nodeD[u][v][w].elements[j].z) - size_box*z_border;
w2 = nodeD[u][v][w].elements[j].w;
d = dx12*dx12 + dy12*dy12 + dz12*dz12;
if (d<=dd_max+1){
bin = (int)(sqrt(d)*ds);
s = 2*w1*w2;
atomicAdd(&XX[bin],sum);
}
}
}
}
}
}
}
}
__global__ void BPC_XX(float *XX_A, float *XX_B, Node ***nodeD, float ds, float d_max, float size_node, float size_box){
/*
    This global kernel counts the distances between points in nodes near a box wall and the periodic
    reproductions of their neighbouring nodes across that wall.
    Args:
    XX_A, XX_B: The subhistograms where the distances are counted in
    nodeD: Full 3D array of nodes
    ds: number of bins divided by the maximum distance. Used to calculate the bin it should be counted at
    d_max: The maximum distance of interest
    size_node: side length of a single node
    size_box: Size of the whole box
*/
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int partitions = (int)(ceilf(size_box/size_node));
if (idx<(partitions*partitions*partitions)){
//Get the node positon in this thread
int mom = (int) (idx/(partitions*partitions));
int col = (int) ((idx%(partitions*partitions))/partitions);
int row = idx%partitions;
//printf("%i, %i, %i \n", mom, col,row);
//This may see redundant but with this these often checked values are upgraded to device memory
float dd_max = d_max*d_max;
int did_max = (int)(ceilf((d_max+size_node*sqrt(3.0))/size_node));
if (idx==0){
printf("Partitions: %i, did_max: %i\n", partitions, did_max);
}
if (nodeD[row][col][mom].len > 0 && (row<did_max-1 || partitions-row<did_max || col<did_max-1 || partitions-col<did_max || mom<did_max-1 || partitions-mom<did_max)){
//Only if the current node has elements and it is near to any border does the thread will be active
atomicAdd(&XX_A[0],1); //Count how many nodes are considered as near a border
/*
bool x_border=false, y_border=false, z_border=false, x_upperborder=false, y_upperborder=false, z_upperborder=false, x_lowerborder=false, y_lowerborder=false, z_lowerborder=false;
x_border=(row<did_max-1 || partitions-row<did_max);
if (x_border){
x_upperborder=partitions-row<did_max;
x_lowerborder=row<did_max-1;
BPC_loop(XX_A, nodeD, row, col, mom, partitions, did_max, dd_max, ds, 2, size_box, x_border, false, false, x_upperborder, false, false, x_lowerborder, false, false);
}
y_border=(col<did_max-1 || partitions-col<did_max);
if (y_border){
y_upperborder=partitions-col<did_max;
y_lowerborder=col<did_max-1;
//Only Y boundaries
BPC_loop(XX_B, nodeD, row, col, mom, partitions, did_max, dd_max, ds, 2, size_box, false, y_border, false, false, y_upperborder, false, false, y_lowerborder, false);
if (x_border){
//Boundaries in the XY walls
BPC_loop(XX_A, nodeD, row, col, mom, partitions, did_max, dd_max, ds, 2, size_box, x_border, y_border, false, x_upperborder, y_upperborder, false, x_lowerborder, y_lowerborder, false);
}
}
z_border=(mom<did_max-1 || partitions-mom<did_max);
if (z_border){
z_upperborder=partitions-mom<did_max;
z_lowerborder=mom<did_max-1;
//Only Z boundaries
BPC_loop(XX_B, nodeD, row, col, mom, partitions, did_max, dd_max, ds, 2, size_box, false, false, z_border, false, false, z_upperborder, false, false, z_lowerborder);
if (x_border){
//For the ZY corner
BPC_loop(XX_A, nodeD, row, col, mom, partitions, did_max, dd_max, ds, 2, size_box, x_border, false, z_border, x_upperborder, false, z_upperborder, x_lowerborder, false, z_lowerborder);
if (y_border){
//For the XYZ corner
BPC_loop(XX_B, nodeD, row, col, mom, partitions, did_max, dd_max, ds, 2, size_box, x_border, y_border, z_border, x_upperborder, y_upperborder, z_upperborder, x_lowerborder, y_lowerborder, z_lowerborder);
}
}
if (y_border){
//For the YZ
BPC_loop(XX_A, nodeD, row, col, mom, partitions, did_max, dd_max, ds, 2, size_box, false, y_border, z_border, false, y_upperborder, z_upperborder, false, y_lowerborder, z_lowerborder);
}
}
*/
}
}
}
__global__ void make_histoXX(float *XX_A, float *XX_B, Node ***nodeD, float ds, float d_max, float size_node, float size_box){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int partitions = (int)(ceilf(size_box/size_node));
if (idx<(partitions*partitions*partitions)){
//Get the node positon in this thread
int mom = (int) (idx/(partitions*partitions));
int col = (int) ((idx%(partitions*partitions))/partitions);
int row = idx%partitions;
//printf("%i, %i, %i \n", mom, col,row)
if (nodeD[row][col][mom].len > 0){
//This may see redundant but with this these often checked values are upgraded to device memory
float dd_max = d_max*d_max;
int did_max = (int)(ceilf((d_max+size_node*sqrt(3.0))/size_node));
int did_max2 = did_max*did_max;
// Counts distances betweeen the same node
if (idx%2==0){ //If the main index is even stores the countings in the XX_A subhistogram
count_distances11(XX_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, ds, dd_max, 2);
} else { //If the main index is odd stores the countings in the XX_B subhistogram
count_distances11(XX_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, ds, dd_max, 2);
}
int u,v,w; // Position index of the second node
int dx_nod12, dy_nod12, dz_nod12, dd_nod12; //Internodal distance
//Second node movil in Z direction
for(w = mom+1; w<partitions && w-row<=did_max; w++){
if (idx%2==0){ //If the main index is even stores the countings in the XX_A subhistogram
count_distances12(XX_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[row][col][w].elements, nodeD[row][col][w].len, ds, dd_max, 2);
} else { //If the main index is odd stores the countings in the XX_B subhistogram
count_distances12(XX_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[row][col][w].elements, nodeD[row][col][w].len, ds, dd_max, 2);
}
}
//Second node movil in YZ
for(v=col+1; v<partitions && v-col<=did_max; v++){
dy_nod12 = v-col;
for(w=(mom-did_max)*(mom>did_max); w<partitions && w-mom<=did_max; w++){
dz_nod12 = w-mom;
dd_nod12 = dz_nod12*dz_nod12 + dy_nod12*dy_nod12;
if (dd_nod12<=did_max2){
if (idx%2==0){
count_distances12(XX_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[row][v][w].elements, nodeD[row][v][w].len, ds, dd_max, 2);
} else {
count_distances12(XX_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[row][v][w].elements, nodeD[row][v][w].len, ds, dd_max, 2);
}
}
//}
}
}
//Second node movil in XYZ
for(u = row+1; u < partitions && u-row< did_max; u++){
dx_nod12 = u-row;
for(v = (col-did_max)*(col>did_max); v < partitions && v-col< did_max; v++){
dy_nod12 = v-col;
for(w = (mom-did_max)*(mom>did_max); w < partitions && w-mom< did_max; w++){
dz_nod12 = w-mom;
dd_nod12 = dz_nod12*dz_nod12 + dy_nod12*dy_nod12 + dx_nod12*dx_nod12;
if (dd_nod12<=did_max2){
if (idx%2==0){
count_distances12(XX_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[u][v][w].elements, nodeD[u][v][w].len, ds, dd_max, 2);
} else {
count_distances12(XX_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[u][v][w].elements, nodeD[u][v][w].len, ds, dd_max, 2);
}
}
}
}
}
}
}
}
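/* Editor's note (added): worked example of the idx -> (row, col, mom) decomposition used at the
   top of this kernel (and of BPC_XX/make_histoXY), assuming partitions = 8: for idx = 100,
   mom = 100/64 = 1, col = (100 % 64)/8 = 4, row = 100 % 8 = 4; one thread is launched per node
   and threads with idx >= partitions^3 do no work. */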
__global__ void make_histoXY(float *XY_A, float *XY_B, Node ***nodeD, Node ***nodeR, float ds, float d_max, float size_node, float size_box){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int partitions = (int)(ceilf(size_box/size_node));
if (idx<(partitions*partitions*partitions)){
//Get the node positon in this thread
int mom = (int) (idx/(partitions*partitions));
int col = (int) ((idx%(partitions*partitions))/partitions);
int row = idx%partitions;
if (nodeD[row][col][mom].len > 0){
//This may see redundant but with this these often checked values are upgraded to device memory
float dd_max = d_max*d_max;
int did_max = (int)(ceilf((d_max+size_node*sqrt(3.0))/size_node));
int did_max2 = did_max*did_max;
int u,v,w; //Position of the second node
unsigned int dx_nod12, dy_nod12, dz_nod12, dd_nod12;
//Second node movil in XYZ
//for(u = (row-did_max)*(row>did_max); u < partitions && u-row< did_max; u++){
for(u = 0; u < partitions && u-row< did_max; u++){
dx_nod12 = u-row;
//for(v = (col-did_max)*(col>did_max); v < partitions && v-col< did_max; v++){
for(v = 0; v < partitions && v-col< did_max; v++){
dy_nod12 = v-col;
//for(w = (mom-did_max)*(mom>did_max); w < partitions && w-mom< did_max; w++){
for(w = 0; w < partitions && w-mom< did_max; w++){
dz_nod12 = w-mom;
dd_nod12 = dz_nod12*dz_nod12 + dy_nod12*dy_nod12 + dx_nod12*dx_nod12;
if (dd_nod12<=did_max2){
if (idx%2==0){
count_distances12(XY_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeR[u][v][w].elements, nodeR[u][v][w].len, ds, dd_max, 1);
} else {
count_distances12(XY_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeR[u][v][w].elements, nodeR[u][v][w].len, ds, dd_max, 1);
}
}
}
}
}
}
}
}
__global__ void make_analyticRR(float *RR, float d_max, int bn, float size_box, int n_pts){
/*
Analytic calculation of the RR histogram
*/
int a = threadIdx.x;
if (a < bn){
float dr = (d_max/bn);
float V = size_box*size_box*size_box;
float beta1 = n_pts*n_pts/V;
float alph = 4*(2*acosf(0.0))*(beta1)*dr*dr*dr/3;
float r1, r2;
r2 = (float) a;
r1 = r2+1;
float sum = alph*((r1*r1*r1)-(r2*r2*r2));
atomicAdd(&RR[a],sum);
}
}
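/* Editor's note (added): sketch of the analytic expression above, assuming a uniform random
   catalogue of n_pts points in a periodic box of volume V = size_box^3. With dr = d_max/bn and
   shell radii r_a = a*dr, the expected (doubly counted) pair total in shell a is
       RR[a] = (n_pts^2/V) * (4*pi/3) * (r_{a+1}^3 - r_a^3)
             = (4*pi/3) * (n_pts^2/V) * dr^3 * ((a+1)^3 - a^3),
   which matches alph*((r1*r1*r1)-(r2*r2*r2)) with r2 = a, r1 = a + 1 and 2*acosf(0.0) = pi. */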
int main(int argc, char **argv){
unsigned int np = stoi(argv[3]), bn = stoi(argv[4]);
float dmax = stof(argv[5]);
float ds = ((float)(bn))/dmax, size_box = 250.0, alpha = 2.176;
float size_node = alpha*(size_box/pow((float)(np),1/3.));
unsigned int partitions = (int)(ceil(size_box/size_node));
float *DD_A, *DR_A, *DD_B, *DR_B, *RR;
double *DD, *DR;
PointW3D *dataD;
PointW3D *dataR;
hipMallocManaged(&dataD, np*sizeof(PointW3D));
hipMallocManaged(&dataR, np*sizeof(PointW3D));
// Name of the files where the results are saved
string nameDD = "DDiso.dat", nameRR = "RRiso.dat", nameDR = "DRiso.dat";
// Allocate memory for the histogram as double
// And the subhistograms as simple presision floats
DD = new double[bn];
RR = new float[bn];
DR = new double[bn];
hipMallocManaged(&DD_A, bn*sizeof(float));
hipMallocManaged(&DR_A, bn*sizeof(float));
hipMallocManaged(&DD_B, bn*sizeof(float));
hipMallocManaged(&DR_B, bn*sizeof(float));
//Initialize the histograms in 0
for (int i = 0; i < bn; i++){
*(DD+i) = 0;
*(RR+i) = 0;
*(DR+i) = 0;
*(DD_A+i) = 0;
*(DR_A+i) = 0;
*(DD_B+i) = 0;
*(DR_B+i) = 0;
}
// Open and read the files to store the data in the arrays
open_files(argv[1], np, dataD);
open_files(argv[2], np, dataR);
//Init the nodes arrays
Node ***nodeD;
Node ***nodeR;
hipMallocManaged(&nodeR, partitions*sizeof(Node**));
hipMallocManaged(&nodeD, partitions*sizeof(Node**));
for (int i=0; i<partitions; i++){
hipMallocManaged(&*(nodeR+i), partitions*sizeof(Node*));
hipMallocManaged(&*(nodeD+i), partitions*sizeof(Node*));
for (int j=0; j<partitions; j++){
hipMallocManaged(&*(*(nodeR+i)+j), partitions*sizeof(Node));
hipMallocManaged(&*(*(nodeD+i)+j), partitions*sizeof(Node));
}
}
//Classificate the data into the nodes
make_nodos(nodeD, dataD, partitions, size_node, np);
make_nodos(nodeR, dataR, partitions, size_node, np);
//Get the dimensions of the GPU grid
int blocks = (int)(ceil((float)((partitions*partitions*partitions)/512.0)));
dim3 grid(blocks,1,1);
dim3 block(512,1,1);
clock_t begin = clock();
//Launch the kernels
//make_histoXX<<<grid,block>>>(DD_A, DD_B, nodeD, ds, dmax, size_node, size_box);
hipLaunchKernelGGL(( BPC_XX), dim3(grid),dim3(block), 0, 0, DD_A, DD_B, nodeD, ds, dmax, size_node, size_box);
//make_histoXY<<<grid,block>>>(DR_A, DR_B, nodeD, nodeR, ds, dmax, size_node, size_box);
blocks = (int)(ceil((float)(bn)/512.0));
dim3 grid_a(blocks,1,1);
dim3 block_a(512,1,1);
hipLaunchKernelGGL(( make_analyticRR), dim3(grid_a),dim3(block_a), 0, 0, RR, dmax, bn, size_box, np);
//Waits for the GPU to finish
hipDeviceSynchronize();
cout << "Size of a node " << size_node << endl;
cout << "Nodes considered as boundary: " << DD_A[0] <<endl;
//Check here for errors
hipError_t error = hipGetLastError();
cout << "The error code is " << error << endl;
if(error != 0)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("\nSpent time = %.4f seg.\n", time_spent );
//Collect the subhistograms data into the double precision main histograms
//THis has to be done in CPU since GPU only allows single precision
for (int i = 0; i < bn; i++){
DD[i] = (double)(DD_A[i]+ DD_B[i]);
DR[i] = (double)(DR_A[i]+ DR_B[i]);
}
cout << "Termine de hacer todos los histogramas" << endl;
/*
// Shows the histograms
cout << "\nHistograma DD:" << endl;
int sum = 0;
for (int k = 0; k<bn; k++){
cout << DD[k] << "\t";
sum += DD[k];
}
cout << "Total: " << sum << endl;
cout << "\nHistograma RR:" << endl;
for (int k = 0; k<bn; k++){
cout << RR[k] << "\t";
}
cout << "\nHistograma DR:" << endl;
for (int k = 0; k<bn; k++){
cout << DR[k] << "\t";
}
*/
    // Save the histograms
    save_histogram(nameDD, bn, DD);
    cout << "Saved the DD histogram..." << endl;
    save_histogram(nameRR, bn, RR);
    cout << "Saved the RR histogram..." << endl;
    save_histogram(nameDR, bn, DR);
    cout << "Saved the DR histogram..." << endl;
//Free the memory
    hipFree(dataD);
    hipFree(dataR);
    delete[] DD;
    delete[] DR;
    delete[] RR;
    hipFree(DD_A);
    hipFree(DR_A);
    hipFree(DD_B);
    hipFree(DR_B);
    // (the per-node elements arrays allocated in add() are not released here)
    for (int i=0; i<partitions; i++){
        for (int j=0; j<partitions; j++){
            hipFree(nodeR[i][j]);
            hipFree(nodeD[i][j]);
        }
        hipFree(nodeR[i]);
        hipFree(nodeD[i]);
    }
    hipFree(nodeR);
    hipFree(nodeD);
cout << "Programa Terminado..." << endl;
return 0;
}
| 71769b6333800f508f2e722243af85ac91067642.cu | // nvcc iso2PCF.cu -o par.out && ./par.out data_5K.dat rand0_5K.dat 5000 30 180
#include <iostream>
#include <fstream>
#include <string.h>
#include <time.h>
#include <math.h>
using namespace std;
//Point with weight value. Structure
struct PointW3D{
float x;
float y;
float z;
float w;
};
struct Node{
int len; // Number of points in the node
PointW3D *elements; // Points in the node
};
void open_files(string name_file, int pts, PointW3D *datos){
/* Opens the data files. Receives the file location, number of points to read and the array of points where the data is stored */
ifstream file;
string mypathto_files = "../../../fake_DATA/DATOS/";
//This creates the full path to where I have my data files
name_file.insert(0,mypathto_files);
file.open(name_file.c_str(), ios::in | ios::binary); //Tells the program this is a binary file using ios::binary
if (file.fail()){
cout << "Failed to load the file in " << name_file << endl;
exit(1);
}
for ( int c = 0; c < pts; c++) //Reads line by line and stores each c line in the c PointW3D element of the array
{
file >> datos[c].x >> datos[c].y >> datos[c].z >> datos[c].w;
}
file.close();
}
//====================================================================
void save_histogram(string name, int bns, double *histo){
/* This function saves a one dimensional histogram in a file.
Receives the name of the file, number of bins in the histogram and the histogram array
*/
ofstream file2;
file2.open(name.c_str(), ios::out | ios::binary);
if (file2.fail()){
cout << "Failed to save the the histogram in " << name << endl;
exit(1);
}
for (int i = 0; i < bns; i++){
file2 << histo[i] << endl;
}
file2.close();
}
//====================================================================
void save_histogram(string name, int bns, float *histo){
/* This function saves a one dimensional histogram in a file.
Receives the name of the file, number of bins in the histogram and the histogram array
*/
ofstream file2;
file2.open(name.c_str(), ios::out | ios::binary);
if (file2.fail()){
cout << "Failed to save the the histogram in " << name << endl;
exit(1);
}
for (int i = 0; i < bns; i++){
file2 << histo[i] << endl;
}
file2.close();
}
//===================================================================
void add(PointW3D *&array, int &lon, float _x, float _y, float _z, float _w){
/*
This function appends a point to a specific Node. It receives the previous array, its length and the point to add,
replaces the array with a copy that has the new point at the end, and increments the length by one.
It manages the memory allocation and release of the previous and new arrays.
*/
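//Note: the array grows by a single element per call (allocate, copy, free), so filling a node with n points costs O(n^2) copies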
lon++;
PointW3D *array_aux;
cudaMallocManaged(&array_aux, lon*sizeof(PointW3D));
for (int i=0; i<lon-1; i++){
array_aux[i].x = array[i].x;
array_aux[i].y = array[i].y;
array_aux[i].z = array[i].z;
array_aux[i].w = array[i].w;
}
cudaFree(array);
array = array_aux;
array[lon-1].x = _x;
array[lon-1].y = _y;
array[lon-1].z = _z;
array[lon-1].w = _w;
}
void make_nodos(Node ***nod, PointW3D *dat, unsigned int partitions, float size_node, unsigned int np){
/*
This function classifies the data in the nodes
Args
nod: Node 3D array where the data will be classified
dat: array of PointW3D data to be classified and stored in the nodes
partitions: number nodes in each direction
size_node: dimensions of a single node
np: number of points in the dat array
*/
int row, col, mom;
// First allocate memory as an empty node:
for (row=0; row<partitions; row++){
for (col=0; col<partitions; col++){
for (mom=0; mom<partitions; mom++){
nod[row][col][mom].len = 0;
cudaMallocManaged(&nod[row][col][mom].elements, sizeof(PointW3D));
}
}
}
// Classify the i-th element of the data into a node and add that point to the node with the add function:
for (int i=0; i<np; i++){
row = (int)(dat[i].x/size_node);
col = (int)(dat[i].y/size_node);
mom = (int)(dat[i].z/size_node);
add(nod[row][col][mom].elements, nod[row][col][mom].len, dat[i].x, dat[i].y, dat[i].z, dat[i].w);
}
}
//====================================================================
//============ Kernels Section =======================================
//====================================================================
__device__ void count_distances11(float *XX, PointW3D *elements, int len, float ds, float dd_max, int sum){
/*
This device function counts the distances between points within one node.
Args:
XX: The histogram where the distances are counted in
elements: Array of PointW3D points inside the node
len: length of the elements array
ds: number of bins divided by the maximum distance. Used to calculate the bin it should be counted at
dd_max: The maximum distance of interest.
*/
int bin;
float d, v;
float x1,y1,z1,w1,x2,y2,z2,w2;
for (int i=0; i<len-1; ++i){
x1 = elements[i].x;
y1 = elements[i].y;
z1 = elements[i].z;
w1 = elements[i].w;
for (int j=i+1; j<len; ++j){
x2 = elements[j].x;
y2 = elements[j].y;
z2 = elements[j].z;
w2 = elements[j].w;
d = (x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
if (d<=dd_max+1){
bin = (int)(sqrt(d)*ds);
v = 2*w1*w2;
atomicAdd(&XX[bin],sum);
}
}
}
}
__device__ void count_distances12(float *XX, PointW3D *elements1, int len1, PointW3D *elements2, int len2, float ds, float dd_max, int sum){
/*
This device function counts the distances between points in two different nodes.
Args:
XX: The histogram where the distances are counted in
elements1: Array of PointW3D points inside the first node
len1: length of the first elements array
elements2: Array of PointW3D points inside the second node
len2: length of the second elements array
ds: number of bins divided by the maximum distance. Used to calculate the bin it should be counted at
dd_max: The maximum distance of interest.
*/
int bin;
float d, v;
float x1,y1,z1,w1,x2,y2,z2,w2;
for (int i=0; i<len1; ++i){
x1 = elements1[i].x;
y1 = elements1[i].y;
z1 = elements1[i].z;
w1 = elements1[i].w;
for (int j=0; j<len2; ++j){
x2 = elements2[j].x;
y2 = elements2[j].y;
z2 = elements2[j].z;
w2 = elements2[j].w;
d = (x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
if (d<=dd_max+1){
bin = (int)(sqrt(d)*ds);
v = 2*w1*w2;
atomicAdd(&XX[bin],sum);
}
}
}
}
__device__ void BPC_loop(float *XX, Node ***nodeD, int row, int col, int mom, int partitions, int did_max, float dd_max, float ds, int sum, float size_box, bool x_border, bool y_border, bool z_border, bool x_upperborder, bool y_upperborder, bool z_upperborder, bool x_lowerborder, bool y_lowerborder, bool z_lowerborder){
/*
This device function counts the distances between the points in node (row,col,mom) and the points in nearby nodes, applying the periodic image across the selected box borders.
Args:
XX: The histogram where the distances are counted in
nodeD: Full 3D array of nodes
row, col, mom: indices of the current node
partitions: number of nodes per box side
did_max: maximum internodal separation (in nodes) to consider
dd_max: the maximum distance of interest, squared
ds: number of bins divided by the maximum distance. Used to calculate the bin a pair is counted in
sum: histogram increment added for each counted pair
size_box: side length of the box, used to wrap distances across the selected borders
x_border ... z_lowerborder: flags indicating which box faces are mirrored in this call
*/
int bin, d_node, u, v, w, did_max2=did_max*did_max;
float d, s;
float x1,y1,z1,w1,dx12,dy12,dz12,w2;
int x_from = 10; //((row-did_max)*(row>did_max))*(!x_border) + (partitions-(did_max-row))*(x_lowerborder&&!x_upperborder);
int x_to = partitions-10; //(partitions-1)*((row+did_max>partitions-1 && !x_upperborder)||x_lowerborder) + (row+did_max)*((row+did_max<partitions)&&!x_border) + (!x_lowerborder&&x_upperborder)*(x_from+(did_max-(partitions-1-row)));
int y_from = 10; //((col-did_max)*(col>did_max))*(!y_border) + (partitions-(did_max-col))*(y_lowerborder&&!y_upperborder);
int y_to = partitions-10; // (partitions-1)*((col+did_max>partitions-1 && !y_upperborder)||y_lowerborder) + (col+did_max)*((col+did_max<partitions)&&!y_border) + (!y_lowerborder&&y_upperborder)*(y_from+(did_max-(partitions-1-col)));
int z_from = 10; // ((mom-did_max)*(mom>did_max))*(!z_border) + (partitions-(did_max-mom))*(z_lowerborder&&!z_upperborder);
int z_to = partitions-10; //(partitions-1)*((mom+did_max>partitions-1 && !z_upperborder)||z_lowerborder) + (mom+did_max)*((mom+did_max<partitions)&&!z_border) + (!z_lowerborder&&z_upperborder)*(z_from+(did_max-(partitions-1-mom)));
//If the z direction is not the nearest border, the z index starts at 0 if mom<did_max, or at mom-did_max otherwise.
//If both z borders or ONLY the upper z border are the nearest borders the z index starts from 0
//If ONLY the lower z border is the nearest the z index starts from partitions-(did_max-mom)
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//If both borders are the nearest the highest limit is partitions-1
//If the lower border is the nearest the highest limit is partitions-1
//If the upper border is not the nearest and mom+did_max>partitions the highest limit is partitions-1
//If this is not the border side and mom+did_max<partitions then the highest limit is mom+did_max
//If only the upper border is the nearest border the highest limit is the lower limit + (did_max-(partitions-1-mom))
for (u=x_from; u<=x_to; u++){
for (v=y_from; v<=y_to; v++){
for (w=z_from; w<=z_to; w++){
d_node=(w-mom)*(w-mom) + (v-col)*(v-col) + (u-row)*(u-row);
if (d_node<=did_max2){
for (int i=0; i<nodeD[row][col][mom].len; ++i){
x1 = nodeD[row][col][mom].elements[i].x;
y1 = nodeD[row][col][mom].elements[i].y;
z1 = nodeD[row][col][mom].elements[i].z;
w1 = nodeD[row][col][mom].elements[i].w;
for (int j=0; j<nodeD[u][v][w].len; ++j){
dx12 = fabsf(x1-nodeD[u][v][w].elements[j].x) - size_box*x_border;
dy12 = fabsf(y1-nodeD[u][v][w].elements[j].y) - size_box*y_border;
dz12 = fabsf(z1-nodeD[u][v][w].elements[j].z) - size_box*z_border;
w2 = nodeD[u][v][w].elements[j].w;
d = dx12*dx12 + dy12*dy12 + dz12*dz12;
if (d<=dd_max+1){
bin = (int)(sqrt(d)*ds);
s = 2*w1*w2;
atomicAdd(&XX[bin],sum);
}
}
}
}
}
}
}
}
__global__ void BPC_XX(float *XX_A, float *XX_B, Node ***nodeD, float ds, float d_max, float size_node, float size_box){
/*
This device function counts the distances between points in a node and in that node's periodic reproduction across the border.
Args:
XX: The histogram where the distances are counted in
nodeD: Full array of nodes
ds: number of bins divided by the maximum distance. Used to calculate the bin it should be counted at
dd_max: The maximum distance of interest.
did_max: maximum number of node between two nodes be considered
did_max2: did_max*did_max
size_box: Size of the whole box
*/
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int partitions = (int)(ceilf(size_box/size_node));
if (idx<(partitions*partitions*partitions)){
//Get the node position for this thread
int mom = (int) (idx/(partitions*partitions));
int col = (int) ((idx%(partitions*partitions))/partitions);
int row = idx%partitions;
//printf("%i, %i, %i \n", mom, col,row);
//This may seem redundant, but it keeps these frequently checked values in per-thread variables
float dd_max = d_max*d_max;
int did_max = (int)(ceilf((d_max+size_node*sqrt(3.0))/size_node));
if (idx==0){
printf("Partitions: %i, did_max: %i\n", partitions, did_max);
}
if (nodeD[row][col][mom].len > 0 && (row<did_max-1 || partitions-row<did_max || col<did_max-1 || partitions-col<did_max || mom<did_max-1 || partitions-mom<did_max)){
//The thread is active only if the current node has elements and is near one of the borders
atomicAdd(&XX_A[0],1); //Count how many nodes are considered as near a border
/*
bool x_border=false, y_border=false, z_border=false, x_upperborder=false, y_upperborder=false, z_upperborder=false, x_lowerborder=false, y_lowerborder=false, z_lowerborder=false;
x_border=(row<did_max-1 || partitions-row<did_max);
if (x_border){
x_upperborder=partitions-row<did_max;
x_lowerborder=row<did_max-1;
BPC_loop(XX_A, nodeD, row, col, mom, partitions, did_max, dd_max, ds, 2, size_box, x_border, false, false, x_upperborder, false, false, x_lowerborder, false, false);
}
y_border=(col<did_max-1 || partitions-col<did_max);
if (y_border){
y_upperborder=partitions-col<did_max;
y_lowerborder=col<did_max-1;
//Only Y boundaries
BPC_loop(XX_B, nodeD, row, col, mom, partitions, did_max, dd_max, ds, 2, size_box, false, y_border, false, false, y_upperborder, false, false, y_lowerborder, false);
if (x_border){
//Boundaries in the XY walls
BPC_loop(XX_A, nodeD, row, col, mom, partitions, did_max, dd_max, ds, 2, size_box, x_border, y_border, false, x_upperborder, y_upperborder, false, x_lowerborder, y_lowerborder, false);
}
}
z_border=(mom<did_max-1 || partitions-mom<did_max);
if (z_border){
z_upperborder=partitions-mom<did_max;
z_lowerborder=mom<did_max-1;
//Only Z boundaries
BPC_loop(XX_B, nodeD, row, col, mom, partitions, did_max, dd_max, ds, 2, size_box, false, false, z_border, false, false, z_upperborder, false, false, z_lowerborder);
if (x_border){
//For the ZY corner
BPC_loop(XX_A, nodeD, row, col, mom, partitions, did_max, dd_max, ds, 2, size_box, x_border, false, z_border, x_upperborder, false, z_upperborder, x_lowerborder, false, z_lowerborder);
if (y_border){
//For the XYZ corner
BPC_loop(XX_B, nodeD, row, col, mom, partitions, did_max, dd_max, ds, 2, size_box, x_border, y_border, z_border, x_upperborder, y_upperborder, z_upperborder, x_lowerborder, y_lowerborder, z_lowerborder);
}
}
if (y_border){
//For the YZ
BPC_loop(XX_A, nodeD, row, col, mom, partitions, did_max, dd_max, ds, 2, size_box, false, y_border, z_border, false, y_upperborder, z_upperborder, false, y_lowerborder, z_lowerborder);
}
}
*/
}
}
}
__global__ void make_histoXX(float *XX_A, float *XX_B, Node ***nodeD, float ds, float d_max, float size_node, float size_box){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int partitions = (int)(ceilf(size_box/size_node));
if (idx<(partitions*partitions*partitions)){
//Get the node position for this thread
int mom = (int) (idx/(partitions*partitions));
int col = (int) ((idx%(partitions*partitions))/partitions);
int row = idx%partitions;
//printf("%i, %i, %i \n", mom, col,row)
if (nodeD[row][col][mom].len > 0){
//This may seem redundant, but it keeps these frequently checked values in per-thread variables
float dd_max = d_max*d_max;
int did_max = (int)(ceilf((d_max+size_node*sqrt(3.0))/size_node));
int did_max2 = did_max*did_max;
// Counts distances between points within the same node
if (idx%2==0){ //If the main index is even stores the countings in the XX_A subhistogram
count_distances11(XX_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, ds, dd_max, 2);
} else { //If the main index is odd stores the countings in the XX_B subhistogram
count_distances11(XX_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, ds, dd_max, 2);
}
int u,v,w; // Position index of the second node
int dx_nod12, dy_nod12, dz_nod12, dd_nod12; //Internodal distance
//Second node moving in the Z direction
for(w = mom+1; w<partitions && w-mom<=did_max; w++){
if (idx%2==0){ //If the main index is even stores the countings in the XX_A subhistogram
count_distances12(XX_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[row][col][w].elements, nodeD[row][col][w].len, ds, dd_max, 2);
} else { //If the main index is odd stores the countings in the XX_B subhistogram
count_distances12(XX_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[row][col][w].elements, nodeD[row][col][w].len, ds, dd_max, 2);
}
}
//Second node moving in the YZ plane
for(v=col+1; v<partitions && v-col<=did_max; v++){
dy_nod12 = v-col;
for(w=(mom-did_max)*(mom>did_max); w<partitions && w-mom<=did_max; w++){
dz_nod12 = w-mom;
dd_nod12 = dz_nod12*dz_nod12 + dy_nod12*dy_nod12;
if (dd_nod12<=did_max2){
if (idx%2==0){
count_distances12(XX_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[row][v][w].elements, nodeD[row][v][w].len, ds, dd_max, 2);
} else {
count_distances12(XX_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[row][v][w].elements, nodeD[row][v][w].len, ds, dd_max, 2);
}
}
//}
}
}
//Second node moving in X, Y and Z
for(u = row+1; u < partitions && u-row< did_max; u++){
dx_nod12 = u-row;
for(v = (col-did_max)*(col>did_max); v < partitions && v-col< did_max; v++){
dy_nod12 = v-col;
for(w = (mom-did_max)*(mom>did_max); w < partitions && w-mom< did_max; w++){
dz_nod12 = w-mom;
dd_nod12 = dz_nod12*dz_nod12 + dy_nod12*dy_nod12 + dx_nod12*dx_nod12;
if (dd_nod12<=did_max2){
if (idx%2==0){
count_distances12(XX_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[u][v][w].elements, nodeD[u][v][w].len, ds, dd_max, 2);
} else {
count_distances12(XX_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[u][v][w].elements, nodeD[u][v][w].len, ds, dd_max, 2);
}
}
}
}
}
}
}
}
__global__ void make_histoXY(float *XY_A, float *XY_B, Node ***nodeD, Node ***nodeR, float ds, float d_max, float size_node, float size_box){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int partitions = (int)(ceilf(size_box/size_node));
if (idx<(partitions*partitions*partitions)){
//Get the node position for this thread
int mom = (int) (idx/(partitions*partitions));
int col = (int) ((idx%(partitions*partitions))/partitions);
int row = idx%partitions;
if (nodeD[row][col][mom].len > 0){
//This may seem redundant, but it keeps these frequently checked values in per-thread variables
float dd_max = d_max*d_max;
int did_max = (int)(ceilf((d_max+size_node*sqrt(3.0))/size_node));
int did_max2 = did_max*did_max;
int u,v,w; //Position of the second node
int dx_nod12, dy_nod12, dz_nod12, dd_nod12; //signed, since the second node may lie on either side of the first
//Second node moving in X, Y and Z
//for(u = (row-did_max)*(row>did_max); u < partitions && u-row< did_max; u++){
for(u = 0; u < partitions && u-row< did_max; u++){
dx_nod12 = u-row;
//for(v = (col-did_max)*(col>did_max); v < partitions && v-col< did_max; v++){
for(v = 0; v < partitions && v-col< did_max; v++){
dy_nod12 = v-col;
//for(w = (mom-did_max)*(mom>did_max); w < partitions && w-mom< did_max; w++){
for(w = 0; w < partitions && w-mom< did_max; w++){
dz_nod12 = w-mom;
dd_nod12 = dz_nod12*dz_nod12 + dy_nod12*dy_nod12 + dx_nod12*dx_nod12;
if (dd_nod12<=did_max2){
if (idx%2==0){
count_distances12(XY_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeR[u][v][w].elements, nodeR[u][v][w].len, ds, dd_max, 1);
} else {
count_distances12(XY_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeR[u][v][w].elements, nodeR[u][v][w].len, ds, dd_max, 1);
}
}
}
}
}
}
}
}
__global__ void make_analyticRR(float *RR, float d_max, int bn, float size_box, int n_pts){
/*
Analytic calculation of the RR histogram
*/
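//Expected pair count for a uniform random catalog of n_pts points in a periodic box of volume V:
//RR[a] = (4*pi/3)*(n_pts^2/V)*(r_{a+1}^3 - r_a^3), with r_a = a*dr and dr = d_max/bn (2*acosf(0) == pi)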
int a = threadIdx.x;
if (a < bn){
float dr = (d_max/bn);
float V = size_box*size_box*size_box;
float beta1 = n_pts*n_pts/V;
float alph = 4*(2*acosf(0.0))*(beta1)*dr*dr*dr/3;
float r1, r2;
r2 = (float) a;
r1 = r2+1;
float sum = alph*((r1*r1*r1)-(r2*r2*r2));
atomicAdd(&RR[a],sum);
}
}
int main(int argc, char **argv){
unsigned int np = stoi(argv[3]), bn = stoi(argv[4]);
float dmax = stof(argv[5]);
float ds = ((float)(bn))/dmax, size_box = 250.0, alpha = 2.176;
float size_node = alpha*(size_box/pow((float)(np),1/3.));
unsigned int partitions = (int)(ceil(size_box/size_node));
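//size_node is alpha times the mean inter-particle spacing size_box/np^(1/3); partitions is the resulting number of nodes per box side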
float *DD_A, *DR_A, *DD_B, *DR_B, *RR;
double *DD, *DR;
PointW3D *dataD;
PointW3D *dataR;
cudaMallocManaged(&dataD, np*sizeof(PointW3D));
cudaMallocManaged(&dataR, np*sizeof(PointW3D));
// Name of the files where the results are saved
string nameDD = "DDiso.dat", nameRR = "RRiso.dat", nameDR = "DRiso.dat";
// Allocate memory for the histogram as double
// And the subhistograms as single precision floats
DD = new double[bn];
RR = new float[bn];
DR = new double[bn];
cudaMallocManaged(&DD_A, bn*sizeof(float));
cudaMallocManaged(&DR_A, bn*sizeof(float));
cudaMallocManaged(&DD_B, bn*sizeof(float));
cudaMallocManaged(&DR_B, bn*sizeof(float));
//Initialize the histograms in 0
for (int i = 0; i < bn; i++){
*(DD+i) = 0;
*(RR+i) = 0;
*(DR+i) = 0;
*(DD_A+i) = 0;
*(DR_A+i) = 0;
*(DD_B+i) = 0;
*(DR_B+i) = 0;
}
// Open and read the files to store the data in the arrays
open_files(argv[1], np, dataD);
open_files(argv[2], np, dataR);
//Init the nodes arrays
Node ***nodeD;
Node ***nodeR;
cudaMallocManaged(&nodeR, partitions*sizeof(Node**));
cudaMallocManaged(&nodeD, partitions*sizeof(Node**));
for (int i=0; i<partitions; i++){
cudaMallocManaged(&*(nodeR+i), partitions*sizeof(Node*));
cudaMallocManaged(&*(nodeD+i), partitions*sizeof(Node*));
for (int j=0; j<partitions; j++){
cudaMallocManaged(&*(*(nodeR+i)+j), partitions*sizeof(Node));
cudaMallocManaged(&*(*(nodeD+i)+j), partitions*sizeof(Node));
}
}
//Classify the data into the nodes
make_nodos(nodeD, dataD, partitions, size_node, np);
make_nodos(nodeR, dataR, partitions, size_node, np);
//Get the dimensions of the GPU grid
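//One thread is launched per node: partitions^3 threads in total, packed into 1D blocks of 512 threads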
int blocks = (int)(ceil((float)((partitions*partitions*partitions)/512.0)));
dim3 grid(blocks,1,1);
dim3 block(512,1,1);
clock_t begin = clock();
//Launch the kernels
//make_histoXX<<<grid,block>>>(DD_A, DD_B, nodeD, ds, dmax, size_node, size_box);
BPC_XX<<<grid,block>>>(DD_A, DD_B, nodeD, ds, dmax, size_node, size_box);
//make_histoXY<<<grid,block>>>(DR_A, DR_B, nodeD, nodeR, ds, dmax, size_node, size_box);
blocks = (int)(ceil((float)(bn)/512.0));
dim3 grid_a(blocks,1,1);
dim3 block_a(512,1,1);
make_analyticRR<<<grid_a,block_a>>>(RR, dmax, bn, size_box, np);
//Waits for the GPU to finish
cudaDeviceSynchronize();
cout << "Size of a node " << size_node << endl;
cout << "Nodes considered as boundary: " << DD_A[0] <<endl;
//Check here for errors
cudaError_t error = cudaGetLastError();
cout << "The error code is " << error << endl;
if(error != 0)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("\nSpent time = %.4f seg.\n", time_spent );
//Collect the subhistogram data into the double precision main histograms
//This has to be done on the CPU since the GPU subhistograms are kept in single precision
for (int i = 0; i < bn; i++){
DD[i] = (double)(DD_A[i]+ DD_B[i]);
DR[i] = (double)(DR_A[i]+ DR_B[i]);
}
cout << "Termine de hacer todos los histogramas" << endl;
/*
// Shows the histograms
cout << "\nHistograma DD:" << endl;
int sum = 0;
for (int k = 0; k<bn; k++){
cout << DD[k] << "\t";
sum += DD[k];
}
cout << "Total: " << sum << endl;
cout << "\nHistograma RR:" << endl;
for (int k = 0; k<bn; k++){
cout << RR[k] << "\t";
}
cout << "\nHistograma DR:" << endl;
for (int k = 0; k<bn; k++){
cout << DR[k] << "\t";
}
*/
// Save the histograms
save_histogram(nameDD, bn, DD);
cout << "Saved histogram DD..." << endl;
save_histogram(nameRR, bn, RR);
cout << "Saved histogram RR..." << endl;
save_histogram(nameDR, bn, DR);
cout << "Saved histogram DR..." << endl;
//Free the memory. cudaFree takes the managed pointer itself, not the address of the host variable
cudaFree(dataD);
cudaFree(dataR);
delete[] DD;
delete[] DR;
delete[] RR;
cudaFree(DD_A);
cudaFree(DR_A);
cudaFree(DD_B);
cudaFree(DR_B);
for (int i=0; i<partitions; i++){
for (int j=0; j<partitions; j++){
cudaFree(nodeR[i][j]);
cudaFree(nodeD[i][j]);
}
cudaFree(nodeR[i]);
cudaFree(nodeD[i]);
}
cudaFree(nodeR);
cudaFree(nodeD);
cout << "Program finished..." << endl;
return 0;
}
|
b30f98ce14d4b3c88134b8c8c467431574c1a5c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <opencv2\core.hpp>
#include <opencv2\highgui.hpp>
#include <ctime>
#include <opencv\cv.h>
#include <opencv\highgui.h>
#include <time.h>
#include <iostream>
#define K 16 // K is the size of block KxK
#define W 1 // size of filter is (2W+1)x(2W+1)
using namespace cv;
using namespace std;
__global__ void gpuKernel(unsigned char * dst, const unsigned char *src ,int width, int height )
// this kernel uses the global memory without shared memory, i used width and height as two parameters for judging whether
// the pixels are inside the image size.
{
int index; // pixel location in global memory
//int neighbor; // neighbor location in global memory // i did not use this parameter.
// TODO: define global X and global Y and use these to calculate global offset
// Pixel X and Y in global memory
int X = threadIdx.x + blockIdx.x * blockDim.x;
int Y = threadIdx.y + blockIdx.y * blockDim.y;
// TODO: if global X and global Y are within the image proceed
if (X >= width || Y >= height) return; // only threads that map to a pixel inside the image proceed
// TODO: run the filter 3x3 with two nested for loops
float sum = 0;
index = X + Y*blockDim.x * gridDim.x;
for (int j = -W; j <= W; j++) // W is the filter half width
{
for (int i = -W; i <= W; i++) // W is the filter half length
{
// TODO: calculate X and Y for neighboring pixel
// TODO: if neighbor X and Y are not outside of the image boundary do the calculation of neighbor pixel offset
if ((Y + W < height) && (Y - W >= 0) && (X + W < width) && (X - W >= 0))
// TODO: calculate the filtered value
{
sum = sum + src[(X + i) + (Y + j)*blockDim.x * gridDim.x]; // calculate the sum of 9 pixels
dst[index] = sum / ((2 * W + 1) * (2 * W + 1));
}
else {
dst[index] = src[index]; //I wanted to directly copy the rest of the input image to the output image, so I can get rid of the black line,
// but was not successful; I don't know how to fix this.
}
}
}
//dst[index] = sum / ((2 * W + 1) * (2 * W + 1)); // calculate average of the 9 values and put into the output image.
}
__global__ void gpuKernelTiled(unsigned char * dst, const unsigned char * src, int width, int height) // This is the shared memory version
{
__shared__ unsigned char Tile[K][K];
// TODO: Declare Tile as shared memory
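// Note: the tile has no halo (apron), so neighbours that fall outside this KxK block are fetched from global memory below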
int lx, ly; // lx and ly are location in shared memory
// TODO: define lx and ly
lx = threadIdx.x;
ly = threadIdx.y;
int X, Y, index = 0;
// X and Y are location of pixel in global memory and index is actual pixel location in global memory
X = threadIdx.x + blockIdx.x * blockDim.x;
Y = threadIdx.y + blockIdx.y * blockDim.y;
index = X + Y*blockDim.x * gridDim.x;
// TODO: Read from global memory and put in shared memory
Tile[lx][ly] = src[index];
__syncthreads();
// TODO: fill shared memory
float sum = 0; // sum is the filtered value that you will calculate
// TODO: run your for loops for the filtered values
for (int j = -W; j <= W; j++) // W is the filter half width
{
for (int i = -W; i <= W; i++) // W is the filter half length
{
int tmpx, tmpy;
tmpx = lx + i;
tmpy = ly + j;
if ((tmpx>=0)&&(tmpx<K)&&(tmpy>=0)&&(tmpy<K)) // if the pixels are within the block
{
sum += Tile[tmpx][tmpy];
dst[index] = sum / ((2 * W + 1) * (2 * W + 1));
}
else
{
if ((Y + W < height) && (Y - W >= 0) && (X + W < width) && (X - W >= 0))
// TODO: calculate the filtered value
{
sum = sum + src[(X + i) + (Y + j)*blockDim.x * gridDim.x]; // calculate the sum of 9 pixels
dst[index] = sum / ((2 * W + 1) * (2 * W + 1));
}
else {
dst[index] = src[index]; //I wanted to directly copy the rest of the input image to the output image, so I can get rid of the black line,
// but was not successful; I don't know how to fix this.
}
}
}
}
//dst[index] = sum / ((2 * W + 1) * (2 * W + 1)); // Here the filtered value will be stored in the output
}
// CPU
void cpuFilter(Mat &, const Mat&);
// GPU helper code
hipError_t thresholdWithCudaNoShared(Mat&, const Mat&);
hipError_t thresholdWithCudaWithShared(Mat&, const Mat&);
int main()
{
hipError_t cudaStatus;
clock_t tStart;
int ch = 0;
//Mat inputImage = imread("C:\\Users\\sw5\\Desktop\\course books\\6398\\Home works\\Project 2\\LightHouse_gray.jpg",0);
//Mat inputImage = imread("C:\\Users\\sw5\\Desktop\\course books\\6398\\Home works\\Project 2\\Hydrangeas_gray.jpg",0);
Mat inputImage = imread("C:\\Users\\sw5\\Desktop\\course books\\6398\\Home works\\Project 2\\Desert_gray.jpg", 0);
Mat cpuTHImage(inputImage.rows, inputImage.cols, CV_8UC1);
Mat gpuTHImage(inputImage.rows, inputImage.cols, CV_8UC1);
if (!inputImage.data)
{
printf("Image didn't load properly!\n");
}
else
{
cout << "Enter 1 for CPU \n";
cout << "Enter 2 for GPU \n";
cout << "Enter 3 for CPU + GPU \n";
//cout << "Enter 0 to Exit \n";
cin >> ch;
switch (ch)
{
case 1:
// Calling CPU function
tStart = clock(); //Starting clock
cpuFilter(cpuTHImage, inputImage);
printf("Time taken: %.2fms\n", (double)(clock() - tStart) / (CLOCKS_PER_SEC / 1000)); //Stopping and displaying time
//Displaying Input and Output Images
imshow("Input_Image", inputImage);
imshow("CPU_Output_Image", cpuTHImage);
break;
case 2:
// Calling GPU function
tStart = clock(); //Starting clock
thresholdWithCudaNoShared(gpuTHImage, inputImage);
printf("Time taken: %.2fms\n", (double)(clock() - tStart)/(CLOCKS_PER_SEC/1000)); //Stopping and displaying time
//Displaying Input and Output Images
imshow("Input_Image", inputImage);
imshow("GPU_Output_Image", gpuTHImage);
break;
case 3:
//Calling CPU and GPU function
tStart = clock();
cpuFilter(cpuTHImage, inputImage);
printf("Time taken (CPU): %.2fms\n", (double)(clock() - tStart) / (CLOCKS_PER_SEC / 1000)); //Stopping and displaying time
tStart = clock();
thresholdWithCudaWithShared(gpuTHImage, inputImage);
printf("Time taken (GPU): %.2fms\n", (double)(clock() - tStart)/(CLOCKS_PER_SEC/1000)); //Stopping and displaying time
//Displaying Input and Output Images
imshow("Input_Image", inputImage);
imshow("CPU_Output_Image", cpuTHImage);
imshow("SharedGPU_Output_Image", gpuTHImage);
break;
default:
break;
}
}
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
cvWaitKey(0);
return 0;
}
//CPU Implementation Code
void cpuFilter(Mat& dest, const Mat& src)
{
int rows = src.rows;
int cols = src.cols;
int sum;
// method to deal with edges of the image: basically copy the original image first; the interior is then overwritten with filtered values below.
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
dest.data[j + i*cols] = src.data[j + i*cols];
}
}
for (int i = 1; i < rows-1; i++)
{
for (int j = 1; j < cols-1; j++)
{
if (i > 0 && i < rows - 1 && j > 0 && j < cols - 1)
{
sum = 0;
for (int x = -W; x <= W; x++)
{
for (int y = -W; y <= W; y++)
{
sum = sum + src.data[(j + x) + (i + y)*cols];
}
}
int outindex = j + i*cols;
dest.data[outindex] = sum / ((2 * W + 1)*(2 * W + 1));
}
}
}
}
// Helper function for using CUDA to run the box filter in parallel (global memory version).
//******************************************************************************************************************************************
// Takes as input the output (filtered) image and the input image.
hipError_t thresholdWithCudaNoShared(Mat & outputImg, const Mat & inputImg)
{
// Allocate GPU buffers for the buffers (one input, one output)
unsigned char *dev_dst = 0;
unsigned char *dev_src = 0; // these are the gpu side output and input pointers
int width = inputImg.size().width;
int height = inputImg.size().height;
hipError_t cudaStatus;
hipEvent_t start, stop; // These are your start and stop events to calculate your GPU performance
float time = 0; // This is the gpu time
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// TODO: add your code here to allocate the input pointer on the device. Note the size of the pointer in hipMalloc
cudaStatus = hipMalloc((void**)& dev_src, sizeof(unsigned char)*inputImg.rows*inputImg.cols);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Cuda failed");
goto Error;
}
// TODO: add your code here to allocate the output pointer on the device. Note the size of the pointer in hipMalloc
cudaStatus = hipMalloc((void**)& dev_dst, sizeof(unsigned char)*outputImg.rows*outputImg.cols);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Cuda failed");
goto Error;
}
// Copy input data from host memory to GPU buffers.
// TODO: Add your code here. Use hipMemcpy
cudaStatus = hipMemcpy(dev_src, inputImg.data, sizeof(unsigned char)*inputImg.rows*inputImg.cols, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Cuda failed");
goto Error;
}
// TODO: Launch a kernel on the GPU with one thread for each element. use <<< grid_size (or number of blocks), block_size(or number of threads) >>>
dim3 block(K, K, 1);
dim3 grid(inputImg.cols / K, inputImg.rows / K, 1);
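// Note: integer division truncates, so pixels beyond the last full KxK block (when width or height is not a multiple of K) are never written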
// launch a kernel on the GPU with one thread for each element.
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( gpuKernel) , dim3(grid), dim3(block) , 0, 0, dev_dst, dev_src, width, height);
// TODO: record your stop event on GPU
hipEventRecord(stop);
// TODO: Synchronize stop event
hipEventSynchronize(stop);
// TODO: calculate the time elapsed on GPU
hipEventElapsedTime(&time, start, stop);
printf("Global Memory time=%3.2f ms\n", time);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// TODO: Copy output data from GPU buffer to host memory. use hipMemcpy
cudaStatus = hipMemcpy(outputImg.data, dev_dst, sizeof(unsigned char)*outputImg.rows*outputImg.cols, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "Cuda failed");
goto Error;
}
Error:
hipFree(dev_src);
hipFree(dev_dst);
return cudaStatus;
}
//******************************************************************************************************
hipError_t thresholdWithCudaWithShared(Mat & destImg, const Mat & srcImg)
{
unsigned char *dev_src = 0;
unsigned char *dev_dst = 0;
int width = srcImg.size().width;
int height = srcImg.size().height;
hipError_t cudaStatus; // cuda status variable for errors on GPU
hipEvent_t start, stop; // These are your start and stop events to calculate your GPU performance
float time = 0; // This is the gpu time
// TODO: register your events for GPU
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for two vectors (One input, one output)
cudaStatus = hipMalloc((void**)& dev_src, sizeof(unsigned char) * srcImg.rows * srcImg.cols);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
//target image
cudaStatus = hipMalloc((void **)& dev_dst, sizeof(unsigned char) * destImg.rows * destImg.cols);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_src, srcImg.data, sizeof(unsigned char) * srcImg.rows * srcImg.cols, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy (CPU ->GPU) failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
dim3 block(K, K, 1);
dim3 grid(srcImg.cols / K, srcImg.rows / K, 1);
// TODO: record your start event on GPU
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(gpuKernelTiled, dim3(grid), dim3(block), 0, 0, dev_dst, dev_src, width, height); // invoking the kernel with tiled shared memory
// TODO: record your stop event on GPU
hipEventRecord(stop);
// TODO: Synchronize stop event
hipEventSynchronize(stop);
// TODO: calculate the time elapsed on GPU
hipEventElapsedTime(&time, start, stop);
printf("Shared Memory time=%3.2f ms\n", time);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(destImg.data, dev_dst, sizeof(unsigned char) * destImg.rows * destImg.cols, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy (GPU -> CPU) failed!");
goto Error;
}
Error:
hipFree(dev_src);
hipFree(dev_dst);
return cudaStatus;
}
| b30f98ce14d4b3c88134b8c8c467431574c1a5c3.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <opencv2\core.hpp>
#include <opencv2\highgui.hpp>
#include <ctime>
#include <opencv\cv.h>
#include <opencv\highgui.h>
#include <time.h>
#include <iostream>
#define K 16 // K is the size of block KxK
#define W 1 // size of filter is (2W+1)x(2W+1)
using namespace cv;
using namespace std;
__global__ void gpuKernel(unsigned char * dst, const unsigned char *src ,int width, int height )
// this kernel uses the global memory without shared memory, i used width and height as two parameters for judging whether
// the pixels are inside the image size.
{
int index; // pixel location in global memory
//int neighbor; // neighbor location in global memory // i did not use this parameter.
// TODO: define global X and global Y and use these to calculate global offset
// Pixel X and Y in global memory
int X = threadIdx.x + blockIdx.x * blockDim.x;
int Y = threadIdx.y + blockIdx.y * blockDim.y;
// TODO: if global X and global Y are within the image proceed
if (X >= width || Y >= height) return; // only threads that map to a pixel inside the image proceed
// TODO: run the filter 3x3 with two nested for loops
float sum = 0;
index = X + Y*blockDim.x * gridDim.x;
for (int j = -W; j <= W; j++) // W is the filter half width
{
for (int i = -W; i <= W; i++) // W is the filter half length
{
// TODO: calculate X and Y for neighboring pixel
// TODO: if neighbor X and Y are not outside of the image boundary do the calculation of neighbor pixel offset
if ((Y + W < height) && (Y - W >= 0) && (X + W < width) && (X - W >= 0))
// TODO: calculate the filtered value
{
sum = sum + src[(X + i) + (Y + j)*blockDim.x * gridDim.x]; // calculate the sum of 9 pixels
dst[index] = sum / ((2 * W + 1) * (2 * W + 1));
}
else {
dst[index] = src[index]; //I wanted to directly copy the rest of the input image to the output image, so I can get rid of the black line,
// but was not successful; I don't know how to fix this.
}
}
}
//dst[index] = sum / ((2 * W + 1) * (2 * W + 1)); // calculate average of the 9 values and put into the output image.
}
__global__ void gpuKernelTiled(unsigned char * dst, const unsigned char * src, int width, int height) // This is the shared memory version
{
__shared__ unsigned char Tile[K][K];
// TODO: Declare Tile as shared memory
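// Note: the tile has no halo (apron), so neighbours that fall outside this KxK block are fetched from global memory below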
int lx, ly; // lx and ly are location in shared memory
// TODO: define lx and ly
lx = threadIdx.x;
ly = threadIdx.y;
int X, Y, index = 0;
// X and Y are location of pixel in global memory and index is actual pixel location in global memory
X = threadIdx.x + blockIdx.x * blockDim.x;
Y = threadIdx.y + blockIdx.y * blockDim.y;
index = X + Y*blockDim.x * gridDim.x;
// TODO: Read from global memory and put in shared memory
Tile[lx][ly] = src[index];
__syncthreads();
// TODO: fill shared memory
float sum = 0; // sum is the filtered value that you will calculate
// TODO: run your for loops for the filtered values
for (int j = -W; j <= W; j++) // W is the filter half width
{
for (int i = -W; i <= W; i++) // W is the filter half length
{
int tmpx, tmpy;
tmpx = lx + i;
tmpy = ly + j;
if ((tmpx>=0)&&(tmpx<K)&&(tmpy>=0)&&(tmpy<K)) // if the pixels are within the block
{
sum += Tile[tmpx][tmpy];
dst[index] = sum / ((2 * W + 1) * (2 * W + 1));
}
else
{
if ((Y + W < height) && (Y - W >= 0) && (X + W < width) && (X - W >= 0))
// TODO: calculate the filtered value
{
sum = sum + src[(X + i) + (Y + j)*blockDim.x * gridDim.x]; // calculate the sum of 9 pixels
dst[index] = sum / ((2 * W + 1) * (2 * W + 1));
}
else {
dst[index] = src[index]; //I wanted to directly copy the rest of the input image to the output image, so I can get rid of the black line,
// but was not successful; I don't know how to fix this.
}
}
}
}
//dst[index] = sum / ((2 * W + 1) * (2 * W + 1)); // Here the filtered value will be stored in the output
}
// CPU
void cpuFilter(Mat &, const Mat&);
// GPU helper code
cudaError_t thresholdWithCudaNoShared(Mat&, const Mat&);
cudaError_t thresholdWithCudaWithShared(Mat&, const Mat&);
int main()
{
cudaError_t cudaStatus;
clock_t tStart;
int ch = 0;
//Mat inputImage = imread("C:\\Users\\sw5\\Desktop\\course books\\6398\\Home works\\Project 2\\LightHouse_gray.jpg",0);
//Mat inputImage = imread("C:\\Users\\sw5\\Desktop\\course books\\6398\\Home works\\Project 2\\Hydrangeas_gray.jpg",0);
Mat inputImage = imread("C:\\Users\\sw5\\Desktop\\course books\\6398\\Home works\\Project 2\\Desert_gray.jpg", 0);
Mat cpuTHImage(inputImage.rows, inputImage.cols, CV_8UC1);
Mat gpuTHImage(inputImage.rows, inputImage.cols, CV_8UC1);
if (!inputImage.data)
{
printf("Image didn't load properly!\n");
}
else
{
cout << "Enter 1 for CPU \n";
cout << "Enter 2 for GPU \n";
cout << "Enter 3 for CPU + GPU \n";
//cout << "Enter 0 to Exit \n";
cin >> ch;
switch (ch)
{
case 1:
// Calling CPU function
tStart = clock(); //Starting clock
cpuFilter(cpuTHImage, inputImage);
printf("Time taken: %.2fms\n", (double)(clock() - tStart) / (CLOCKS_PER_SEC / 1000)); //Stopping and displaying time
//Displaying Input and Output Images
imshow("Input_Image", inputImage);
imshow("CPU_Output_Image", cpuTHImage);
break;
case 2:
// Calling GPU function
tStart = clock(); //Starting clock
thresholdWithCudaNoShared(gpuTHImage, inputImage);
printf("Time taken: %.2fms\n", (double)(clock() - tStart)/(CLOCKS_PER_SEC/1000)); //Stopping and displaying time
//Displaying Input and Output Images
imshow("Input_Image", inputImage);
imshow("GPU_Output_Image", gpuTHImage);
break;
case 3:
//Calling CPU and GPU function
tStart = clock();
cpuFilter(cpuTHImage, inputImage);
printf("Time taken (CPU): %.2fms\n", (double)(clock() - tStart) / (CLOCKS_PER_SEC / 1000)); //Stopping and displaying time
tStart = clock();
thresholdWithCudaWithShared(gpuTHImage, inputImage);
printf("Time taken (GPU): %.2fms\n", (double)(clock() - tStart)/(CLOCKS_PER_SEC/1000)); //Stopping and displaying time
//Displaying Input and Output Images
imshow("Input_Image", inputImage);
imshow("CPU_Output_Image", cpuTHImage);
imshow("SharedGPU_Output_Image", gpuTHImage);
break;
default:
break;
}
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
cvWaitKey(0);
return 0;
}
//CPU Implementation Code
void cpuFilter(Mat& dest, const Mat& src)
{
int rows = src.rows;
int cols = src.cols;
int sum;
// method to deal with edges of the image: basically copy the original image first; the interior is then overwritten with filtered values below.
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
dest.data[j + i*cols] = src.data[j + i*cols];
}
}
for (int i = 1; i < rows-1; i++)
{
for (int j = 1; j < cols-1; j++)
{
if (i > 0 && i < rows - 1 && j > 0 && j < cols - 1)
{
sum = 0;
for (int x = -W; x <= W; x++)
{
for (int y = -W; y <= W; y++)
{
sum = sum + src.data[(j + x) + (i + y)*cols];
}
}
int outindex = j + i*cols;
dest.data[outindex] = sum / ((2 * W + 1)*(2 * W + 1));
}
}
}
}
// Helper function for using CUDA to run the box filter in parallel (global memory version).
//******************************************************************************************************************************************
// Takes as input the output (filtered) image and the input image.
cudaError_t thresholdWithCudaNoShared(Mat & outputImg, const Mat & inputImg)
{
// Allocate GPU buffers for the buffers (one input, one output)
unsigned char *dev_dst = 0;
unsigned char *dev_src = 0; // these are the gpu side output and input pointers
int width = inputImg.size().width;
int height = inputImg.size().height;
cudaError_t cudaStatus;
cudaEvent_t start, stop; // These are your start and stop events to calculate your GPU performance
float time = 0; // This is the gpu time
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// TODO: add your code here to allocate the input pointer on the device. Note the size of the pointer in cudaMalloc
cudaStatus = cudaMalloc((void**)& dev_src, sizeof(unsigned char)*inputImg.rows*inputImg.cols);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Cuda failed");
goto Error;
}
// TODO: add your code here to allocate the output pointer on the device. Note the size of the pointer in cudaMalloc
cudaStatus = cudaMalloc((void**)& dev_dst, sizeof(unsigned char)*outputImg.rows*outputImg.cols);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Cuda failed");
goto Error;
}
// Copy input data from host memory to GPU buffers.
// TODO: Add your code here. Use cudaMemcpy
cudaStatus = cudaMemcpy(dev_src, inputImg.data, sizeof(unsigned char)*inputImg.rows*inputImg.cols, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Cuda failed");
goto Error;
}
// TODO: Launch a kernel on the GPU with one thread for each element. use <<< grid_size (or number of blocks), block_size(or number of threads) >>>
dim3 block(K, K, 1);
dim3 grid(inputImg.cols / K, inputImg.rows / K, 1);
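// Note: integer division truncates, so pixels beyond the last full KxK block (when width or height is not a multiple of K) are never written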
// launch a kernel on the GPU with one thread for each element.
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
gpuKernel <<<grid, block >>> (dev_dst, dev_src, width, height);
// TODO: record your stop event on GPU
cudaEventRecord(stop);
// TODO: Synchronize stop event
cudaEventSynchronize(stop);
// TODO: calculate the time elapsed on GPU
cudaEventElapsedTime(&time, start, stop);
printf("Global Memory time=%3.2f ms\n", time);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// TODO: Copy output data from GPU buffer to host memory. use cudaMemcpy
cudaStatus = cudaMemcpy(outputImg.data, dev_dst, sizeof(unsigned char)*outputImg.rows*outputImg.cols, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "Cuda failed");
goto Error;
}
Error:
cudaFree(dev_src);
cudaFree(dev_dst);
return cudaStatus;
}
//******************************************************************************************************
cudaError_t thresholdWithCudaWithShared(Mat & destImg, const Mat & srcImg)
{
unsigned char *dev_src = 0;
unsigned char *dev_dst = 0;
int width = srcImg.size().width;
int height = srcImg.size().height;
cudaError_t cudaStatus; // cuda status variable for errors on GPU
cudaEvent_t start, stop; // These are your start and stop events to calculate your GPU performance
float time = 0; // This is the gpu time
// TODO: register your events for GPU
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for two vectors (One input, one output)
cudaStatus = cudaMalloc((void**)& dev_src, sizeof(unsigned char) * srcImg.rows * srcImg.cols);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
//target image
cudaStatus = cudaMalloc((void **)& dev_dst, sizeof(unsigned char) * destImg.rows * destImg.cols);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_src, srcImg.data, sizeof(unsigned char) * srcImg.rows * srcImg.cols, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy (CPU ->GPU) failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
dim3 block(K, K, 1);
dim3 grid(srcImg.cols / K, srcImg.rows / K, 1);
// TODO: record your start event on GPU
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
gpuKernelTiled <<<grid, block>>>(dev_dst, dev_src, width, height); // invoking the kernel with tiled shared memory
// TODO: record your stop event on GPU
cudaEventRecord(stop);
// TODO: Synchronize stop event
cudaEventSynchronize(stop);
// TODO: calculate the time elapsed on GPU
cudaEventElapsedTime(&time, start, stop);
printf("Shared Memory time=%3.2f ms\n", time);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(destImg.data, dev_dst, sizeof(unsigned char) * destImg.rows * destImg.cols, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy (GPU -> CPU) failed!");
goto Error;
}
Error:
cudaFree(dev_src);
cudaFree(dev_dst);
return cudaStatus;
}
|
cbc5cdc95004a31887b7a1d99ac7676650b3bff4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
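// HardSigmoid forward: y = max(0, min(1, alpha * x + beta)); backward: dx = dy * alpha where 0 < y < 1, and 0 otherwise.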
template <typename T>
__global__ void _HardSigmoid(
const int nthreads,
const T alpha,
const T beta,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
y[i] = max(T(0), min(T(1), fma(x[i], alpha, beta)));
}
}
__global__ void _HardSigmoid(
const int nthreads,
const float alpha,
const float beta,
const half* x,
half* y) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
y[i] =
__float2half(max(0.f, min(1.f, fma(__half2float(x[i]), alpha, beta))));
}
}
template <typename T>
__global__ void _HardSigmoidGrad(
const int nthreads,
const float alpha,
const T* dy,
const T* y,
T* dx) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
dx[i] = (y[i] > T(0) && y[i] < T(1)) ? dy[i] * alpha : T(0);
}
}
template <>
__global__ void _HardSigmoidGrad<half>(
const int nthreads,
const float alpha,
const half* dy,
const half* y,
half* dx) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
const float val = __half2float(y[i]);
dx[i] = __half2float(
(val > 0.f && val < 1.f) ? __half2float(dy[i]) * alpha : 0.f);
}
} // HardSigmoidGrad
} // namespace
/* ------------------- Launcher Separator ------------------- */
template <>
void HardSigmoid<float16, CUDAContext>(
const int count,
const float alpha,
const float beta,
const float16* x,
float16* y,
CUDAContext* ctx) {
hipLaunchKernelGGL(( _HardSigmoid), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
count,
alpha,
beta,
reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
}
template <>
void HardSigmoidGrad<float16, CUDAContext>(
const int count,
const float alpha,
const float16* dy,
const float16* y,
float16* dx,
CUDAContext* ctx) {
hipLaunchKernelGGL(( _HardSigmoidGrad), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
count,
alpha,
reinterpret_cast<const half*>(dy),
reinterpret_cast<const half*>(y),
reinterpret_cast<half*>(dx));
} // HardSigmoidGrad
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void HardSigmoid<T, CUDAContext>( \
const int count, \
const float alpha, \
const float beta, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
hipLaunchKernelGGL(( _HardSigmoid), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
count, T(alpha), T(beta), x, y); \
}
#define DEFINE_GRAD_KERNEL_LAUNCHER(T) \
template <> \
void HardSigmoidGrad<T, CUDAContext>( \
const int count, \
const float alpha, \
const T* dy, \
const T* y, \
T* dx, \
CUDAContext* ctx) { \
hipLaunchKernelGGL(( _HardSigmoidGrad), \
CUDA_BLOCKS(count), \
CUDA_THREADS, \
0, \
ctx->cuda_stream(), count, alpha, dy, y, dx); \
}
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
DEFINE_GRAD_KERNEL_LAUNCHER(float);
DEFINE_GRAD_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER
#undef DEFINE_GRAD_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_ROCM
| cbc5cdc95004a31887b7a1d99ac7676650b3bff4.cu | #ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
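// HardSigmoid forward: y = max(0, min(1, alpha * x + beta)); backward: dx = dy * alpha where 0 < y < 1, and 0 otherwise.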
template <typename T>
__global__ void _HardSigmoid(
const int nthreads,
const T alpha,
const T beta,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
y[i] = max(T(0), min(T(1), fma(x[i], alpha, beta)));
}
}
__global__ void _HardSigmoid(
const int nthreads,
const float alpha,
const float beta,
const half* x,
half* y) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
y[i] =
__float2half(max(0.f, min(1.f, fma(__half2float(x[i]), alpha, beta))));
}
}
template <typename T>
__global__ void _HardSigmoidGrad(
const int nthreads,
const float alpha,
const T* dy,
const T* y,
T* dx) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
dx[i] = (y[i] > T(0) && y[i] < T(1)) ? dy[i] * alpha : T(0);
}
}
template <>
__global__ void _HardSigmoidGrad<half>(
const int nthreads,
const float alpha,
const half* dy,
const half* y,
half* dx) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
const float val = __half2float(y[i]);
dx[i] = __half2float(
(val > 0.f && val < 1.f) ? __half2float(dy[i]) * alpha : 0.f);
}
} // HardSigmoidGrad
} // namespace
/* ------------------- Launcher Separator ------------------- */
template <>
void HardSigmoid<float16, CUDAContext>(
const int count,
const float alpha,
const float beta,
const float16* x,
float16* y,
CUDAContext* ctx) {
_HardSigmoid<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
count,
alpha,
beta,
reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
}
template <>
void HardSigmoidGrad<float16, CUDAContext>(
const int count,
const float alpha,
const float16* dy,
const float16* y,
float16* dx,
CUDAContext* ctx) {
_HardSigmoidGrad<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
count,
alpha,
reinterpret_cast<const half*>(dy),
reinterpret_cast<const half*>(y),
reinterpret_cast<half*>(dx));
} // HardSigmoidGrad
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void HardSigmoid<T, CUDAContext>( \
const int count, \
const float alpha, \
const float beta, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
_HardSigmoid<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
count, T(alpha), T(beta), x, y); \
}
#define DEFINE_GRAD_KERNEL_LAUNCHER(T) \
template <> \
void HardSigmoidGrad<T, CUDAContext>( \
const int count, \
const float alpha, \
const T* dy, \
const T* y, \
T* dx, \
CUDAContext* ctx) { \
_HardSigmoidGrad<<< \
CUDA_BLOCKS(count), \
CUDA_THREADS, \
0, \
ctx->cuda_stream()>>>(count, alpha, dy, y, dx); \
}
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
DEFINE_GRAD_KERNEL_LAUNCHER(float);
DEFINE_GRAD_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER
#undef DEFINE_GRAD_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_CUDA
|
835a04869a78f38331c162504477db7e714e1a7e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "GoniometricFunctionKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
const int size = 1;
const int type = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
GoniometricFunctionKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,size,type);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
GoniometricFunctionKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,size,type);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
GoniometricFunctionKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,size,type);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 835a04869a78f38331c162504477db7e714e1a7e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "GoniometricFunctionKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
const int size = 1;
const int type = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
GoniometricFunctionKernel<<<gridBlock,threadBlock>>>(input,output,size,type);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
GoniometricFunctionKernel<<<gridBlock,threadBlock>>>(input,output,size,type);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
GoniometricFunctionKernel<<<gridBlock,threadBlock>>>(input,output,size,type);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bf65a8c1a3161be1906f7491d4773755f13ce2e8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/filling.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cuspatial/cubic_spline.hpp>
#include <cuspatial/cusparse_error.hpp>
#include <cuspatial/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <hipsparse.h>
namespace { // anonymous
// This functor performs one linear search for each input point in query_coords
struct parallel_search {
template <typename T>
std::enable_if_t<std::is_floating_point<T>::value, std::unique_ptr<cudf::column>> operator()(
cudf::column_view const& t,
cudf::column_view const& curve_ids,
cudf::column_view const& prefixes,
cudf::column_view const& query_coords,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
const T* p_t = t.data<T>();
const int32_t* p_curve_ids = curve_ids.data<int32_t>();
const int32_t* p_prefixes = prefixes.data<int32_t>();
const T* p_query_coords = query_coords.data<T>();
auto result = cudf::make_numeric_column(
curve_ids.type(), t.size(), cudf::mask_state::UNALLOCATED, stream, mr);
int32_t* p_result = result->mutable_view().data<int32_t>();
thrust::for_each(
rmm::exec_policy(stream),
thrust::make_counting_iterator<int>(0),
thrust::make_counting_iterator<int>(query_coords.size()),
[p_t, p_curve_ids, p_prefixes, p_query_coords, p_result] __device__(int index) {
int curve = p_curve_ids[index];
int len = p_prefixes[curve + 1] - p_prefixes[curve];
int h = p_prefixes[curve];
int dh = p_prefixes[curve] - (curve);
// O(n) search, can do log(n) easily
for (int32_t i = 0; i < len; ++i) {
if ((p_t[h + i] + 0.0001 - p_query_coords[index]) > 0.00001) {
p_result[index] = dh + i - 1;
if (i == 0) p_result[index] = index - curve;
return;
}
}
// TODO: Important failure case:
// This will use the final set of coefficients
// for t_ values that are outside of the original
// interpolation range.
p_result[index] = h + len - 2;
});
return result;
};
template <typename T, typename... Args>
std::enable_if_t<not std::is_floating_point<T>::value, std::unique_ptr<cudf::column>> operator()(
Args&&... args)
{
CUSPATIAL_FAIL("Non-floating point operation is not supported.");
}
};
// This functor simply computes the interpolation of each coordinate `t[i]`
// using the coefficients from row `coef_indices[i]`.
struct interpolate {
template <typename T>
std::enable_if_t<std::is_floating_point<T>::value, std::unique_ptr<cudf::column>> operator()(
cudf::column_view const& t,
cudf::column_view const& ids,
cudf::column_view const& coef_indices,
cudf::table_view const& coefficients,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
const T* p_t = t.data<T>();
const int32_t* p_ids = ids.data<int32_t>();
const int32_t* p_coef_indices = coef_indices.data<int32_t>();
const T* p_d3 = coefficients.column(3).data<T>();
const T* p_d2 = coefficients.column(2).data<T>();
const T* p_d1 = coefficients.column(1).data<T>();
const T* p_d0 = coefficients.column(0).data<T>();
auto result =
cudf::make_numeric_column(t.type(), t.size(), cudf::mask_state::UNALLOCATED, stream, mr);
T* p_result = result->mutable_view().data<T>();
thrust::for_each(
rmm::exec_policy(stream),
thrust::make_counting_iterator<int>(0),
thrust::make_counting_iterator<int>(t.size()),
[p_t, p_ids, p_coef_indices, p_d3, p_d2, p_d1, p_d0, p_result] __device__(int index) {
int h = p_coef_indices[index];
p_result[index] =
p_d3[h] + p_t[index] * (p_d2[h] + p_t[index] * (p_d1[h] + (p_t[index] * p_d0[h])));
});
return result;
};
template <typename T, typename... Args>
std::enable_if_t<not std::is_floating_point<T>::value, std::unique_ptr<cudf::column>> operator()(
Args&&... args)
{
CUSPATIAL_FAIL("Non-floating point operation is not supported.");
}
};
// This functor computes the coefficients table for the cubic hermite spline
// specified by the inputs `t` and `y`.
struct coefficients_compute {
template <typename T>
std::enable_if_t<std::is_floating_point<T>::value, void> operator()(
cudf::column_view const& t,
cudf::column_view const& y,
cudf::column_view const& prefixes,
cudf::mutable_column_view const& h,
cudf::mutable_column_view const& i,
cudf::mutable_column_view const& z,
cudf::mutable_column_view const& d3,
cudf::mutable_column_view const& d2,
cudf::mutable_column_view const& d1,
cudf::mutable_column_view const& d0,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
const T* p_t = t.data<T>();
const T* p_y = y.data<T>();
const int32_t* p_prefixes = prefixes.data<int32_t>();
T* p_h = h.data<T>();
T* p_i = i.data<T>();
T* p_z = z.data<T>();
T* p_d3 = d3.data<T>();
T* p_d2 = d2.data<T>();
T* p_d1 = d1.data<T>();
T* p_d0 = d0.data<T>();
thrust::for_each(
rmm::exec_policy(stream),
thrust::make_counting_iterator<int>(1),
thrust::make_counting_iterator<int>(prefixes.size()),
[p_t, p_y, p_prefixes, p_h, p_i, p_z, p_d3, p_d2, p_d1, p_d0] __device__(int index) {
int n = p_prefixes[index] - p_prefixes[index - 1];
int h = p_prefixes[index - 1];
int dh = p_prefixes[index - 1] - (index - 1);
int ci = 0;
for (ci = 0; ci < n - 1; ++ci) {
T a = p_y[h + ci];
T b = p_i[h + ci] - p_h[h + ci] * (p_z[h + ci + 1] + 2 * p_z[h + ci]) / 6;
T c = p_z[h + ci] / 2.0;
T d = (p_z[h + ci + 1] - p_z[h + ci]) / 6 * p_h[h + ci];
T t = p_t[h + ci];
p_d3[dh + ci] = d;
p_d2[dh + ci] = c - 3 * d * t;
p_d1[dh + ci] = b - t * (2 * c - t * (3 * d));
p_d0[dh + ci] = a - t * (b - t * (c - t * d)); // horners
}
});
};
template <typename T>
std::enable_if_t<not std::is_floating_point<T>::value, void> operator()(
cudf::column_view const& t,
cudf::column_view const& y,
cudf::column_view const& prefixes,
cudf::mutable_column_view const& h,
cudf::mutable_column_view const& i,
cudf::mutable_column_view const& z,
cudf::mutable_column_view const& d3,
cudf::mutable_column_view const& d2,
cudf::mutable_column_view const& d1,
cudf::mutable_column_view const& d0,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUSPATIAL_FAIL("Non-floating point operation is not supported.");
}
};
// Computes the diagonal `D` of a large sparse matrix, and also the upper and
// lower diagonals `Dlu`, which in this case are equal.
struct compute_spline_tridiagonals {
template <typename T>
std::enable_if_t<std::is_floating_point<T>::value, void> operator()(
cudf::column_view const& t,
cudf::column_view const& y,
cudf::column_view const& prefixes,
cudf::mutable_column_view const& D,
cudf::mutable_column_view const& Dlu,
cudf::mutable_column_view const& u,
cudf::mutable_column_view const& h,
cudf::mutable_column_view const& i,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
const T* p_t = t.data<T>();
const T* p_y = y.data<T>();
const int32_t* p_prefixes = prefixes.data<int32_t>();
T* p_d = D.data<T>();
T* p_dlu = Dlu.data<T>();
T* p_u = u.data<T>();
T* p_h = h.data<T>();
T* p_i = i.data<T>();
thrust::for_each(rmm::exec_policy(stream),
thrust::make_counting_iterator<int>(1),
thrust::make_counting_iterator<int>(prefixes.size()),
[p_t, p_y, p_prefixes, p_d, p_dlu, p_u, p_h, p_i] __device__(int index) {
int n = p_prefixes[index] - p_prefixes[index - 1];
int h = p_prefixes[index - 1];
int ci = 0;
for (ci = 0; ci < n - 1; ++ci) {
p_h[h + ci] = p_t[h + ci + 1] - p_t[h + ci];
p_i[h + ci] = (p_y[h + ci + 1] - p_y[h + ci]) / p_h[h + ci];
}
for (ci = 0; ci < n - 2; ++ci) {
p_d[h + ci + 1] = (p_h[h + ci + 1] + p_h[h + (n - 2) - ci]) * 2;
p_u[h + ci + 1] = (p_i[h + ci + 1] - p_i[h + (n - 2) - ci]) * 6;
}
for (ci = 0; ci < n - 3; ++ci) { p_dlu[h + ci + 1] = p_i[h + ci + 1]; }
});
}
template <typename T>
std::enable_if_t<not std::is_floating_point<T>::value, void> operator()(
cudf::column_view const& t,
cudf::column_view const& y,
cudf::column_view const& prefixes,
cudf::mutable_column_view const& D,
cudf::mutable_column_view const& Dlu,
cudf::mutable_column_view const& u,
cudf::mutable_column_view const& h,
cudf::mutable_column_view const& i,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUSPATIAL_FAIL("Non-floating point operation is not supported.");
}
};
} // anonymous namespace
namespace cuspatial {
namespace detail {
/**
* @brief Compute cubic interpolations of a set of points based on their
* ids and a coefficient matrix.
*
* @param[in] query_points column of coordinate values to be interpolated.
 * @param[in] spline_ids ids that identify the spline to interpolate each
* coordinate into.
 * @param[in] offsets int32 column of offsets of the source_points.
* This is used to calculate which values from the coefficients are
* used for each interpolation.
* @param[in] source_points column of the original `t` values used
* to compute the coefficients matrix. These source points are used to
* identify which specific spline a given query_point is interpolated with.
* @param[in] coefficients table of spline coefficients produced by
* cubicspline_coefficients.
* @param[in] mr the optional caller specified RMM memory resource
* @param[in] stream the optional caller specified cudaStream
*
* @return cudf::column `y` coordinates interpolated from `x` and `coefs`.
**/
std::unique_ptr<cudf::column> cubicspline_interpolate(cudf::column_view const& query_points,
cudf::column_view const& curve_ids,
cudf::column_view const& prefixes,
cudf::column_view const& source_points,
cudf::table_view const& coefficients,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto coefficient_indices = cudf::type_dispatcher(query_points.type(),
parallel_search{},
query_points,
curve_ids,
prefixes,
source_points,
stream,
mr);
// TPRINT(coefficient_indices->mutable_view(), "parallel_search_");
// TPRINT(query_points, "query_points_");
// TPRINT(curve_ids, "curve_ids_");
// TPRINT(prefixes, "prefixes_");
auto result = cudf::type_dispatcher(query_points.type(),
interpolate{},
query_points,
curve_ids,
coefficient_indices->view(),
coefficients,
stream,
mr);
// TPRINT(query_points, "query_points_");
// TPRINT(curve_ids, "curve_ids_");
// TPRINT(prefixes, "prefixes_");
// cudf::column_view result_view = result->view();
////TPRINT(result_view, "interpolate_");
return result;
}
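// A minimal usage sketch (hypothetical handles, not part of this file): given a
// coefficient table `coefs` produced by cubicspline_coefficients below,
//   auto y = cuspatial::cubicspline_interpolate(
//       query_x, curve_ids, offsets, source_x, coefs->view(), mr);
// returns one interpolated value per query point.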
/**
* @brief Create a table of cubic spline coefficients from columns of coordinates.
*
* Computes coefficients for a natural cubic spline similar to the method
* found on http://mathworld.wolfram.com/CubicSpline.html .
*
* The input data arrays `t` and `y` contain the vertices of many concatenated
* splines.
*
* Currently, all input splines must be the same length. The minimum supported
* length is 5.
*
* @note Ids should be prefixed with a 0, even when only a single spline
* is fit, ids will be {0, 0}
*
* @param[in] t column_view of independent coordinates for fitting splines
* @param[in] y column_view of dependent variables to be fit along t axis
* @param[in] ids of incoming coordinate sets
* @param[in] offsets the exclusive scan of the spline sizes, prefixed by
* 0. For example, for 3 splines of 5 vertices each, the offsets input array
* is {0, 5, 10, 15}.
* @param[in] mr the optional caller specified RMM memory resource
* @param[in] stream the optional caller specified cudaStream
*
* @return cudf::table_view of coefficients for spline interpolation. The size
 * of the table is ((M-n), 4) where M is `t.size()` and n is
* `ids.size()-1`.
**/
std::unique_ptr<cudf::table> cubicspline_coefficients(cudf::column_view const& t,
cudf::column_view const& y,
cudf::column_view const& ids,
cudf::column_view const& prefixes,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// rmm::device_vector<float>::iterator t_rd = rmm::device_vector<float>(t.data<float>());
// TPRINT(t, "t_");
// TPRINT(y, "y_");
// TPRINT(ids, "ids");
// TPRINT(prefixes, "prefixes");
int64_t n = y.size();
auto h_col = make_numeric_column(y.type(), n, cudf::mask_state::UNALLOCATED, stream, mr);
auto i_col = make_numeric_column(y.type(), n, cudf::mask_state::UNALLOCATED, stream, mr);
auto D_col = make_numeric_column(y.type(), n, cudf::mask_state::UNALLOCATED, stream, mr);
auto Dlu_col = make_numeric_column(y.type(), n, cudf::mask_state::UNALLOCATED, stream, mr);
auto Dll_col = make_numeric_column(y.type(), n, cudf::mask_state::UNALLOCATED, stream, mr);
auto u_col = make_numeric_column(y.type(), n, cudf::mask_state::UNALLOCATED, stream, mr);
auto h_buffer = h_col->mutable_view();
auto i_buffer = i_col->mutable_view();
auto D_buffer = D_col->mutable_view();
auto Dlu_buffer = Dll_col->mutable_view();
auto Dll_buffer = Dll_col->mutable_view();
auto u_buffer = u_col->mutable_view();
auto zero = cudf::numeric_scalar<float>(0.0);
auto one = cudf::numeric_scalar<float>(1.0);
cudf::fill_in_place(h_buffer, 0, h_col->size(), zero);
cudf::fill_in_place(i_buffer, 0, i_col->size(), zero);
cudf::fill_in_place(D_buffer, 0, D_col->size(), one);
cudf::fill_in_place(Dlu_buffer, 0, Dlu_col->size(), zero);
cudf::fill_in_place(u_buffer, 0, u_col->size(), zero);
// TPRINT(h_buffer, "h_zero");
// TPRINT(D_buffer, "D_one");
// TPRINT(Dlu_buffer, "Dlu_zero");
cudf::type_dispatcher(y.type(),
compute_spline_tridiagonals{},
t,
y,
prefixes,
D_buffer,
Dlu_buffer,
u_buffer,
h_buffer,
i_buffer,
stream,
mr);
// TPRINT(h_buffer, "h_i");
// TPRINT(i_buffer, "i_i");
// TPRINT(D_buffer, "D_i");
// TPRINT(Dlu_buffer, "Dlu_i");
// TPRINT(u_buffer, "u_i");
// cusparse solve n length m tridiagonal systems
// 4. call cusparse<T>gtsv2() to solve
// 4.1 Get cuSparse library context
// compute inputs:
// handle: the cuSparse library context
// m: size
// n: number of columns of solution matrix B
// dl, d, du: vectors of the diagonal
// B: (ldb, n) dimensional dense matrix to be solved for
// ldb: leading dimension of B
  //     pBuffer: workspace; its size is obtained from gtsv2_bufferSizeExt below
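  // In this particular call, m is batchSize (the uniform number of vertices per
  // spline), the right-hand sides in u are overwritten with the solution in
  // place, batchCount is prefixes.size() - 1, and the stride equals the
  // per-spline length, which is why all splines must currently be equal length.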
hipsparseHandle_t handle;
CUDA_TRY(hipMalloc(&handle, sizeof(hipsparseHandle_t)));
CUSPARSE_TRY(hipsparseCreate(&handle));
size_t pBufferSize;
int32_t batchStride = y.size() / (prefixes.size() - 1);
int32_t batchSize = batchStride;
CUSPARSE_TRY(hipsparseSgtsv2StridedBatch_bufferSizeExt(handle,
batchSize,
Dll_buffer.data<float>(),
D_buffer.data<float>(),
Dlu_buffer.data<float>(),
u_buffer.data<float>(),
prefixes.size() - 1,
batchStride,
&pBufferSize));
rmm::device_vector<float> pBuffer(pBufferSize);
CUSPARSE_TRY(hipsparseSgtsv2StridedBatch(handle,
batchSize,
Dll_buffer.data<float>(),
D_buffer.data<float>(),
Dlu_buffer.data<float>(),
u_buffer.data<float>(),
prefixes.size() - 1,
batchStride,
pBuffer.data().get()));
CUSPARSE_TRY(hipsparseDestroy(handle));
int dn = n - (prefixes.size() - 1);
// Finally, compute coefficients via Horner's scheme
auto d3_col = make_numeric_column(y.type(), dn, cudf::mask_state::UNALLOCATED, stream, mr);
auto d2_col = make_numeric_column(y.type(), dn, cudf::mask_state::UNALLOCATED, stream, mr);
auto d1_col = make_numeric_column(y.type(), dn, cudf::mask_state::UNALLOCATED, stream, mr);
auto d0_col = make_numeric_column(y.type(), dn, cudf::mask_state::UNALLOCATED, stream, mr);
auto d3 = d3_col->mutable_view();
auto d2 = d2_col->mutable_view();
auto d1 = d1_col->mutable_view();
auto d0 = d0_col->mutable_view();
cudf::type_dispatcher(y.type(),
coefficients_compute{},
t,
y,
prefixes,
h_buffer,
i_buffer,
u_buffer,
d3,
d2,
d1,
d0,
stream,
mr);
// TPRINT(h_buffer, "h_buffer_");
// TPRINT(i_buffer, "i_buffer_");
// TPRINT(u_buffer, "u_buffer_");
// TPRINT(d3, "d3");
// TPRINT(d2, "d2");
// TPRINT(d1, "d1");
// TPRINT(d0, "d0");
// Place d3..0 into a table and return
std::vector<std::unique_ptr<cudf::column>> table;
table.push_back(std::move(d3_col));
table.push_back(std::move(d2_col));
table.push_back(std::move(d1_col));
table.push_back(std::move(d0_col));
std::unique_ptr<cudf::table> result = std::make_unique<cudf::table>(move(table));
return result;
}
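// Usage sketch for the function above (hypothetical handles, not part of this
// file): auto coefs = cuspatial::cubicspline_coefficients(t, y, ids, offsets, mr);
// yields the four-column coefficient table consumed by cubicspline_interpolate.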
} // namespace detail
// Calls the interpolate function using default memory resources.
std::unique_ptr<cudf::column> cubicspline_interpolate(cudf::column_view const& query_points,
cudf::column_view const& curve_ids,
cudf::column_view const& prefixes,
cudf::column_view const& source_points,
cudf::table_view const& coefficients,
rmm::mr::device_memory_resource* mr)
{
return cuspatial::detail::cubicspline_interpolate(
query_points, curve_ids, prefixes, source_points, coefficients, rmm::cuda_stream_default, mr);
}
// Calls the coefficients function using default memory resources.
std::unique_ptr<cudf::table> cubicspline_coefficients(cudf::column_view const& t,
cudf::column_view const& y,
cudf::column_view const& ids,
cudf::column_view const& prefixes,
rmm::mr::device_memory_resource* mr)
{
return cuspatial::detail::cubicspline_coefficients(
t, y, ids, prefixes, rmm::cuda_stream_default, mr);
}
} // namespace cuspatial
| bf65a8c1a3161be1906f7491d4773755f13ce2e8.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/filling.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cuspatial/cubic_spline.hpp>
#include <cuspatial/cusparse_error.hpp>
#include <cuspatial/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <cusparse.h>
namespace { // anonymous
// This functor performs one linear search for each input point in query_coords
struct parallel_search {
template <typename T>
std::enable_if_t<std::is_floating_point<T>::value, std::unique_ptr<cudf::column>> operator()(
cudf::column_view const& t,
cudf::column_view const& curve_ids,
cudf::column_view const& prefixes,
cudf::column_view const& query_coords,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
const T* p_t = t.data<T>();
const int32_t* p_curve_ids = curve_ids.data<int32_t>();
const int32_t* p_prefixes = prefixes.data<int32_t>();
const T* p_query_coords = query_coords.data<T>();
auto result = cudf::make_numeric_column(
curve_ids.type(), t.size(), cudf::mask_state::UNALLOCATED, stream, mr);
int32_t* p_result = result->mutable_view().data<int32_t>();
thrust::for_each(
rmm::exec_policy(stream),
thrust::make_counting_iterator<int>(0),
thrust::make_counting_iterator<int>(query_coords.size()),
[p_t, p_curve_ids, p_prefixes, p_query_coords, p_result] __device__(int index) {
int curve = p_curve_ids[index];
int len = p_prefixes[curve + 1] - p_prefixes[curve];
int h = p_prefixes[curve];
int dh = p_prefixes[curve] - (curve);
// O(n) search, can do log(n) easily
for (int32_t i = 0; i < len; ++i) {
if ((p_t[h + i] + 0.0001 - p_query_coords[index]) > 0.00001) {
p_result[index] = dh + i - 1;
if (i == 0) p_result[index] = index - curve;
return;
}
}
// TODO: Important failure case:
// This will use the final set of coefficients
// for t_ values that are outside of the original
// interpolation range.
p_result[index] = h + len - 2;
});
return result;
};
template <typename T, typename... Args>
std::enable_if_t<not std::is_floating_point<T>::value, std::unique_ptr<cudf::column>> operator()(
Args&&... args)
{
CUSPATIAL_FAIL("Non-floating point operation is not supported.");
}
};
// This functor simply computes the interpolation of each coordinate `t[i]`
// using the coefficients from row `coef_indices[i]`.
struct interpolate {
template <typename T>
std::enable_if_t<std::is_floating_point<T>::value, std::unique_ptr<cudf::column>> operator()(
cudf::column_view const& t,
cudf::column_view const& ids,
cudf::column_view const& coef_indices,
cudf::table_view const& coefficients,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
const T* p_t = t.data<T>();
const int32_t* p_ids = ids.data<int32_t>();
const int32_t* p_coef_indices = coef_indices.data<int32_t>();
const T* p_d3 = coefficients.column(3).data<T>();
const T* p_d2 = coefficients.column(2).data<T>();
const T* p_d1 = coefficients.column(1).data<T>();
const T* p_d0 = coefficients.column(0).data<T>();
auto result =
cudf::make_numeric_column(t.type(), t.size(), cudf::mask_state::UNALLOCATED, stream, mr);
T* p_result = result->mutable_view().data<T>();
thrust::for_each(
rmm::exec_policy(stream),
thrust::make_counting_iterator<int>(0),
thrust::make_counting_iterator<int>(t.size()),
[p_t, p_ids, p_coef_indices, p_d3, p_d2, p_d1, p_d0, p_result] __device__(int index) {
int h = p_coef_indices[index];
p_result[index] =
p_d3[h] + p_t[index] * (p_d2[h] + p_t[index] * (p_d1[h] + (p_t[index] * p_d0[h])));
});
return result;
};
template <typename T, typename... Args>
std::enable_if_t<not std::is_floating_point<T>::value, std::unique_ptr<cudf::column>> operator()(
Args&&... args)
{
CUSPATIAL_FAIL("Non-floating point operation is not supported.");
}
};
// This functor computes the coefficients table for the cubic hermite spline
// specified by the inputs `t` and `y`.
struct coefficients_compute {
template <typename T>
std::enable_if_t<std::is_floating_point<T>::value, void> operator()(
cudf::column_view const& t,
cudf::column_view const& y,
cudf::column_view const& prefixes,
cudf::mutable_column_view const& h,
cudf::mutable_column_view const& i,
cudf::mutable_column_view const& z,
cudf::mutable_column_view const& d3,
cudf::mutable_column_view const& d2,
cudf::mutable_column_view const& d1,
cudf::mutable_column_view const& d0,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
const T* p_t = t.data<T>();
const T* p_y = y.data<T>();
const int32_t* p_prefixes = prefixes.data<int32_t>();
T* p_h = h.data<T>();
T* p_i = i.data<T>();
T* p_z = z.data<T>();
T* p_d3 = d3.data<T>();
T* p_d2 = d2.data<T>();
T* p_d1 = d1.data<T>();
T* p_d0 = d0.data<T>();
thrust::for_each(
rmm::exec_policy(stream),
thrust::make_counting_iterator<int>(1),
thrust::make_counting_iterator<int>(prefixes.size()),
[p_t, p_y, p_prefixes, p_h, p_i, p_z, p_d3, p_d2, p_d1, p_d0] __device__(int index) {
int n = p_prefixes[index] - p_prefixes[index - 1];
int h = p_prefixes[index - 1];
int dh = p_prefixes[index - 1] - (index - 1);
int ci = 0;
for (ci = 0; ci < n - 1; ++ci) {
T a = p_y[h + ci];
T b = p_i[h + ci] - p_h[h + ci] * (p_z[h + ci + 1] + 2 * p_z[h + ci]) / 6;
T c = p_z[h + ci] / 2.0;
T d = (p_z[h + ci + 1] - p_z[h + ci]) / 6 * p_h[h + ci];
T t = p_t[h + ci];
p_d3[dh + ci] = d;
p_d2[dh + ci] = c - 3 * d * t;
p_d1[dh + ci] = b - t * (2 * c - t * (3 * d));
p_d0[dh + ci] = a - t * (b - t * (c - t * d)); // horners
}
});
};
template <typename T>
std::enable_if_t<not std::is_floating_point<T>::value, void> operator()(
cudf::column_view const& t,
cudf::column_view const& y,
cudf::column_view const& prefixes,
cudf::mutable_column_view const& h,
cudf::mutable_column_view const& i,
cudf::mutable_column_view const& z,
cudf::mutable_column_view const& d3,
cudf::mutable_column_view const& d2,
cudf::mutable_column_view const& d1,
cudf::mutable_column_view const& d0,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUSPATIAL_FAIL("Non-floating point operation is not supported.");
}
};
// Computes the diagonal `D` of a large sparse matrix, and also the upper and
// lower diagonals `Dlu`, which in this case are equal.
struct compute_spline_tridiagonals {
template <typename T>
std::enable_if_t<std::is_floating_point<T>::value, void> operator()(
cudf::column_view const& t,
cudf::column_view const& y,
cudf::column_view const& prefixes,
cudf::mutable_column_view const& D,
cudf::mutable_column_view const& Dlu,
cudf::mutable_column_view const& u,
cudf::mutable_column_view const& h,
cudf::mutable_column_view const& i,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
const T* p_t = t.data<T>();
const T* p_y = y.data<T>();
const int32_t* p_prefixes = prefixes.data<int32_t>();
T* p_d = D.data<T>();
T* p_dlu = Dlu.data<T>();
T* p_u = u.data<T>();
T* p_h = h.data<T>();
T* p_i = i.data<T>();
thrust::for_each(rmm::exec_policy(stream),
thrust::make_counting_iterator<int>(1),
thrust::make_counting_iterator<int>(prefixes.size()),
[p_t, p_y, p_prefixes, p_d, p_dlu, p_u, p_h, p_i] __device__(int index) {
int n = p_prefixes[index] - p_prefixes[index - 1];
int h = p_prefixes[index - 1];
int ci = 0;
for (ci = 0; ci < n - 1; ++ci) {
p_h[h + ci] = p_t[h + ci + 1] - p_t[h + ci];
p_i[h + ci] = (p_y[h + ci + 1] - p_y[h + ci]) / p_h[h + ci];
}
for (ci = 0; ci < n - 2; ++ci) {
p_d[h + ci + 1] = (p_h[h + ci + 1] + p_h[h + (n - 2) - ci]) * 2;
p_u[h + ci + 1] = (p_i[h + ci + 1] - p_i[h + (n - 2) - ci]) * 6;
}
for (ci = 0; ci < n - 3; ++ci) { p_dlu[h + ci + 1] = p_i[h + ci + 1]; }
});
}
template <typename T>
std::enable_if_t<not std::is_floating_point<T>::value, void> operator()(
cudf::column_view const& t,
cudf::column_view const& y,
cudf::column_view const& prefixes,
cudf::mutable_column_view const& D,
cudf::mutable_column_view const& Dlu,
cudf::mutable_column_view const& u,
cudf::mutable_column_view const& h,
cudf::mutable_column_view const& i,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUSPATIAL_FAIL("Non-floating point operation is not supported.");
}
};
} // anonymous namespace
namespace cuspatial {
namespace detail {
/**
* @brief Compute cubic interpolations of a set of points based on their
* ids and a coefficient matrix.
*
* @param[in] query_points column of coordinate values to be interpolated.
 * @param[in] spline_ids ids that identify the spline to interpolate each
* coordinate into.
 * @param[in] offsets int32 column of offsets of the source_points.
* This is used to calculate which values from the coefficients are
* used for each interpolation.
* @param[in] source_points column of the original `t` values used
* to compute the coefficients matrix. These source points are used to
* identify which specific spline a given query_point is interpolated with.
* @param[in] coefficients table of spline coefficients produced by
* cubicspline_coefficients.
* @param[in] mr the optional caller specified RMM memory resource
* @param[in] stream the optional caller specified cudaStream
*
* @return cudf::column `y` coordinates interpolated from `x` and `coefs`.
**/
std::unique_ptr<cudf::column> cubicspline_interpolate(cudf::column_view const& query_points,
cudf::column_view const& curve_ids,
cudf::column_view const& prefixes,
cudf::column_view const& source_points,
cudf::table_view const& coefficients,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto coefficient_indices = cudf::type_dispatcher(query_points.type(),
parallel_search{},
query_points,
curve_ids,
prefixes,
source_points,
stream,
mr);
// TPRINT(coefficient_indices->mutable_view(), "parallel_search_");
// TPRINT(query_points, "query_points_");
// TPRINT(curve_ids, "curve_ids_");
// TPRINT(prefixes, "prefixes_");
auto result = cudf::type_dispatcher(query_points.type(),
interpolate{},
query_points,
curve_ids,
coefficient_indices->view(),
coefficients,
stream,
mr);
// TPRINT(query_points, "query_points_");
// TPRINT(curve_ids, "curve_ids_");
// TPRINT(prefixes, "prefixes_");
// cudf::column_view result_view = result->view();
////TPRINT(result_view, "interpolate_");
return result;
}
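// A minimal usage sketch (hypothetical handles, not part of this file): given a
// coefficient table `coefs` produced by cubicspline_coefficients below,
//   auto y = cuspatial::cubicspline_interpolate(
//       query_x, curve_ids, offsets, source_x, coefs->view(), mr);
// returns one interpolated value per query point.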
/**
* @brief Create a table of cubic spline coefficients from columns of coordinates.
*
* Computes coefficients for a natural cubic spline similar to the method
* found on http://mathworld.wolfram.com/CubicSpline.html .
*
* The input data arrays `t` and `y` contain the vertices of many concatenated
* splines.
*
* Currently, all input splines must be the same length. The minimum supported
* length is 5.
*
* @note Ids should be prefixed with a 0, even when only a single spline
* is fit, ids will be {0, 0}
*
* @param[in] t column_view of independent coordinates for fitting splines
* @param[in] y column_view of dependent variables to be fit along t axis
* @param[in] ids of incoming coordinate sets
* @param[in] offsets the exclusive scan of the spline sizes, prefixed by
* 0. For example, for 3 splines of 5 vertices each, the offsets input array
* is {0, 5, 10, 15}.
* @param[in] mr the optional caller specified RMM memory resource
* @param[in] stream the optional caller specified cudaStream
*
* @return cudf::table_view of coefficients for spline interpolation. The size
 * of the table is ((M-n), 4) where M is `t.size()` and n is
* `ids.size()-1`.
**/
std::unique_ptr<cudf::table> cubicspline_coefficients(cudf::column_view const& t,
cudf::column_view const& y,
cudf::column_view const& ids,
cudf::column_view const& prefixes,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// rmm::device_vector<float>::iterator t_rd = rmm::device_vector<float>(t.data<float>());
// TPRINT(t, "t_");
// TPRINT(y, "y_");
// TPRINT(ids, "ids");
// TPRINT(prefixes, "prefixes");
int64_t n = y.size();
auto h_col = make_numeric_column(y.type(), n, cudf::mask_state::UNALLOCATED, stream, mr);
auto i_col = make_numeric_column(y.type(), n, cudf::mask_state::UNALLOCATED, stream, mr);
auto D_col = make_numeric_column(y.type(), n, cudf::mask_state::UNALLOCATED, stream, mr);
auto Dlu_col = make_numeric_column(y.type(), n, cudf::mask_state::UNALLOCATED, stream, mr);
auto Dll_col = make_numeric_column(y.type(), n, cudf::mask_state::UNALLOCATED, stream, mr);
auto u_col = make_numeric_column(y.type(), n, cudf::mask_state::UNALLOCATED, stream, mr);
auto h_buffer = h_col->mutable_view();
auto i_buffer = i_col->mutable_view();
auto D_buffer = D_col->mutable_view();
auto Dlu_buffer = Dll_col->mutable_view();
auto Dll_buffer = Dll_col->mutable_view();
auto u_buffer = u_col->mutable_view();
auto zero = cudf::numeric_scalar<float>(0.0);
auto one = cudf::numeric_scalar<float>(1.0);
cudf::fill_in_place(h_buffer, 0, h_col->size(), zero);
cudf::fill_in_place(i_buffer, 0, i_col->size(), zero);
cudf::fill_in_place(D_buffer, 0, D_col->size(), one);
cudf::fill_in_place(Dlu_buffer, 0, Dlu_col->size(), zero);
cudf::fill_in_place(u_buffer, 0, u_col->size(), zero);
// TPRINT(h_buffer, "h_zero");
// TPRINT(D_buffer, "D_one");
// TPRINT(Dlu_buffer, "Dlu_zero");
cudf::type_dispatcher(y.type(),
compute_spline_tridiagonals{},
t,
y,
prefixes,
D_buffer,
Dlu_buffer,
u_buffer,
h_buffer,
i_buffer,
stream,
mr);
// TPRINT(h_buffer, "h_i");
// TPRINT(i_buffer, "i_i");
// TPRINT(D_buffer, "D_i");
// TPRINT(Dlu_buffer, "Dlu_i");
// TPRINT(u_buffer, "u_i");
// cusparse solve n length m tridiagonal systems
// 4. call cusparse<T>gtsv2() to solve
// 4.1 Get cuSparse library context
// compute inputs:
// handle: the cuSparse library context
// m: size
// n: number of columns of solution matrix B
// dl, d, du: vectors of the diagonal
// B: (ldb, n) dimensional dense matrix to be solved for
// ldb: leading dimension of B
  //     pBuffer: workspace; its size is obtained from gtsv2_bufferSizeExt below
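  // In this particular call, m is batchSize (the uniform number of vertices per
  // spline), the right-hand sides in u are overwritten with the solution in
  // place, batchCount is prefixes.size() - 1, and the stride equals the
  // per-spline length, which is why all splines must currently be equal length.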
cusparseHandle_t handle;
CUDA_TRY(cudaMalloc(&handle, sizeof(cusparseHandle_t)));
CUSPARSE_TRY(cusparseCreate(&handle));
size_t pBufferSize;
int32_t batchStride = y.size() / (prefixes.size() - 1);
int32_t batchSize = batchStride;
CUSPARSE_TRY(cusparseSgtsv2StridedBatch_bufferSizeExt(handle,
batchSize,
Dll_buffer.data<float>(),
D_buffer.data<float>(),
Dlu_buffer.data<float>(),
u_buffer.data<float>(),
prefixes.size() - 1,
batchStride,
&pBufferSize));
rmm::device_vector<float> pBuffer(pBufferSize);
CUSPARSE_TRY(cusparseSgtsv2StridedBatch(handle,
batchSize,
Dll_buffer.data<float>(),
D_buffer.data<float>(),
Dlu_buffer.data<float>(),
u_buffer.data<float>(),
prefixes.size() - 1,
batchStride,
pBuffer.data().get()));
CUSPARSE_TRY(cusparseDestroy(handle));
int dn = n - (prefixes.size() - 1);
// Finally, compute coefficients via Horner's scheme
auto d3_col = make_numeric_column(y.type(), dn, cudf::mask_state::UNALLOCATED, stream, mr);
auto d2_col = make_numeric_column(y.type(), dn, cudf::mask_state::UNALLOCATED, stream, mr);
auto d1_col = make_numeric_column(y.type(), dn, cudf::mask_state::UNALLOCATED, stream, mr);
auto d0_col = make_numeric_column(y.type(), dn, cudf::mask_state::UNALLOCATED, stream, mr);
auto d3 = d3_col->mutable_view();
auto d2 = d2_col->mutable_view();
auto d1 = d1_col->mutable_view();
auto d0 = d0_col->mutable_view();
cudf::type_dispatcher(y.type(),
coefficients_compute{},
t,
y,
prefixes,
h_buffer,
i_buffer,
u_buffer,
d3,
d2,
d1,
d0,
stream,
mr);
// TPRINT(h_buffer, "h_buffer_");
// TPRINT(i_buffer, "i_buffer_");
// TPRINT(u_buffer, "u_buffer_");
// TPRINT(d3, "d3");
// TPRINT(d2, "d2");
// TPRINT(d1, "d1");
// TPRINT(d0, "d0");
// Place d3..0 into a table and return
std::vector<std::unique_ptr<cudf::column>> table;
table.push_back(std::move(d3_col));
table.push_back(std::move(d2_col));
table.push_back(std::move(d1_col));
table.push_back(std::move(d0_col));
std::unique_ptr<cudf::table> result = std::make_unique<cudf::table>(move(table));
return result;
}
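// Usage sketch for the function above (hypothetical handles, not part of this
// file): auto coefs = cuspatial::cubicspline_coefficients(t, y, ids, offsets, mr);
// yields the four-column coefficient table consumed by cubicspline_interpolate.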
} // namespace detail
// Calls the interpolate function using default memory resources.
std::unique_ptr<cudf::column> cubicspline_interpolate(cudf::column_view const& query_points,
cudf::column_view const& curve_ids,
cudf::column_view const& prefixes,
cudf::column_view const& source_points,
cudf::table_view const& coefficients,
rmm::mr::device_memory_resource* mr)
{
return cuspatial::detail::cubicspline_interpolate(
query_points, curve_ids, prefixes, source_points, coefficients, rmm::cuda_stream_default, mr);
}
// Calls the coefficients function using default memory resources.
std::unique_ptr<cudf::table> cubicspline_coefficients(cudf::column_view const& t,
cudf::column_view const& y,
cudf::column_view const& ids,
cudf::column_view const& prefixes,
rmm::mr::device_memory_resource* mr)
{
return cuspatial::detail::cubicspline_coefficients(
t, y, ids, prefixes, rmm::cuda_stream_default, mr);
}
} // namespace cuspatial
|
5b51be65a9def983859ed8666e560ea32aa2fa36.hip | // !!! This is a file automatically generated by hipify!!!
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
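//As a quick worked example of the formula above: a mid-grey pixel with
//R = G = B = 128 maps to .299f*128 + .587f*128 + .114f*128 = 128 (the weights
//sum to 1), while a pure green pixel (0, 255, 0) maps to .587f*255, about 150.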
#pragma once
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "utils.h"
__global__ void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int colIdx = threadIdx.x;
int rowIdx = blockIdx.x;
int numCols = blockDim.x;
int index = colIdx + (numCols * rowIdx);
uchar4 rgba = rgbaImage[index];
float channelSum = .299f * (rgba.x) + .587f * (rgba.y) + .114f * (rgba.z);
//float channelSum = (rgba.x + rgba.y + rgba.z ) / 3;
greyImage[index] = channelSum;
}
void test_cuda(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(numCols, 1, 1); //TODO
const dim3 gridSize(numRows, 1, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage);
//hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 5b51be65a9def983859ed8666e560ea32aa2fa36.cu |
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
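//As a quick worked example of the formula above: a mid-grey pixel with
//R = G = B = 128 maps to .299f*128 + .587f*128 + .114f*128 = 128 (the weights
//sum to 1), while a pure green pixel (0, 255, 0) maps to .587f*255, about 150.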
#pragma once
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "utils.h"
__global__ void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int colIdx = threadIdx.x;
int rowIdx = blockIdx.x;
int numCols = blockDim.x;
int index = colIdx + (numCols * rowIdx);
uchar4 rgba = rgbaImage[index];
float channelSum = .299f * (rgba.x) + .587f * (rgba.y) + .114f * (rgba.z);
//float channelSum = (rgba.x + rgba.y + rgba.z ) / 3;
greyImage[index] = channelSum;
}
void test_cuda(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(numCols, 1, 1); //TODO
const dim3 gridSize(numRows, 1, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage);
//cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
42301a0c464d882329ff0f46415502820d1ff458.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reduction_neighbored_pairs_improved_1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *int_array = NULL;
hipMalloc(&int_array, XSIZE*YSIZE);
int *temp_array = NULL;
hipMalloc(&temp_array, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
reduction_neighbored_pairs_improved_1), dim3(gridBlock),dim3(threadBlock), 0, 0, int_array,temp_array,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
reduction_neighbored_pairs_improved_1), dim3(gridBlock),dim3(threadBlock), 0, 0, int_array,temp_array,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
reduction_neighbored_pairs_improved_1), dim3(gridBlock),dim3(threadBlock), 0, 0, int_array,temp_array,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 42301a0c464d882329ff0f46415502820d1ff458.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reduction_neighbored_pairs_improved_1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *int_array = NULL;
cudaMalloc(&int_array, XSIZE*YSIZE);
int *temp_array = NULL;
cudaMalloc(&temp_array, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
reduction_neighbored_pairs_improved_1<<<gridBlock,threadBlock>>>(int_array,temp_array,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
reduction_neighbored_pairs_improved_1<<<gridBlock,threadBlock>>>(int_array,temp_array,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
reduction_neighbored_pairs_improved_1<<<gridBlock,threadBlock>>>(int_array,temp_array,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2fd4bd560e98c24c56e20663ae7230f56c49ca86.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <fstream>
int main(int argc,char **argv)
{
std::ofstream myfile;
myfile.open ("seq_mapping.csv");
// set these variables
unsigned int times = 10;
unsigned int IN_SIZE;
unsigned int IN_BYTES;
unsigned int OUT_SIZE;
unsigned int OUT_BYTES;
for (unsigned int rounds = 0; rounds<30; rounds++)
{
// Setting up variables
IN_SIZE = 1<<rounds;
IN_BYTES = sizeof(unsigned int)*IN_SIZE;
OUT_SIZE = IN_SIZE;
OUT_BYTES = IN_BYTES;
printf("\ni = %d\n", rounds);
printf("\n ARRAY_SIZE = %d\n", IN_SIZE);
printf(" ARRAY_BYTES = %d\n", IN_BYTES);
// Setting host pointers
unsigned int * h_in = (unsigned int*)malloc(IN_BYTES);
unsigned int * h_out = (unsigned int*)malloc(OUT_BYTES);
// Filling h_in
for (unsigned int j = 0; j<IN_SIZE; j++) {h_in[j] = 1;}
// setting up time
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// running the code on the CPU $times times
for (unsigned int k = 0; k<times; k++)
{
for (unsigned int j = 0; j<OUT_SIZE; j++) {h_out[j] = h_in[j];}
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// calculating time
float elapsedTime = .0f;
hipEventElapsedTime(&elapsedTime, start, stop);
elapsedTime = elapsedTime / ((float) times);
printf(" time: %.5f\n", elapsedTime);
free(h_in);
free(h_out);
myfile << elapsedTime << ",";
}
myfile.close();
return 0;
}
| 2fd4bd560e98c24c56e20663ae7230f56c49ca86.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>
int main(int argc,char **argv)
{
std::ofstream myfile;
myfile.open ("seq_mapping.csv");
// set these variables
unsigned int times = 10;
unsigned int IN_SIZE;
unsigned int IN_BYTES;
unsigned int OUT_SIZE;
unsigned int OUT_BYTES;
for (unsigned int rounds = 0; rounds<30; rounds++)
{
// Setting up variables
IN_SIZE = 1<<rounds;
IN_BYTES = sizeof(unsigned int)*IN_SIZE;
OUT_SIZE = IN_SIZE;
OUT_BYTES = IN_BYTES;
printf("\ni = %d\n", rounds);
printf("\n ARRAY_SIZE = %d\n", IN_SIZE);
printf(" ARRAY_BYTES = %d\n", IN_BYTES);
// Setting host pointers
unsigned int * h_in = (unsigned int*)malloc(IN_BYTES);
unsigned int * h_out = (unsigned int*)malloc(OUT_BYTES);
// Filling h_in
for (unsigned int j = 0; j<IN_SIZE; j++) {h_in[j] = 1;}
// setting up time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// running the code on the CPU $times times
for (unsigned int k = 0; k<times; k++)
{
for (unsigned int j = 0; j<OUT_SIZE; j++) {h_out[j] = h_in[j];}
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// calculating time
float elapsedTime = .0f;
cudaEventElapsedTime(&elapsedTime, start, stop);
elapsedTime = elapsedTime / ((float) times);
printf(" time: %.5f\n", elapsedTime);
free(h_in);
free(h_out);
myfile << elapsedTime << ",";
}
myfile.close();
return 0;
}
|
4ae8288882cbf39110013bc61f91a79ab11b5ecd.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018-2019, Michael P. Howard
// This file is part of the azplugins project, released under the Modified BSD License.
// Maintainer: mphoward
#include "PairPotentials.cuh"
namespace azplugins
{
namespace gpu
{
//! Kernel driver for spline pair potential
template hipError_t compute_pair_potential<azplugins::detail::PairEvaluatorSpline>
(const pair_args_t& pair_args,
const typename azplugins::detail::PairEvaluatorSpline::param_type *d_params);
} // end namespace gpu
} // end namespace azplugins
| 4ae8288882cbf39110013bc61f91a79ab11b5ecd.cu | // Copyright (c) 2018-2019, Michael P. Howard
// This file is part of the azplugins project, released under the Modified BSD License.
// Maintainer: mphoward
#include "PairPotentials.cuh"
namespace azplugins
{
namespace gpu
{
//! Kernel driver for spline pair potential
template cudaError_t compute_pair_potential<azplugins::detail::PairEvaluatorSpline>
(const pair_args_t& pair_args,
const typename azplugins::detail::PairEvaluatorSpline::param_type *d_params);
} // end namespace gpu
} // end namespace azplugins
|
94c898d320c8b6ef938cabd94a43ab7c3308d134.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "decoding.h"
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#include <iostream>
namespace fastertransformer
{
template <typename T>
__global__ void embedding_lookup_kernel(const T* embedding_table, const int* word_ids,
const int hidden_units, T* from_tensor)
{
int write_pos = threadIdx.x + blockIdx.x * hidden_units;
from_tensor[write_pos] = embedding_table[word_ids[blockIdx.x] * hidden_units + threadIdx.x] * 32.0f;
}
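// Note: the run-time lookup above applies an extra 32.0f scale to the embedding;
// the initial-step lookup below does not.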
template <typename T>
__global__ void embedding_init_lookup_kernel(const T* embedding_table, const int* word_ids,
const int hidden_units, T* from_tensor)
{
int write_pos = threadIdx.x + blockIdx.x * hidden_units;
from_tensor[write_pos] = embedding_table[word_ids[blockIdx.x] * hidden_units + threadIdx.x] ;
}
/*i*************************************************************************/
template <typename T>
void embedding_init_lookup(const T* embedding_table, const int* word_ids, T* from_tensor,
const int batch_size, const int hidden_units, hipStream_t stream)
{
dim3 grid(batch_size);
dim3 block(hidden_units);
assert(hidden_units <= 1024);
hipLaunchKernelGGL(( embedding_init_lookup_kernel), dim3(grid), dim3(block), 0, stream, embedding_table, word_ids,
hidden_units, from_tensor);
}
template <typename T>
void embedding_lookup(const T* embedding_table, const int* word_ids, T* from_tensor, const int batch_size, const int hidden_units, hipStream_t stream)
{
dim3 grid(batch_size);
dim3 block(hidden_units);
assert(hidden_units <= 1024);
hipLaunchKernelGGL(( embedding_lookup_kernel), dim3(grid), dim3(block), 0, stream, embedding_table, word_ids, hidden_units, from_tensor);
}
template <OperationType OpType_>
DecodingOpenNMT<OpType_>::DecodingOpenNMT(const IAllocator &allocator,
const int batch_size,
const int max_decode_length,
const int head_num,
const int size_per_head,
const int vocab_size,
const int decoder_layers,
const int hidden_units,
const int seq_len,
int* start_id,
const int end_id) :
allocator_(allocator),
batch_size_(batch_size),
max_decode_length_(max_decode_length),
head_num_(head_num),
size_per_head_(size_per_head),
vocab_size_(vocab_size),
decoder_layers_(decoder_layers),
hidden_units_(hidden_units),
start_id_(start_id),
end_id_(end_id),
seq_len_(seq_len)
{
K_cache_ = new DataType_ *[decoder_layers_];
V_cache_ = new DataType_ *[decoder_layers_];
K_mem_cache_ = new DataType_ *[decoder_layers_];
V_mem_cache_ = new DataType_ *[decoder_layers_];
decoder_ = new OpenDecoder<OpType_>(allocator, batch_size, seq_len, head_num, size_per_head, hidden_units, max_decode_length_);
int tensor_size = batch_size_ * hidden_units_; //decoding input
int decoder_workspace_size = decoder_->getWorkspaceSize(); // decoder_buf
int cache_size = batch_size_ * max_decode_length_ * hidden_units_; // cache size
int mem_size = batch_size_ * seq_len_ * hidden_units_;
int decoder_result_size = batch_size * hidden_units_;
int logits_size = batch_size_ * vocab_size_; // type float
int word_ids_size = batch_size_ ; //type int
int finished_size = batch_size_ ; //type bool
int output_size = batch_size_ * max_decode_length_;
int datatype_size = tensor_size * 2 + decoder_workspace_size + (cache_size + mem_size) * 6 * decoder_layers_ + decoder_result_size;
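  // Carve a single device allocation into, in order: the two decoder input/output
  // tensors, the per-layer memory-attention K/V caches, the per-layer self-attention
  // K/V caches, the decoder workspace, the normalized decoder output, the logits,
  // and the word-id / finished-flag / output-id bookkeeping buffers.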
buf_ = reinterpret_cast<void *>(allocator_.malloc(sizeof(DataType_) * datatype_size + sizeof(float) * (logits_size ) + sizeof(int) * word_ids_size + sizeof(bool) * finished_size + sizeof(int) * output_size));
from_tensor_[0] = (DataType_ *)buf_;
from_tensor_[1] = (DataType_ *)(from_tensor_[0] + tensor_size);
for (int i = 0; i < decoder_layers_; ++i)
{
K_mem_cache_[i] = from_tensor_[1] + tensor_size + i * mem_size * 2;
V_mem_cache_[i] = K_mem_cache_[i] + mem_size;
}
for (int i = 0; i < decoder_layers_; ++i)
{
K_cache_[i] = V_mem_cache_[decoder_layers - 1] + mem_size + i * cache_size * 2;
V_cache_[i] = K_cache_[i] + cache_size;
}
decoder_buf_ = V_cache_[decoder_layers - 1] + cache_size;
decoder_result_buf_ = (decoder_buf_ + decoder_workspace_size);
logits_buf_ = (float* )(decoder_result_buf_ + decoder_result_size);
word_ids_buf_ = (int* )(logits_buf_ + logits_size);
finished_buf_ = (bool*)(word_ids_buf_ + word_ids_size);
output_ids_buf_ = (int* )(finished_buf_ + word_ids_size);
  h_finished_buf_ = new bool[finished_size]();  // value-initialize so no sequence starts out marked finished
  FILE *fd = fopen("decoding_gemm_config.in", "r");
  int err = 0;
  if (fd == NULL) printf("[WARNING] decoding_gemm_config.in is not found\n");
  else fclose(fd);  // the config is not parsed here, so just avoid leaking the handle
  if (err != 1)
{
printf("[WARNING] decoding loading GEMM algorithms error, using default GEMM algorithms!\n");
if (Traits_::OpType == OperationType::FP32)
{
cublasAlgo_[0] = HIPBLAS_GEMM_DEFAULT;
}
else
{
cublasAlgo_[0] = HIPBLAS_GEMM_DEFAULT; // hipBLAS exposes no tensor-op algorithm enum, so the default algorithm is the closest HIP mapping for this path
}
}
}
template <OperationType OpType_>
void DecodingOpenNMT<OpType_>::forward(const DecoderInitParam<DataType_> *param,
DecodingInitParam<DataType_> decoding_params)
{
int m = batch_size_;
int k = hidden_units_;
int n = vocab_size_;
int cache_size = batch_size_ * max_decode_length_ * hidden_units_;
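  // Greedy decoding loop: embed the previous tokens, run every decoder layer
  // with ping-pong from_tensor buffers, layer-norm the result, project it onto
  // the vocabulary with a GEMM, and pick the arg-max token for each sequence.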
for (int step = 1; step <= max_decode_length_; ++step)
{
int kv_cache_id = step & 0x1;
if(step == 1)
{
embedding_init_lookup(decoding_params.embedding_table_init, start_id_, from_tensor_[0],
batch_size_, hidden_units_, decoding_params.stream);
}
else
{
embedding_lookup(decoding_params.embedding_table_run, word_ids_buf_, from_tensor_[0],
batch_size_, hidden_units_, decoding_params.stream);
}
hipDeviceSynchronize();
check_cuda_error(hipGetLastError());
int from_id, out_id;
for (int layer = 0; layer < decoder_layers_; ++layer)
{
from_id = layer & 0x1;
out_id = 1 - from_id;
decoder_->initialize(param[layer], decoder_buf_);
decoder_->forward(from_tensor_[from_id], decoding_params.memory_tensor,
K_cache_[layer], V_cache_[layer],
K_mem_cache_[layer], V_mem_cache_[layer],
from_tensor_[out_id], step);
}
decoder_->decoder_norm1(from_tensor_[out_id], decoding_params.layernorm.gamma,
decoding_params.layernorm.beta, decoder_result_buf_, m, k);
float alpha = (float)1.0f;
float beta = (float)0.0f;
check_cuda_error(hipblasGemmEx(decoding_params.cublas_handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
n, m, k,
&alpha,
decoding_params.embedding_kernel, AType_, k,
decoder_result_buf_, BType_, k,
&beta,
logits_buf_, CType_, n,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0])));
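      // Greedy arg-max over each sequence's logits with thrust; the winning ids
      // are copied back to the device as the next step's input. Token id 2 is
      // treated as the end-of-sequence marker here (hard-coded rather than
      // taken from end_id_).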
int* pos = new int[batch_size_];
for(int i=0; i<batch_size_; i++)
{
thrust::device_ptr<float> d_ptr = thrust::device_pointer_cast(logits_buf_ + i * vocab_size_);
thrust::device_ptr<float> iter = thrust::max_element(d_ptr, d_ptr + vocab_size_);
pos[i] = iter - d_ptr;
if(pos[i]==2) h_finished_buf_[i] = true;
printf("%d_max_value: %d \n", i, pos[i]);
};
hipMemcpy(word_ids_buf_, pos, batch_size_*sizeof(int), hipMemcpyHostToDevice);
delete []pos;
int sum = 0;
for(int i = 0; i < batch_size_; i++){
sum += (int)h_finished_buf_[i];
}
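      // Stop once every sequence has emitted the end token, or after 18 steps
      // (a hard-coded cap in this version).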
if(step == 18 || sum == batch_size_) break;
};
}
template void DecodingOpenNMT<OperationType::FP32>::forward(const DecoderInitParam<DataType_> *param,
DecodingInitParam<DataType_> decoding_params);
template DecodingOpenNMT<OperationType::FP32>::DecodingOpenNMT(const IAllocator &allocator,
const int batch_size,
const int max_decode_length,
const int head_num,
const int size_per_head,
const int vocab_size,
const int decoder_layers,
const int hidden_units,
const int seq_len,
int* start_id,
const int end_id);
}
| 94c898d320c8b6ef938cabd94a43ab7c3308d134.cu | #include "decoding.h"
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#include <iostream>
namespace fastertransformer
{
template <typename T>
__global__ void embedding_lookup_kernel(const T* embedding_table, const int* word_ids,
const int hidden_units, T* from_tensor)
{
int write_pos = threadIdx.x + blockIdx.x * hidden_units;
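    // 32.0f is a hard-coded embedding scale; it matches sqrt(hidden_units) only
    // when hidden_units == 1024, which appears to be the intended configuration.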
from_tensor[write_pos] = embedding_table[word_ids[blockIdx.x] * hidden_units + threadIdx.x] * 32.0f;
}
template <typename T>
__global__ void embedding_init_lookup_kernel(const T* embedding_table, const int* word_ids,
const int hidden_units, T* from_tensor)
{
int write_pos = threadIdx.x + blockIdx.x * hidden_units;
from_tensor[write_pos] = embedding_table[word_ids[blockIdx.x] * hidden_units + threadIdx.x] ;
}
/**************************************************************************/
template <typename T>
void embedding_init_lookup(const T* embedding_table, const int* word_ids, T* from_tensor,
const int batch_size, const int hidden_units, cudaStream_t stream)
{
dim3 grid(batch_size);
dim3 block(hidden_units);
assert(hidden_units <= 1024);
embedding_init_lookup_kernel<<<grid, block, 0, stream>>>(embedding_table, word_ids,
hidden_units, from_tensor);
}
template <typename T>
void embedding_lookup(const T* embedding_table, const int* word_ids, T* from_tensor, const int batch_size, const int hidden_units, cudaStream_t stream)
{
dim3 grid(batch_size);
dim3 block(hidden_units);
assert(hidden_units <= 1024);
embedding_lookup_kernel<<<grid, block, 0, stream>>>(embedding_table, word_ids, hidden_units, from_tensor);
}
template <OperationType OpType_>
DecodingOpenNMT<OpType_>::DecodingOpenNMT(const IAllocator &allocator,
const int batch_size,
const int max_decode_length,
const int head_num,
const int size_per_head,
const int vocab_size,
const int decoder_layers,
const int hidden_units,
const int seq_len,
int* start_id,
const int end_id) :
allocator_(allocator),
batch_size_(batch_size),
max_decode_length_(max_decode_length),
head_num_(head_num),
size_per_head_(size_per_head),
vocab_size_(vocab_size),
decoder_layers_(decoder_layers),
hidden_units_(hidden_units),
start_id_(start_id),
end_id_(end_id),
seq_len_(seq_len)
{
K_cache_ = new DataType_ *[decoder_layers_];
V_cache_ = new DataType_ *[decoder_layers_];
K_mem_cache_ = new DataType_ *[decoder_layers_];
V_mem_cache_ = new DataType_ *[decoder_layers_];
decoder_ = new OpenDecoder<OpType_>(allocator, batch_size, seq_len, head_num, size_per_head, hidden_units, max_decode_length_);
int tensor_size = batch_size_ * hidden_units_; //decoding input
int decoder_workspace_size = decoder_->getWorkspaceSize(); // decoder_buf
int cache_size = batch_size_ * max_decode_length_ * hidden_units_; // cache size
int mem_size = batch_size_ * seq_len_ * hidden_units_;
int decoder_result_size = batch_size * hidden_units_;
int logits_size = batch_size_ * vocab_size_; // type float
int word_ids_size = batch_size_ ; //type int
int finished_size = batch_size_ ; //type bool
int output_size = batch_size_ * max_decode_length_;
int datatype_size = tensor_size * 2 + decoder_workspace_size + (cache_size + mem_size) * 6 * decoder_layers_ + decoder_result_size;
buf_ = reinterpret_cast<void *>(allocator_.malloc(sizeof(DataType_) * datatype_size + sizeof(float) * (logits_size ) + sizeof(int) * word_ids_size + sizeof(bool) * finished_size + sizeof(int) * output_size));
from_tensor_[0] = (DataType_ *)buf_;
from_tensor_[1] = (DataType_ *)(from_tensor_[0] + tensor_size);
for (int i = 0; i < decoder_layers_; ++i)
{
K_mem_cache_[i] = from_tensor_[1] + tensor_size + i * mem_size * 2;
V_mem_cache_[i] = K_mem_cache_[i] + mem_size;
}
for (int i = 0; i < decoder_layers_; ++i)
{
K_cache_[i] = V_mem_cache_[decoder_layers - 1] + mem_size + i * cache_size * 2;
V_cache_[i] = K_cache_[i] + cache_size;
}
decoder_buf_ = V_cache_[decoder_layers - 1] + cache_size;
decoder_result_buf_ = (decoder_buf_ + decoder_workspace_size);
logits_buf_ = (float* )(decoder_result_buf_ + decoder_result_size);
word_ids_buf_ = (int* )(logits_buf_ + logits_size);
finished_buf_ = (bool*)(word_ids_buf_ + word_ids_size);
output_ids_buf_ = (int* )(finished_buf_ + word_ids_size);
h_finished_buf_ = new bool[finished_size];
FILE *fd = fopen("decoding_gemm_config.in", "r");
int err = 0;
if (fd == NULL) printf("[WARNING] decoding_gemm_config.in is not found\n");
if (err != 1)
{
printf("[WARNING] decoding loading GEMM algorithms error, using default GEMM algorithms!\n");
if (Traits_::OpType == OperationType::FP32)
{
cublasAlgo_[0] = CUBLAS_GEMM_DEFAULT;
}
else
{
cublasAlgo_[0] = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
}
}
}
template <OperationType OpType_>
void DecodingOpenNMT<OpType_>::forward(const DecoderInitParam<DataType_> *param,
DecodingInitParam<DataType_> decoding_params)
{
int m = batch_size_;
int k = hidden_units_;
int n = vocab_size_;
int cache_size = batch_size_ * max_decode_length_ * hidden_units_;
for (int step = 1; step <= max_decode_length_; ++step)
{
int kv_cache_id = step & 0x1;
if(step == 1)
{
embedding_init_lookup(decoding_params.embedding_table_init, start_id_, from_tensor_[0],
batch_size_, hidden_units_, decoding_params.stream);
}
else
{
embedding_lookup(decoding_params.embedding_table_run, word_ids_buf_, from_tensor_[0],
batch_size_, hidden_units_, decoding_params.stream);
}
cudaDeviceSynchronize();
check_cuda_error(cudaGetLastError());
int from_id, out_id;
for (int layer = 0; layer < decoder_layers_; ++layer)
{
from_id = layer & 0x1;
out_id = 1 - from_id;
decoder_->initialize(param[layer], decoder_buf_);
decoder_->forward(from_tensor_[from_id], decoding_params.memory_tensor,
K_cache_[layer], V_cache_[layer],
K_mem_cache_[layer], V_mem_cache_[layer],
from_tensor_[out_id], step);
}
decoder_->decoder_norm1(from_tensor_[out_id], decoding_params.layernorm.gamma,
decoding_params.layernorm.beta, decoder_result_buf_, m, k);
float alpha = (float)1.0f;
float beta = (float)0.0f;
check_cuda_error(cublasGemmEx(decoding_params.cublas_handle,
CUBLAS_OP_T, CUBLAS_OP_N,
n, m, k,
&alpha,
decoding_params.embedding_kernel, AType_, k,
decoder_result_buf_, BType_, k,
&beta,
logits_buf_, CType_, n,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[0])));
int* pos = new int[batch_size_];
for(int i=0; i<batch_size_; i++)
{
thrust::device_ptr<float> d_ptr = thrust::device_pointer_cast(logits_buf_ + i * vocab_size_);
thrust::device_ptr<float> iter = thrust::max_element(d_ptr, d_ptr + vocab_size_);
pos[i] = iter - d_ptr;
if(pos[i]==2) h_finished_buf_[i] = true;
printf("%d_max_value: %d \n", i, pos[i]);
};
cudaMemcpy(word_ids_buf_, pos, batch_size_*sizeof(int), cudaMemcpyHostToDevice);
delete []pos;
int sum = 0;
for(int i = 0; i < batch_size_; i++){
sum += (int)h_finished_buf_[i];
}
if(step == 18 || sum == batch_size_) break;
};
}
template void DecodingOpenNMT<OperationType::FP32>::forward(const DecoderInitParam<DataType_> *param,
DecodingInitParam<DataType_> decoding_params);
template DecodingOpenNMT<OperationType::FP32>::DecodingOpenNMT(const IAllocator &allocator,
const int batch_size,
const int max_decode_length,
const int head_num,
const int size_per_head,
const int vocab_size,
const int decoder_layers,
const int hidden_units,
const int seq_len,
int* start_id,
const int end_id);
}
|
7f57c9f93d95824c380a76e4f956f570b9f75267.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define TILE_WIDTH 2
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int Width);
__global__ void sMatrixMulKernel(float* Md, float* Nd, float* Pd, int Width);
int main(void){
int width = 5;
//Allocate and initialize the matrices M, N, P
//I/O read the input matrices M, N
float M[width][width], N[width][width], P[width][width];
for (int i=0; i<width; i++){
for(int j=0; j<width; j++){
M[i][j] = 1;
N[i][j] = 2;
}
}
//M*N on the device
float *Md, *Nd, *Pd;
int size = width*width*sizeof(float);
// Load M and N to device mem
hipMalloc((void**)&Md, size);
hipMemcpy(Md, M, size, hipMemcpyHostToDevice);
hipMalloc((void**)&Nd, size);
hipMemcpy(Nd, N, size, hipMemcpyHostToDevice);
//Allocate P on the device
hipMalloc((void**)&Pd, size);
//Kernel invocation code
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
dim3 dimGrid(width/TILE_WIDTH,width/TILE_WIDTH);
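	// NOTE: the tiled kernel assumes width is a multiple of TILE_WIDTH; with
	// width = 5 and TILE_WIDTH = 2 the last row/column of P is never computed
	// and each accumulated dot product misses the final k term.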
hipLaunchKernelGGL(( sMatrixMulKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Md, Nd, Pd, width);
//Read P from the device
hipMemcpy(P, Pd, size, hipMemcpyDeviceToHost);
//Free device matrices
hipFree(Md); hipFree(Nd); hipFree(Pd);
//I/O write the output matrix P
for (int i=0; i<width; i++){
for(int j=0; j<width; j++){
printf("%f ", P[i][j]);
}
printf("\n");
}
//Free matrices M, N, P
return 0;
}
__global__ void MatrixMulKernel(float *Md, float *Nd, float *Pd, int Width){
//2D thread ID
//int tx = threadIdx.x;
//int ty = threadIdx.y;
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
float Pvalue = 0;
for(int k=0; k < Width; ++k){
//float Mdelement = Md[ty * Width + k];
//float Ndelement = Nd[k * Width + tx];
//Pvalue += Mdelement * Ndelement;
Pvalue += Md[Row * Width + k] * Nd[k * Width + Col];
}
Pd[Row * Width + Col] = Pvalue;
}
__global__ void sMatrixMulKernel(float *Md, float *Nd, float *Pd, int Width){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
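	// Classic tiled multiply: each iteration stages one TILE_WIDTH x TILE_WIDTH
	// tile of Md and Nd in shared memory, synchronizes, and accumulates the
	// partial dot product before moving on to the next tile.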
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
for(int m = 0; m < Width/TILE_WIDTH; ++m){
Mds[ty][tx] = Md[Row*Width+(m*TILE_WIDTH+tx)];
Nds[ty][tx] = Nd[Col+(m*TILE_WIDTH+ty)*Width];
__syncthreads();
for(int k=0; k < TILE_WIDTH; ++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
Pd[Row*Width+Col] = Pvalue;
}
| 7f57c9f93d95824c380a76e4f956f570b9f75267.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define TILE_WIDTH 2
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int Width);
__global__ void sMatrixMulKernel(float* Md, float* Nd, float* Pd, int Width);
int main(void){
int width = 5;
//Allocate and initialize the matrices M, N, P
//I/O read the input matrices M, N
float M[width][width], N[width][width], P[width][width];
for (int i=0; i<width; i++){
for(int j=0; j<width; j++){
M[i][j] = 1;
N[i][j] = 2;
}
}
//M*N on the device
float *Md, *Nd, *Pd;
int size = width*width*sizeof(float);
// Load M and N to device mem
cudaMalloc((void**)&Md, size);
cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&Nd, size);
cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
//Allocate P on the device
cudaMalloc((void**)&Pd, size);
//Kernel invocation code
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
dim3 dimGrid(width/TILE_WIDTH,width/TILE_WIDTH);
sMatrixMulKernel<<<dimGrid,dimBlock>>>(Md, Nd, Pd, width);
//Read P from the device
cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
//Free device matrices
cudaFree(Md); cudaFree(Nd); cudaFree(Pd);
//I/O write the output matrix P
for (int i=0; i<width; i++){
for(int j=0; j<width; j++){
printf("%f ", P[i][j]);
}
printf("\n");
}
//Free matrices M, N, P
return 0;
}
__global__ void MatrixMulKernel(float *Md, float *Nd, float *Pd, int Width){
//2D thread ID
//int tx = threadIdx.x;
//int ty = threadIdx.y;
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
float Pvalue = 0;
for(int k=0; k < Width; ++k){
//float Mdelement = Md[ty * Width + k];
//float Ndelement = Nd[k * Width + tx];
//Pvalue += Mdelement * Ndelement;
Pvalue += Md[Row * Width + k] * Nd[k * Width + Col];
}
Pd[Row * Width + Col] = Pvalue;
}
__global__ void sMatrixMulKernel(float *Md, float *Nd, float *Pd, int Width){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
for(int m = 0; m < Width/TILE_WIDTH; ++m){
Mds[ty][tx] = Md[Row*Width+(m*TILE_WIDTH+tx)];
Nds[ty][tx] = Nd[Col+(m*TILE_WIDTH+ty)*Width];
__syncthreads();
for(int k=0; k < TILE_WIDTH; ++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
Pd[Row*Width+Col] = Pvalue;
}
|
c726e034ee74652f7cc89f002ef3aef1319aa144.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"
__global__ void concat_kernel(const int nthreads, const float* in_data,
const bool forward, const int num_concats, const int concat_size,
const int top_concat_axis, const int bottom_concat_axis,
const int offset_concat_axis, float* out_data) {
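  // Maps every element of one input tensor to its slot in the concatenated
  // output along the concat axis; forward copies input->output, backward
  // (used by the gradient) copies output->input.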
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_concat_size = concat_size * bottom_concat_axis;
const int concat_num = index / total_concat_size;
const int concat_index = index % total_concat_size;
const int top_index = concat_index +
(concat_num * top_concat_axis + offset_concat_axis) * concat_size;
if (forward) {
out_data[top_index] = in_data[index];
} else {
out_data[index] = in_data[top_index];
}
}
}
int DLGpuConcat(const DLArrayHandle input_x, const DLArrayHandle input_y, DLArrayHandle output, int axis = 0, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
assert(input_x -> ndim == input_y -> ndim);
assert(input_y -> ndim == output -> ndim);
int now_ndim = input_x -> ndim;
for(int i = 0; i < now_ndim; i++){
if(i != axis){
assert(input_x -> shape[i] == input_y -> shape[i]);
assert(input_y -> shape[i] == output -> shape[i]);
}
else{
assert(input_x -> shape[i] + input_y -> shape[i] == output -> shape[i]);
}
}
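    // When a profiler handle is passed in, the same concat path is duplicated
    // below wrapped in hipEvent timing and memory-footprint bookkeeping;
    // otherwise the kernels are launched directly.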
if(p != NULL){
int size_a = 1, size_b = 1, size_c = 1;
for(int i = 0; i < input_x -> ndim; i++)
size_a *= input_x -> shape[i];
for(int i = 0; i < input_y -> ndim; i++)
size_b *= input_y -> shape[i];
for(int i = 0; i < output -> ndim; i++)
size_c *= output -> shape[i];
p -> input_memory = 1.0 * (size_a + size_b) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_c * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 0;
// Insert the begin and end event.
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventRecord(start,0);
int concat_size = 1;
for(int i = axis + 1; i < now_ndim; i++){
concat_size *= input_x -> shape[i];
}
int num_concats = 1;
for(int i = 0; i< axis; i++){
num_concats *= input_x -> shape[i];
}
int concat_offset = 0;
float *output_data = (float *)(output -> data);
for(int i = 0; i < 2; i++){
int input_concat_axis;
const float *input_data;
if(i == 0){ // input_x
input_concat_axis = input_x -> shape[axis];
input_data = (const float *)(input_x -> data);
}
else{ //input_y
input_concat_axis = input_y -> shape[axis];
input_data = (const float *)(input_y -> data);
}
const int input_concat_size = input_concat_axis * concat_size;
const int nthreads = input_concat_size * num_concats;
const int blocks = (nthreads + THREADS_PER_BLOCK - 1)/ THREADS_PER_BLOCK;
if (stream_handle)
hipLaunchKernelGGL(( concat_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, *(hipStream_t*)stream_handle->handle, nthreads, input_data, true, num_concats, concat_size,
output -> shape[axis], input_concat_axis, concat_offset, output_data);
else
hipLaunchKernelGGL(( concat_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, nthreads, input_data, true, num_concats, concat_size,
output -> shape[axis], input_concat_axis, concat_offset, output_data);
concat_offset += input_concat_axis;
}
float elapsedTime;
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
hipEventDestroy(start);
hipEventDestroy(stop);
p->time = elapsedTime;
}else{
int concat_size = 1;
for(int i = axis + 1; i < now_ndim; i++){
concat_size *= input_x -> shape[i];
}
int num_concats = 1;
for(int i = 0; i< axis; i++){
num_concats *= input_x -> shape[i];
}
int concat_offset = 0;
float *output_data = (float *)(output -> data);
for(int i = 0; i < 2; i++){
int input_concat_axis;
const float *input_data;
if(i == 0){ // input_x
input_concat_axis = input_x -> shape[axis];
input_data = (const float *)(input_x -> data);
}
else{ //input_y
input_concat_axis = input_y -> shape[axis];
input_data = (const float *)(input_y -> data);
}
const int input_concat_size = input_concat_axis * concat_size;
const int nthreads = input_concat_size * num_concats;
const int blocks = (nthreads + THREADS_PER_BLOCK - 1)/ THREADS_PER_BLOCK;
if (stream_handle)
hipLaunchKernelGGL(( concat_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, *(hipStream_t*)stream_handle->handle, nthreads, input_data, true, num_concats, concat_size,
output -> shape[axis], input_concat_axis, concat_offset, output_data);
else
hipLaunchKernelGGL(( concat_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, nthreads, input_data, true, num_concats, concat_size,
output -> shape[axis], input_concat_axis, concat_offset, output_data);
concat_offset += input_concat_axis;
}
}
return 0;
}
int DLGpuConcat_gradient(const DLArrayHandle output_gradient, DLArrayHandle input_gradient, int axis = 0, int id = 0, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
assert(output_gradient -> ndim == input_gradient -> ndim);
if(p != NULL){
int size_a = 1, size_b = 1;
for(int i = 0; i < output_gradient -> ndim; i++)
size_a *= output_gradient -> shape[i];
for(int i = 0; i < input_gradient -> ndim; i++)
size_b *= input_gradient -> shape[i];
p -> input_memory = 1.0 * (size_a) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_b * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 0;
// Insert the begin and end event.
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventRecord(start,0);
int now_ndim = output_gradient -> ndim;
int concat_offset = 0;
for(int i = 0; i< now_ndim; i++){
if(i!=axis){
assert(input_gradient -> shape[i] == output_gradient -> shape[i]);
}
else{
if(id == 1){
concat_offset = (output_gradient -> shape[i]) - (input_gradient -> shape[i]);
}
}
}
int concat_size = 1;
int num_concats = 1;
for(int i = axis + 1; i < now_ndim; i++){
concat_size *= output_gradient -> shape[i];
}
for(int i = 0; i< axis; i++){
num_concats *= output_gradient -> shape[i];
}
const float * grad_out_data = (const float *)(output_gradient -> data);
float * grad_in_data = (float *)(input_gradient -> data);
const int input_concat_axis = input_gradient -> shape[axis];
const int output_concat_axis = output_gradient -> shape[axis];
const int input_concat_size = input_concat_axis * concat_size;
const int nthreads = input_concat_size * num_concats;
const int blocks = (nthreads + THREADS_PER_BLOCK - 1)/ THREADS_PER_BLOCK;
if (stream_handle)
hipLaunchKernelGGL(( concat_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, *(hipStream_t*)stream_handle->handle, nthreads, grad_out_data, false, num_concats, concat_size,
output_concat_axis, input_concat_axis, concat_offset, grad_in_data);
else
hipLaunchKernelGGL(( concat_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, nthreads, grad_out_data, false, num_concats, concat_size,
output_concat_axis, input_concat_axis, concat_offset, grad_in_data);
float elapsedTime;
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
hipEventDestroy(start);
hipEventDestroy(stop);
p->time = elapsedTime;
}else{
int now_ndim = output_gradient -> ndim;
int concat_offset = 0;
for(int i = 0; i< now_ndim; i++){
if(i!=axis){
assert(input_gradient -> shape[i] == output_gradient -> shape[i]);
}
else{
if(id == 1){
concat_offset = (output_gradient -> shape[i]) - (input_gradient -> shape[i]);
}
}
}
int concat_size = 1;
int num_concats = 1;
for(int i = axis + 1; i < now_ndim; i++){
concat_size *= output_gradient -> shape[i];
}
for(int i = 0; i< axis; i++){
num_concats *= output_gradient -> shape[i];
}
const float * grad_out_data = (const float *)(output_gradient -> data);
float * grad_in_data = (float *)(input_gradient -> data);
const int input_concat_axis = input_gradient -> shape[axis];
const int output_concat_axis = output_gradient -> shape[axis];
const int input_concat_size = input_concat_axis * concat_size;
const int nthreads = input_concat_size * num_concats;
const int blocks = (nthreads + THREADS_PER_BLOCK - 1)/ THREADS_PER_BLOCK;
if (stream_handle)
hipLaunchKernelGGL(( concat_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, *(hipStream_t*)stream_handle->handle, nthreads, grad_out_data, false, num_concats, concat_size,
output_concat_axis, input_concat_axis, concat_offset, grad_in_data);
else
hipLaunchKernelGGL(( concat_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, nthreads, grad_out_data, false, num_concats, concat_size,
output_concat_axis, input_concat_axis, concat_offset, grad_in_data);
}
return 0;
}
| c726e034ee74652f7cc89f002ef3aef1319aa144.cu | #include "gpu_runtime.h"
__global__ void concat_kernel(const int nthreads, const float* in_data,
const bool forward, const int num_concats, const int concat_size,
const int top_concat_axis, const int bottom_concat_axis,
const int offset_concat_axis, float* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_concat_size = concat_size * bottom_concat_axis;
const int concat_num = index / total_concat_size;
const int concat_index = index % total_concat_size;
const int top_index = concat_index +
(concat_num * top_concat_axis + offset_concat_axis) * concat_size;
if (forward) {
out_data[top_index] = in_data[index];
} else {
out_data[index] = in_data[top_index];
}
}
}
int DLGpuConcat(const DLArrayHandle input_x, const DLArrayHandle input_y, DLArrayHandle output, int axis = 0, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
assert(input_x -> ndim == input_y -> ndim);
assert(input_y -> ndim == output -> ndim);
int now_ndim = input_x -> ndim;
for(int i = 0; i < now_ndim; i++){
if(i != axis){
assert(input_x -> shape[i] == input_y -> shape[i]);
assert(input_y -> shape[i] == output -> shape[i]);
}
else{
assert(input_x -> shape[i] + input_y -> shape[i] == output -> shape[i]);
}
}
if(p != NULL){
int size_a = 1, size_b = 1, size_c = 1;
for(int i = 0; i < input_x -> ndim; i++)
size_a *= input_x -> shape[i];
for(int i = 0; i < input_y -> ndim; i++)
size_b *= input_y -> shape[i];
for(int i = 0; i < output -> ndim; i++)
size_c *= output -> shape[i];
p -> input_memory = 1.0 * (size_a + size_b) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_c * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 0;
// Insert the begin and end event.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventRecord(start,0);
int concat_size = 1;
for(int i = axis + 1; i < now_ndim; i++){
concat_size *= input_x -> shape[i];
}
int num_concats = 1;
for(int i = 0; i< axis; i++){
num_concats *= input_x -> shape[i];
}
int concat_offset = 0;
float *output_data = (float *)(output -> data);
for(int i = 0; i < 2; i++){
int input_concat_axis;
const float *input_data;
if(i == 0){ // input_x
input_concat_axis = input_x -> shape[axis];
input_data = (const float *)(input_x -> data);
}
else{ //input_y
input_concat_axis = input_y -> shape[axis];
input_data = (const float *)(input_y -> data);
}
const int input_concat_size = input_concat_axis * concat_size;
const int nthreads = input_concat_size * num_concats;
const int blocks = (nthreads + THREADS_PER_BLOCK - 1)/ THREADS_PER_BLOCK;
if (stream_handle)
concat_kernel<<<blocks, THREADS_PER_BLOCK, 0, *(cudaStream_t*)stream_handle->handle>>>(nthreads, input_data, true, num_concats, concat_size,
output -> shape[axis], input_concat_axis, concat_offset, output_data);
else
concat_kernel<<<blocks, THREADS_PER_BLOCK>>>(nthreads, input_data, true, num_concats, concat_size,
output -> shape[axis], input_concat_axis, concat_offset, output_data);
concat_offset += input_concat_axis;
}
float elapsedTime;
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
p->time = elapsedTime;
}else{
int concat_size = 1;
for(int i = axis + 1; i < now_ndim; i++){
concat_size *= input_x -> shape[i];
}
int num_concats = 1;
for(int i = 0; i< axis; i++){
num_concats *= input_x -> shape[i];
}
int concat_offset = 0;
float *output_data = (float *)(output -> data);
for(int i = 0; i < 2; i++){
int input_concat_axis;
const float *input_data;
if(i == 0){ // input_x
input_concat_axis = input_x -> shape[axis];
input_data = (const float *)(input_x -> data);
}
else{ //input_y
input_concat_axis = input_y -> shape[axis];
input_data = (const float *)(input_y -> data);
}
const int input_concat_size = input_concat_axis * concat_size;
const int nthreads = input_concat_size * num_concats;
const int blocks = (nthreads + THREADS_PER_BLOCK - 1)/ THREADS_PER_BLOCK;
if (stream_handle)
concat_kernel<<<blocks, THREADS_PER_BLOCK, 0, *(cudaStream_t*)stream_handle->handle>>>(nthreads, input_data, true, num_concats, concat_size,
output -> shape[axis], input_concat_axis, concat_offset, output_data);
else
concat_kernel<<<blocks, THREADS_PER_BLOCK>>>(nthreads, input_data, true, num_concats, concat_size,
output -> shape[axis], input_concat_axis, concat_offset, output_data);
concat_offset += input_concat_axis;
}
}
return 0;
}
int DLGpuConcat_gradient(const DLArrayHandle output_gradient, DLArrayHandle input_gradient, int axis = 0, int id = 0, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
assert(output_gradient -> ndim == input_gradient -> ndim);
if(p != NULL){
int size_a = 1, size_b = 1;
for(int i = 0; i < output_gradient -> ndim; i++)
size_a *= output_gradient -> shape[i];
for(int i = 0; i < input_gradient -> ndim; i++)
size_b *= input_gradient -> shape[i];
p -> input_memory = 1.0 * (size_a) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_b * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 0;
// Insert the begin and end event.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventRecord(start,0);
int now_ndim = output_gradient -> ndim;
int concat_offset = 0;
for(int i = 0; i< now_ndim; i++){
if(i!=axis){
assert(input_gradient -> shape[i] == output_gradient -> shape[i]);
}
else{
if(id == 1){
concat_offset = (output_gradient -> shape[i]) - (input_gradient -> shape[i]);
}
}
}
int concat_size = 1;
int num_concats = 1;
for(int i = axis + 1; i < now_ndim; i++){
concat_size *= output_gradient -> shape[i];
}
for(int i = 0; i< axis; i++){
num_concats *= output_gradient -> shape[i];
}
const float * grad_out_data = (const float *)(output_gradient -> data);
float * grad_in_data = (float *)(input_gradient -> data);
const int input_concat_axis = input_gradient -> shape[axis];
const int output_concat_axis = output_gradient -> shape[axis];
const int input_concat_size = input_concat_axis * concat_size;
const int nthreads = input_concat_size * num_concats;
const int blocks = (nthreads + THREADS_PER_BLOCK - 1)/ THREADS_PER_BLOCK;
if (stream_handle)
concat_kernel<<<blocks, THREADS_PER_BLOCK, 0, *(cudaStream_t*)stream_handle->handle>>>(nthreads, grad_out_data, false, num_concats, concat_size,
output_concat_axis, input_concat_axis, concat_offset, grad_in_data);
else
concat_kernel<<<blocks, THREADS_PER_BLOCK>>>(nthreads, grad_out_data, false, num_concats, concat_size,
output_concat_axis, input_concat_axis, concat_offset, grad_in_data);
float elapsedTime;
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
p->time = elapsedTime;
}else{
int now_ndim = output_gradient -> ndim;
int concat_offset = 0;
for(int i = 0; i< now_ndim; i++){
if(i!=axis){
assert(input_gradient -> shape[i] == output_gradient -> shape[i]);
}
else{
if(id == 1){
concat_offset = (output_gradient -> shape[i]) - (input_gradient -> shape[i]);
}
}
}
int concat_size = 1;
int num_concats = 1;
for(int i = axis + 1; i < now_ndim; i++){
concat_size *= output_gradient -> shape[i];
}
for(int i = 0; i< axis; i++){
num_concats *= output_gradient -> shape[i];
}
const float * grad_out_data = (const float *)(output_gradient -> data);
float * grad_in_data = (float *)(input_gradient -> data);
const int input_concat_axis = input_gradient -> shape[axis];
const int output_concat_axis = output_gradient -> shape[axis];
const int input_concat_size = input_concat_axis * concat_size;
const int nthreads = input_concat_size * num_concats;
const int blocks = (nthreads + THREADS_PER_BLOCK - 1)/ THREADS_PER_BLOCK;
if (stream_handle)
concat_kernel<<<blocks, THREADS_PER_BLOCK, 0, *(cudaStream_t*)stream_handle->handle>>>(nthreads, grad_out_data, false, num_concats, concat_size,
output_concat_axis, input_concat_axis, concat_offset, grad_in_data);
else
concat_kernel<<<blocks, THREADS_PER_BLOCK>>>(nthreads, grad_out_data, false, num_concats, concat_size,
output_concat_axis, input_concat_axis, concat_offset, grad_in_data);
}
return 0;
}
|
e6d4598b8e5c41c9154d70830f20e6598f5d7118.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
__global__ void pathAdjacencyKernel(int noTransitions, int noSegments, float* XY1, float* XY2, float* X4_X3, float* Y4_Y3, float* X2_X1, float* Y2_Y1, int* adjacency) {
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int idx = blockId * blockDim.x + threadIdx.x;
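    // Standard segment-segment intersection test: u_a and u_b are the
    // parametric positions of the crossing point along each segment; both in
    // [0, 1] means the two segments intersect, which marks the pair in the
    // adjacency output.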
if (idx < noTransitions*noSegments) {
int seg1 = idx/noSegments;
int seg2 = idx - seg1*noSegments;
float Y1_Y3 = XY1[seg1 + noTransitions] - XY2[seg2 + noSegments];
float X1_X3 = XY1[seg1] - XY2[seg2];
float numa = X4_X3[seg2]*Y1_Y3 - Y4_Y3[seg2]*X1_X3;
float numb = X2_X1[seg1]*Y1_Y3 - Y2_Y1[seg1]*X1_X3;
float deno = Y4_Y3[seg2]*X2_X1[seg1] - X4_X3[seg2]*Y2_Y1[seg1];
float u_a = numa/deno;
float u_b = numb/deno;
adjacency[idx] = (int)((u_a >= 0.0) && (u_a <= 1.0) && (u_b >= 0.0)
&& (u_b <= 1.0));
}
} | e6d4598b8e5c41c9154d70830f20e6598f5d7118.cu | #include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
__global__ void pathAdjacencyKernel(int noTransitions, int noSegments, float* XY1, float* XY2, float* X4_X3, float* Y4_Y3, float* X2_X1, float* Y2_Y1, int* adjacency) {
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int idx = blockId * blockDim.x + threadIdx.x;
if (idx < noTransitions*noSegments) {
int seg1 = idx/noSegments;
int seg2 = idx - seg1*noSegments;
float Y1_Y3 = XY1[seg1 + noTransitions] - XY2[seg2 + noSegments];
float X1_X3 = XY1[seg1] - XY2[seg2];
float numa = X4_X3[seg2]*Y1_Y3 - Y4_Y3[seg2]*X1_X3;
float numb = X2_X1[seg1]*Y1_Y3 - Y2_Y1[seg1]*X1_X3;
float deno = Y4_Y3[seg2]*X2_X1[seg1] - X4_X3[seg2]*Y2_Y1[seg1];
float u_a = numa/deno;
float u_b = numb/deno;
adjacency[idx] = (int)((u_a >= 0.0) && (u_a <= 1.0) && (u_b >= 0.0)
&& (u_b <= 1.0));
}
} |
7ac5e26b3b4bb45683e349b1c0ad2f409ca579a2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
//#include "REPEATL.h"
#include "../include/REPEATR.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 32
#define NUM_OF_BLOCKS 1
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 4
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
#define ITERATIONS REPLACE_ITERATIONS
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
int size = (400*max_tid*LINE_SIZE)/sizeof(int);
unsigned j=0, k=0;
int sum=0;
// Fill the L1 cache, Miss on every iteration
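	// REPEAT_L6 is an unrolled macro from ../include/REPEATR.h; presumably it
	// issues a fixed burst of the measured operation per loop iteration so the
	// power draw is dominated by the unit under test.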
for (int i=0; i<ITERATIONS ; i++){
REPEAT_L6(0);
//REPLACE_ITERATIONS
}
/*
// Fill the L1 cache, Miss on first LD, Hit on subsequent LDs
for(k=0; k<ITERATIONS; ++k){
for(j=0; j<(size/2); j+=THREADS_PER_BLOCK){
C[tid+j] = A[tid+j];
}
}
*/
C[0]=sum;
__syncthreads();
}
// Host code
int main(){
printf("Power Microbenchmarks\n");
int N = (400*max_tid*LINE_SIZE);
size_t size = N * sizeof(int) ;
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
//checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
//checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
hipFree(d_A);
//if (d_B)
// hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
| 7ac5e26b3b4bb45683e349b1c0ad2f409ca579a2.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
//#include "REPEATL.h"
#include "../include/REPEATR.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 32
#define NUM_OF_BLOCKS 1
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 4
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
#define ITERATIONS REPLACE_ITERATIONS
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
int size = (400*max_tid*LINE_SIZE)/sizeof(int);
unsigned j=0, k=0;
int sum=0;
// Fill the L1 cache, Miss on every iteration
for (int i=0; i<ITERATIONS ; i++){
REPEAT_L6(0);
//REPLACE_ITERATIONS
}
/*
// Fill the L1 cache, Miss on first LD, Hit on subsequent LDs
for(k=0; k<ITERATIONS; ++k){
for(j=0; j<(size/2); j+=THREADS_PER_BLOCK){
C[tid+j] = A[tid+j];
}
}
*/
C[0]=sum;
__syncthreads();
}
// Host code
int main(){
printf("Power Microbenchmarks\n");
int N = (400*max_tid*LINE_SIZE);
size_t size = N * sizeof(int) ;
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
//checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
//checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
cudaFree(d_A);
//if (d_B)
// cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
559d066a3b4f7db80eab3e8d7649696a5fc32a57.hip | // !!! This is a file automatically generated by hipify!!!
template<typename _Op, typename _Scalar, int _Axis, int MiniBatch = 16>
void ReduceDevice(
const Matrix<_Scalar, Dynamic, Dynamic, Dynamic, ColumnMajor>& in,
Matrix<_Scalar, Dynamic, Dynamic, Dynamic, ColumnMajor>& out,
const _Op& op, const _Scalar& initial)
{
const Index N = NumEntries(in.rows(), in.cols(), in.batches());
const Index B = NumBatches(in.rows(), in.cols(), in.batches());
const Index S = Stride(in.rows(), in.cols(), in.batches());
const _Scalar* input = in.data();
_Scalar* output = out.data();
//create substreams and event for synchronization
Context substreams[MiniBatch];
Event event;
size_t temp_storage_bytes = 0;
DevicePointer<uint8_t> temp_storage[MiniBatch];
hipStream_t mainStream = Context::current().stream();
int Bmin = ::min(int(B), MiniBatch);
//initialize temporal storage and add sync points
event.record(mainStream);
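	// This first Reduce call passes a null workspace pointer, so hipCUB only
	// computes temp_storage_bytes and performs no reduction yet.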
hipcub::DeviceReduce::Reduce(
nullptr,
temp_storage_bytes,
make_strided_iterator(input, S),
output,
int(N), op, initial,
substreams[0].stream());
for (int b=0; b<Bmin; ++b)
{
event.streamWait(substreams[b].stream());
temp_storage[b] = DevicePointer<uint8_t>(temp_storage_bytes, substreams[b]);
}
//perform reduction
for (Index b=0; b<B; ++b)
{
const int i = b % MiniBatch;
Index O = Offset(in.rows(), in.cols(), in.batches(), b);
hipcub::DeviceReduce::Reduce(
temp_storage[i].pointer(),
temp_storage_bytes,
make_strided_iterator(input + O, S),
output + b,
int(N), op, initial,
substreams[i].stream());
}
//add sync points
for (int b = 0; b < Bmin; ++b)
{
event.record(substreams[b].stream());
event.streamWait(mainStream);
}
} | 559d066a3b4f7db80eab3e8d7649696a5fc32a57.cu | template<typename _Op, typename _Scalar, int _Axis, int MiniBatch = 16>
void ReduceDevice(
const Matrix<_Scalar, Dynamic, Dynamic, Dynamic, ColumnMajor>& in,
Matrix<_Scalar, Dynamic, Dynamic, Dynamic, ColumnMajor>& out,
const _Op& op, const _Scalar& initial)
{
const Index N = NumEntries(in.rows(), in.cols(), in.batches());
const Index B = NumBatches(in.rows(), in.cols(), in.batches());
const Index S = Stride(in.rows(), in.cols(), in.batches());
const _Scalar* input = in.data();
_Scalar* output = out.data();
//create substreams and event for synchronization
Context substreams[MiniBatch];
Event event;
size_t temp_storage_bytes = 0;
DevicePointer<uint8_t> temp_storage[MiniBatch];
cudaStream_t mainStream = Context::current().stream();
int Bmin = std::min(int(B), MiniBatch);
//initialize temporal storage and add sync points
event.record(mainStream);
cub::DeviceReduce::Reduce(
nullptr,
temp_storage_bytes,
make_strided_iterator(input, S),
output,
int(N), op, initial,
substreams[0].stream());
for (int b=0; b<Bmin; ++b)
{
event.streamWait(substreams[b].stream());
temp_storage[b] = DevicePointer<uint8_t>(temp_storage_bytes, substreams[b]);
}
//perform reduction
for (Index b=0; b<B; ++b)
{
const int i = b % MiniBatch;
Index O = Offset(in.rows(), in.cols(), in.batches(), b);
cub::DeviceReduce::Reduce(
temp_storage[i].pointer(),
temp_storage_bytes,
make_strided_iterator(input + O, S),
output + b,
int(N), op, initial,
substreams[i].stream());
}
//add sync points
for (int b = 0; b < Bmin; ++b)
{
event.record(substreams[b].stream());
event.streamWait(mainStream);
}
} |
690d159c242ca77180c3993cb33e55182ab4340a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample demonstrates how use texture fetches in CUDA
*
* This sample takes an input PGM image (image_filename) and generates
* an output PGM image (image_filename_out). This CUDA kernel performs
* a simple 2D transform (rotation) on the texture coordinates (u,v).
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil_inline.h>
#include <shrQATest.h>
// includes, kernels
#include "simpleTexture_kernel.cu"
char *image_filename = "lena_bw.pgm";
char *ref_filename = "ref_rotated.pgm";
float angle = 0.5f; // angle to rotate image by (in radians)
#define MIN_EPSILON_ERROR 5e-3f
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
/*
int
main( int argc, char** argv)
{
runTest( argc, argv);
}
*/
#include <thrust/host_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <cstdlib>
#include <iostream>
#include <iterator>
// defines the function prototype
//#include "device.h"
//#include <device.cu>
void
runTestThrust( int argc, char** argv)
{
// generate 20 random numbers on the host
thrust::host_vector<int> h_vec(20);
//thrust::device_vector<float4> hj
thrust::generate(h_vec.begin(), h_vec.end(), rand);
// interface to CUDA code
//sort_on_device(h_vec);
// print sorted array
thrust::copy(h_vec.begin(), h_vec.end(), std::ostream_iterator<int>(std::cout, "\n"));
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
bool bTestResult = true;
shrQAStart(argc, argv);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
{
cutilDeviceInit(argc, argv);
}
else
{
cutilSafeCall( hipSetDevice( cutGetMaxGflopsDeviceId() ) );
}
// load image from disk
float* h_data = NULL;
unsigned int width, height;
char* image_path = cutFindFilePath(image_filename, argv[0]);
if (image_path == NULL) {
printf("Unable to source image file: %s\n", image_filename);
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
}
cutilCheckError( cutLoadPGMf(image_path, &h_data, &width, &height));
unsigned int size = width * height * sizeof(float);
printf("Loaded '%s', %d x %d pixels\n", image_filename, width, height);
// load reference image from image (output)
float *h_data_ref = (float*) malloc(size);
char* ref_path = cutFindFilePath(ref_filename, argv[0]);
if (ref_path == NULL) {
printf("Unable to find reference image file: %s\n", ref_filename);
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
}
cutilCheckError( cutLoadPGMf(ref_path, &h_data_ref, &width, &height));
// allocate device memory for result
float* d_data = NULL;
cutilSafeCall( hipMalloc( (void**) &d_data, size));
// allocate array and copy image data
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipArray* cu_array;
cutilSafeCall( hipMallocArray( &cu_array, &channelDesc, width, height ));
cutilSafeCall( hipMemcpyToArray( cu_array, 0, 0, h_data, size, hipMemcpyHostToDevice));
// set texture parameters
tex.addressMode[0] = hipAddressModeWrap;
tex.addressMode[1] = hipAddressModeWrap;
tex.filterMode = hipFilterModeLinear;
tex.normalized = true; // access with normalized texture coordinates
// Bind the array to the texture
cutilSafeCall( hipBindTextureToArray( tex, cu_array, channelDesc));
dim3 dimBlock(8, 8, 1);
dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
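    // One thread per output pixel; width and height are assumed to be exact
    // multiples of the 8x8 block (the stock 512x512 lena_bw.pgm satisfies this).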
// warmup
hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock), 0 , 0, d_data, width, height, angle);
cutilSafeCall( cutilDeviceSynchronize() );
unsigned int timer = 0;
cutilCheckError( cutCreateTimer( &timer));
cutilCheckError( cutStartTimer( timer));
// execute the kernel
hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock), 0 , 0, d_data, width, height, angle);
// check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
cutilSafeCall( cutilDeviceSynchronize() );
cutilCheckError( cutStopTimer( timer));
printf("Processing time: %f (ms)\n", cutGetTimerValue( timer));
printf("%.2f Mpixels/sec\n", (width*height / (cutGetTimerValue( timer) / 1000.0f)) / 1e6);
cutilCheckError( cutDeleteTimer( timer));
// allocate mem for the result on host side
float* h_odata = (float*) malloc( size);
// copy result from device to host
cutilSafeCall( hipMemcpy( h_odata, d_data, size, hipMemcpyDeviceToHost) );
// write result to file
char output_filename[1024];
strcpy(output_filename, image_path);
strcpy(output_filename + strlen(image_path) - 4, "_out.pgm");
cutilCheckError( cutSavePGMf( output_filename, h_odata, width, height));
printf("Wrote '%s'\n", output_filename);
// write regression file if necessary
if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression"))
{
// write file for regression test
cutilCheckError( cutWriteFilef( "./data/regression.dat", h_odata, width*height, 0.0));
}
else
{
// We need to reload the data from disk, because it is inverted upon output
cutilCheckError( cutLoadPGMf(output_filename, &h_odata, &width, &height));
printf("Comparing files\n");
printf("\toutput: <%s>\n", output_filename);
printf("\treference: <%s>\n", ref_path);
bTestResult = cutComparefe( h_odata, h_data_ref, width*height, MIN_EPSILON_ERROR );
}
cutilSafeCall(hipFree(d_data));
cutilSafeCall(hipFreeArray(cu_array));
cutFree(image_path);
cutFree(ref_path);
cutilDeviceReset();
shrQAFinishExit(argc, (const char **)argv, (bTestResult ? QA_PASSED : QA_FAILED) );
}
| 690d159c242ca77180c3993cb33e55182ab4340a.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample demonstrates how use texture fetches in CUDA
*
* This sample takes an input PGM image (image_filename) and generates
* an output PGM image (image_filename_out). This CUDA kernel performs
* a simple 2D transform (rotation) on the texture coordinates (u,v).
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil_inline.h>
#include <shrQATest.h>
// includes, kernels
#include "simpleTexture_kernel.cu"
char *image_filename = "lena_bw.pgm";
char *ref_filename = "ref_rotated.pgm";
float angle = 0.5f; // angle to rotate image by (in radians)
#define MIN_EPSILON_ERROR 5e-3f
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
/*
int
main( int argc, char** argv)
{
runTest( argc, argv);
}
*/
#include <thrust/host_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <cstdlib>
#include <iostream>
#include <iterator>
// defines the function prototype
//#include "device.h"
//#include <device.cu>
void
runTestThrust( int argc, char** argv)
{
// generate 20 random numbers on the host
thrust::host_vector<int> h_vec(20);
//thrust::device_vector<float4> hj
thrust::generate(h_vec.begin(), h_vec.end(), rand);
// interface to CUDA code
//sort_on_device(h_vec);
// print sorted array
thrust::copy(h_vec.begin(), h_vec.end(), std::ostream_iterator<int>(std::cout, "\n"));
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
bool bTestResult = true;
shrQAStart(argc, argv);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
{
cutilDeviceInit(argc, argv);
}
else
{
cutilSafeCall( cudaSetDevice( cutGetMaxGflopsDeviceId() ) );
}
// load image from disk
float* h_data = NULL;
unsigned int width, height;
char* image_path = cutFindFilePath(image_filename, argv[0]);
if (image_path == NULL) {
printf("Unable to source image file: %s\n", image_filename);
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
}
cutilCheckError( cutLoadPGMf(image_path, &h_data, &width, &height));
unsigned int size = width * height * sizeof(float);
printf("Loaded '%s', %d x %d pixels\n", image_filename, width, height);
// load reference image from image (output)
float *h_data_ref = (float*) malloc(size);
char* ref_path = cutFindFilePath(ref_filename, argv[0]);
if (ref_path == NULL) {
printf("Unable to find reference image file: %s\n", ref_filename);
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
}
cutilCheckError( cutLoadPGMf(ref_path, &h_data_ref, &width, &height));
// allocate device memory for result
float* d_data = NULL;
cutilSafeCall( cudaMalloc( (void**) &d_data, size));
// allocate array and copy image data
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray* cu_array;
cutilSafeCall( cudaMallocArray( &cu_array, &channelDesc, width, height ));
cutilSafeCall( cudaMemcpyToArray( cu_array, 0, 0, h_data, size, cudaMemcpyHostToDevice));
// set texture parameters
tex.addressMode[0] = cudaAddressModeWrap;
tex.addressMode[1] = cudaAddressModeWrap;
tex.filterMode = cudaFilterModeLinear;
tex.normalized = true; // access with normalized texture coordinates
// Bind the array to the texture
cutilSafeCall( cudaBindTextureToArray( tex, cu_array, channelDesc));
dim3 dimBlock(8, 8, 1);
dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
// warmup
transformKernel<<< dimGrid, dimBlock, 0 >>>( d_data, width, height, angle);
cutilSafeCall( cutilDeviceSynchronize() );
unsigned int timer = 0;
cutilCheckError( cutCreateTimer( &timer));
cutilCheckError( cutStartTimer( timer));
// execute the kernel
transformKernel<<< dimGrid, dimBlock, 0 >>>( d_data, width, height, angle);
// check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
cutilSafeCall( cutilDeviceSynchronize() );
cutilCheckError( cutStopTimer( timer));
printf("Processing time: %f (ms)\n", cutGetTimerValue( timer));
printf("%.2f Mpixels/sec\n", (width*height / (cutGetTimerValue( timer) / 1000.0f)) / 1e6);
cutilCheckError( cutDeleteTimer( timer));
// allocate mem for the result on host side
float* h_odata = (float*) malloc( size);
// copy result from device to host
cutilSafeCall( cudaMemcpy( h_odata, d_data, size, cudaMemcpyDeviceToHost) );
// write result to file
char output_filename[1024];
strcpy(output_filename, image_path);
strcpy(output_filename + strlen(image_path) - 4, "_out.pgm");
cutilCheckError( cutSavePGMf( output_filename, h_odata, width, height));
printf("Wrote '%s'\n", output_filename);
// write regression file if necessary
if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression"))
{
// write file for regression test
cutilCheckError( cutWriteFilef( "./data/regression.dat", h_odata, width*height, 0.0));
}
else
{
// We need to reload the data from disk, because it is inverted upon output
cutilCheckError( cutLoadPGMf(output_filename, &h_odata, &width, &height));
printf("Comparing files\n");
printf("\toutput: <%s>\n", output_filename);
printf("\treference: <%s>\n", ref_path);
bTestResult = cutComparefe( h_odata, h_data_ref, width*height, MIN_EPSILON_ERROR );
}
cutilSafeCall(cudaFree(d_data));
cutilSafeCall(cudaFreeArray(cu_array));
cutFree(image_path);
cutFree(ref_path);
cutilDeviceReset();
shrQAFinishExit(argc, (const char **)argv, (bTestResult ? QA_PASSED : QA_FAILED) );
}
|
3bb3a7dde2093eb4aac34e5bebfe90d0bd615a6d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2016-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "RuleBookIterator.h"
// NTX must be >=2 so r is filled properly
template <typename T, Int NTX, Int NTY>
__global__ void MaxPooling_fp(T *input_features, T *output_features,
Int nPlanes, Int input_stride, Int output_stride,
Int *rules, Int nHot) {
__shared__ Int r[NTY * 2];
for (Int n = blockIdx.x * NTY; n < nHot; n += gridDim.x * NTY) {
{
Int i = threadIdx.x + NTX * threadIdx.y;
if (i < NTY * 2 && i < 2 * (nHot - n))
r[i] = rules[2 * n + i];
}
__syncthreads();
if (n + threadIdx.y < nHot) {
Int i = r[2 * threadIdx.y] * input_stride;
Int o = r[2 * threadIdx.y + 1] * output_stride;
for (Int plane = threadIdx.x; plane < nPlanes; plane += NTX) {
T inp = input_features[i + plane];
if (output_features[o + plane] < inp)
output_features[o + plane] = inp;
}
}
__syncthreads();
}
}
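// Illustrative note (not from the original source): as used above, the rule
// book presumably stores one (input row, output row) pair per active site,
// rules[2*n] being the row of the input feature matrix and rules[2*n + 1] the
// row of the output feature matrix, with input_stride/output_stride turning a
// row index into an offset into the flat feature arrays. Each block stages NTY
// such pairs in shared memory (r) and then reduces nPlanes channels with a
// running maximum.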
template <typename T>
void cuda_MaxPooling_ForwardPass(T *input_features, T *output_features,
Int nPlanes, Int input_stride,
Int output_stride, RuleBook _rules) {
RULEBOOKITERATOR((hipLaunchKernelGGL(HIP_KERNEL_NAME(MaxPooling_fp<T, 32, 32>), dim3(32), dim3(32, 32), 0, 0,
input_features, output_features, nPlanes, input_stride, output_stride,
rbB, nHotB));
, )
}
template <typename T, Int NTX, Int NTY>
__global__ void MaxPooling_bp(T *input_features, T *d_input_features,
T *output_features, T *d_output_features,
Int nPlanes, Int input_stride, Int output_stride,
Int *rules, Int nHot) {
__shared__ Int r[NTY * 2];
for (Int n = blockIdx.x * NTY; n < nHot; n += gridDim.x * NTY) {
{
Int i = threadIdx.x + NTX * threadIdx.y;
if (i < NTY * 2 && i < 2 * (nHot - n))
r[i] = rules[2 * n + i];
}
__syncthreads();
if (n + threadIdx.y < nHot) {
Int i = r[2 * threadIdx.y] * input_stride;
Int o = r[2 * threadIdx.y + 1] * output_stride;
for (Int plane = threadIdx.x; plane < nPlanes; plane += NTX)
if (output_features[o + plane] == input_features[i + plane])
d_input_features[i + plane] += d_output_features[o + plane];
}
__syncthreads();
}
}
template <typename T>
void cuda_MaxPooling_BackwardPass(T *input_features, T *d_input_features,
T *output_features, T *d_output_features,
Int nPlanes, Int input_stride,
Int output_stride, RuleBook _rules) {
RULEBOOKITERATOR((hipLaunchKernelGGL(HIP_KERNEL_NAME(MaxPooling_bp<T, 32, 32>), dim3(32), dim3(32, 32), 0, 0,
input_features, d_input_features, output_features, d_output_features,
nPlanes, input_stride, output_stride, rbB, nHotB));
, )
}
| 3bb3a7dde2093eb4aac34e5bebfe90d0bd615a6d.cu | // Copyright 2016-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include "RuleBookIterator.h"
// NTX must be >=2 so r is filled properly
template <typename T, Int NTX, Int NTY>
__global__ void MaxPooling_fp(T *input_features, T *output_features,
Int nPlanes, Int input_stride, Int output_stride,
Int *rules, Int nHot) {
__shared__ Int r[NTY * 2];
for (Int n = blockIdx.x * NTY; n < nHot; n += gridDim.x * NTY) {
{
Int i = threadIdx.x + NTX * threadIdx.y;
if (i < NTY * 2 && i < 2 * (nHot - n))
r[i] = rules[2 * n + i];
}
__syncthreads();
if (n + threadIdx.y < nHot) {
Int i = r[2 * threadIdx.y] * input_stride;
Int o = r[2 * threadIdx.y + 1] * output_stride;
for (Int plane = threadIdx.x; plane < nPlanes; plane += NTX) {
T inp = input_features[i + plane];
if (output_features[o + plane] < inp)
output_features[o + plane] = inp;
}
}
__syncthreads();
}
}
template <typename T>
void cuda_MaxPooling_ForwardPass(T *input_features, T *output_features,
Int nPlanes, Int input_stride,
Int output_stride, RuleBook _rules) {
RULEBOOKITERATOR((MaxPooling_fp<T, 32, 32><<<32, dim3(32, 32)>>>(
input_features, output_features, nPlanes, input_stride, output_stride,
rbB, nHotB));
, )
}
template <typename T, Int NTX, Int NTY>
__global__ void MaxPooling_bp(T *input_features, T *d_input_features,
T *output_features, T *d_output_features,
Int nPlanes, Int input_stride, Int output_stride,
Int *rules, Int nHot) {
__shared__ Int r[NTY * 2];
for (Int n = blockIdx.x * NTY; n < nHot; n += gridDim.x * NTY) {
{
Int i = threadIdx.x + NTX * threadIdx.y;
if (i < NTY * 2 && i < 2 * (nHot - n))
r[i] = rules[2 * n + i];
}
__syncthreads();
if (n + threadIdx.y < nHot) {
Int i = r[2 * threadIdx.y] * input_stride;
Int o = r[2 * threadIdx.y + 1] * output_stride;
for (Int plane = threadIdx.x; plane < nPlanes; plane += NTX)
if (output_features[o + plane] == input_features[i + plane])
d_input_features[i + plane] += d_output_features[o + plane];
}
__syncthreads();
}
}
template <typename T>
void cuda_MaxPooling_BackwardPass(T *input_features, T *d_input_features,
T *output_features, T *d_output_features,
Int nPlanes, Int input_stride,
Int output_stride, RuleBook _rules) {
RULEBOOKITERATOR((MaxPooling_bp<T, 32, 32><<<32, dim3(32, 32)>>>(
input_features, d_input_features, output_features, d_output_features,
nPlanes, input_stride, output_stride, rbB, nHotB));
, )
}
|
a15b5cdb988b28e9cf1b158660a0b8130225aed0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "depthSR_kernel.cuh"
__global__ void memcpy_float(int width, float* dst, float* src)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
dst[y * width + x] = src[y * width + x];
}
__global__ void extractCorrelation_kernel(ContextDepthSR *context, uint8_t *LR_GrayBorder, float *LR_DepthBorder, float *GD_correlation)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int pixel = y + x * blockDim.y * gridDim.y;
int width = context->width;
int height = context->height;
int scale_w = context->scale_w;
int scale_h = context->scale_h;
int s_width = width / scale_w;
int s_height = height / scale_h;
int win_size = 3; //3x3 windows
float sumPixG = 0.0, sumPixG2 = 0.0;
float sumPixD = 0.0, sumPixD2 = 0.0;
float sumPixGD = 0.0;
for (int m = -win_size / 2; m <= win_size / 2; m++){
int i = x + 1 + m;
i = (i > 0 ? (i < s_height + 2 ? i : s_height + 1) : 0); //make sure the index (x+m, y+n) is located in the image.
for (int n = -win_size / 2; n <= win_size / 2; n++){
int j = y + 1 + n;
j = (j > 0 ? (j < s_width + 2 ? j : s_width + 1) : 0);
uint8_t a1 = LR_GrayBorder[i * (s_width + 2) + j];
float a2 = LR_DepthBorder[i * (s_width + 2) + j];
sumPixG += a1;
sumPixD += a2;
sumPixG2 += (a1 * a1);
sumPixD2 += (a2 * a2);
sumPixGD += (a1 * a2);
}//end for n
}//end for m
float meanPixG = sumPixG / (win_size * win_size);//EX
float meanPixD = sumPixD / (win_size * win_size);
float meanPixG2 = sumPixG2 / (win_size * win_size); //E(X^2)
float meanPixD2 = sumPixD2 / (win_size * win_size);
float meanPixGD = sumPixGD / (win_size * win_size);
float CA = meanPixGD - meanPixG * meanPixD;
float variancePixG = meanPixG2 - meanPixG * meanPixG;
float variancePixD = meanPixD2 - meanPixD * meanPixD;
CA /= sqrt(variancePixG * variancePixD);
GD_correlation[pixel] = CA;
}
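// Illustrative restatement of the value computed above (not in the original
// source): GD_correlation holds the Pearson correlation of gray and depth over
// the 3x3 window,
//
// CA = ( E[G*D] - E[G]*E[D] ) / sqrt( (E[G^2] - E[G]^2) * (E[D^2] - E[D]^2) )
//
// where the expectations are plain averages over the win_size x win_size
// neighbourhood read from the padded (border) images.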
ContextDepthSR* create_context(ContextDepthSR* context)
{
ContextDepthSR *cu_context;
hipMallocManaged((void**)&cu_context, sizeof(ContextDepthSR));
hipMemcpy(&cu_context->width, &context->width, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&cu_context->height, &context->height, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&cu_context->scale_w, &context->scale_w, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&cu_context->scale_h, &context->scale_h, sizeof(int), hipMemcpyHostToDevice);
return cu_context;
}
void extractCorrelation(ContextDepthSR *context, uint8_t *LR_Gray, float *LR_Depth, float *GD_correlation)
{
int width = context->width;
int height = context->height;
int scale_w = context->scale_w;
int scale_h = context->scale_h;
int s_width = width / scale_w;
int s_height = height / scale_h;
//get the padding border image
int border = 2;
size_t size = (s_width + border) * (s_height + border);
uint8_t *LR_GrayBorder = (uint8_t*)calloc(size, sizeof(uint8_t));
float *LR_DepthBorder = (float*)calloc(size, sizeof(float));
for(int i = 1; i < s_height + 1; i++){
for(int j = 1; j < s_width + 1; j++){
int pixel_b = i * (s_width + border) + j;
int pixel = (i-1) * s_width + (j-1);
LR_GrayBorder[pixel_b] = LR_Gray[pixel];
LR_DepthBorder[pixel_b] = LR_Depth[pixel];
}
}//CENTER
for(int i = 1; i < s_width + 1; i++){
LR_GrayBorder[i] = LR_Gray[i - 1];
LR_DepthBorder[i] = LR_Depth[i - 1]; //TOP
int pixel_b = (s_width + border) * (s_height + 1) + i;
int pixel = s_width * (s_height - 1) + i - 1;
LR_GrayBorder[pixel_b] = LR_Gray[pixel];
LR_DepthBorder[pixel_b] = LR_Depth[pixel];//BOTTOM
}
for(int j = 1; j < s_height + 1; j++){
LR_GrayBorder[j * (s_width + border)] = LR_GrayBorder[j * (s_width + border) + 1];
LR_DepthBorder[j * (s_width + border)] = LR_DepthBorder[j * (s_width + border) + 1];//LEFT
LR_GrayBorder[(j + 1) * (s_width + border) - 1] = LR_GrayBorder[(j + 1) * (s_width + border) - 2];
LR_DepthBorder[(j + 1) * (s_width + border) - 1] = LR_DepthBorder[(j + 1) * (s_width + border) - 2];//RIGHT
}
//the four corner value
int pixel_corner;
LR_GrayBorder[0] = (LR_GrayBorder[1] + LR_GrayBorder[(s_width + border)]) / 2;
LR_DepthBorder[0] = (LR_DepthBorder[1] + LR_DepthBorder[(s_width + border)]) / 2;
pixel_corner = (s_width + border) - 1;
LR_GrayBorder[pixel_corner] = (LR_GrayBorder[(pixel_corner - 1)] + LR_GrayBorder[(pixel_corner + s_width + border)]) / 2;
LR_DepthBorder[pixel_corner] = (LR_DepthBorder[(pixel_corner - 1)] + LR_DepthBorder[(pixel_corner + s_width + border)]) / 2;
pixel_corner = ((s_height + border) - 1) * (s_width + border);
LR_GrayBorder[pixel_corner] = (LR_GrayBorder[(pixel_corner + 1)] + LR_GrayBorder[(pixel_corner - s_width - border)]) / 2;
LR_DepthBorder[pixel_corner] = (LR_DepthBorder[(pixel_corner + 1)] + LR_DepthBorder[(pixel_corner - s_width - border)]) / 2;
pixel_corner = (s_height + border) * (s_width + border) - 1;
LR_GrayBorder[pixel_corner] = (LR_GrayBorder[(pixel_corner - 1)] + LR_GrayBorder[(pixel_corner - s_width - border)]) / 2;
LR_DepthBorder[pixel_corner] = (LR_DepthBorder[(pixel_corner - 1)] + LR_DepthBorder[(pixel_corner - s_width - border)]) / 2;
dim3 blocks(s_height / THREADS_PER_BLOCK, s_width / ThreadsPerBlock);
dim3 threads(THREADS_PER_BLOCK, ThreadsPerBlock);
uint8_t* cu_LR_GrayBorder;
float* cu_LR_DepthBorder;
float* cu_GD_correlation;
hipMalloc((void**)&cu_LR_GrayBorder, (s_height + border) * (s_width + border) * sizeof(uint8_t));
hipMalloc((void**)&cu_LR_DepthBorder, (s_height + border) * (s_width + border) * sizeof(float));
hipMemcpy(cu_LR_GrayBorder, LR_GrayBorder, (s_height + border) * (s_width + border) * sizeof(uint8_t), hipMemcpyHostToDevice);
hipMemcpy(cu_LR_DepthBorder, LR_DepthBorder, (s_height + border) * (s_width + border) * sizeof(float), hipMemcpyHostToDevice);
hipMallocManaged((void**)&cu_GD_correlation, s_width * s_height * sizeof(float));
hipLaunchKernelGGL(( extractCorrelation_kernel), dim3(blocks), dim3(threads), 0, 0, context, cu_LR_GrayBorder, cu_LR_DepthBorder, cu_GD_correlation);
//memcpy_float<<<blocks, threads>>>(s_width, gray_variance, cu_gray_variance);
//memcpy_float<<<blocks, threads>>>(s_width, depth_variance, cu_depth_variance);
hipMemcpy(GD_correlation, cu_GD_correlation, s_width * s_height * sizeof(float), hipMemcpyDeviceToHost);
//memcpy(GD_correlation, cu_GD_correlation, s_width * s_height * sizeof(float));
hipFree(cu_LR_GrayBorder);
hipFree(cu_LR_DepthBorder);
hipFree(cu_GD_correlation);
} | a15b5cdb988b28e9cf1b158660a0b8130225aed0.cu | #include "depthSR_kernel.cuh"
__global__ void memcpy_float(int width, float* dst, float* src)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
dst[y * width + x] = src[y * width + x];
}
__global__ void extractCorrelation_kernel(ContextDepthSR *context, uint8_t *LR_GrayBorder, float *LR_DepthBorder, float *GD_correlation)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int pixel = y + x * blockDim.y * gridDim.y;
int width = context->width;
int height = context->height;
int scale_w = context->scale_w;
int scale_h = context->scale_h;
int s_width = width / scale_w;
int s_height = height / scale_h;
int win_size = 3; //3x3 windows
float sumPixG = 0.0, sumPixG2 = 0.0;
float sumPixD = 0.0, sumPixD2 = 0.0;
float sumPixGD = 0.0;
for (int m = -win_size / 2; m <= win_size / 2; m++){
int i = x + 1 + m;
i = (i > 0 ? (i < s_height + 2 ? i : s_height + 1) : 0); //make sure the index (x+m, y+n) is located in the image.
for (int n = -win_size / 2; n <= win_size / 2; n++){
int j = y + 1 + n;
j = (j > 0 ? (j < s_width + 2 ? j : s_width + 1) : 0);
uint8_t a1 = LR_GrayBorder[i * (s_width + 2) + j];
float a2 = LR_DepthBorder[i * (s_width + 2) + j];
sumPixG += a1;
sumPixD += a2;
sumPixG2 += (a1 * a1);
sumPixD2 += (a2 * a2);
sumPixGD += (a1 * a2);
}//end for n
}//end for m
float meanPixG = sumPixG / (win_size * win_size);//EX
float meanPixD = sumPixD / (win_size * win_size);
float meanPixG2 = sumPixG2 / (win_size * win_size); //E(X^2)
float meanPixD2 = sumPixD2 / (win_size * win_size);
float meanPixGD = sumPixGD / (win_size * win_size);
float CA = meanPixGD - meanPixG * meanPixD;
float variancePixG = meanPixG2 - meanPixG * meanPixG;
float variancePixD = meanPixD2 - meanPixD * meanPixD;
CA /= sqrt(variancePixG * variancePixD);
GD_correlation[pixel] = CA;
}
ContextDepthSR* create_context(ContextDepthSR* context)
{
ContextDepthSR *cu_context;
cudaMallocManaged((void**)&cu_context, sizeof(ContextDepthSR));
cudaMemcpy(&cu_context->width, &context->width, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&cu_context->height, &context->height, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&cu_context->scale_w, &context->scale_w, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&cu_context->scale_h, &context->scale_h, sizeof(int), cudaMemcpyHostToDevice);
return cu_context;
}
void extractCorrelation(ContextDepthSR *context, uint8_t *LR_Gray, float *LR_Depth, float *GD_correlation)
{
int width = context->width;
int height = context->height;
int scale_w = context->scale_w;
int scale_h = context->scale_h;
int s_width = width / scale_w;
int s_height = height / scale_h;
//get the padding border image
int border = 2;
size_t size = (s_width + border) * (s_height + border);
uint8_t *LR_GrayBorder = (uint8_t*)calloc(size, sizeof(uint8_t));
float *LR_DepthBorder = (float*)calloc(size, sizeof(float));
for(int i = 1; i < s_height + 1; i++){
for(int j = 1; j < s_width + 1; j++){
int pixel_b = i * (s_width + border) + j;
int pixel = (i-1) * s_width + (j-1);
LR_GrayBorder[pixel_b] = LR_Gray[pixel];
LR_DepthBorder[pixel_b] = LR_Depth[pixel];
}
}//CENTER
for(int i = 1; i < s_width + 1; i++){
LR_GrayBorder[i] = LR_Gray[i - 1];
LR_DepthBorder[i] = LR_Depth[i - 1]; //TOP
int pixel_b = (s_width + border) * (s_height + 1) + i;
int pixel = s_width * (s_height - 1) + i - 1;
LR_GrayBorder[pixel_b] = LR_Gray[pixel];
LR_DepthBorder[pixel_b] = LR_Depth[pixel];//BOTTOM
}
for(int j = 1; j < s_height + 1; j++){
LR_GrayBorder[j * (s_width + border)] = LR_GrayBorder[j * (s_width + border) + 1];
LR_DepthBorder[j * (s_width + border)] = LR_DepthBorder[j * (s_width + border) + 1];//LEFT
LR_GrayBorder[(j + 1) * (s_width + border) - 1] = LR_GrayBorder[(j + 1) * (s_width + border) - 2];
LR_DepthBorder[(j + 1) * (s_width + border) - 1] = LR_DepthBorder[(j + 1) * (s_width + border) - 2];//RIGHT
}
//the four corner value
int pixel_corner;
LR_GrayBorder[0] = (LR_GrayBorder[1] + LR_GrayBorder[(s_width + border)]) / 2;
LR_DepthBorder[0] = (LR_DepthBorder[1] + LR_DepthBorder[(s_width + border)]) / 2;
pixel_corner = (s_width + border) - 1;
LR_GrayBorder[pixel_corner] = (LR_GrayBorder[(pixel_corner - 1)] + LR_GrayBorder[(pixel_corner + s_width + border)]) / 2;
LR_DepthBorder[pixel_corner] = (LR_DepthBorder[(pixel_corner - 1)] + LR_DepthBorder[(pixel_corner + s_width + border)]) / 2;
pixel_corner = ((s_height + border) - 1) * (s_width + border);
LR_GrayBorder[pixel_corner] = (LR_GrayBorder[(pixel_corner + 1)] + LR_GrayBorder[(pixel_corner - s_width - border)]) / 2;
LR_DepthBorder[pixel_corner] = (LR_DepthBorder[(pixel_corner + 1)] + LR_DepthBorder[(pixel_corner - s_width - border)]) / 2;
pixel_corner = (s_height + border) * (s_width + border) - 1;
LR_GrayBorder[pixel_corner] = (LR_GrayBorder[(pixel_corner - 1)] + LR_GrayBorder[(pixel_corner - s_width - border)]) / 2;
LR_DepthBorder[pixel_corner] = (LR_DepthBorder[(pixel_corner - 1)] + LR_DepthBorder[(pixel_corner - s_width - border)]) / 2;
dim3 blocks(s_height / THREADS_PER_BLOCK, s_width / ThreadsPerBlock);
dim3 threads(THREADS_PER_BLOCK, ThreadsPerBlock);
uint8_t* cu_LR_GrayBorder;
float* cu_LR_DepthBorder;
float* cu_GD_correlation;
cudaMalloc((void**)&cu_LR_GrayBorder, (s_height + border) * (s_width + border) * sizeof(uint8_t));
cudaMalloc((void**)&cu_LR_DepthBorder, (s_height + border) * (s_width + border) * sizeof(float));
cudaMemcpy(cu_LR_GrayBorder, LR_GrayBorder, (s_height + border) * (s_width + border) * sizeof(uint8_t), cudaMemcpyHostToDevice);
cudaMemcpy(cu_LR_DepthBorder, LR_DepthBorder, (s_height + border) * (s_width + border) * sizeof(float), cudaMemcpyHostToDevice);
cudaMallocManaged((void**)&cu_GD_correlation, s_width * s_height * sizeof(float));
extractCorrelation_kernel<<<blocks, threads>>>(context, cu_LR_GrayBorder, cu_LR_DepthBorder, cu_GD_correlation);
//memcpy_float<<<blocks, threads>>>(s_width, gray_variance, cu_gray_variance);
//memcpy_float<<<blocks, threads>>>(s_width, depth_variance, cu_depth_variance);
cudaMemcpy(GD_correlation, cu_GD_correlation, s_width * s_height * sizeof(float), cudaMemcpyDeviceToHost);
//memcpy(GD_correlation, cu_GD_correlation, s_width * s_height * sizeof(float));
cudaFree(cu_LR_GrayBorder);
cudaFree(cu_LR_DepthBorder);
cudaFree(cu_GD_correlation);
} |
c382e1b42577441a93b7bc2636b4bf81f8fcd001.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/cuda_utils.h"
namespace anakin {
namespace saber {
template<typename Dtype>
__global__ void trans_map2in(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[tid] = input[map[seq] * lastdim + tid % lastdim];
// printf("in %d = %f\n",tid,output[tid]);
}
}
template<typename Dtype>
__global__ void trans_map2out(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[map[seq] * lastdim + tid % lastdim] = input[tid];
// printf("out %d = %f\n",map[seq]*lastdim + tid % lastdim,output[map[seq]*lastdim + tid % lastdim]);
}
}
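// Worked example (illustrative only): with map = {2, 0, 1} and lastdim = 4,
// trans_map2out scatters input row 0 to output row 2, row 1 to row 0 and
// row 2 to row 1, i.e. output[map[seq]][k] = input[seq][k] for k < lastdim;
// trans_map2in above is the matching gather, output[seq][k] = input[map[seq]][k].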
template<typename Dtype>
void trans_map2out_cfunc(const Dtype* input, Dtype* output, int word_size, int seq_sum,
hipStream_t stream,
int* dev_map_vec) {
int count = seq_sum * word_size;
int block_dim = count;
int grid_dim = 1;
if (count > 1024) {
block_dim = 256;
grid_dim = (count + block_dim - 1) / block_dim;
}
trans_map2out << < grid_dim, block_dim, 0, stream >> > (output, input, dev_map_vec,
count, word_size);
}
template<typename Dtype>
void trans_map2in_cfunc(const Dtype* input, Dtype* output, int hidden_size, int seq_sum,
hipStream_t stream,
int* dev_map_vec) {
int count = seq_sum * hidden_size;
int block_dim = count;
int grid_dim = 1;
if (count > 1024) {
block_dim = 256;
grid_dim = (count + block_dim - 1) / block_dim;
}
trans_map2in << < grid_dim, block_dim, 0, stream >> > (output, input, dev_map_vec,
count, hidden_size);
}
template void trans_map2in_cfunc<float>(const float* input, float* output, int hidden_size, int seq_sum,
hipStream_t stream,
int* dev_map_vec);
template void trans_map2out_cfunc<float>(const float* input, float* output, int word_size, int seq_sum,
hipStream_t stream,
int* dev_map_vec);
template <typename Dtype>
__global__ void sub_tensor(const Dtype* in, Dtype* out, int h, int w, int stride_w) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= h * w) {
return;
}
int h_id = tid / w;
int w_id = tid % w;
out[w_id * h + h_id] = in[h_id * stride_w + w_id];
}
template <typename Dtype>
void get_sub_tensor(const Dtype* in, Dtype* out, int h, int w, int stride_w, hipStream_t stream) {
int num_threads = h * w;
hipLaunchKernelGGL(( sub_tensor), dim3(CUDA_GET_BLOCKS(num_threads)), dim3(CUDA_NUM_THREADS), 0, stream, in, out, h, w, stride_w);
}
template void get_sub_tensor(const float* in, float* out, int h, int w, int stride_w, hipStream_t stream);
}
} | c382e1b42577441a93b7bc2636b4bf81f8fcd001.cu | #include "saber/funcs/impl/cuda/cuda_utils.h"
namespace anakin {
namespace saber {
template<typename Dtype>
__global__ void trans_map2in(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[tid] = input[map[seq] * lastdim + tid % lastdim];
// printf("in %d = %f\n",tid,output[tid]);
}
}
template<typename Dtype>
__global__ void trans_map2out(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[map[seq] * lastdim + tid % lastdim] = input[tid];
// printf("out %d = %f\n",map[seq]*lastdim + tid % lastdim,output[map[seq]*lastdim + tid % lastdim]);
}
}
template<typename Dtype>
void trans_map2out_cfunc(const Dtype* input, Dtype* output, int word_size, int seq_sum,
cudaStream_t stream,
int* dev_map_vec) {
int count = seq_sum * word_size;
int block_dim = count;
int grid_dim = 1;
if (count > 1024) {
block_dim = 256;
grid_dim = (count + block_dim - 1) / block_dim;
}
trans_map2out << < grid_dim, block_dim, 0, stream >> > (output, input, dev_map_vec,
count, word_size);
}
template<typename Dtype>
void trans_map2in_cfunc(const Dtype* input, Dtype* output, int hidden_size, int seq_sum,
cudaStream_t stream,
int* dev_map_vec) {
int count = seq_sum * hidden_size;
int block_dim = count;
int grid_dim = 1;
if (count > 1024) {
block_dim = 256;
grid_dim = (count + block_dim - 1) / block_dim;
}
trans_map2in << < grid_dim, block_dim, 0, stream >> > (output, input, dev_map_vec,
count, hidden_size);
}
template void trans_map2in_cfunc<float>(const float* input, float* output, int hidden_size, int seq_sum,
cudaStream_t stream,
int* dev_map_vec);
template void trans_map2out_cfunc<float>(const float* input, float* output, int word_size, int seq_sum,
cudaStream_t stream,
int* dev_map_vec);
template <typename Dtype>
__global__ void sub_tensor(const Dtype* in, Dtype* out, int h, int w, int stride_w) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= h * w) {
return;
}
int h_id = tid / w;
int w_id = tid % w;
out[w_id * h + h_id] = in[h_id * stride_w + w_id];
}
template <typename Dtype>
void get_sub_tensor(const Dtype* in, Dtype* out, int h, int w, int stride_w, cudaStream_t stream) {
int num_threads = h * w;
sub_tensor<<<CUDA_GET_BLOCKS(num_threads), CUDA_NUM_THREADS, 0, stream>>>(in, out, h, w, stride_w);
}
template void get_sub_tensor(const float* in, float* out, int h, int w, int stride_w, cudaStream_t stream);
}
} |
a27731edc95a49449945238b7341a75b63204cfa.hip | // !!! This is a file automatically generated by hipify!!!
#include "DalitzPlotPdf.hh"
#include <complex>
using std::complex;
const int resonanceOffset_DP = 4; // Offset of the first resonance into the parameter index array
// Offset is number of parameters, constant index, number of resonances (not calculable
// from nP because we don't know what the efficiency might need), and cache index. Efficiency
// parameters are after the resonance information.
// The function of this array is to hold all the cached waves; specific
// waves are recalculated when the corresponding resonance mass or width
// changes. Note that in a multithread environment each thread needs its
// own cache, hence the '10'. Ten threads should be enough for anyone!
MEM_DEVICE devcomplex<fptype>* cResonances[10];
EXEC_TARGET inline int parIndexFromResIndex_DP (int resIndex) {
return resonanceOffset_DP + resIndex*resonanceSize;
}
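// Worked example of this indexing (illustrative; resonanceSize is defined
// elsewhere in GooFit and, consistent with the four entries pushed per
// resonance in the constructor below, is assumed to be 4):
//   res_i = 0 -> parameter index 4: indices[4..7] hold amp_real, amp_imag,
//   the resonance function index and its parameter index;
//   res_i = 2 -> parameter index 4 + 2*4 = 12: indices[12..15] for the third.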
EXEC_TARGET devcomplex<fptype> device_DalitzPlot_calcIntegrals (fptype m12, fptype m13, int res_i, int res_j, fptype* p, unsigned int* indices) {
// Calculates BW_i(m12, m13) * BW_j^*(m12, m13).
// This calculation is in a separate function so
// it can be cached. Note that this function expects
// to be called on a normalisation grid, not on
// observed points, that's why it doesn't use
// cResonances. No need to cache the values at individual
// grid points - we only care about totals.
fptype motherMass = functorConstants[indices[1] + 0];
fptype daug1Mass = functorConstants[indices[1] + 1];
fptype daug2Mass = functorConstants[indices[1] + 2];
fptype daug3Mass = functorConstants[indices[1] + 3];
devcomplex<fptype> ret;
if (!inDalitz(m12, m13, motherMass, daug1Mass, daug2Mass, daug3Mass)) return ret;
fptype m23 = motherMass*motherMass + daug1Mass*daug1Mass + daug2Mass*daug2Mass + daug3Mass*daug3Mass - m12 - m13;
int parameter_i = parIndexFromResIndex_DP(res_i);
unsigned int functn_i = indices[parameter_i+2];
unsigned int params_i = indices[parameter_i+3];
ret = getResonanceAmplitude(m12, m13, m23, functn_i, params_i);
int parameter_j = parIndexFromResIndex_DP(res_j);
unsigned int functn_j = indices[parameter_j+2];
unsigned int params_j = indices[parameter_j+3];
ret *= conj(getResonanceAmplitude(m12, m13, m23, functn_j, params_j));
return ret;
}
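// Illustrative summary (not in the original source) of what these cached
// products feed into: normalise() below assembles
//
//   N = sum_i sum_j a_i * conj(a_j) * Integral[ BW_i(m12,m13) * conj(BW_j(m12,m13)) * eff(m12,m13) dm12 dm13 ]
//
// where a_i are the complex resonance amplitudes; the integral is approximated
// by a sum over the normalisation grid scaled by the bin area (binSizeFactor).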
EXEC_TARGET fptype device_DalitzPlot (fptype* evt, fptype* p, unsigned int* indices) {
fptype motherMass = functorConstants[indices[1] + 0];
fptype daug1Mass = functorConstants[indices[1] + 1];
fptype daug2Mass = functorConstants[indices[1] + 2];
fptype daug3Mass = functorConstants[indices[1] + 3];
fptype m12 = evt[indices[2 + indices[0]]];
fptype m13 = evt[indices[3 + indices[0]]];
if (!inDalitz(m12, m13, motherMass, daug1Mass, daug2Mass, daug3Mass)) return 0;
int evtNum = (int) FLOOR(0.5 + evt[indices[4 + indices[0]]]);
devcomplex<fptype> totalAmp(0, 0);
unsigned int numResonances = indices[2];
unsigned int cacheToUse = indices[3];
for (int i = 0; i < numResonances; ++i) {
int paramIndex = parIndexFromResIndex_DP(i);
fptype amp_real = p[indices[paramIndex+0]];
fptype amp_imag = p[indices[paramIndex+1]];
devcomplex<fptype> matrixelement((cResonances[cacheToUse][evtNum*numResonances + i]).real,
(cResonances[cacheToUse][evtNum*numResonances + i]).imag);
matrixelement.multiply(amp_real, amp_imag);
totalAmp += matrixelement;
}
fptype ret = norm2(totalAmp);
int effFunctionIdx = parIndexFromResIndex_DP(numResonances);
fptype eff = callFunction(evt, indices[effFunctionIdx], indices[effFunctionIdx + 1]);
ret *= eff;
//printf("DalitzPlot evt %i zero: %i %i %f (%f, %f).\n", evtNum, numResonances, effFunctionIdx, eff, totalAmp.real, totalAmp.imag);
return ret;
}
MEM_DEVICE device_function_ptr ptr_to_DalitzPlot = device_DalitzPlot;
__host__ DalitzPlotPdf::DalitzPlotPdf (std::string n,
Variable* m12,
Variable* m13,
Variable* eventNumber,
DecayInfo* decay,
GooPdf* efficiency)
: GooPdf(0, n)
, decayInfo(decay)
, _m12(m12)
, _m13(m13)
, dalitzNormRange(0)
, cachedWaves(0)
, integrals(0)
, forceRedoIntegrals(true)
, totalEventSize(3) // Default 3 = m12, m13, evtNum
, cacheToUse(0)
, integrators(0)
, calculators(0)
{
registerObservable(_m12);
registerObservable(_m13);
registerObservable(eventNumber);
fptype decayConstants[5];
std::vector<unsigned int> pindices;
pindices.push_back(registerConstants(5));
decayConstants[0] = decayInfo->motherMass;
decayConstants[1] = decayInfo->daug1Mass;
decayConstants[2] = decayInfo->daug2Mass;
decayConstants[3] = decayInfo->daug3Mass;
decayConstants[4] = decayInfo->meson_radius;
MEMCPY_TO_SYMBOL(functorConstants, decayConstants, 5*sizeof(fptype), cIndex*sizeof(fptype), hipMemcpyHostToDevice);
pindices.push_back(decayInfo->resonances.size());
static int cacheCount = 0;
cacheToUse = cacheCount++;
pindices.push_back(cacheToUse);
for (std::vector<ResonancePdf*>::iterator res = decayInfo->resonances.begin(); res != decayInfo->resonances.end(); ++res) {
pindices.push_back(registerParameter((*res)->amp_real));
pindices.push_back(registerParameter((*res)->amp_imag));
pindices.push_back((*res)->getFunctionIndex());
pindices.push_back((*res)->getParameterIndex());
(*res)->setConstantIndex(cIndex);
components.push_back(*res);
}
pindices.push_back(efficiency->getFunctionIndex());
pindices.push_back(efficiency->getParameterIndex());
components.push_back(efficiency);
GET_FUNCTION_ADDR(ptr_to_DalitzPlot);
initialise(pindices);
redoIntegral = new bool[decayInfo->resonances.size()];
cachedMasses = new fptype[decayInfo->resonances.size()];
cachedWidths = new fptype[decayInfo->resonances.size()];
integrals = new devcomplex<fptype>**[decayInfo->resonances.size()];
integrators = new SpecialResonanceIntegrator**[decayInfo->resonances.size()];
calculators = new SpecialResonanceCalculator*[decayInfo->resonances.size()];
for (int i = 0; i < decayInfo->resonances.size(); ++i) {
redoIntegral[i] = true;
cachedMasses[i] = -1;
cachedWidths[i] = -1;
integrators[i] = new SpecialResonanceIntegrator*[decayInfo->resonances.size()];
calculators[i] = new SpecialResonanceCalculator(parameters, i);
integrals[i] = new devcomplex<fptype>*[decayInfo->resonances.size()];
for (int j = 0; j < decayInfo->resonances.size(); ++j) {
integrals[i][j] = new devcomplex<fptype>(0, 0);
integrators[i][j] = new SpecialResonanceIntegrator(parameters, i, j);
}
}
addSpecialMask(PdfBase::ForceSeparateNorm);
}
__host__ void DalitzPlotPdf::setDataSize (unsigned int dataSize, unsigned int evtSize) {
// Default 3 is m12, m13, evtNum
totalEventSize = evtSize;
assert(totalEventSize >= 3);
if (cachedWaves) delete cachedWaves;
numEntries = dataSize;
cachedWaves = new DEVICE_VECTOR<devcomplex<fptype> >(dataSize*decayInfo->resonances.size());
void* dummy = thrust::raw_pointer_cast(cachedWaves->data());
MEMCPY_TO_SYMBOL(cResonances, &dummy, sizeof(devcomplex<fptype>*), cacheToUse*sizeof(devcomplex<fptype>*), hipMemcpyHostToDevice);
setForceIntegrals();
}
__host__ fptype DalitzPlotPdf::normalise () const {
recursiveSetNormalisation(1); // Not going to normalise efficiency,
// so set normalisation factor to 1 so it doesn't get multiplied by zero.
// Copy at this time to ensure that the SpecialResonanceCalculators, which need the efficiency,
// don't get zeroes through multiplying by the normFactor.
MEMCPY_TO_SYMBOL(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, hipMemcpyHostToDevice);
int totalBins = _m12->numbins * _m13->numbins;
if (!dalitzNormRange) {
gooMalloc((void**) &dalitzNormRange, 6*sizeof(fptype));
fptype* host_norms = new fptype[6];
host_norms[0] = _m12->lowerlimit;
host_norms[1] = _m12->upperlimit;
host_norms[2] = _m12->numbins;
host_norms[3] = _m13->lowerlimit;
host_norms[4] = _m13->upperlimit;
host_norms[5] = _m13->numbins;
MEMCPY(dalitzNormRange, host_norms, 6*sizeof(fptype), hipMemcpyHostToDevice);
delete[] host_norms;
}
for (unsigned int i = 0; i < decayInfo->resonances.size(); ++i) {
redoIntegral[i] = forceRedoIntegrals;
if (!(decayInfo->resonances[i]->parametersChanged())) continue;
redoIntegral[i] = true;
decayInfo->resonances[i]->storeParameters();
}
forceRedoIntegrals = false;
// Only do this bit if masses or widths have changed.
thrust::constant_iterator<fptype*> arrayAddress(dalitzNormRange);
thrust::counting_iterator<int> binIndex(0);
// NB, SpecialResonanceCalculator assumes that fit is unbinned!
// And it needs to know the total event size, not just observables
// for this particular PDF component.
thrust::constant_iterator<fptype*> dataArray(dev_event_array);
thrust::constant_iterator<int> eventSize(totalEventSize);
thrust::counting_iterator<int> eventIndex(0);
for (int i = 0; i < decayInfo->resonances.size(); ++i) {
if (redoIntegral[i]) {
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(eventIndex, dataArray, eventSize)),
thrust::make_zip_iterator(thrust::make_tuple(eventIndex + numEntries, arrayAddress, eventSize)),
strided_range<DEVICE_VECTOR<devcomplex<fptype> >::iterator>(cachedWaves->begin() + i,
cachedWaves->end(),
decayInfo->resonances.size()).begin(),
*(calculators[i]));
}
// Possibly this can be done more efficiently by exploiting symmetry?
for (int j = 0; j < decayInfo->resonances.size(); ++j) {
if ((!redoIntegral[i]) && (!redoIntegral[j])) continue;
devcomplex<fptype> dummy(0, 0);
thrust::plus<devcomplex<fptype> > complexSum;
(*(integrals[i][j])) = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(binIndex, arrayAddress)),
thrust::make_zip_iterator(thrust::make_tuple(binIndex + totalBins, arrayAddress)),
*(integrators[i][j]),
dummy,
complexSum);
}
}
// End of time-consuming integrals.
complex<fptype> sumIntegral(0, 0);
for (unsigned int i = 0; i < decayInfo->resonances.size(); ++i) {
int param_i = parameters + resonanceOffset_DP + resonanceSize*i;
complex<fptype> amplitude_i(host_params[host_indices[param_i]], host_params[host_indices[param_i + 1]]);
for (unsigned int j = 0; j < decayInfo->resonances.size(); ++j) {
int param_j = parameters + resonanceOffset_DP + resonanceSize*j;
complex<fptype> amplitude_j(host_params[host_indices[param_j]], -host_params[host_indices[param_j + 1]]);
// Notice complex conjugation
sumIntegral += (amplitude_i * amplitude_j * complex<fptype>((*(integrals[i][j])).real, (*(integrals[i][j])).imag));
}
}
fptype ret = real(sumIntegral); // That complex number is a square, so it's fully real
double binSizeFactor = 1;
binSizeFactor *= ((_m12->upperlimit - _m12->lowerlimit) / _m12->numbins);
binSizeFactor *= ((_m13->upperlimit - _m13->lowerlimit) / _m13->numbins);
ret *= binSizeFactor;
host_normalisation[parameters] = 1.0/ret;
return (fptype) ret;
}
SpecialResonanceIntegrator::SpecialResonanceIntegrator (int pIdx, unsigned int ri, unsigned int rj)
: resonance_i(ri)
, resonance_j(rj)
, parameters(pIdx)
{}
EXEC_TARGET devcomplex<fptype> SpecialResonanceIntegrator::operator () (thrust::tuple<int, fptype*> t) const {
// Bin index, base address [lower, upper, numbins]
// Notice that this is basically MetricTaker::operator (binned) with the special-case knowledge
// that event size is two, and that the function to call is dev_DalitzPlot_calcIntegrals.
int globalBinNumber = thrust::get<0>(t);
fptype lowerBoundM12 = thrust::get<1>(t)[0];
fptype upperBoundM12 = thrust::get<1>(t)[1];
int numBinsM12 = (int) FLOOR(thrust::get<1>(t)[2] + 0.5);
int binNumberM12 = globalBinNumber % numBinsM12;
fptype binCenterM12 = upperBoundM12 - lowerBoundM12;
binCenterM12 /= numBinsM12;
binCenterM12 *= (binNumberM12 + 0.5);
binCenterM12 += lowerBoundM12;
globalBinNumber /= numBinsM12;
fptype lowerBoundM13 = thrust::get<1>(t)[3];
fptype upperBoundM13 = thrust::get<1>(t)[4];
int numBinsM13 = (int) FLOOR(thrust::get<1>(t)[5] + 0.5);
fptype binCenterM13 = upperBoundM13 - lowerBoundM13;
binCenterM13 /= numBinsM13;
binCenterM13 *= (globalBinNumber + 0.5);
binCenterM13 += lowerBoundM13;
unsigned int* indices = paramIndices + parameters;
devcomplex<fptype> ret = device_DalitzPlot_calcIntegrals(binCenterM12, binCenterM13, resonance_i, resonance_j, hipArray, indices);
fptype fakeEvt[10]; // Need room for many observables in case m12 or m13 were assigned a high index in an event-weighted fit.
fakeEvt[indices[indices[0] + 2 + 0]] = binCenterM12;
fakeEvt[indices[indices[0] + 2 + 1]] = binCenterM13;
unsigned int numResonances = indices[2];
int effFunctionIdx = parIndexFromResIndex_DP(numResonances);
fptype eff = callFunction(fakeEvt, indices[effFunctionIdx], indices[effFunctionIdx + 1]);
// Multiplication by eff, not sqrt(eff), is correct:
// These complex numbers will not be squared when they
// go into the integrals. They've been squared already,
// as it were.
ret *= eff;
return ret;
}
SpecialResonanceCalculator::SpecialResonanceCalculator (int pIdx, unsigned int res_idx)
: resonance_i(res_idx)
, parameters(pIdx)
{}
EXEC_TARGET devcomplex<fptype> SpecialResonanceCalculator::operator () (thrust::tuple<int, fptype*, int> t) const {
// Calculates the BW values for a specific resonance.
devcomplex<fptype> ret;
int evtNum = thrust::get<0>(t);
fptype* evt = thrust::get<1>(t) + (evtNum * thrust::get<2>(t));
unsigned int* indices = paramIndices + parameters; // Jump to DALITZPLOT position within parameters array
fptype m12 = evt[indices[2 + indices[0]]];
fptype m13 = evt[indices[3 + indices[0]]];
fptype motherMass = functorConstants[indices[1] + 0];
fptype daug1Mass = functorConstants[indices[1] + 1];
fptype daug2Mass = functorConstants[indices[1] + 2];
fptype daug3Mass = functorConstants[indices[1] + 3];
if (!inDalitz(m12, m13, motherMass, daug1Mass, daug2Mass, daug3Mass)) return ret;
fptype m23 = motherMass*motherMass + daug1Mass*daug1Mass + daug2Mass*daug2Mass + daug3Mass*daug3Mass - m12 - m13;
int parameter_i = parIndexFromResIndex_DP(resonance_i); // Find position of this resonance relative to DALITZPLOT start
unsigned int functn_i = indices[parameter_i+2];
unsigned int params_i = indices[parameter_i+3];
ret = getResonanceAmplitude(m12, m13, m23, functn_i, params_i);
//printf("Amplitude %f %f %f (%f, %f)\n ", m12, m13, m23, ret.real, ret.imag);
return ret;
}
| a27731edc95a49449945238b7341a75b63204cfa.cu | #include "DalitzPlotPdf.hh"
#include <complex>
using std::complex;
const int resonanceOffset_DP = 4; // Offset of the first resonance into the parameter index array
// Offset is number of parameters, constant index, number of resonances (not calculable
// from nP because we don't know what the efficiency might need), and cache index. Efficiency
// parameters are after the resonance information.
// The function of this array is to hold all the cached waves; specific
// waves are recalculated when the corresponding resonance mass or width
// changes. Note that in a multithread environment each thread needs its
// own cache, hence the '10'. Ten threads should be enough for anyone!
MEM_DEVICE devcomplex<fptype>* cResonances[10];
EXEC_TARGET inline int parIndexFromResIndex_DP (int resIndex) {
return resonanceOffset_DP + resIndex*resonanceSize;
}
EXEC_TARGET devcomplex<fptype> device_DalitzPlot_calcIntegrals (fptype m12, fptype m13, int res_i, int res_j, fptype* p, unsigned int* indices) {
// Calculates BW_i(m12, m13) * BW_j^*(m12, m13).
// This calculation is in a separate function so
// it can be cached. Note that this function expects
// to be called on a normalisation grid, not on
// observed points, that's why it doesn't use
// cResonances. No need to cache the values at individual
// grid points - we only care about totals.
fptype motherMass = functorConstants[indices[1] + 0];
fptype daug1Mass = functorConstants[indices[1] + 1];
fptype daug2Mass = functorConstants[indices[1] + 2];
fptype daug3Mass = functorConstants[indices[1] + 3];
devcomplex<fptype> ret;
if (!inDalitz(m12, m13, motherMass, daug1Mass, daug2Mass, daug3Mass)) return ret;
fptype m23 = motherMass*motherMass + daug1Mass*daug1Mass + daug2Mass*daug2Mass + daug3Mass*daug3Mass - m12 - m13;
int parameter_i = parIndexFromResIndex_DP(res_i);
unsigned int functn_i = indices[parameter_i+2];
unsigned int params_i = indices[parameter_i+3];
ret = getResonanceAmplitude(m12, m13, m23, functn_i, params_i);
int parameter_j = parIndexFromResIndex_DP(res_j);
unsigned int functn_j = indices[parameter_j+2];
unsigned int params_j = indices[parameter_j+3];
ret *= conj(getResonanceAmplitude(m12, m13, m23, functn_j, params_j));
return ret;
}
EXEC_TARGET fptype device_DalitzPlot (fptype* evt, fptype* p, unsigned int* indices) {
fptype motherMass = functorConstants[indices[1] + 0];
fptype daug1Mass = functorConstants[indices[1] + 1];
fptype daug2Mass = functorConstants[indices[1] + 2];
fptype daug3Mass = functorConstants[indices[1] + 3];
fptype m12 = evt[indices[2 + indices[0]]];
fptype m13 = evt[indices[3 + indices[0]]];
if (!inDalitz(m12, m13, motherMass, daug1Mass, daug2Mass, daug3Mass)) return 0;
int evtNum = (int) FLOOR(0.5 + evt[indices[4 + indices[0]]]);
devcomplex<fptype> totalAmp(0, 0);
unsigned int numResonances = indices[2];
unsigned int cacheToUse = indices[3];
for (int i = 0; i < numResonances; ++i) {
int paramIndex = parIndexFromResIndex_DP(i);
fptype amp_real = p[indices[paramIndex+0]];
fptype amp_imag = p[indices[paramIndex+1]];
devcomplex<fptype> matrixelement((cResonances[cacheToUse][evtNum*numResonances + i]).real,
(cResonances[cacheToUse][evtNum*numResonances + i]).imag);
matrixelement.multiply(amp_real, amp_imag);
totalAmp += matrixelement;
}
fptype ret = norm2(totalAmp);
int effFunctionIdx = parIndexFromResIndex_DP(numResonances);
fptype eff = callFunction(evt, indices[effFunctionIdx], indices[effFunctionIdx + 1]);
ret *= eff;
//printf("DalitzPlot evt %i zero: %i %i %f (%f, %f).\n", evtNum, numResonances, effFunctionIdx, eff, totalAmp.real, totalAmp.imag);
return ret;
}
MEM_DEVICE device_function_ptr ptr_to_DalitzPlot = device_DalitzPlot;
__host__ DalitzPlotPdf::DalitzPlotPdf (std::string n,
Variable* m12,
Variable* m13,
Variable* eventNumber,
DecayInfo* decay,
GooPdf* efficiency)
: GooPdf(0, n)
, decayInfo(decay)
, _m12(m12)
, _m13(m13)
, dalitzNormRange(0)
, cachedWaves(0)
, integrals(0)
, forceRedoIntegrals(true)
, totalEventSize(3) // Default 3 = m12, m13, evtNum
, cacheToUse(0)
, integrators(0)
, calculators(0)
{
registerObservable(_m12);
registerObservable(_m13);
registerObservable(eventNumber);
fptype decayConstants[5];
std::vector<unsigned int> pindices;
pindices.push_back(registerConstants(5));
decayConstants[0] = decayInfo->motherMass;
decayConstants[1] = decayInfo->daug1Mass;
decayConstants[2] = decayInfo->daug2Mass;
decayConstants[3] = decayInfo->daug3Mass;
decayConstants[4] = decayInfo->meson_radius;
MEMCPY_TO_SYMBOL(functorConstants, decayConstants, 5*sizeof(fptype), cIndex*sizeof(fptype), cudaMemcpyHostToDevice);
pindices.push_back(decayInfo->resonances.size());
static int cacheCount = 0;
cacheToUse = cacheCount++;
pindices.push_back(cacheToUse);
for (std::vector<ResonancePdf*>::iterator res = decayInfo->resonances.begin(); res != decayInfo->resonances.end(); ++res) {
pindices.push_back(registerParameter((*res)->amp_real));
pindices.push_back(registerParameter((*res)->amp_imag));
pindices.push_back((*res)->getFunctionIndex());
pindices.push_back((*res)->getParameterIndex());
(*res)->setConstantIndex(cIndex);
components.push_back(*res);
}
pindices.push_back(efficiency->getFunctionIndex());
pindices.push_back(efficiency->getParameterIndex());
components.push_back(efficiency);
GET_FUNCTION_ADDR(ptr_to_DalitzPlot);
initialise(pindices);
redoIntegral = new bool[decayInfo->resonances.size()];
cachedMasses = new fptype[decayInfo->resonances.size()];
cachedWidths = new fptype[decayInfo->resonances.size()];
integrals = new devcomplex<fptype>**[decayInfo->resonances.size()];
integrators = new SpecialResonanceIntegrator**[decayInfo->resonances.size()];
calculators = new SpecialResonanceCalculator*[decayInfo->resonances.size()];
for (int i = 0; i < decayInfo->resonances.size(); ++i) {
redoIntegral[i] = true;
cachedMasses[i] = -1;
cachedWidths[i] = -1;
integrators[i] = new SpecialResonanceIntegrator*[decayInfo->resonances.size()];
calculators[i] = new SpecialResonanceCalculator(parameters, i);
integrals[i] = new devcomplex<fptype>*[decayInfo->resonances.size()];
for (int j = 0; j < decayInfo->resonances.size(); ++j) {
integrals[i][j] = new devcomplex<fptype>(0, 0);
integrators[i][j] = new SpecialResonanceIntegrator(parameters, i, j);
}
}
addSpecialMask(PdfBase::ForceSeparateNorm);
}
__host__ void DalitzPlotPdf::setDataSize (unsigned int dataSize, unsigned int evtSize) {
// Default 3 is m12, m13, evtNum
totalEventSize = evtSize;
assert(totalEventSize >= 3);
if (cachedWaves) delete cachedWaves;
numEntries = dataSize;
cachedWaves = new DEVICE_VECTOR<devcomplex<fptype> >(dataSize*decayInfo->resonances.size());
void* dummy = thrust::raw_pointer_cast(cachedWaves->data());
MEMCPY_TO_SYMBOL(cResonances, &dummy, sizeof(devcomplex<fptype>*), cacheToUse*sizeof(devcomplex<fptype>*), cudaMemcpyHostToDevice);
setForceIntegrals();
}
__host__ fptype DalitzPlotPdf::normalise () const {
recursiveSetNormalisation(1); // Not going to normalise efficiency,
// so set normalisation factor to 1 so it doesn't get multiplied by zero.
// Copy at this time to ensure that the SpecialResonanceCalculators, which need the efficiency,
// don't get zeroes through multiplying by the normFactor.
MEMCPY_TO_SYMBOL(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, cudaMemcpyHostToDevice);
int totalBins = _m12->numbins * _m13->numbins;
if (!dalitzNormRange) {
gooMalloc((void**) &dalitzNormRange, 6*sizeof(fptype));
fptype* host_norms = new fptype[6];
host_norms[0] = _m12->lowerlimit;
host_norms[1] = _m12->upperlimit;
host_norms[2] = _m12->numbins;
host_norms[3] = _m13->lowerlimit;
host_norms[4] = _m13->upperlimit;
host_norms[5] = _m13->numbins;
MEMCPY(dalitzNormRange, host_norms, 6*sizeof(fptype), cudaMemcpyHostToDevice);
delete[] host_norms;
}
for (unsigned int i = 0; i < decayInfo->resonances.size(); ++i) {
redoIntegral[i] = forceRedoIntegrals;
if (!(decayInfo->resonances[i]->parametersChanged())) continue;
redoIntegral[i] = true;
decayInfo->resonances[i]->storeParameters();
}
forceRedoIntegrals = false;
// Only do this bit if masses or widths have changed.
thrust::constant_iterator<fptype*> arrayAddress(dalitzNormRange);
thrust::counting_iterator<int> binIndex(0);
// NB, SpecialResonanceCalculator assumes that fit is unbinned!
// And it needs to know the total event size, not just observables
// for this particular PDF component.
thrust::constant_iterator<fptype*> dataArray(dev_event_array);
thrust::constant_iterator<int> eventSize(totalEventSize);
thrust::counting_iterator<int> eventIndex(0);
for (int i = 0; i < decayInfo->resonances.size(); ++i) {
if (redoIntegral[i]) {
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(eventIndex, dataArray, eventSize)),
thrust::make_zip_iterator(thrust::make_tuple(eventIndex + numEntries, arrayAddress, eventSize)),
strided_range<DEVICE_VECTOR<devcomplex<fptype> >::iterator>(cachedWaves->begin() + i,
cachedWaves->end(),
decayInfo->resonances.size()).begin(),
*(calculators[i]));
}
// Possibly this can be done more efficiently by exploiting symmetry?
for (int j = 0; j < decayInfo->resonances.size(); ++j) {
if ((!redoIntegral[i]) && (!redoIntegral[j])) continue;
devcomplex<fptype> dummy(0, 0);
thrust::plus<devcomplex<fptype> > complexSum;
(*(integrals[i][j])) = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(binIndex, arrayAddress)),
thrust::make_zip_iterator(thrust::make_tuple(binIndex + totalBins, arrayAddress)),
*(integrators[i][j]),
dummy,
complexSum);
}
}
// End of time-consuming integrals.
complex<fptype> sumIntegral(0, 0);
for (unsigned int i = 0; i < decayInfo->resonances.size(); ++i) {
int param_i = parameters + resonanceOffset_DP + resonanceSize*i;
complex<fptype> amplitude_i(host_params[host_indices[param_i]], host_params[host_indices[param_i + 1]]);
for (unsigned int j = 0; j < decayInfo->resonances.size(); ++j) {
int param_j = parameters + resonanceOffset_DP + resonanceSize*j;
complex<fptype> amplitude_j(host_params[host_indices[param_j]], -host_params[host_indices[param_j + 1]]);
// Notice complex conjugation
sumIntegral += (amplitude_i * amplitude_j * complex<fptype>((*(integrals[i][j])).real, (*(integrals[i][j])).imag));
}
}
fptype ret = real(sumIntegral); // That complex number is a square, so it's fully real
double binSizeFactor = 1;
binSizeFactor *= ((_m12->upperlimit - _m12->lowerlimit) / _m12->numbins);
binSizeFactor *= ((_m13->upperlimit - _m13->lowerlimit) / _m13->numbins);
ret *= binSizeFactor;
host_normalisation[parameters] = 1.0/ret;
return (fptype) ret;
}
SpecialResonanceIntegrator::SpecialResonanceIntegrator (int pIdx, unsigned int ri, unsigned int rj)
: resonance_i(ri)
, resonance_j(rj)
, parameters(pIdx)
{}
EXEC_TARGET devcomplex<fptype> SpecialResonanceIntegrator::operator () (thrust::tuple<int, fptype*> t) const {
// Bin index, base address [lower, upper, numbins]
// Notice that this is basically MetricTaker::operator (binned) with the special-case knowledge
// that event size is two, and that the function to call is dev_DalitzPlot_calcIntegrals.
int globalBinNumber = thrust::get<0>(t);
fptype lowerBoundM12 = thrust::get<1>(t)[0];
fptype upperBoundM12 = thrust::get<1>(t)[1];
int numBinsM12 = (int) FLOOR(thrust::get<1>(t)[2] + 0.5);
int binNumberM12 = globalBinNumber % numBinsM12;
fptype binCenterM12 = upperBoundM12 - lowerBoundM12;
binCenterM12 /= numBinsM12;
binCenterM12 *= (binNumberM12 + 0.5);
binCenterM12 += lowerBoundM12;
globalBinNumber /= numBinsM12;
fptype lowerBoundM13 = thrust::get<1>(t)[3];
fptype upperBoundM13 = thrust::get<1>(t)[4];
int numBinsM13 = (int) FLOOR(thrust::get<1>(t)[5] + 0.5);
fptype binCenterM13 = upperBoundM13 - lowerBoundM13;
binCenterM13 /= numBinsM13;
binCenterM13 *= (globalBinNumber + 0.5);
binCenterM13 += lowerBoundM13;
unsigned int* indices = paramIndices + parameters;
devcomplex<fptype> ret = device_DalitzPlot_calcIntegrals(binCenterM12, binCenterM13, resonance_i, resonance_j, cudaArray, indices);
fptype fakeEvt[10]; // Need room for many observables in case m12 or m13 were assigned a high index in an event-weighted fit.
fakeEvt[indices[indices[0] + 2 + 0]] = binCenterM12;
fakeEvt[indices[indices[0] + 2 + 1]] = binCenterM13;
unsigned int numResonances = indices[2];
int effFunctionIdx = parIndexFromResIndex_DP(numResonances);
fptype eff = callFunction(fakeEvt, indices[effFunctionIdx], indices[effFunctionIdx + 1]);
// Multiplication by eff, not sqrt(eff), is correct:
// These complex numbers will not be squared when they
// go into the integrals. They've been squared already,
// as it were.
ret *= eff;
return ret;
}
SpecialResonanceCalculator::SpecialResonanceCalculator (int pIdx, unsigned int res_idx)
: resonance_i(res_idx)
, parameters(pIdx)
{}
EXEC_TARGET devcomplex<fptype> SpecialResonanceCalculator::operator () (thrust::tuple<int, fptype*, int> t) const {
// Calculates the BW values for a specific resonance.
devcomplex<fptype> ret;
int evtNum = thrust::get<0>(t);
fptype* evt = thrust::get<1>(t) + (evtNum * thrust::get<2>(t));
unsigned int* indices = paramIndices + parameters; // Jump to DALITZPLOT position within parameters array
fptype m12 = evt[indices[2 + indices[0]]];
fptype m13 = evt[indices[3 + indices[0]]];
fptype motherMass = functorConstants[indices[1] + 0];
fptype daug1Mass = functorConstants[indices[1] + 1];
fptype daug2Mass = functorConstants[indices[1] + 2];
fptype daug3Mass = functorConstants[indices[1] + 3];
if (!inDalitz(m12, m13, motherMass, daug1Mass, daug2Mass, daug3Mass)) return ret;
fptype m23 = motherMass*motherMass + daug1Mass*daug1Mass + daug2Mass*daug2Mass + daug3Mass*daug3Mass - m12 - m13;
int parameter_i = parIndexFromResIndex_DP(resonance_i); // Find position of this resonance relative to DALITZPLOT start
unsigned int functn_i = indices[parameter_i+2];
unsigned int params_i = indices[parameter_i+3];
ret = getResonanceAmplitude(m12, m13, m23, functn_i, params_i);
//printf("Amplitude %f %f %f (%f, %f)\n ", m12, m13, m23, ret.real, ret.imag);
return ret;
}
|
9e2af108d7684eefd468a9f9aa27fb20a59a60d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* An example of using constant memory to optimize performance of a stencil
* computation by storing coefficients of the computation in a constant memory
* array (coef).
*/
#define RADIUS 4
#define BDIM 32
// constant memory
__constant__ float coef[RADIUS + 1];
// FD coeffecient
#define a0 0.00000f
#define a1 0.80000f
#define a2 -0.20000f
#define a3 0.03809f
#define a4 -0.00357f
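// The coefficients above are the standard 8th-order central finite-difference
// weights for a first derivative on a unit-spaced grid (illustrative
// restatement; the grid spacing h is implicitly 1):
//
//   f'(x) ~= a1*(f(x+1) - f(x-1)) + a2*(f(x+2) - f(x-2))
//          + a3*(f(x+3) - f(x-3)) + a4*(f(x+4) - f(x-4))
//
// which is exactly the stencil applied by cpu_stencil_1d() and stencil_1d().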
void initialData(float *in, const int size)
{
for (int i = 0; i < size; i++)
{
in[i] = (float)(rand() & 0xFF) / 100.0f;
}
}
void printData(float *in, const int size)
{
for (int i = RADIUS; i < size; i++)
{
printf("%f ", in[i]);
}
printf("\n");
}
void setup_coef_constant (void)
{
const float h_coef[] = {a0, a1, a2, a3, a4};
CHECK(hipMemcpyToSymbol( coef, h_coef, (RADIUS + 1) * sizeof(float)));
}
void cpu_stencil_1d (float *in, float *out, int isize)
{
for (int i = RADIUS; i <= isize; i++)
{
float tmp = a1 * (in[i + 1] - in[i - 1])
+ a2 * (in[i + 2] - in[i - 2])
+ a3 * (in[i + 3] - in[i - 3])
+ a4 * (in[i + 4] - in[i - 4]);
out[i] = tmp;
}
}
void checkResult(float *hostRef, float *gpuRef, const int size)
{
double epsilon = 1.0E-6;
bool match = 1;
for (int i = RADIUS; i < size; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("different on %dth element: host %f gpu %f\n", i, hostRef[i],
gpuRef[i]);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
__global__ void stencil_1d(float *in, float *out, int N)
{
// shared memory
__shared__ float smem[BDIM + 2 * RADIUS];
// index to global memory
int idx = blockIdx.x * blockDim.x + threadIdx.x;
while (idx < N)
{
// index to shared memory for stencil calculation
int sidx = threadIdx.x + RADIUS;
// Read data from global memory into shared memory
smem[sidx] = in[idx];
// read halo part to shared memory
if (threadIdx.x < RADIUS)
{
smem[sidx - RADIUS] = in[idx - RADIUS];
smem[sidx + BDIM] = in[idx + BDIM];
}
// Synchronize (ensure all the data is available)
__syncthreads();
// Apply the stencil
float tmp = 0.0f;
#pragma unroll
for (int i = 1; i <= RADIUS; i++)
{
tmp += coef[i] * (smem[sidx + i] - smem[sidx - i]);
}
// Store the result
out[idx] = tmp;
idx += gridDim.x * blockDim.x;
}
}
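// Illustrative note on the shared-memory layout used above (not part of the
// original comments): smem holds the BDIM interior points of this block plus
// RADIUS halo points on each side,
//
//   smem: [ left halo (RADIUS) | interior (BDIM) | right halo (RADIUS) ]
//   sidx = threadIdx.x + RADIUS indexes this thread's interior element
//
// so threads 0..RADIUS-1 additionally fetch the left halo (in[idx - RADIUS])
// and the matching right halo (in[idx + BDIM]).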
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s starting transpose at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// set up data size
int isize = 1 << 24;
size_t nBytes = (isize + 2 * RADIUS) * sizeof(float);
printf("array size: %d ", isize);
bool iprint = 0;
// allocate host memory
float *h_in = (float *)malloc(nBytes);
float *hostRef = (float *)malloc(nBytes);
float *gpuRef = (float *)malloc(nBytes);
// allocate device memory
float *d_in, *d_out;
CHECK(hipMalloc((float**)&d_in, nBytes));
CHECK(hipMalloc((float**)&d_out, nBytes));
// initialize host array
initialData(h_in, isize + 2 * RADIUS);
// Copy to device
CHECK(hipMemcpy(d_in, h_in, nBytes, hipMemcpyHostToDevice));
// set up constant memory
setup_coef_constant();
// launch configuration
hipDeviceProp_t info;
CHECK(hipGetDeviceProperties(&info, 0));
dim3 block(BDIM, 1);
dim3 grid(info.maxGridSize[0] < isize / block.x ? info.maxGridSize[0] :
isize / block.x, 1);
printf("(grid, block) %d,%d \n ", grid.x, block.x);
// Launch stencil_1d() kernel on GPU
hipLaunchKernelGGL(( stencil_1d), dim3(grid), dim3(block), 0, 0, d_in + RADIUS, d_out + RADIUS, isize);
// Copy result back to host
CHECK(hipMemcpy(gpuRef, d_out, nBytes, hipMemcpyDeviceToHost));
// apply cpu stencil
cpu_stencil_1d(h_in, hostRef, isize);
// check results
checkResult(hostRef, gpuRef, isize);
// print out results
if(iprint)
{
printData(gpuRef, isize);
printData(hostRef, isize);
}
// Cleanup
CHECK(hipFree(d_in));
CHECK(hipFree(d_out));
free(h_in);
free(hostRef);
free(gpuRef);
// reset device
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
| 9e2af108d7684eefd468a9f9aa27fb20a59a60d3.cu | #include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* An example of using constant memory to optimize performance of a stencil
* computation by storing coefficients of the computation in a constant memory
* array (coef).
*/
#define RADIUS 4
#define BDIM 32
// constant memory
__constant__ float coef[RADIUS + 1];
// FD coefficient
#define a0 0.00000f
#define a1 0.80000f
#define a2 -0.20000f
#define a3 0.03809f
#define a4 -0.00357f
void initialData(float *in, const int size)
{
for (int i = 0; i < size; i++)
{
in[i] = (float)(rand() & 0xFF) / 100.0f;
}
}
void printData(float *in, const int size)
{
for (int i = RADIUS; i < size; i++)
{
printf("%f ", in[i]);
}
printf("\n");
}
void setup_coef_constant (void)
{
const float h_coef[] = {a0, a1, a2, a3, a4};
CHECK(cudaMemcpyToSymbol( coef, h_coef, (RADIUS + 1) * sizeof(float)));
}
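// Host reference: radius-4 finite-difference stencil using coefficients a1..a4.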
void cpu_stencil_1d (float *in, float *out, int isize)
{
for (int i = RADIUS; i <= isize; i++)
{
float tmp = a1 * (in[i + 1] - in[i - 1])
+ a2 * (in[i + 2] - in[i - 2])
+ a3 * (in[i + 3] - in[i - 3])
+ a4 * (in[i + 4] - in[i - 4]);
out[i] = tmp;
}
}
void checkResult(float *hostRef, float *gpuRef, const int size)
{
double epsilon = 1.0E-6;
bool match = 1;
for (int i = RADIUS; i < size; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("different on %dth element: host %f gpu %f\n", i, hostRef[i],
gpuRef[i]);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
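// Each block loads BDIM elements plus a 2*RADIUS halo into shared memory, then
// applies the symmetric stencil with coefficients read from constant memory.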
__global__ void stencil_1d(float *in, float *out, int N)
{
// shared memory
__shared__ float smem[BDIM + 2 * RADIUS];
// index to global memory
int idx = blockIdx.x * blockDim.x + threadIdx.x;
while (idx < N)
{
// index to shared memory for stencil calculation
int sidx = threadIdx.x + RADIUS;
// Read data from global memory into shared memory
smem[sidx] = in[idx];
// read halo part to shared memory
if (threadIdx.x < RADIUS)
{
smem[sidx - RADIUS] = in[idx - RADIUS];
smem[sidx + BDIM] = in[idx + BDIM];
}
// Synchronize (ensure all the data is available)
__syncthreads();
// Apply the stencil
float tmp = 0.0f;
#pragma unroll
for (int i = 1; i <= RADIUS; i++)
{
tmp += coef[i] * (smem[sidx + i] - smem[sidx - i]);
}
// Store the result
out[idx] = tmp;
idx += gridDim.x * blockDim.x;
}
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s starting transpose at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// set up data size
int isize = 1 << 24;
size_t nBytes = (isize + 2 * RADIUS) * sizeof(float);
printf("array size: %d ", isize);
bool iprint = 0;
// allocate host memory
float *h_in = (float *)malloc(nBytes);
float *hostRef = (float *)malloc(nBytes);
float *gpuRef = (float *)malloc(nBytes);
// allocate device memory
float *d_in, *d_out;
CHECK(cudaMalloc((float**)&d_in, nBytes));
CHECK(cudaMalloc((float**)&d_out, nBytes));
// initialize host array
initialData(h_in, isize + 2 * RADIUS);
// Copy to device
CHECK(cudaMemcpy(d_in, h_in, nBytes, cudaMemcpyHostToDevice));
// set up constant memory
setup_coef_constant();
// launch configuration
cudaDeviceProp info;
CHECK(cudaGetDeviceProperties(&info, 0));
dim3 block(BDIM, 1);
dim3 grid(info.maxGridSize[0] < isize / block.x ? info.maxGridSize[0] :
isize / block.x, 1);
printf("(grid, block) %d,%d \n ", grid.x, block.x);
// Launch stencil_1d() kernel on GPU
stencil_1d<<<grid, block>>>(d_in + RADIUS, d_out + RADIUS, isize);
// Copy result back to host
CHECK(cudaMemcpy(gpuRef, d_out, nBytes, cudaMemcpyDeviceToHost));
// apply cpu stencil
cpu_stencil_1d(h_in, hostRef, isize);
// check results
checkResult(hostRef, gpuRef, isize);
// print out results
if(iprint)
{
printData(gpuRef, isize);
printData(hostRef, isize);
}
// Cleanup
CHECK(cudaFree(d_in));
CHECK(cudaFree(d_out));
free(h_in);
free(hostRef);
free(gpuRef);
// reset device
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
kSoftMaxGradCLS.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kSoftMaxGradCLS(float* mat, int* labels, float* indices, float* target, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width*height; i += numThreads) {
target[i] = mat[i] - (labels[(int)indices[i % height]] == i / height ? 1 : 0);
}
} | kSoftMaxGradCLS.cu | #include "includes.h"
__global__ void kSoftMaxGradCLS(float* mat, int* labels, float* indices, float* target, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width*height; i += numThreads) {
target[i] = mat[i] - (labels[(int)indices[i % height]] == i / height ? 1 : 0);
}
} |
e65cb7cedf5846f578868b0f40a4f6e78bb3cd15.hip | // !!! This is a file automatically generated by hipify!!!
#include <kernels/gpu/add_bias.h>
#include <core/tensor_builder.h>
#include <global/operator_factory.h>
#include "global/fp16_operator_factory.h"
#include <backend/name.h>
#include <utils/assert.h>
#include <core/device.h>
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "kernels/gpu/gpu_kernel.h"
/////////////////////////////////////////////////
namespace ts {
namespace gpu {
template<typename T>
static __global__ void add_bias_kernel(const T* base, T* data, int size, int step, int slice,
const T* bias, int biaslen ) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size) {
int dim = index % ( step * slice ) / (step);
data[index] = base[index] + bias[dim];
}
}
template<typename T>
static void gpu_add_bias_compute_run(const Tensor &x, const Tensor &b, int dim, Tensor &out) {
const Shape &shape = x.sizes();
//int pre_dims = 1;
int back_dims = 1;
//for (int i = 0; i < dim; i++) {
// pre_dims *= shape[i];
//}
for (int i = dim + 1; i < shape.size(); i++) {
back_dims *= shape[i];
}
const T *psrc = x.data<T>();
const T *pbias = b.data<T>();
T *pdst = out.data<T>();
// memcpy((void*)pdst, out.device(), x.count() * sizeof(T),
// (void*)psrc, x.device(), x.count() * sizeof(T));
RUN_KERNEL(add_bias_kernel<T>, CUDA_BLOCK(x.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM,
psrc, pdst, x.count(), back_dims, shape[dim], pbias, b.count());
//hipDeviceSynchronize();
}
void AddBias::add(const Tensor &x, const Tensor &b, int dim, Tensor &out) {
// Notice: all the tensors' memory devices are as given in running_memory_device
DTYPE dtype = out.dtype();
switch (dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { gpu_add_bias_compute_run<TYPE>(x, b, dim, out); break; }
DECLARE_COMPUTE_RUN(INT8, int8_t);
DECLARE_COMPUTE_RUN(UINT8, uint8_t);
DECLARE_COMPUTE_RUN(INT16, int16_t);
DECLARE_COMPUTE_RUN(UINT16, uint16_t);
DECLARE_COMPUTE_RUN(INT32, int32_t);
DECLARE_COMPUTE_RUN(UINT32, uint32_t);
DECLARE_COMPUTE_RUN(INT64, int64_t);
DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
}
}
/////////////////////////////////////////////////
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(AddBias, GPU, name::layer::add_bias())
#ifdef TS_USE_CUDA_FP16
TS_REGISTER_FP16_OPERATOR(AddBias, ts::GPU, name::layer::add_bias())
#endif
| e65cb7cedf5846f578868b0f40a4f6e78bb3cd15.cu | #include <kernels/gpu/add_bias.h>
#include <core/tensor_builder.h>
#include <global/operator_factory.h>
#include "global/fp16_operator_factory.h"
#include <backend/name.h>
#include <utils/assert.h>
#include <core/device.h>
#include "device_launch_parameters.h"
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "kernels/gpu/gpu_kernel.h"
/////////////////////////////////////////////////
namespace ts {
namespace gpu {
template<typename T>
static __global__ void add_bias_kernel(const T* base, T* data, int size, int step, int slice,
const T* bias, int biaslen ) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size) {
int dim = index % ( step * slice ) / (step);
data[index] = base[index] + bias[dim];
}
}
template<typename T>
static void gpu_add_bias_compute_run(const Tensor &x, const Tensor &b, int dim, Tensor &out) {
const Shape &shape = x.sizes();
//int pre_dims = 1;
int back_dims = 1;
//for (int i = 0; i < dim; i++) {
// pre_dims *= shape[i];
//}
for (int i = dim + 1; i < shape.size(); i++) {
back_dims *= shape[i];
}
const T *psrc = x.data<T>();
const T *pbias = b.data<T>();
T *pdst = out.data<T>();
// memcpy((void*)pdst, out.device(), x.count() * sizeof(T),
// (void*)psrc, x.device(), x.count() * sizeof(T));
RUN_KERNEL(add_bias_kernel<T>, CUDA_BLOCK(x.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM,
psrc, pdst, x.count(), back_dims, shape[dim], pbias, b.count());
//cudaDeviceSynchronize();
}
void AddBias::add(const Tensor &x, const Tensor &b, int dim, Tensor &out) {
// Notice: all the tensors' memory devices are as given in running_memory_device
DTYPE dtype = out.dtype();
switch (dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { gpu_add_bias_compute_run<TYPE>(x, b, dim, out); break; }
DECLARE_COMPUTE_RUN(INT8, int8_t);
DECLARE_COMPUTE_RUN(UINT8, uint8_t);
DECLARE_COMPUTE_RUN(INT16, int16_t);
DECLARE_COMPUTE_RUN(UINT16, uint16_t);
DECLARE_COMPUTE_RUN(INT32, int32_t);
DECLARE_COMPUTE_RUN(UINT32, uint32_t);
DECLARE_COMPUTE_RUN(INT64, int64_t);
DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
}
}
/////////////////////////////////////////////////
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(AddBias, GPU, name::layer::add_bias())
#ifdef TS_USE_CUDA_FP16
TS_REGISTER_FP16_OPERATOR(AddBias, ts::GPU, name::layer::add_bias())
#endif
|
0e03c0113a4884724d3c400b5ce7aa008b9b7747.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __CUDACC_RTC__
#define __CUDACC_RTC__
#endif
#include <hip/device_functions.h>
#include <iostream>
#include <math.h>
#include <hip/hip_runtime.h>
#include "postprocess.cuh"
#include "error_util.h"
using namespace std;
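// Suppress the background class: for every anchor, force its confidence scores to -1000 so it never wins.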
__global__ void remove_background(float *confData_tmp, int num_anchors, int num_classes, int image_width)
{
int idxRow = threadIdx.x + blockDim.x*blockIdx.x;
int idxCol = threadIdx.y + blockDim.y*blockIdx.y;
int backgroundCol = 0;
if (idxRow < image_width && idxCol < image_width)
{
for (int i = 0; i < num_anchors; i++)
{
backgroundCol = image_width * i * (num_classes+1);
confData_tmp[(idxCol+backgroundCol)*image_width + idxRow] = -1000.0;
}
}
}
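// Decode per-anchor box regressions (y_center, x_center, height, width) into absolute coordinates,
// using the layer's anchor shapes and scale factors 0.1/0.2, scaled to the original image size.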
__global__ void encode_locData(float *locData, int num_anchors, float* anchorShape, int box_code, int featuremap_height, int featuremap_width, int original_image_height, int original_image_width, int count_layer)
{
__shared__ float cache[12000];
int idxRow = threadIdx.x + blockDim.x * blockIdx.x;
int idxCol = threadIdx.y + blockDim.y * blockIdx.y;
//int channels = num_anchors * box_code;
int depth = idxCol / featuremap_height;
int idxCol_eachchannel = idxCol % featuremap_height;
double box_xcenter = idxRow * (double)1 / featuremap_width + 0.5 * (double)1 / featuremap_width;
double box_ycenter = idxCol_eachchannel * (double)1 / featuremap_height + 0.5 * (double)1 / featuremap_height;
double box_width = (double)anchorShape[ num_anchors * 2 * count_layer + 2 * (depth / box_code) + 1];
double box_height = (double)anchorShape[num_anchors * 2 * count_layer + 2 * (depth / box_code)];
// locData= [y_center, x_center, height, width]
if (depth % box_code == 3)
{
locData[idxCol * featuremap_width + idxRow] = (expf((float)(locData[idxCol * featuremap_width + idxRow] * 0.2)) * (float)box_width) * original_image_width;
}
else if (depth % box_code ==2)
{
locData[idxCol * featuremap_width + idxRow] = (expf((float)(locData[idxCol * featuremap_width + idxRow] * 0.2)) * (float)box_height) * original_image_height;
}
else if (depth % box_code == 1)
{
locData[idxCol * featuremap_width + idxRow] = ((float)(locData[idxCol * featuremap_width + idxRow] * 0.1) * (float)box_width + (float)box_xcenter) * original_image_width;
}
else if (depth % box_code == 0)
{
locData[idxCol * featuremap_width + idxRow] = ((float)(locData[idxCol * featuremap_width + idxRow] * 0.1) * (float)box_height + (float)box_ycenter) * original_image_height;
}
}
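// Convert decoded (y_center, x_center, height, width) boxes to corner form (y_min, x_min, y_max, x_max) in place.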
__global__ void sum_encodedData(float* locData, int featuremap_width, int featuremap_height, int box_code)
{
__shared__ float cache[12000];
int one_depth = featuremap_width*featuremap_height;
int idxCol = (threadIdx.y + blockDim.y * blockIdx.y) % featuremap_height;
int idxRow = threadIdx.x + blockDim.x * blockIdx.x;
int idxBox = 4 * ((threadIdx.y + blockDim.y * blockIdx.y) / featuremap_height);
cache[idxBox * one_depth + idxCol * featuremap_width + idxRow] = locData[one_depth * idxBox + idxCol * featuremap_width + idxRow] - locData[one_depth *(idxBox + 2) + idxCol*featuremap_width + idxRow] * 0.5; //y_min
cache[(1 + idxBox) * one_depth + idxCol * featuremap_width + idxRow] = locData[one_depth * (idxBox + 1) + idxCol * featuremap_width + idxRow] - locData[one_depth *(idxBox + 3) + idxCol*featuremap_width + idxRow] * 0.5; //x_min
cache[(2 + idxBox) * one_depth + idxCol * featuremap_width + idxRow] = locData[one_depth * idxBox + idxCol * featuremap_width + idxRow] + locData[one_depth *(idxBox + 2) + idxCol*featuremap_width + idxRow] * 0.5; //y_max
cache[(3 + idxBox) * one_depth + idxCol * featuremap_width + idxRow] = locData[one_depth * (idxBox + 1) + idxCol * featuremap_width + idxRow] + locData[one_depth *(idxBox + 3) + idxCol*featuremap_width + idxRow] * 0.5; //x_max
__syncthreads();
locData[idxBox * one_depth + idxCol * featuremap_width + idxRow] = cache[idxBox * one_depth + idxCol * featuremap_width + idxRow];
locData[(1 + idxBox) * one_depth + idxCol * featuremap_width + idxRow] = cache[(1 + idxBox) * one_depth + idxCol * featuremap_width + idxRow];
locData[(2 + idxBox) * one_depth + idxCol * featuremap_width + idxRow] = cache[(2 + idxBox) * one_depth + idxCol * featuremap_width + idxRow];
locData[(3 + idxBox) * one_depth + idxCol * featuremap_width + idxRow] = cache[(3 + idxBox) * one_depth + idxCol * featuremap_width + idxRow];
}
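// Clamp box corners to the original image bounds.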
__global__ void clip(float *locData, int num_anchors, int box_code, int featuremap_height, int featuremap_width, int original_image_h, int original_image_w)
{
int one_depth = featuremap_width*featuremap_height;
int idxCol = (threadIdx.y + blockDim.y * blockIdx.y) % featuremap_height;
int idxRow = threadIdx.x + blockDim.x * blockIdx.x;
int idxBox = 4 * ((threadIdx.y + blockDim.y * blockIdx.y) / featuremap_height);
//y_min
locData[idxBox * one_depth + idxCol * featuremap_width + idxRow] = fmaxf((float)0, fminf(locData[idxBox * one_depth + idxCol * featuremap_width + idxRow], (float)original_image_h));
//y_max
locData[(idxBox + 2)* one_depth + idxCol * featuremap_width + idxRow] = fmaxf((float)0, fminf(locData[(idxBox + 2) * one_depth + idxCol * featuremap_width + idxRow], (float)original_image_h));
//x_min
locData[(idxBox + 1)* one_depth + idxCol * featuremap_width + idxRow] = fmaxf((float)0, fminf(locData[(idxBox + 1) * one_depth + idxCol * featuremap_width + idxRow], (float)original_image_w));
//x_max
locData[(idxBox + 3)* one_depth + idxCol * featuremap_width + idxRow] = fmaxf((float)0, fminf(locData[(idxBox + 3) * one_depth + idxCol * featuremap_width + idxRow], (float)original_image_w));
}
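// Copy one feature layer's per-anchor values into the concatenated all-layer buffer, starting at column data_startXpoint.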
__global__ void sum_boxes(float *Data, float *Data_all, int data_startXpoint, int y_axis, int box_total)
{
//int idxCol = threadIdx.y;
int idxPerThread = threadIdx.x + threadIdx.y * blockDim.x;
int idxTotal = idxPerThread + blockDim.x * blockDim.y * blockIdx.x;
for (int i = 0; i < y_axis; i++) {
Data_all[i * (box_total) + idxTotal + data_startXpoint] = Data[blockIdx.x * (blockDim.x * blockDim.y) * (y_axis) + i * (blockDim.x * blockDim.y) + idxPerThread];
}
}
//void softmax(float* srcData, float* dstData, int anchor_num, int channel, int h, int w) {
// cout << fixed;
// cout.precision(7);
// float* scoreSum = new float[anchor_num * h *w];
// memset(scoreSum, 0, sizeof(float)*anchor_num * h *w);
// float* srcexp = new float[channel * h * w];
// memset(srcexp, 0, sizeof(float)*channel * h *w);
//
// for (int k = 0; k < channel; ++k) {
// for (int j = 0; j < h; ++j) {
// for (int i = 0; i < w; ++i) {
// srcexp[k* h *w + j * w + i] = expf((float)srcData[k* h *w + j * w + i]);
// //srcexp[k* h *w + j * w + i] = srcData[k* h *w + j * w + i];
// if (k % 9 == 0) {
// srcexp[k* h *w + j * w + i] = 0;
// }
// //cout << srcexp[k* h *w + j * w + i] << "\t";
// } //cout << endl;
// }//cout << "=============" << endl;
// }
//
// for (int q = 0; q < anchor_num; ++q) {
// for (int j = 0; j < h; ++j) {
// for (int i = 0; i < w; ++i) {
// for (int k = 0; k < 9; ++k) {
// scoreSum[q * h * w + j*w + i] += srcexp[q * 9 * h * w + k* h *w + j * w + i];
// }
// }
// }
// }
//
// for (int q = 0; q < anchor_num; ++q) {
// for (int k = 0; k < 9; ++k) {
// for (int j = 0; j < h; ++j) {
// for (int i = 0; i <w; ++i) {
// dstData[q*9* h * w + k* h *w + j * w + i] = srcexp[q * 9 * h * w + k* h *w + j * w + i] / scoreSum[q *h * w + j * w + i];
// }
// }
// }
// }
//
//
//
//}
void remove_background(float *confData_tmp, int num_anchors, int num_classes, int image_width, dim3 threads_per_block, dim3 num_of_blocks)
{
remove_background << <num_of_blocks, threads_per_block >> > (confData_tmp, num_anchors, num_classes, image_width);
return;
}
void encode_locData(float *locData, int num_anchors, float* anchorShape, int box_code, int featuremap_height, int featuremap_width, int original_image_height, int original_image_width, int count_layer, dim3 threads_per_block, dim3 num_of_blocks)
{
encode_locData << < num_of_blocks, threads_per_block >> > (locData, num_anchors, anchorShape, box_code, featuremap_height, featuremap_width, original_image_height, original_image_width, count_layer);
hipDeviceSynchronize();
dim3 num_of_blocks_sumData(1, num_anchors, 1);
sum_encodedData << <num_of_blocks_sumData, threads_per_block >> > (locData, featuremap_width, featuremap_height, box_code);
return;
}
void clip_window(float *locData, int num_anchors, int box_code, int featuremap_height, int featuremap_width, int original_image_h, int original_image_w, dim3 threads_per_block, dim3 num_of_blocks)
{
clip << < num_of_blocks, threads_per_block >> > ( locData, num_anchors, box_code, featuremap_height, featuremap_width, original_image_h, original_image_w);
return;
}
void sum_boxes(float *locData, float * confData, float *locData_all, float *confData_all, int class_num, int *box_featuremap_size, int *anchor_num, int box_index, int box_code, int box_total) {
dim3 conf_blocks(anchor_num[box_index], 1, 1);
dim3 conf_threads(box_featuremap_size[box_index], box_featuremap_size[box_index], 1);
int data_startPoint = 0;
for (int i = 0; i < box_index; i++)
{
data_startPoint += box_featuremap_size[i] * box_featuremap_size[i] * anchor_num[i];
}
int data_startPoint_conf = data_startPoint;
int data_startPoint_loc = data_startPoint;
sum_boxes << <conf_blocks, conf_threads >> > (confData, confData_all, data_startPoint_conf, class_num+1, box_total);
hipDeviceSynchronize();
sum_boxes << < conf_blocks, conf_threads >> > (locData, locData_all, data_startPoint_loc, box_code, box_total);
hipDeviceSynchronize();
return;
}
int descending(const void* a, const void* b)
{
if (*(int *)a < *(int *)b)
return 1;
else if (*(int *)a > *(int *)b)
return -1;
else
return 0;
}
int find_index(float *data, int size, float key) {
int low, middle, high;
low = 0;
high = size - 1;
while (low <= high) {
middle = (low + high) / 2;
if (key == data[middle]) {
return middle;
}
else if (key > data[middle]) {
high = middle - 1;
}
else { low = middle + 1; }
}
	return -1; // key not found (data is assumed to be sorted in descending order)
}
float rectangleSize(float *box_offset, int index, int box_total) {
float box_h = box_offset[2 * box_total + index] - box_offset[0 * box_total + index];
float box_w = box_offset[3 * box_total + index] - box_offset[1 * box_total + index];
return box_h * box_w;
}
float middleValue(float * box_offset, int index, int box_total) {
float middle_y = (box_offset[2 * box_total + index] + box_offset[0 * box_total + index]) *0.5;
float middle_x = (box_offset[3 * box_total + index] + box_offset[1 * box_total + index]) *0.5;
return middle_y, middle_x;
}
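// Intersection-over-union of two boxes stored in planar [y_min, x_min, y_max, x_max] layout with stride box_total.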
float iou(float *box_offset, int index, int next_index, int box_total) {
float box_h = 0, box_w = 0, nextBox_h = 0, nextBox_w = 0;
float intersec_xMin = 0, intersec_xMax = 0, intersec_yMin = 0, intersec_yMax = 0;
float intersec_w, intersec_h;
intersec_xMin = fmaxf(box_offset[box_total * 1 + index], box_offset[box_total * 1 + next_index]);
intersec_yMin = fmaxf(box_offset[box_total * 0 + index], box_offset[box_total * 0 + next_index]);
intersec_xMax = fminf(box_offset[box_total * 3 + index], box_offset[box_total * 3 + next_index]);
intersec_yMax = fminf(box_offset[box_total * 2 + index], box_offset[box_total * 2 + next_index]);
float intersec_area = (fmaxf(0, (intersec_yMax - intersec_yMin)) * fmaxf(0, (intersec_xMax - intersec_xMin)));
float box_area = rectangleSize(box_offset, index, box_total);
float nextBox_area = rectangleSize(box_offset, next_index, box_total);
return (intersec_area / (box_area + nextBox_area - intersec_area));
}
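// Greedy non-maximum suppression: scanning address_index from the back, erase any box
// whose IoU with an earlier entry exceeds the threshold.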
vector<int> nms(float * box_loc, vector<pair<int,int>> & address_index,
const float & threshold, int box_total)
{
int last;
int i;
vector<int> pick;
vector<int> deleteIdxs;
vector<pair<int,int>>::iterator iter = address_index.begin();
// keep looping while some indexes still remain in the indexes list
for (int k = 0; k < address_index.size() - 1; k++) {
last = address_index.size() - 1 - k;
i = address_index[last].first;
for (int j = 0; j < last ; j++)
{
auto iou_result = iou(box_loc, i, address_index[j].first, box_total);
if (iou_result > threshold)
{
deleteIdxs.push_back(last);
break;
}
}
}
for (int k = 0; k < deleteIdxs.size(); k++) {
iter += deleteIdxs[k];
address_index.erase(iter);
iter = address_index.begin();
}
vector<int>().swap(pick);
vector<int>().swap(deleteIdxs);
return pick;
}
bool compare(const pair<int, int>& a, const pair<int, int>& b) {
return a.second < b.second;
}
vector<int> sort_by_sequence(string* result_word, float *box_loc, vector<pair<int, int>>& address_index, int box_total) {
/*box_loc[ 0 * box_total + index] : Y_topleft
--box_loc[ 1 * box_total + index] : X_topleft
--box_loc[ 2 * box_total + index] : Y_bottomright
--box_loc[ 3 * box_total + index] : X_bottomright */
vector<pair<float, float>> box_leftTop;
const float box_height = box_loc[2 * box_total + address_index[0].first] - box_loc[0 * box_total + address_index[0].first];
float threshold_lineBreak = 0;
//box_leftTop.first : Y_topleft, box_leftTop.second : X_topleft
for (int i = 0; i < address_index.size(); i++) {
box_leftTop.push_back(pair<float, float>(box_loc[0 * box_total + address_index[i].first], box_loc[1 * box_total + address_index[i].first]));
}
//set the threshold_linebreak
for (int i = 0; i < address_index.size(); i++) {
//cout << box_loc[2 * box_total + address_index[i].first] << " " << box_loc[0 * box_total + address_index[(i + 1)%address_index.size()].first] << endl;
if (box_loc[2 * box_total + address_index[i].first] < box_loc[0 * box_total + address_index[(i + 1) % address_index.size()].first]) {
threshold_lineBreak = (box_loc[0 * box_total + address_index[(i + 1) % address_index.size()].first] + box_loc[0 * box_total + address_index[i].first]) / 2;
break;
}
}
//sort box_leftTop by ascending(X_topleft)
sort(box_leftTop.begin(), box_leftTop.end(), compare);
float last_value = box_leftTop[box_leftTop.size() - 2].second;
float tmp[2] = { 0.0, 0.0 };
int k = 0;
vector<int> secondLine;
vector<pair<float, float>>::iterator iter = box_leftTop.begin();
for (int i = 0; i < address_index.size(); i++) {
if (box_leftTop[i].first > threshold_lineBreak) {
secondLine.push_back(i);
}
}
//line break process
for (int i = 0; i < secondLine.size(); i++) {
iter += secondLine[i] - k;
tmp[0] = box_leftTop[secondLine[i] - k].first;
tmp[1] = box_leftTop[secondLine[i] - k].second;
box_leftTop.erase(iter);
box_leftTop.push_back(pair<float, float>(tmp[0], tmp[1]));
iter = box_leftTop.begin();
k += 1;
}
vector<int> sequence;
for (int i = 0; i < box_leftTop.size(); i++) {
//cout << box_leftTop[i].first << " , " << box_leftTop[i].second << endl;
for (int j = 0; j < address_index.size(); j++) {
if (box_loc[0 * box_total + address_index[j].first] == box_leftTop[i].first) {
sequence.push_back(address_index[j].second);
}
}
}
vector<int>().swap(secondLine);
vector<pair<float,float>>().swap(box_leftTop);
return sequence;
}
| 0e03c0113a4884724d3c400b5ce7aa008b9b7747.cu |
#ifndef __CUDACC_RTC__
#define __CUDACC_RTC__
#endif
#include <device_functions.h>
#include <iostream>
#include <math.h>
#include <cuda.h>
#include "postprocess.cuh"
#include "error_util.h"
using namespace std;
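// Suppress the background class: for every anchor, force its confidence scores to -1000 so it never wins.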
__global__ void remove_background(float *confData_tmp, int num_anchors, int num_classes, int image_width)
{
int idxRow = threadIdx.x + blockDim.x*blockIdx.x;
int idxCol = threadIdx.y + blockDim.y*blockIdx.y;
int backgroundCol = 0;
if (idxRow < image_width && idxCol < image_width)
{
for (int i = 0; i < num_anchors; i++)
{
backgroundCol = image_width * i * (num_classes+1);
confData_tmp[(idxCol+backgroundCol)*image_width + idxRow] = -1000.0;
}
}
}
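// Decode per-anchor box regressions (y_center, x_center, height, width) into absolute coordinates,
// using the layer's anchor shapes and scale factors 0.1/0.2, scaled to the original image size.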
__global__ void encode_locData(float *locData, int num_anchors, float* anchorShape, int box_code, int featuremap_height, int featuremap_width, int original_image_height, int original_image_width, int count_layer)
{
__shared__ float cache[12000];
int idxRow = threadIdx.x + blockDim.x * blockIdx.x;
int idxCol = threadIdx.y + blockDim.y * blockIdx.y;
//int channels = num_anchors * box_code;
int depth = idxCol / featuremap_height;
int idxCol_eachchannel = idxCol % featuremap_height;
double box_xcenter = idxRow * (double)1 / featuremap_width + 0.5 * (double)1 / featuremap_width;
double box_ycenter = idxCol_eachchannel * (double)1 / featuremap_height + 0.5 * (double)1 / featuremap_height;
double box_width = (double)anchorShape[ num_anchors * 2 * count_layer + 2 * (depth / box_code) + 1];
double box_height = (double)anchorShape[num_anchors * 2 * count_layer + 2 * (depth / box_code)];
// locData= [y_center, x_center, height, width]
if (depth % box_code == 3)
{
locData[idxCol * featuremap_width + idxRow] = (expf((float)(locData[idxCol * featuremap_width + idxRow] * 0.2)) * (float)box_width) * original_image_width;
}
else if (depth % box_code ==2)
{
locData[idxCol * featuremap_width + idxRow] = (expf((float)(locData[idxCol * featuremap_width + idxRow] * 0.2)) * (float)box_height) * original_image_height;
}
else if (depth % box_code == 1)
{
locData[idxCol * featuremap_width + idxRow] = ((float)(locData[idxCol * featuremap_width + idxRow] * 0.1) * (float)box_width + (float)box_xcenter) * original_image_width;
}
else if (depth % box_code == 0)
{
locData[idxCol * featuremap_width + idxRow] = ((float)(locData[idxCol * featuremap_width + idxRow] * 0.1) * (float)box_height + (float)box_ycenter) * original_image_height;
}
}
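// Convert decoded (y_center, x_center, height, width) boxes to corner form (y_min, x_min, y_max, x_max) in place.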
__global__ void sum_encodedData(float* locData, int featuremap_width, int featuremap_height, int box_code)
{
__shared__ float cache[12000];
int one_depth = featuremap_width*featuremap_height;
int idxCol = (threadIdx.y + blockDim.y * blockIdx.y) % featuremap_height;
int idxRow = threadIdx.x + blockDim.x * blockIdx.x;
int idxBox = 4 * ((threadIdx.y + blockDim.y * blockIdx.y) / featuremap_height);
cache[idxBox * one_depth + idxCol * featuremap_width + idxRow] = locData[one_depth * idxBox + idxCol * featuremap_width + idxRow] - locData[one_depth *(idxBox + 2) + idxCol*featuremap_width + idxRow] * 0.5; //y_min
cache[(1 + idxBox) * one_depth + idxCol * featuremap_width + idxRow] = locData[one_depth * (idxBox + 1) + idxCol * featuremap_width + idxRow] - locData[one_depth *(idxBox + 3) + idxCol*featuremap_width + idxRow] * 0.5; //x_min
cache[(2 + idxBox) * one_depth + idxCol * featuremap_width + idxRow] = locData[one_depth * idxBox + idxCol * featuremap_width + idxRow] + locData[one_depth *(idxBox + 2) + idxCol*featuremap_width + idxRow] * 0.5; //y_max
cache[(3 + idxBox) * one_depth + idxCol * featuremap_width + idxRow] = locData[one_depth * (idxBox + 1) + idxCol * featuremap_width + idxRow] + locData[one_depth *(idxBox + 3) + idxCol*featuremap_width + idxRow] * 0.5; //x_max
__syncthreads();
locData[idxBox * one_depth + idxCol * featuremap_width + idxRow] = cache[idxBox * one_depth + idxCol * featuremap_width + idxRow];
locData[(1 + idxBox) * one_depth + idxCol * featuremap_width + idxRow] = cache[(1 + idxBox) * one_depth + idxCol * featuremap_width + idxRow];
locData[(2 + idxBox) * one_depth + idxCol * featuremap_width + idxRow] = cache[(2 + idxBox) * one_depth + idxCol * featuremap_width + idxRow];
locData[(3 + idxBox) * one_depth + idxCol * featuremap_width + idxRow] = cache[(3 + idxBox) * one_depth + idxCol * featuremap_width + idxRow];
}
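// Clamp box corners to the original image bounds.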
__global__ void clip(float *locData, int num_anchors, int box_code, int featuremap_height, int featuremap_width, int original_image_h, int original_image_w)
{
int one_depth = featuremap_width*featuremap_height;
int idxCol = (threadIdx.y + blockDim.y * blockIdx.y) % featuremap_height;
int idxRow = threadIdx.x + blockDim.x * blockIdx.x;
int idxBox = 4 * ((threadIdx.y + blockDim.y * blockIdx.y) / featuremap_height);
//y_min
locData[idxBox * one_depth + idxCol * featuremap_width + idxRow] = fmaxf((float)0, fminf(locData[idxBox * one_depth + idxCol * featuremap_width + idxRow], (float)original_image_h));
//y_max
locData[(idxBox + 2)* one_depth + idxCol * featuremap_width + idxRow] = fmaxf((float)0, fminf(locData[(idxBox + 2) * one_depth + idxCol * featuremap_width + idxRow], (float)original_image_h));
//x_min
locData[(idxBox + 1)* one_depth + idxCol * featuremap_width + idxRow] = fmaxf((float)0, fminf(locData[(idxBox + 1) * one_depth + idxCol * featuremap_width + idxRow], (float)original_image_w));
//x_max
locData[(idxBox + 3)* one_depth + idxCol * featuremap_width + idxRow] = fmaxf((float)0, fminf(locData[(idxBox + 3) * one_depth + idxCol * featuremap_width + idxRow], (float)original_image_w));
}
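// Copy one feature layer's per-anchor values into the concatenated all-layer buffer, starting at column data_startXpoint.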
__global__ void sum_boxes(float *Data, float *Data_all, int data_startXpoint, int y_axis, int box_total)
{
//int idxCol = threadIdx.y;
int idxPerThread = threadIdx.x + threadIdx.y * blockDim.x;
int idxTotal = idxPerThread + blockDim.x * blockDim.y * blockIdx.x;
for (int i = 0; i < y_axis; i++) {
Data_all[i * (box_total) + idxTotal + data_startXpoint] = Data[blockIdx.x * (blockDim.x * blockDim.y) * (y_axis) + i * (blockDim.x * blockDim.y) + idxPerThread];
}
}
//void softmax(float* srcData, float* dstData, int anchor_num, int channel, int h, int w) {
// cout << fixed;
// cout.precision(7);
// float* scoreSum = new float[anchor_num * h *w];
// memset(scoreSum, 0, sizeof(float)*anchor_num * h *w);
// float* srcexp = new float[channel * h * w];
// memset(srcexp, 0, sizeof(float)*channel * h *w);
//
// for (int k = 0; k < channel; ++k) {
// for (int j = 0; j < h; ++j) {
// for (int i = 0; i < w; ++i) {
// srcexp[k* h *w + j * w + i] = expf((float)srcData[k* h *w + j * w + i]);
// //srcexp[k* h *w + j * w + i] = srcData[k* h *w + j * w + i];
// if (k % 9 == 0) {
// srcexp[k* h *w + j * w + i] = 0;
// }
// //cout << srcexp[k* h *w + j * w + i] << "\t";
// } //cout << endl;
// }//cout << "=============" << endl;
// }
//
// for (int q = 0; q < anchor_num; ++q) {
// for (int j = 0; j < h; ++j) {
// for (int i = 0; i < w; ++i) {
// for (int k = 0; k < 9; ++k) {
// scoreSum[q * h * w + j*w + i] += srcexp[q * 9 * h * w + k* h *w + j * w + i];
// }
// }
// }
// }
//
// for (int q = 0; q < anchor_num; ++q) {
// for (int k = 0; k < 9; ++k) {
// for (int j = 0; j < h; ++j) {
// for (int i = 0; i <w; ++i) {
// dstData[q*9* h * w + k* h *w + j * w + i] = srcexp[q * 9 * h * w + k* h *w + j * w + i] / scoreSum[q *h * w + j * w + i];
// }
// }
// }
// }
//
//
//
//}
void remove_background(float *confData_tmp, int num_anchors, int num_classes, int image_width, dim3 threads_per_block, dim3 num_of_blocks)
{
remove_background << <num_of_blocks, threads_per_block >> > (confData_tmp, num_anchors, num_classes, image_width);
return;
}
void encode_locData(float *locData, int num_anchors, float* anchorShape, int box_code, int featuremap_height, int featuremap_width, int original_image_height, int original_image_width, int count_layer, dim3 threads_per_block, dim3 num_of_blocks)
{
encode_locData << < num_of_blocks, threads_per_block >> > (locData, num_anchors, anchorShape, box_code, featuremap_height, featuremap_width, original_image_height, original_image_width, count_layer);
cudaDeviceSynchronize();
dim3 num_of_blocks_sumData(1, num_anchors, 1);
sum_encodedData << <num_of_blocks_sumData, threads_per_block >> > (locData, featuremap_width, featuremap_height, box_code);
return;
}
void clip_window(float *locData, int num_anchors, int box_code, int featuremap_height, int featuremap_width, int original_image_h, int original_image_w, dim3 threads_per_block, dim3 num_of_blocks)
{
clip << < num_of_blocks, threads_per_block >> > ( locData, num_anchors, box_code, featuremap_height, featuremap_width, original_image_h, original_image_w);
return;
}
void sum_boxes(float *locData, float * confData, float *locData_all, float *confData_all, int class_num, int *box_featuremap_size, int *anchor_num, int box_index, int box_code, int box_total) {
dim3 conf_blocks(anchor_num[box_index], 1, 1);
dim3 conf_threads(box_featuremap_size[box_index], box_featuremap_size[box_index], 1);
int data_startPoint = 0;
for (int i = 0; i < box_index; i++)
{
data_startPoint += box_featuremap_size[i] * box_featuremap_size[i] * anchor_num[i];
}
int data_startPoint_conf = data_startPoint;
int data_startPoint_loc = data_startPoint;
sum_boxes << <conf_blocks, conf_threads >> > (confData, confData_all, data_startPoint_conf, class_num+1, box_total);
cudaDeviceSynchronize();
sum_boxes << < conf_blocks, conf_threads >> > (locData, locData_all, data_startPoint_loc, box_code, box_total);
cudaDeviceSynchronize();
return;
}
int descending(const void* a, const void* b)
{
if (*(int *)a < *(int *)b)
return 1;
else if (*(int *)a > *(int *)b)
return -1;
else
return 0;
}
int find_index(float *data, int size, float key) {
int low, middle, high;
low = 0;
high = size - 1;
while (low <= high) {
middle = (low + high) / 2;
if (key == data[middle]) {
return middle;
}
else if (key > data[middle]) {
high = middle - 1;
}
else { low = middle + 1; }
}
	return -1; // key not found (data is assumed to be sorted in descending order)
}
float rectangleSize(float *box_offset, int index, int box_total) {
float box_h = box_offset[2 * box_total + index] - box_offset[0 * box_total + index];
float box_w = box_offset[3 * box_total + index] - box_offset[1 * box_total + index];
return box_h * box_w;
}
float middleValue(float * box_offset, int index, int box_total) {
float middle_y = (box_offset[2 * box_total + index] + box_offset[0 * box_total + index]) *0.5;
float middle_x = (box_offset[3 * box_total + index] + box_offset[1 * box_total + index]) *0.5;
return middle_y, middle_x;
}
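// Intersection-over-union of two boxes stored in planar [y_min, x_min, y_max, x_max] layout with stride box_total.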
float iou(float *box_offset, int index, int next_index, int box_total) {
float box_h = 0, box_w = 0, nextBox_h = 0, nextBox_w = 0;
float intersec_xMin = 0, intersec_xMax = 0, intersec_yMin = 0, intersec_yMax = 0;
float intersec_w, intersec_h;
intersec_xMin = fmaxf(box_offset[box_total * 1 + index], box_offset[box_total * 1 + next_index]);
intersec_yMin = fmaxf(box_offset[box_total * 0 + index], box_offset[box_total * 0 + next_index]);
intersec_xMax = fminf(box_offset[box_total * 3 + index], box_offset[box_total * 3 + next_index]);
intersec_yMax = fminf(box_offset[box_total * 2 + index], box_offset[box_total * 2 + next_index]);
float intersec_area = (fmaxf(0, (intersec_yMax - intersec_yMin)) * fmaxf(0, (intersec_xMax - intersec_xMin)));
float box_area = rectangleSize(box_offset, index, box_total);
float nextBox_area = rectangleSize(box_offset, next_index, box_total);
return (intersec_area / (box_area + nextBox_area - intersec_area));
}
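// Greedy non-maximum suppression: scanning address_index from the back, erase any box
// whose IoU with an earlier entry exceeds the threshold.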
vector<int> nms(float * box_loc, vector<pair<int,int>> & address_index,
const float & threshold, int box_total)
{
int last;
int i;
vector<int> pick;
vector<int> deleteIdxs;
vector<pair<int,int>>::iterator iter = address_index.begin();
// keep looping while some indexes still remain in the indexes list
for (int k = 0; k < address_index.size() - 1; k++) {
last = address_index.size() - 1 - k;
i = address_index[last].first;
for (int j = 0; j < last ; j++)
{
auto iou_result = iou(box_loc, i, address_index[j].first, box_total);
if (iou_result > threshold)
{
deleteIdxs.push_back(last);
break;
}
}
}
for (int k = 0; k < deleteIdxs.size(); k++) {
iter += deleteIdxs[k];
address_index.erase(iter);
iter = address_index.begin();
}
vector<int>().swap(pick);
vector<int>().swap(deleteIdxs);
return pick;
}
bool compare(const pair<int, int>& a, const pair<int, int>& b) {
return a.second < b.second;
}
vector<int> sort_by_sequence(string* result_word, float *box_loc, vector<pair<int, int>>& address_index, int box_total) {
/*box_loc[ 0 * box_total + index] : Y_topleft
--box_loc[ 1 * box_total + index] : X_topleft
--box_loc[ 2 * box_total + index] : Y_bottomright
--box_loc[ 3 * box_total + index] : X_bottomright */
vector<pair<float, float>> box_leftTop;
const float box_height = box_loc[2 * box_total + address_index[0].first] - box_loc[0 * box_total + address_index[0].first];
float threshold_lineBreak = 0;
//box_leftTop.first : Y_topleft, box_leftTop.second : X_topleft
for (int i = 0; i < address_index.size(); i++) {
box_leftTop.push_back(pair<float, float>(box_loc[0 * box_total + address_index[i].first], box_loc[1 * box_total + address_index[i].first]));
}
//set the threshold_linebreak
for (int i = 0; i < address_index.size(); i++) {
//cout << box_loc[2 * box_total + address_index[i].first] << " " << box_loc[0 * box_total + address_index[(i + 1)%address_index.size()].first] << endl;
if (box_loc[2 * box_total + address_index[i].first] < box_loc[0 * box_total + address_index[(i + 1) % address_index.size()].first]) {
threshold_lineBreak = (box_loc[0 * box_total + address_index[(i + 1) % address_index.size()].first] + box_loc[0 * box_total + address_index[i].first]) / 2;
break;
}
}
//sort box_leftTop by ascending(X_topleft)
sort(box_leftTop.begin(), box_leftTop.end(), compare);
float last_value = box_leftTop[box_leftTop.size() - 2].second;
float tmp[2] = { 0.0, 0.0 };
int k = 0;
vector<int> secondLine;
vector<pair<float, float>>::iterator iter = box_leftTop.begin();
for (int i = 0; i < address_index.size(); i++) {
if (box_leftTop[i].first > threshold_lineBreak) {
secondLine.push_back(i);
}
}
//line break process
for (int i = 0; i < secondLine.size(); i++) {
iter += secondLine[i] - k;
tmp[0] = box_leftTop[secondLine[i] - k].first;
tmp[1] = box_leftTop[secondLine[i] - k].second;
box_leftTop.erase(iter);
box_leftTop.push_back(pair<float, float>(tmp[0], tmp[1]));
iter = box_leftTop.begin();
k += 1;
}
vector<int> sequence;
for (int i = 0; i < box_leftTop.size(); i++) {
//cout << box_leftTop[i].first << " , " << box_leftTop[i].second << endl;
for (int j = 0; j < address_index.size(); j++) {
if (box_loc[0 * box_total + address_index[j].first] == box_leftTop[i].first) {
sequence.push_back(address_index[j].second);
}
}
}
vector<int>().swap(secondLine);
vector<pair<float,float>>().swap(box_leftTop);
return sequence;
}
|
31147806e3510f2c0e9dde70f7d349cac74fc65a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_hip.cuh"
#include "Calculator.h"
#include "Threads.h"
using namespace Calculator;
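// Assign each boid to its uniform-grid cell (fills d_cellId / d_boidId).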
__global__ void initializeCellsKernel ( float4 *d_boids,
int *d_boidId,
int *d_cellId,
int gridWidth,
int cellSize)
{
int boidIdx = blockDim.x*blockIdx.x + threadIdx.x;
if (boidIdx >= BOID_COUNT)
return;
Threads::initializeCellsThreadWork(boidIdx, d_boids, d_cellId, d_boidId, gridWidth, cellSize);
}
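// Record the index of the first boid of each cell in the cell-sorted arrays.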
__global__ void updateCellsBeginKernel (int *d_boidId,
int *d_cellId,
int *d_cellBegin,
int cellCount)
{
int tId = blockDim.x*blockIdx.x + threadIdx.x;
if (tId >= BOID_COUNT)
return;
Threads::updateCellsThreadWork(tId, d_cellId, d_cellBegin, cellCount);
}
__global__ void moveBoidKernel (float4 *d_boids,
float4 *d_boidsDoubleBuffer,
int *d_boidId,
int *d_cellId,
int *d_cellIdDoubleBuffer,
int *d_cellBegin,
int gridWidth,
int gridHeight,
int cellSize,
uint dt)
{
int tId = blockDim.x*blockIdx.x + threadIdx.x;
if (tId >= BOID_COUNT)
return;
Threads::moveBoidThreadWork (tId,
d_boids,
d_boidsDoubleBuffer,
d_boidId,
d_cellId,
d_cellIdDoubleBuffer,
d_cellBegin,
gridWidth,
gridHeight,
cellSize,
dt);
}
// Invokes kernel calculating updated boid's position
void moveBoidGPU(float4 *&d_boids,
float4 *&d_boidsDoubleBuffer,
uint &arraySize,
int *&d_boidId,
int *&d_cellId,
int *&d_cellIdDoubleBuffer,
int *&d_cellBegin,
int gridWidth,
int gridHeight,
int cellSize,
int cellCount,
uint dt)
{
hipLaunchKernelGGL(( moveBoidKernel), dim3(BLOCK_COUNT), dim3(256), 0, 0, d_boids, d_boidsDoubleBuffer, d_boidId, d_cellId, d_cellIdDoubleBuffer, d_cellBegin, gridWidth, gridHeight, cellSize, dt);
hipDeviceSynchronize();
hipMemcpy(d_cellId, d_cellIdDoubleBuffer, BOID_COUNT * sizeof(int), hipMemcpyDeviceToDevice);
thrust::sort_by_key(thrust::device_ptr<int>(d_cellId), thrust::device_ptr<int>(d_cellId + BOID_COUNT), thrust::device_ptr<int>(d_boidId));
hipMemset(d_cellBegin, -1, cellCount* sizeof(int));
updateCellsBeginKernel << <BLOCK_COUNT, 256 >> > (d_boidId, d_cellId, d_cellBegin, cellCount);
hipDeviceSynchronize();
hipMemcpy(d_boids, d_boidsDoubleBuffer, arraySize, hipMemcpyDeviceToDevice);
}
// Invokes kernel initializing cells
void initializeCellsGPU (float4 *&d_boids,
uint &boidArraySize,
int *&d_boidId,
int *&d_cellId,
int *&d_cellBegin,
int gridWidth,
int cellSize,
int cellCount)
{
initializeCellsKernel << <BLOCK_COUNT, 256 >> > (d_boids, d_boidId, d_cellId, gridWidth, cellSize);
hipDeviceSynchronize();
thrust::sort_by_key(thrust::device_ptr<int>(d_cellId), thrust::device_ptr<int>(d_cellId + BOID_COUNT), thrust::device_ptr<int>(d_boidId));
hipMemset(d_cellBegin, -1, cellCount * sizeof(int));
updateCellsBeginKernel << <BLOCK_COUNT, 256 >> > (d_boidId, d_cellId, d_cellBegin, cellCount);
hipDeviceSynchronize();
} | 31147806e3510f2c0e9dde70f7d349cac74fc65a.cu | #include "kernel.cuh"
#include "Calculator.h"
#include "Threads.h"
using namespace Calculator;
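// Assign each boid to its uniform-grid cell (fills d_cellId / d_boidId).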
__global__ void initializeCellsKernel ( float4 *d_boids,
int *d_boidId,
int *d_cellId,
int gridWidth,
int cellSize)
{
int boidIdx = blockDim.x*blockIdx.x + threadIdx.x;
if (boidIdx >= BOID_COUNT)
return;
Threads::initializeCellsThreadWork(boidIdx, d_boids, d_cellId, d_boidId, gridWidth, cellSize);
}
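// Record the index of the first boid of each cell in the cell-sorted arrays.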
__global__ void updateCellsBeginKernel (int *d_boidId,
int *d_cellId,
int *d_cellBegin,
int cellCount)
{
int tId = blockDim.x*blockIdx.x + threadIdx.x;
if (tId >= BOID_COUNT)
return;
Threads::updateCellsThreadWork(tId, d_cellId, d_cellBegin, cellCount);
}
__global__ void moveBoidKernel (float4 *d_boids,
float4 *d_boidsDoubleBuffer,
int *d_boidId,
int *d_cellId,
int *d_cellIdDoubleBuffer,
int *d_cellBegin,
int gridWidth,
int gridHeight,
int cellSize,
uint dt)
{
int tId = blockDim.x*blockIdx.x + threadIdx.x;
if (tId >= BOID_COUNT)
return;
Threads::moveBoidThreadWork (tId,
d_boids,
d_boidsDoubleBuffer,
d_boidId,
d_cellId,
d_cellIdDoubleBuffer,
d_cellBegin,
gridWidth,
gridHeight,
cellSize,
dt);
}
// Invokes kernel calculating updated boid's position
void moveBoidGPU(float4 *&d_boids,
float4 *&d_boidsDoubleBuffer,
uint &arraySize,
int *&d_boidId,
int *&d_cellId,
int *&d_cellIdDoubleBuffer,
int *&d_cellBegin,
int gridWidth,
int gridHeight,
int cellSize,
int cellCount,
uint dt)
{
moveBoidKernel<<<BLOCK_COUNT, 256>>>(d_boids, d_boidsDoubleBuffer, d_boidId, d_cellId, d_cellIdDoubleBuffer, d_cellBegin, gridWidth, gridHeight, cellSize, dt);
cudaDeviceSynchronize();
cudaMemcpy(d_cellId, d_cellIdDoubleBuffer, BOID_COUNT * sizeof(int), cudaMemcpyDeviceToDevice);
thrust::sort_by_key(thrust::device_ptr<int>(d_cellId), thrust::device_ptr<int>(d_cellId + BOID_COUNT), thrust::device_ptr<int>(d_boidId));
cudaMemset(d_cellBegin, -1, cellCount* sizeof(int));
updateCellsBeginKernel << <BLOCK_COUNT, 256 >> > (d_boidId, d_cellId, d_cellBegin, cellCount);
cudaDeviceSynchronize();
cudaMemcpy(d_boids, d_boidsDoubleBuffer, arraySize, cudaMemcpyDeviceToDevice);
}
// Invokes kernel initializing cells
void initializeCellsGPU (float4 *&d_boids,
uint &boidArraySize,
int *&d_boidId,
int *&d_cellId,
int *&d_cellBegin,
int gridWidth,
int cellSize,
int cellCount)
{
initializeCellsKernel << <BLOCK_COUNT, 256 >> > (d_boids, d_boidId, d_cellId, gridWidth, cellSize);
cudaDeviceSynchronize();
thrust::sort_by_key(thrust::device_ptr<int>(d_cellId), thrust::device_ptr<int>(d_cellId + BOID_COUNT), thrust::device_ptr<int>(d_boidId));
cudaMemset(d_cellBegin, -1, cellCount * sizeof(int));
updateCellsBeginKernel << <BLOCK_COUNT, 256 >> > (d_boidId, d_cellId, d_cellBegin, cellCount);
cudaDeviceSynchronize();
} |
055b17fad1dbc7a8cc651b0458ae9f76731c1f9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cutil_math.h>
__constant__ unsigned char c_perm_3d[256];
__shared__ unsigned char s_perm_3d[256]; // shared memory copy of permutation array
//unsigned char* d_perm_parts=NULL; // global memory copy of permutation array
// host version of permutation array
const static unsigned char h_perm[] = { 151, 160, 137, 91, 90, 15,
131, 13, 201, 95, 96, 53, 194, 233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21, 10, 23,
190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32, 57, 177, 33,
88, 237, 149, 56, 87, 174, 20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48, 27, 166,
77, 146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244,
102, 143, 54, 65, 25, 63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196,
135, 130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64, 52, 217, 226, 250, 124, 123,
5, 202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28, 42,
223, 183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101, 155, 167, 43, 172, 9,
129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232, 178, 185, 112, 104, 218, 246, 97, 228,
251, 34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249, 14, 239, 107,
49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176, 115, 121, 50, 45, 127, 4, 150, 254,
138, 236, 205, 93, 222, 114, 67, 29, 24, 72, 243, 141, 128, 195, 78, 66, 215, 61, 156, 180
};
__device__ inline int perm(int i) { return(c_perm_3d[i & 0xff]); }
__device__ inline float fade(float t) { return t * t * t * (t * (t * 6.f - 15.f) + 10.f); }
__device__ inline float lerpP(float t, float a, float b) { return a + t * (b - a); }
__device__ inline float grad(int hash, float x, float y, float z) {
int h = hash & 15; // CONVERT LO 4 BITS OF HASH CODE
float u = h<8 ? x : y, // INTO 12 GRADIENT DIRECTIONS.
v = h<4 ? y : h == 12 || h == 14 ? x : z;
return ((h & 1) == 0 ? u : -u) + ((h & 2) == 0 ? v : -v);
}
__device__ float inoise_parts2(float x, float y, float z) {
int X = ((int)floorf(x)) & 255, // FIND UNIT CUBE THAT
Y = ((int)floorf(y)) & 255, // CONTAINS POINT.
Z = ((int)floorf(z)) & 255;
x -= floorf(x); // FIND RELATIVE X,Y,Z
y -= floorf(y); // OF POINT IN CUBE.
z -= floorf(z);
float u = fade(x), // COMPUTE FADE CURVES
v = fade(y), // FOR EACH OF X,Y,Z.
w = fade(z);
int A = perm(X) + Y, AA = perm(A) + Z, AB = perm(A + 1) + Z, // HASH COORDINATES OF
B = perm(X + 1) + Y, BA = perm(B) + Z, BB = perm(B + 1) + Z; // THE 8 CUBE CORNERS,
return lerpP(w, lerpP(v, lerpP(u, grad(perm(AA), x, y, z), // AND ADD
grad(perm(BA), x - 1.f, y, z)), // BLENDED
lerpP(u, grad(perm(AB), x, y - 1.f, z), // RESULTS
grad(perm(BB), x - 1.f, y - 1.f, z))), // FROM 8
lerpP(v, lerpP(u, grad(perm(AA + 1), x, y, z - 1.f), // CORNERS
grad(perm(BA + 1), x - 1.f, y, z - 1.f)), // OF CUBE
lerpP(u, grad(perm(AB + 1), x, y - 1.f, z - 1.f),
grad(perm(BB + 1), x - 1.f, y - 1.f, z - 1.f))));
#ifdef ORIG
return(perm(X));
#endif
}
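// Fractal (fBm) noise: sum of 'octaves' Perlin octaves, with frequency scaled by lacunarity
// and amplitude scaled by gain at each octave.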
__device__ inline float noise1D(float x, float y, float z, int octaves,
float lacunarity, float gain, float freq, float amp)
{
float sum = 0.f;
for (int i = 0; i<octaves; i++) {
sum += inoise_parts2(x*freq, y*freq, z*freq)*amp;
freq *= lacunarity;
amp *= gain;
}
return sum;
}
__global__ void dampVelKernel(float3* vel, float damping, float dt, int nParts)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
int n = x;
if (n<nParts) {
vel[n] = (1 - damping*dt)*vel[n];
}
}
__global__ void addGravityKernel(float3* vel, float3 gravityDir, float gravityStrength, float dt, int nParts)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
int n = x;
if (n<nParts) {
vel[n] = vel[n] + gravityDir*gravityStrength*dt;
}
}
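// Perturb each particle's velocity with per-axis fractal noise sampled at the particle's offset position.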
__global__ void addTurbulenceKernel(float3* vel, float3* pos,
float3 noiseAmp, float3 noiseOffset, int noiseOct, float noiseLac, float noiseFreq,
float dt, int nParts)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
int n = x;
if (n<nParts) {
float3 currVel = vel[n];
float3 noise = make_float3(0,0,0);
if (noiseAmp.x != 0)
noise.x = noiseAmp.x*noise1D(pos[n].x+noiseOffset.x, pos[n].y+noiseOffset.y, pos[n].z+noiseOffset.z,
noiseOct, noiseLac, 0.5, noiseFreq,1);
if (noiseAmp.y != 0)
noise.y = noiseAmp.y*noise1D(pos[n].x+noiseOffset.x+2000, pos[n].y+noiseOffset.y, pos[n].z+noiseOffset.z,
noiseOct, noiseLac, 0.5, noiseFreq,1);
if (noiseAmp.x != 0)
noise.z += noiseAmp.z*noise1D(pos[n].x+noiseOffset.x+5000, pos[n].y+noiseOffset.y, pos[n].z+noiseOffset.z,
noiseOct, noiseLac, 0.5, noiseFreq,1);
vel[n] = vel[n] + noise*dt;
}
}
__global__ void integrateParticlesKernel(float3* pos, float3* vel, float* age, float* life,
float4* colour, float opacity, float3 col1, float3 col2, float dt, int nParts)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
int n = x;
if (n<nParts) {
pos[n] = pos[n] + vel[n]*dt;
age[n] = age[n] + dt;
if (age[n] > life[n])
age[n] = life[n];
float ageNorm = age[n]/life[n];
float3 col = lerp(col1,col2,ageNorm);
float alpha = opacity*(1-pow(age[n]/life[n],2));
colour[n] = make_float4(col.x,col.y,col.z,alpha);
}
}
__global__ void initNewParticlesKernel(float3* pos, float3* vel, float* age, float* life,
float3 initPos, float3 initVel, float radVelAmp,
float3 noiseVelAmp, float3 noiseVelOffset, int noiseVelOct, float noiseVelLac, float noiseVelFreq,
float initLife, float time, int nParts)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
int n = x;
//__device__ inline float noise1D(float x, float y, float z, int octaves,
// float lacunarity, float gain, float freq, float amp)
if (n<nParts) {
float3 radVel = radVelAmp * (pos[n] - initPos);
vel[n] = initVel + radVel;
if (noiseVelAmp.x != 0)
vel[n].x += noiseVelAmp.x*noise1D(pos[n].x+noiseVelOffset.x, pos[n].y+noiseVelOffset.y, pos[n].z+noiseVelOffset.z,
noiseVelOct, noiseVelLac, 0.5, noiseVelFreq,1);
if (noiseVelAmp.y != 0)
vel[n].y += noiseVelAmp.y*noise1D(pos[n].x+noiseVelOffset.x+2000, pos[n].y+noiseVelOffset.y, pos[n].z+noiseVelOffset.z,
noiseVelOct, noiseVelLac, 0.5, noiseVelFreq,1);
if (noiseVelAmp.x != 0)
vel[n].z += noiseVelAmp.z*noise1D(pos[n].x+noiseVelOffset.x+5000, pos[n].y+noiseVelOffset.y, pos[n].z+noiseVelOffset.z,
noiseVelOct, noiseVelLac, 0.5, noiseVelFreq,1);
age[n] = 0.0;
}
}
__global__ void resetParticlesKernel(float3* pos, float3* vel, float* age, float* life, int nParts)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
int n = x;
if (n<nParts) {
pos[n] = make_float3(0.0,0.0,0.0);
vel[n] = make_float3(0.0,0.0,0.0);
age[n] = 1.0;
life[n] = 1.0;
}
}
extern "C" void dampVelCu(float3* vel, float damping, float dt, int nParts){
int nthreads = min(256, nParts);
int nBlocks = nParts/nthreads + (!(nParts%nthreads)?0:1);
dim3 blocks(nBlocks, 1,1); dim3 threads(nthreads, 1, 1);
hipLaunchKernelGGL(( dampVelKernel), dim3(blocks), dim3(threads), 0, 0, vel, damping, dt, nParts);
}
extern "C" void addGravityCu(float3* vel, float3 gravityDir, float gravityStrength, float dt, int nParts){
int nthreads = min(256, nParts);
int nBlocks = nParts/nthreads + (!(nParts%nthreads)?0:1);
dim3 blocks(nBlocks, 1,1); dim3 threads(nthreads, 1, 1);
hipLaunchKernelGGL(( addGravityKernel), dim3(blocks), dim3(threads), 0, 0, vel, gravityDir, gravityStrength, dt, nParts);
}
extern "C" void addTurbulenceCu(float3* vel, float3* pos, float3 noiseAmp, float3 noiseOffset,
int noiseOct, float noiseLac, float noiseFreq, float dt, int nParts){
int nthreads = min(256, nParts);
int nBlocks = nParts/nthreads + (!(nParts%nthreads)?0:1);
dim3 blocks(nBlocks, 1,1); dim3 threads(nthreads, 1, 1);
hipLaunchKernelGGL(( addTurbulenceKernel), dim3(blocks), dim3(threads), 0, 0, vel, pos, noiseAmp, noiseOffset, noiseOct, noiseLac, noiseFreq, dt, nParts);
}
extern "C" void integrateParticlesCu(float3* pos, float3* vel, float* age, float* life,
float4* colour, float opacity, float3 col1, float3 col2,
float dt, int nParts){
int nthreads = min(256, nParts);
int nBlocks = nParts/nthreads + (!(nParts%nthreads)?0:1);
dim3 blocks(nBlocks, 1,1); dim3 threads(nthreads, 1, 1);
hipLaunchKernelGGL(( integrateParticlesKernel), dim3(blocks), dim3(threads), 0, 0, pos, vel, age, life, colour, opacity, col1, col2, dt, nParts);
}
extern "C" void resetParticlesCu(float3* pos, float3* vel, float* age, float* life, int nParts) {
int nthreads = min(256, nParts);
int nBlocks = nParts/nthreads + (!(nParts%nthreads)?0:1);
dim3 blocks(nBlocks, 1,1);
dim3 threads(nthreads, 1, 1);
hipLaunchKernelGGL(( resetParticlesKernel), dim3(blocks), dim3(threads), 0, 0, pos, vel, age, life, nParts);
}
extern "C" void initNewParticlesCu(float3* pos, float3* vel, float* age, float* life,
float3 initPos, float3 initVel, float radVelAmp,
float3 noiseVelAmp, float3 noiseVelOffset, int noiseVelOct, float noiseVelLac, float noiseVelFreq,
float initLife, float time, int nParts) {
int nthreads = min(256, nParts);
int nBlocks = nParts/nthreads + (!(nParts%nthreads)?0:1);
dim3 blocks(nBlocks, 1,1);
dim3 threads(nthreads, 1, 1);
hipMemcpyToSymbol(c_perm_3d, h_perm, sizeof(h_perm),0,hipMemcpyHostToDevice );
hipLaunchKernelGGL(( initNewParticlesKernel), dim3(blocks), dim3(threads), 0, 0, pos, vel, age, life, initPos, initVel, radVelAmp,
noiseVelAmp, noiseVelOffset, noiseVelOct, noiseVelLac, noiseVelFreq,
initLife, time, nParts);
} | 055b17fad1dbc7a8cc651b0458ae9f76731c1f9d.cu | #include <cutil_math.h>
__constant__ unsigned char c_perm_3d[256];
__shared__ unsigned char s_perm_3d[256]; // shared memory copy of permutation array
//unsigned char* d_perm_parts=NULL; // global memory copy of permutation array
// host version of permutation array
const static unsigned char h_perm[] = { 151, 160, 137, 91, 90, 15,
131, 13, 201, 95, 96, 53, 194, 233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21, 10, 23,
190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32, 57, 177, 33,
88, 237, 149, 56, 87, 174, 20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48, 27, 166,
77, 146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244,
102, 143, 54, 65, 25, 63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196,
135, 130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64, 52, 217, 226, 250, 124, 123,
5, 202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28, 42,
223, 183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101, 155, 167, 43, 172, 9,
129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232, 178, 185, 112, 104, 218, 246, 97, 228,
251, 34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249, 14, 239, 107,
49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176, 115, 121, 50, 45, 127, 4, 150, 254,
138, 236, 205, 93, 222, 114, 67, 29, 24, 72, 243, 141, 128, 195, 78, 66, 215, 61, 156, 180
};
__device__ inline int perm(int i) { return(c_perm_3d[i & 0xff]); }
__device__ inline float fade(float t) { return t * t * t * (t * (t * 6.f - 15.f) + 10.f); }
__device__ inline float lerpP(float t, float a, float b) { return a + t * (b - a); }
__device__ inline float grad(int hash, float x, float y, float z) {
int h = hash & 15; // CONVERT LO 4 BITS OF HASH CODE
float u = h<8 ? x : y, // INTO 12 GRADIENT DIRECTIONS.
v = h<4 ? y : h == 12 || h == 14 ? x : z;
return ((h & 1) == 0 ? u : -u) + ((h & 2) == 0 ? v : -v);
}
__device__ float inoise_parts2(float x, float y, float z) {
int X = ((int)floorf(x)) & 255, // FIND UNIT CUBE THAT
Y = ((int)floorf(y)) & 255, // CONTAINS POINT.
Z = ((int)floorf(z)) & 255;
x -= floorf(x); // FIND RELATIVE X,Y,Z
y -= floorf(y); // OF POINT IN CUBE.
z -= floorf(z);
float u = fade(x), // COMPUTE FADE CURVES
v = fade(y), // FOR EACH OF X,Y,Z.
w = fade(z);
int A = perm(X) + Y, AA = perm(A) + Z, AB = perm(A + 1) + Z, // HASH COORDINATES OF
B = perm(X + 1) + Y, BA = perm(B) + Z, BB = perm(B + 1) + Z; // THE 8 CUBE CORNERS,
return lerpP(w, lerpP(v, lerpP(u, grad(perm(AA), x, y, z), // AND ADD
grad(perm(BA), x - 1.f, y, z)), // BLENDED
lerpP(u, grad(perm(AB), x, y - 1.f, z), // RESULTS
grad(perm(BB), x - 1.f, y - 1.f, z))), // FROM 8
lerpP(v, lerpP(u, grad(perm(AA + 1), x, y, z - 1.f), // CORNERS
grad(perm(BA + 1), x - 1.f, y, z - 1.f)), // OF CUBE
lerpP(u, grad(perm(AB + 1), x, y - 1.f, z - 1.f),
grad(perm(BB + 1), x - 1.f, y - 1.f, z - 1.f))));
#ifdef ORIG
return(perm(X));
#endif
}
__device__ inline float noise1D(float x, float y, float z, int octaves,
float lacunarity, float gain, float freq, float amp)
{
float sum = 0.f;
for (int i = 0; i<octaves; i++) {
sum += inoise_parts2(x*freq, y*freq, z*freq)*amp;
freq *= lacunarity;
amp *= gain;
}
return sum;
}
__global__ void dampVelKernel(float3* vel, float damping, float dt, int nParts)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
int n = x;
if (n<nParts) {
vel[n] = (1 - damping*dt)*vel[n];
}
}
__global__ void addGravityKernel(float3* vel, float3 gravityDir, float gravityStrength, float dt, int nParts)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
int n = x;
if (n<nParts) {
vel[n] = vel[n] + gravityDir*gravityStrength*dt;
}
}
__global__ void addTurbulenceKernel(float3* vel, float3* pos,
float3 noiseAmp, float3 noiseOffset, int noiseOct, float noiseLac, float noiseFreq,
float dt, int nParts)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
int n = x;
if (n<nParts) {
float3 currVel = vel[n];
float3 noise = make_float3(0,0,0);
if (noiseAmp.x != 0)
noise.x = noiseAmp.x*noise1D(pos[n].x+noiseOffset.x, pos[n].y+noiseOffset.y, pos[n].z+noiseOffset.z,
noiseOct, noiseLac, 0.5, noiseFreq,1);
if (noiseAmp.y != 0)
noise.y = noiseAmp.y*noise1D(pos[n].x+noiseOffset.x+2000, pos[n].y+noiseOffset.y, pos[n].z+noiseOffset.z,
noiseOct, noiseLac, 0.5, noiseFreq,1);
if (noiseAmp.x != 0)
noise.z += noiseAmp.z*noise1D(pos[n].x+noiseOffset.x+5000, pos[n].y+noiseOffset.y, pos[n].z+noiseOffset.z,
noiseOct, noiseLac, 0.5, noiseFreq,1);
vel[n] = vel[n] + noise*dt;
}
}
__global__ void integrateParticlesKernel(float3* pos, float3* vel, float* age, float* life,
float4* colour, float opacity, float3 col1, float3 col2, float dt, int nParts)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
int n = x;
if (n<nParts) {
pos[n] = pos[n] + vel[n]*dt;
age[n] = age[n] + dt;
if (age[n] > life[n])
age[n] = life[n];
float ageNorm = age[n]/life[n];
float3 col = lerp(col1,col2,ageNorm);
float alpha = opacity*(1-pow(age[n]/life[n],2));
colour[n] = make_float4(col.x,col.y,col.z,alpha);
}
}
__global__ void initNewParticlesKernel(float3* pos, float3* vel, float* age, float* life,
float3 initPos, float3 initVel, float radVelAmp,
float3 noiseVelAmp, float3 noiseVelOffset, int noiseVelOct, float noiseVelLac, float noiseVelFreq,
float initLife, float time, int nParts)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
int n = x;
//__device__ inline float noise1D(float x, float y, float z, int octaves,
// float lacunarity, float gain, float freq, float amp)
if (n<nParts) {
float3 radVel = radVelAmp * (pos[n] - initPos);
vel[n] = initVel + radVel;
if (noiseVelAmp.x != 0)
vel[n].x += noiseVelAmp.x*noise1D(pos[n].x+noiseVelOffset.x, pos[n].y+noiseVelOffset.y, pos[n].z+noiseVelOffset.z,
noiseVelOct, noiseVelLac, 0.5, noiseVelFreq,1);
if (noiseVelAmp.y != 0)
vel[n].y += noiseVelAmp.y*noise1D(pos[n].x+noiseVelOffset.x+2000, pos[n].y+noiseVelOffset.y, pos[n].z+noiseVelOffset.z,
noiseVelOct, noiseVelLac, 0.5, noiseVelFreq,1);
if (noiseVelAmp.x != 0)
vel[n].z += noiseVelAmp.z*noise1D(pos[n].x+noiseVelOffset.x+5000, pos[n].y+noiseVelOffset.y, pos[n].z+noiseVelOffset.z,
noiseVelOct, noiseVelLac, 0.5, noiseVelFreq,1);
age[n] = 0.0;
}
}
__global__ void resetParticlesKernel(float3* pos, float3* vel, float* age, float* life, int nParts)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
int n = x;
if (n<nParts) {
pos[n] = make_float3(0.0,0.0,0.0);
vel[n] = make_float3(0.0,0.0,0.0);
age[n] = 1.0;
life[n] = 1.0;
}
}
extern "C" void dampVelCu(float3* vel, float damping, float dt, int nParts){
int nthreads = min(256, nParts);
int nBlocks = nParts/nthreads + (!(nParts%nthreads)?0:1);
dim3 blocks(nBlocks, 1,1); dim3 threads(nthreads, 1, 1);
dampVelKernel<<< blocks, threads>>>(vel, damping, dt, nParts);
}
extern "C" void addGravityCu(float3* vel, float3 gravityDir, float gravityStrength, float dt, int nParts){
int nthreads = min(256, nParts);
int nBlocks = nParts/nthreads + (!(nParts%nthreads)?0:1);
dim3 blocks(nBlocks, 1,1); dim3 threads(nthreads, 1, 1);
addGravityKernel<<< blocks, threads>>>(vel, gravityDir, gravityStrength, dt, nParts);
}
extern "C" void addTurbulenceCu(float3* vel, float3* pos, float3 noiseAmp, float3 noiseOffset,
int noiseOct, float noiseLac, float noiseFreq, float dt, int nParts){
int nthreads = min(256, nParts);
int nBlocks = nParts/nthreads + (!(nParts%nthreads)?0:1);
dim3 blocks(nBlocks, 1,1); dim3 threads(nthreads, 1, 1);
addTurbulenceKernel<<< blocks, threads>>>(vel, pos, noiseAmp, noiseOffset, noiseOct, noiseLac, noiseFreq, dt, nParts);
}
extern "C" void integrateParticlesCu(float3* pos, float3* vel, float* age, float* life,
float4* colour, float opacity, float3 col1, float3 col2,
float dt, int nParts){
int nthreads = min(256, nParts);
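	// nBlocks is the ceiling of nParts / nthreads, so every particle index in [0, nParts) is covered by one thread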
int nBlocks = nParts/nthreads + (!(nParts%nthreads)?0:1);
dim3 blocks(nBlocks, 1,1); dim3 threads(nthreads, 1, 1);
integrateParticlesKernel<<< blocks, threads>>>(pos, vel, age, life, colour, opacity, col1, col2, dt, nParts);
}
extern "C" void resetParticlesCu(float3* pos, float3* vel, float* age, float* life, int nParts) {
int nthreads = min(256, nParts);
int nBlocks = nParts/nthreads + (!(nParts%nthreads)?0:1);
dim3 blocks(nBlocks, 1,1);
dim3 threads(nthreads, 1, 1);
resetParticlesKernel<<< blocks, threads>>>(pos, vel, age, life, nParts);
}
extern "C" void initNewParticlesCu(float3* pos, float3* vel, float* age, float* life,
float3 initPos, float3 initVel, float radVelAmp,
float3 noiseVelAmp, float3 noiseVelOffset, int noiseVelOct, float noiseVelLac, float noiseVelFreq,
float initLife, float time, int nParts) {
int nthreads = min(256, nParts);
int nBlocks = nParts/nthreads + (!(nParts%nthreads)?0:1);
dim3 blocks(nBlocks, 1,1);
dim3 threads(nthreads, 1, 1);
cudaMemcpyToSymbol(c_perm_3d, h_perm, sizeof(h_perm),0,cudaMemcpyHostToDevice );
initNewParticlesKernel<<< blocks, threads>>>(pos, vel, age, life, initPos, initVel, radVelAmp,
noiseVelAmp, noiseVelOffset, noiseVelOct, noiseVelLac, noiseVelFreq,
initLife, time, nParts);
} |
727e43fb74a9c55ffea4e46c862b00654f2769bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/nullary/fill.h"
#include "cunumeric/nullary/fill_template.inl"
#include "cunumeric/cuda_help.h"
namespace cunumeric {
using namespace Legion;
template <typename ARG, typename ReadAcc>
static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
dense_kernel(size_t volume, ARG* out, ReadAcc fill_value)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= volume) return;
out[idx] = fill_value[0];
}
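// Generic path for non-dense (strided) outputs: the flat thread index is unflattened through the pitches into a multi-dimensional point before the accessor write.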
template <typename WriteAcc, typename ReadAcc, typename Pitches, typename Rect>
static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
generic_kernel(size_t volume, WriteAcc out, ReadAcc fill_value, Pitches pitches, Rect rect)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= volume) return;
auto point = pitches.unflatten(idx, rect.lo);
out[point] = fill_value[0];
}
template <typename VAL, int32_t DIM>
struct FillImplBody<VariantKind::GPU, VAL, DIM> {
void operator()(AccessorWO<VAL, DIM> out,
AccessorRO<VAL, 1> in,
const Pitches<DIM - 1>& pitches,
const Rect<DIM>& rect,
bool dense) const
{
size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (dense) {
auto outptr = out.ptr(rect);
hipLaunchKernelGGL(( dense_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, volume, outptr, in);
} else {
hipLaunchKernelGGL(( generic_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, volume, out, in, pitches, rect);
}
}
};
/*static*/ void FillTask::gpu_variant(TaskContext& context)
{
fill_template<VariantKind::GPU>(context);
}
} // namespace cunumeric
| 727e43fb74a9c55ffea4e46c862b00654f2769bd.cu | /* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/nullary/fill.h"
#include "cunumeric/nullary/fill_template.inl"
#include "cunumeric/cuda_help.h"
namespace cunumeric {
using namespace Legion;
template <typename ARG, typename ReadAcc>
static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
dense_kernel(size_t volume, ARG* out, ReadAcc fill_value)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= volume) return;
out[idx] = fill_value[0];
}
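// Generic path for non-dense (strided) outputs: the flat thread index is unflattened through the pitches into a multi-dimensional point before the accessor write.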
template <typename WriteAcc, typename ReadAcc, typename Pitches, typename Rect>
static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
generic_kernel(size_t volume, WriteAcc out, ReadAcc fill_value, Pitches pitches, Rect rect)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= volume) return;
auto point = pitches.unflatten(idx, rect.lo);
out[point] = fill_value[0];
}
template <typename VAL, int32_t DIM>
struct FillImplBody<VariantKind::GPU, VAL, DIM> {
void operator()(AccessorWO<VAL, DIM> out,
AccessorRO<VAL, 1> in,
const Pitches<DIM - 1>& pitches,
const Rect<DIM>& rect,
bool dense) const
{
size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (dense) {
auto outptr = out.ptr(rect);
dense_kernel<<<blocks, THREADS_PER_BLOCK>>>(volume, outptr, in);
} else {
generic_kernel<<<blocks, THREADS_PER_BLOCK>>>(volume, out, in, pitches, rect);
}
}
};
/*static*/ void FillTask::gpu_variant(TaskContext& context)
{
fill_template<VariantKind::GPU>(context);
}
} // namespace cunumeric
|
dd7dfea7a7c320e9252e874ccda522cea01df4d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "test_cuda_utils.h"
#include <hip/hip_runtime.h>
#include "gpu/kernel/primitives.h"
namespace gccl {
namespace {
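// Test wrapper that exposes the Copy128b device primitive as a standalone kernel, filling in the thread id and thread count from the launch configuration.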
extern "C" __global__ void Copy128bGlobal(CopyArgs args) {
int tid = threadIdx.x;
int n_threads = blockDim.x;
args.tid = tid;
args.n_threads = n_threads;
Copy128b(&args);
}
} // namespace
} // namespace gccl | dd7dfea7a7c320e9252e874ccda522cea01df4d5.cu | #include "test_cuda_utils.h"
#include <cuda_runtime.h>
#include "gpu/kernel/primitives.h"
namespace gccl {
namespace {
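// Test wrapper that exposes the Copy128b device primitive as a standalone kernel, filling in the thread id and thread count from the launch configuration.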
extern "C" __global__ void Copy128bGlobal(CopyArgs args) {
int tid = threadIdx.x;
int n_threads = blockDim.x;
args.tid = tid;
args.n_threads = n_threads;
Copy128b(&args);
}
} // namespace
} // namespace gccl |
51634413e003c05fb35d1db1f3b32c932665ea1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
//#define array = "hello"
int array[5] = {1,2,3,4,5};
//#define arrayCount = 5
int arrayCount = 5;
__global__ void MyKernel(int *array, int arrayCount)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < arrayCount)
{
array[idx] *= array[idx];
}
}
//extern "C" void launchMyKernel(int *array, int arrayCount)
extern "C" void launchMyKernel()
{
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the
// maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
hipOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,
MyKernel, 0, 0);
// Round up according to array size
gridSize = (arrayCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( MyKernel), dim3(gridSize), dim3(blockSize) , 0, 0, array, arrayCount);
hipDeviceSynchronize();
// calculate theoretical occupancy
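    // (occupancy = active warps per SM / maximum warps supported per SM)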
int maxActiveBlocks;
hipOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks,
MyKernel, blockSize,
0);
int device;
hipDeviceProp_t props;
hipGetDevice(&device);
hipGetDeviceProperties(&props, device);
float occupancy = (maxActiveBlocks * blockSize / props.warpSize) /
(float)(props.maxThreadsPerMultiProcessor /
props.warpSize);
printf("Launched blocks of size %d. Theoretical occupancy: %f\n",
blockSize, occupancy);
}
| 51634413e003c05fb35d1db1f3b32c932665ea1a.cu | #include "stdio.h"
//#define array = "hello"
int array[5] = {1,2,3,4,5};
//#define arrayCount = 5
int arrayCount = 5;
__global__ void MyKernel(int *array, int arrayCount)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < arrayCount)
{
array[idx] *= array[idx];
}
}
//extern "C" void launchMyKernel(int *array, int arrayCount)
extern "C" void launchMyKernel()
{
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the
// maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,
MyKernel, 0, 0);
// Round up according to array size
gridSize = (arrayCount + blockSize - 1) / blockSize;
MyKernel<<< gridSize, blockSize >>>(array, arrayCount);
cudaDeviceSynchronize();
// calculate theoretical occupancy
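    // (occupancy = active warps per SM / maximum warps supported per SM)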
int maxActiveBlocks;
cudaOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks,
MyKernel, blockSize,
0);
int device;
cudaDeviceProp props;
cudaGetDevice(&device);
cudaGetDeviceProperties(&props, device);
float occupancy = (maxActiveBlocks * blockSize / props.warpSize) /
(float)(props.maxThreadsPerMultiProcessor /
props.warpSize);
printf("Launched blocks of size %d. Theoretical occupancy: %f\n",
blockSize, occupancy);
}
|
822eb32c2eda59745bfdf5913be04b048703ded5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/common_cudnn.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include <hipcub/hipcub.hpp>
namespace caffe2 {
namespace {
// Explicit fast paths for avg and max global pooling due to a CuDNN global
// pooling performance bug which makes pooling extremely slow.
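// Each block strides over the N*C planes and reduces the H*W*D values of a plane with a block-wide reduction (BlockReduce); thread 0 writes the pooled result.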
template <typename T>
__global__ void
global_avgpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T sum(0);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
sum += data[j * sz + k];
}
float totalsum = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
out[j] = totalsum / sz;
}
__syncthreads();
}
}
template <typename T>
__global__ void
global_avgpool_backward_NCHW(const int NC, const int sz, const T* dx, T* out) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
out[i] = dx[i / sz] / sz;
}
}
template <typename T>
__global__ void
global_maxpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T max(data[blockIdx.x * sz + threadIdx.x]);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
max = data[j * sz + k] > max ? data[j * sz + k] : max;
}
float totalmax = BlockReduce(temp_storage).Reduce(max, hipcub::Max());
if (threadIdx.x == 0) {
out[j] = totalmax;
}
__syncthreads();
}
}
template <typename T>
__global__ void global_maxpool_backward_NCHW(
const int NC,
const int sz,
const T* dx,
T* out,
const T* x,
const T* in) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
if (in[i] == x[i / sz]) {
out[i] = dx[i / sz];
} else {
out[i] = 0.0;
}
}
}
template <typename T>
void setTensorDescriptor(
const int size,
const StorageOrder order,
const int N,
const int C,
const int H,
const int W,
const int D,
cudnnTensorDescriptor_t& desc) {
if (size == 4) {
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
desc,
GetCudnnTensorFormat(order),
cudnnTypeWrapper<T>::type,
N,
C,
H,
W));
} else {
vector<int> dims = {N, C, H, W, D};
vector<int> strides;
order == NCHW
? strides.insert(strides.end(), {C * H * W * D, H * W * D, W * D, D, 1})
: strides.insert(
strides.end(), {H * W * D * C, 1, W * D * C, D * C, C});
CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
desc,
cudnnTypeWrapper<T>::type,
size > 3 ? size : 4,
dims.data(),
strides.data()));
}
}
} // namespace
class CuDNNPoolOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
// Figure out the pooling descriptor.
if (operator_def.type().substr(0, 7) == "MaxPool") {
#if CUDNN_VERSION_MIN(6,0,0)
mode_ = CUDNN_POOLING_MAX_DETERMINISTIC;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (operator_def.type().substr(0, 11) == "AveragePool") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto* Y = Output(0);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
    // cuDNN pooling supports only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(1);
W_out = Y->ndim() > 3 ? Y->dim32(2) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(2);
W_out = Y->ndim() > 3 ? Y->dim32(3) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 not supported for CUB.
if (sizeof(T) == 4) {
if (order_ == StorageOrder::NCHW && Y->size() == N * C) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
hipLaunchKernelGGL(( global_avgpool_kernel_NCHW<float>)
, dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
return true;
}
if (mode_ == CUDNN_POOLING_MAX) {
hipLaunchKernelGGL(( global_maxpool_kernel_NCHW<float>)
, dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
return true;
}
}
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y->ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
CUDNN_ENFORCE(cudnnPoolingForward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
bottom_desc_,
X.template data<T>(),
cudnnTypeWrapper<T>::kZero(),
top_desc_,
Y->template mutable_data<T>()));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto* Y = Output(0);
if (X.IsType<float>()) {
return DoRunWithType<float,float>();
} else if (X.IsType<float16>()) {
return DoRunWithType<float16,float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<TIndex> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
private:
};
class CuDNNPoolGradientOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
// Figure out the pooling descriptor.
if (operator_def.type() == "MaxPoolGradient" ||
operator_def.type() == "MaxPool1DGradient" ||
operator_def.type() == "MaxPool2DGradient" ||
operator_def.type() == "MaxPool3DGradient") {
mode_ = CUDNN_POOLING_MAX;
} else if (
operator_def.type() == "AveragePoolGradient" ||
operator_def.type() == "AveragePool1DGradient" ||
operator_def.type() == "AveragePool2DGradient" ||
operator_def.type() == "AveragePool3DGradient") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolGradientOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
    // cuDNN pooling supports only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
dX->ResizeLike(X);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
H_out = Y.dim32(1);
W_out = Y.ndim() > 3 ? Y.dim32(2) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
H_out = Y.dim32(2);
W_out = Y.ndim() > 3 ? Y.dim32(3) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 not supported for CUB.
if (sizeof(T) == 4) {
if (order_ == StorageOrder::NCHW && dY.size() == N * C) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
hipLaunchKernelGGL(( global_avgpool_backward_NCHW<float>)
, dim3(CAFFE_GET_BLOCKS(dX->size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
dY.data<float>(),
dX->mutable_data<float>());
return true;
}
if (mode_ == CUDNN_POOLING_MAX) {
hipLaunchKernelGGL(( global_maxpool_backward_NCHW<float>)
, dim3(CAFFE_GET_BLOCKS(dX->size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
dY.data<float>(),
dX->mutable_data<float>(),
Y.data<float>(),
X.data<float>());
return true;
}
}
}
if (kernel_.size() == 1) {
ConvPoolOpBase<CUDAContext>::ComputePads({H});
} else if (kernel_.size() == 2) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W});
} else if (kernel_.size() == 3) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W, D});
} else {
CAFFE_THROW("Unsupported kernel size :", kernel_.size());
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y.ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
CUDNN_ENFORCE(cudnnPoolingBackward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
top_desc_,
Y.template data<T>(),
top_desc_,
dY.template data<T>(),
bottom_desc_,
X.template data<T>(),
cudnnTypeWrapper<T>::kZero(),
bottom_desc_,
dX->template mutable_data<T>()));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
dX->ResizeLike(X);
if (X.IsType<float>()) {
return DoRunWithType<float,float>();
} else if (X.IsType<float16>()) {
return DoRunWithType<float16,float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<TIndex> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
// MSVC defines IN and OUT in minwindef.h
#ifdef IN
#undef IN
#endif
#ifdef OUT
#undef OUT
#endif
// Input: X, Y, dY
// Output: dX
INPUT_TAGS(IN, OUT, OUT_GRAD);
};
namespace {
REGISTER_CUDNN_OPERATOR(AveragePool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool3DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool3DGradient, CuDNNPoolGradientOp);
} // namespace
} // namespace caffe2
| 822eb32c2eda59745bfdf5913be04b048703ded5.cu | #include "caffe2/core/common_cudnn.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include <cub/cub.cuh>
namespace caffe2 {
namespace {
// Explicit fast paths for avg and max global pooling due to a CuDNN global
// pooling performance bug which makes pooling extremely slow.
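// Each block strides over the N*C planes and reduces the H*W*D values of a plane with a block-wide reduction (BlockReduce); thread 0 writes the pooled result.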
template <typename T>
__global__ void
global_avgpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T sum(0);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
sum += data[j * sz + k];
}
float totalsum = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
out[j] = totalsum / sz;
}
__syncthreads();
}
}
template <typename T>
__global__ void
global_avgpool_backward_NCHW(const int NC, const int sz, const T* dx, T* out) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
out[i] = dx[i / sz] / sz;
}
}
template <typename T>
__global__ void
global_maxpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T max(data[blockIdx.x * sz + threadIdx.x]);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
max = data[j * sz + k] > max ? data[j * sz + k] : max;
}
float totalmax = BlockReduce(temp_storage).Reduce(max, cub::Max());
if (threadIdx.x == 0) {
out[j] = totalmax;
}
__syncthreads();
}
}
template <typename T>
__global__ void global_maxpool_backward_NCHW(
const int NC,
const int sz,
const T* dx,
T* out,
const T* x,
const T* in) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
if (in[i] == x[i / sz]) {
out[i] = dx[i / sz];
} else {
out[i] = 0.0;
}
}
}
template <typename T>
void setTensorDescriptor(
const int size,
const StorageOrder order,
const int N,
const int C,
const int H,
const int W,
const int D,
cudnnTensorDescriptor_t& desc) {
if (size == 4) {
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
desc,
GetCudnnTensorFormat(order),
cudnnTypeWrapper<T>::type,
N,
C,
H,
W));
} else {
vector<int> dims = {N, C, H, W, D};
vector<int> strides;
order == NCHW
? strides.insert(strides.end(), {C * H * W * D, H * W * D, W * D, D, 1})
: strides.insert(
strides.end(), {H * W * D * C, 1, W * D * C, D * C, C});
CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
desc,
cudnnTypeWrapper<T>::type,
size > 3 ? size : 4,
dims.data(),
strides.data()));
}
}
} // namespace
class CuDNNPoolOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
// Figure out the pooling descriptor.
if (operator_def.type().substr(0, 7) == "MaxPool") {
#if CUDNN_VERSION_MIN(6,0,0)
mode_ = CUDNN_POOLING_MAX_DETERMINISTIC;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (operator_def.type().substr(0, 11) == "AveragePool") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto* Y = Output(0);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
    // cuDNN pooling supports only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(1);
W_out = Y->ndim() > 3 ? Y->dim32(2) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(2);
W_out = Y->ndim() > 3 ? Y->dim32(3) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 not supported for CUB.
if (sizeof(T) == 4) {
if (order_ == StorageOrder::NCHW && Y->size() == N * C) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
global_avgpool_kernel_NCHW<float>
<<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
return true;
}
if (mode_ == CUDNN_POOLING_MAX) {
global_maxpool_kernel_NCHW<float>
<<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
return true;
}
}
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y->ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
CUDNN_ENFORCE(cudnnPoolingForward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
bottom_desc_,
X.template data<T>(),
cudnnTypeWrapper<T>::kZero(),
top_desc_,
Y->template mutable_data<T>()));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto* Y = Output(0);
if (X.IsType<float>()) {
return DoRunWithType<float,float>();
} else if (X.IsType<float16>()) {
return DoRunWithType<float16,float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<TIndex> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
private:
};
class CuDNNPoolGradientOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
// Figure out the pooling descriptor.
if (operator_def.type() == "MaxPoolGradient" ||
operator_def.type() == "MaxPool1DGradient" ||
operator_def.type() == "MaxPool2DGradient" ||
operator_def.type() == "MaxPool3DGradient") {
mode_ = CUDNN_POOLING_MAX;
} else if (
operator_def.type() == "AveragePoolGradient" ||
operator_def.type() == "AveragePool1DGradient" ||
operator_def.type() == "AveragePool2DGradient" ||
operator_def.type() == "AveragePool3DGradient") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolGradientOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
    // cuDNN pooling supports only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
dX->ResizeLike(X);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
H_out = Y.dim32(1);
W_out = Y.ndim() > 3 ? Y.dim32(2) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
H_out = Y.dim32(2);
W_out = Y.ndim() > 3 ? Y.dim32(3) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 not supported for CUB.
if (sizeof(T) == 4) {
if (order_ == StorageOrder::NCHW && dY.size() == N * C) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
global_avgpool_backward_NCHW<float>
<<<CAFFE_GET_BLOCKS(dX->size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C,
H * W * D,
dY.data<float>(),
dX->mutable_data<float>());
return true;
}
if (mode_ == CUDNN_POOLING_MAX) {
global_maxpool_backward_NCHW<float>
<<<CAFFE_GET_BLOCKS(dX->size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C,
H * W * D,
dY.data<float>(),
dX->mutable_data<float>(),
Y.data<float>(),
X.data<float>());
return true;
}
}
}
if (kernel_.size() == 1) {
ConvPoolOpBase<CUDAContext>::ComputePads({H});
} else if (kernel_.size() == 2) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W});
} else if (kernel_.size() == 3) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W, D});
} else {
CAFFE_THROW("Unsupported kernel size :", kernel_.size());
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y.ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
CUDNN_ENFORCE(cudnnPoolingBackward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
top_desc_,
Y.template data<T>(),
top_desc_,
dY.template data<T>(),
bottom_desc_,
X.template data<T>(),
cudnnTypeWrapper<T>::kZero(),
bottom_desc_,
dX->template mutable_data<T>()));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
dX->ResizeLike(X);
if (X.IsType<float>()) {
return DoRunWithType<float,float>();
} else if (X.IsType<float16>()) {
return DoRunWithType<float16,float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<TIndex> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
// MSVC defines IN and OUT in minwindef.h
#ifdef IN
#undef IN
#endif
#ifdef OUT
#undef OUT
#endif
// Input: X, Y, dY
// Output: dX
INPUT_TAGS(IN, OUT, OUT_GRAD);
};
namespace {
REGISTER_CUDNN_OPERATOR(AveragePool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool3DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool3DGradient, CuDNNPoolGradientOp);
} // namespace
} // namespace caffe2
|
5bc289c288558aaac3f640d80a38ba8f57b2cfe8.hip | // !!! This is a file automatically generated by hipify!!!
// Compile: nvcc -g -G -arch=sm_52 -std=c++11 assignment5-p5.cu -o assignment5-p5
#include <cmath>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <iostream>
#include <sys/time.h>
const uint64_t N = (64);
#define THRESHOLD (0.000001)
//TileD should be less than or equal to 10 due to thread_per_block limit
#define TileD (10)
using std::cerr;
using std::cout;
using std::endl;
// TODO: Edit the function definition as required
__global__ void kernel1(float* d_in,float* d_out) {
int i = (blockIdx.x*blockDim.x + threadIdx.x);
int j = (blockIdx.y*blockDim.y + threadIdx.y);
int k = (blockIdx.z*blockDim.z + threadIdx.z);
if(i>0 && j>0 && k>0 && i<(N-1) && j<(N-1) && k<(N-1)){
float temp = 0;
temp = (d_in[(i-1)*N*N + j*N + k] + d_in[(i+1)*N*N + j*N + k]);
temp += d_in[i*N*N + (j-1)*N + k];
temp += d_in[i*N*N + (j+1)*N + k];
temp += d_in[i*N*N + j*N + k-1];
temp += d_in[i*N*N + j*N + k+1];
d_out[i*N*N + j*N + k] = ((float) 0.8) * temp;
}
}
// TODO: Edit the function definition as required
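// kernel2 stages a TileD x TileD x TileD tile of the input in shared memory; neighbour values that fall outside the tile are fetched directly from global memory.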
__global__ void kernel2(float* d_in, float* d_out) {
int i = (blockIdx.x*blockDim.x + threadIdx.x);
int j = (blockIdx.y*blockDim.y + threadIdx.y);
int k = (blockIdx.z*blockDim.z + threadIdx.z);
int tx = threadIdx.x;
int ty = threadIdx.y;
int tz = threadIdx.z;
int bx = blockDim.x-1;
int by = blockDim.y-1;
int bz = blockDim.z-1;
__shared__ float tile3D[TileD][TileD][TileD];
if(i<N && j<N && k<N){
tile3D[tx][ty][tz] = d_in[i*N*N + j*N + k];
__syncthreads();
}
if(i>0 && j>0 && k>0 && i<(N-1) && j<(N-1) && k<(N-1)){
float a = (tx>0)?tile3D[tx-1][ty][tz]:d_in[(i-1)*N*N + j*N + k];
float b = (tx<bx)?tile3D[tx+1][ty][tz]:d_in[(i+1)*N*N + j*N + k];
float c = (ty>0)?tile3D[tx][ty-1][tz]:d_in[i*N*N + (j-1)*N + k];
float d = (ty<by)?tile3D[tx][ty+1][tz]:d_in[i*N*N + (j+1)*N + k];
float e = (tz>0)?tile3D[tx][ty][tz-1]:d_in[i*N*N + j*N + k-1];
float f = (tz<bz)?tile3D[tx][ty][tz+1]:d_in[i*N*N + j*N + k+1];
float temp = 0.0;
temp = a+b;
temp += c;
temp += d;
temp += e;
temp += f;
d_out[i*N*N + j*N + k] = ((float) 0.8)*temp;
}
}
// TODO: Edit the function definition as required
__host__ void stencil(float* in, float* out) {
for(int i=1;i<N-1;i++){
for(int j=1;j<N-1;j++){
for(int k=1;k<N-1;k++){
out[i*N*N + j*N + k] = ((float) 0.8)*(in[(i-1)*N*N + j*N + k] + in[(i+1)*N*N + j*N + k] + in[i*N*N + (j-1)*N + k] + in[i*N*N + (j+1)*N + k] + in[i*N*N + j*N + k-1] + in[i*N*N + j*N + k+1]);
}
}
}
}
__host__ void check_result(float* w_ref, float* w_opt, uint64_t size) {
double maxdiff = 0.0, this_diff = 0.0;
int numdiffs = 0;
for (uint64_t i = 0; i < size; i++) {
for (uint64_t j = 0; j < size; j++) {
for (uint64_t k = 0; k < size; k++) {
this_diff = w_ref[i + N * j + N * N * k] - w_opt[i + N * j + N * N * k];
if (::fabs(this_diff) > THRESHOLD) {
numdiffs++;
if (this_diff > maxdiff) {
maxdiff = this_diff;
}
}
}
}
}
if (numdiffs > 0) {
cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << "; Max Diff = " << maxdiff
<< endl;
} else {
cout << "No differences found between base and test versions\n";
}
}
double rtclock() { // Seconds
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday(&Tp, &Tzp);
if (stat != 0) {
cout << "Error return from gettimeofday: " << stat << "\n";
}
return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}
int main() {
uint64_t SIZE = N * N * N;
float *h_in = new float[SIZE];
float *h_cpu_out = new float[SIZE];
float *h_k1_out = new float[SIZE];
float *h_k2_out = new float[SIZE];
for(int i=0;i<SIZE;i++){
h_in[i] = rand();
h_cpu_out[i] = 0;
h_k1_out[i] = 0;
h_k2_out[i] = 0;
}
double clkbegin = rtclock();
stencil(h_in,h_cpu_out);
double clkend = rtclock();
double cpu_time = clkend - clkbegin;
cout << "Stencil time on CPU: " << cpu_time * 1000 << " msec" << endl;
hipError_t status;
hipEvent_t start, end;
// TODO: Fill in kernel1
float *d_k1_in, *d_k1_out;
status = hipMalloc(&d_k1_in, SIZE * sizeof(float));
if (status != hipSuccess) {
cerr << hipGetErrorString(status) << endl;
}
status = hipMalloc(&d_k1_out, SIZE * sizeof(float));
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
status = hipMemcpy(d_k1_in, h_in, SIZE * sizeof(float), hipMemcpyHostToDevice);
int threadPerBlock1 = min(10,(int)N);
int numBlock1 = (int)ceil(((double)N)/((double)threadPerBlock1));
dim3 blockD1(threadPerBlock1,threadPerBlock1,threadPerBlock1);
dim3 gridD1(numBlock1,numBlock1,numBlock1);
hipLaunchKernelGGL(( kernel1), dim3(gridD1),dim3(blockD1), 0, 0, d_k1_in,d_k1_out);
hipMemcpy(h_k1_out, d_k1_out, SIZE * sizeof(float), hipMemcpyDeviceToHost);
hipEventRecord(end, 0);
hipEventSynchronize(end);
// TODO: Adapt check_result() and invoke
float kernel_time;
hipEventElapsedTime(&kernel_time, start, end);
hipEventDestroy(start);
hipEventDestroy(end);
check_result(h_cpu_out,h_k1_out,N);
std::cout << "Kernel 1 time (ms): " << kernel_time << "\n";
// TODO: Fill in kernel2
float *d_k2_in, *d_k2_out;
status = hipMalloc(&d_k2_in, SIZE * sizeof(float));
if (status != hipSuccess) {
cerr << hipGetErrorString(status) << endl;
}
status = hipMalloc(&d_k2_out, SIZE * sizeof(float));
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
status = hipMemcpy(d_k2_in, h_in, SIZE * sizeof(float), hipMemcpyHostToDevice);
int threadPerBlock2 = min(TileD,(int)N);
int numBlock2 = (int)ceil(((double)N)/((double)threadPerBlock2));
dim3 blockD2(threadPerBlock2,threadPerBlock2,threadPerBlock2);
dim3 gridD2(numBlock2,numBlock2,numBlock2);
hipLaunchKernelGGL(( kernel2), dim3(gridD2),dim3(blockD2), 0, 0, d_k2_in,d_k2_out);
hipMemcpy(h_k2_out, d_k2_out, SIZE * sizeof(float), hipMemcpyDeviceToHost);
hipEventRecord(end, 0);
hipEventSynchronize(end);
// TODO: Adapt check_result() and invoke
hipEventElapsedTime(&kernel_time, start, end);
hipEventDestroy(start);
hipEventDestroy(end);
check_result(h_cpu_out,h_k2_out,N);
std::cout << "Kernel 2 time (ms): " << kernel_time << "\n";
// TODO: Free memory
hipFree(d_k1_in);
hipFree(d_k2_in);
hipFree(d_k1_out);
hipFree(d_k2_out);
  // these buffers were allocated with new[], so release them with delete[]
  delete[] h_cpu_out;
  delete[] h_in;
  delete[] h_k1_out;
  delete[] h_k2_out;
return EXIT_SUCCESS;
}
| 5bc289c288558aaac3f640d80a38ba8f57b2cfe8.cu | // Compile: nvcc -g -G -arch=sm_52 -std=c++11 assignment5-p5.cu -o assignment5-p5
#include <cmath>
#include <cstdlib>
#include <cuda.h>
#include <iostream>
#include <sys/time.h>
const uint64_t N = (64);
#define THRESHOLD (0.000001)
//TileD should be less than or equal to 10 due to thread_per_block limit
#define TileD (10)
using std::cerr;
using std::cout;
using std::endl;
// TODO: Edit the function definition as required
__global__ void kernel1(float* d_in,float* d_out) {
int i = (blockIdx.x*blockDim.x + threadIdx.x);
int j = (blockIdx.y*blockDim.y + threadIdx.y);
int k = (blockIdx.z*blockDim.z + threadIdx.z);
if(i>0 && j>0 && k>0 && i<(N-1) && j<(N-1) && k<(N-1)){
float temp = 0;
temp = (d_in[(i-1)*N*N + j*N + k] + d_in[(i+1)*N*N + j*N + k]);
temp += d_in[i*N*N + (j-1)*N + k];
temp += d_in[i*N*N + (j+1)*N + k];
temp += d_in[i*N*N + j*N + k-1];
temp += d_in[i*N*N + j*N + k+1];
d_out[i*N*N + j*N + k] = ((float) 0.8) * temp;
}
}
// TODO: Edit the function definition as required
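// kernel2 stages a TileD x TileD x TileD tile of the input in shared memory; neighbour values that fall outside the tile are fetched directly from global memory.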
__global__ void kernel2(float* d_in, float* d_out) {
int i = (blockIdx.x*blockDim.x + threadIdx.x);
int j = (blockIdx.y*blockDim.y + threadIdx.y);
int k = (blockIdx.z*blockDim.z + threadIdx.z);
int tx = threadIdx.x;
int ty = threadIdx.y;
int tz = threadIdx.z;
int bx = blockDim.x-1;
int by = blockDim.y-1;
int bz = blockDim.z-1;
__shared__ float tile3D[TileD][TileD][TileD];
if(i<N && j<N && k<N){
tile3D[tx][ty][tz] = d_in[i*N*N + j*N + k];
__syncthreads();
}
if(i>0 && j>0 && k>0 && i<(N-1) && j<(N-1) && k<(N-1)){
float a = (tx>0)?tile3D[tx-1][ty][tz]:d_in[(i-1)*N*N + j*N + k];
float b = (tx<bx)?tile3D[tx+1][ty][tz]:d_in[(i+1)*N*N + j*N + k];
float c = (ty>0)?tile3D[tx][ty-1][tz]:d_in[i*N*N + (j-1)*N + k];
float d = (ty<by)?tile3D[tx][ty+1][tz]:d_in[i*N*N + (j+1)*N + k];
float e = (tz>0)?tile3D[tx][ty][tz-1]:d_in[i*N*N + j*N + k-1];
float f = (tz<bz)?tile3D[tx][ty][tz+1]:d_in[i*N*N + j*N + k+1];
float temp = 0.0;
temp = a+b;
temp += c;
temp += d;
temp += e;
temp += f;
d_out[i*N*N + j*N + k] = ((float) 0.8)*temp;
}
}
// TODO: Edit the function definition as required
__host__ void stencil(float* in, float* out) {
for(int i=1;i<N-1;i++){
for(int j=1;j<N-1;j++){
for(int k=1;k<N-1;k++){
out[i*N*N + j*N + k] = ((float) 0.8)*(in[(i-1)*N*N + j*N + k] + in[(i+1)*N*N + j*N + k] + in[i*N*N + (j-1)*N + k] + in[i*N*N + (j+1)*N + k] + in[i*N*N + j*N + k-1] + in[i*N*N + j*N + k+1]);
}
}
}
}
__host__ void check_result(float* w_ref, float* w_opt, uint64_t size) {
double maxdiff = 0.0, this_diff = 0.0;
int numdiffs = 0;
for (uint64_t i = 0; i < size; i++) {
for (uint64_t j = 0; j < size; j++) {
for (uint64_t k = 0; k < size; k++) {
this_diff = w_ref[i + N * j + N * N * k] - w_opt[i + N * j + N * N * k];
if (std::fabs(this_diff) > THRESHOLD) {
numdiffs++;
if (this_diff > maxdiff) {
maxdiff = this_diff;
}
}
}
}
}
if (numdiffs > 0) {
cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << "; Max Diff = " << maxdiff
<< endl;
} else {
cout << "No differences found between base and test versions\n";
}
}
double rtclock() { // Seconds
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday(&Tp, &Tzp);
if (stat != 0) {
cout << "Error return from gettimeofday: " << stat << "\n";
}
return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}
int main() {
uint64_t SIZE = N * N * N;
float *h_in = new float[SIZE];
float *h_cpu_out = new float[SIZE];
float *h_k1_out = new float[SIZE];
float *h_k2_out = new float[SIZE];
for(int i=0;i<SIZE;i++){
h_in[i] = rand();
h_cpu_out[i] = 0;
h_k1_out[i] = 0;
h_k2_out[i] = 0;
}
double clkbegin = rtclock();
stencil(h_in,h_cpu_out);
double clkend = rtclock();
double cpu_time = clkend - clkbegin;
cout << "Stencil time on CPU: " << cpu_time * 1000 << " msec" << endl;
cudaError_t status;
cudaEvent_t start, end;
// TODO: Fill in kernel1
float *d_k1_in, *d_k1_out;
status = cudaMalloc(&d_k1_in, SIZE * sizeof(float));
if (status != cudaSuccess) {
cerr << cudaGetErrorString(status) << endl;
}
status = cudaMalloc(&d_k1_out, SIZE * sizeof(float));
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_k1_in, h_in, SIZE * sizeof(float), cudaMemcpyHostToDevice);
int threadPerBlock1 = min(10,(int)N);
int numBlock1 = (int)ceil(((double)N)/((double)threadPerBlock1));
dim3 blockD1(threadPerBlock1,threadPerBlock1,threadPerBlock1);
dim3 gridD1(numBlock1,numBlock1,numBlock1);
kernel1<<<gridD1,blockD1>>>(d_k1_in,d_k1_out);
cudaMemcpy(h_k1_out, d_k1_out, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
// TODO: Adapt check_result() and invoke
float kernel_time;
cudaEventElapsedTime(&kernel_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
check_result(h_cpu_out,h_k1_out,N);
std::cout << "Kernel 1 time (ms): " << kernel_time << "\n";
// TODO: Fill in kernel2
float *d_k2_in, *d_k2_out;
status = cudaMalloc(&d_k2_in, SIZE * sizeof(float));
if (status != cudaSuccess) {
cerr << cudaGetErrorString(status) << endl;
}
status = cudaMalloc(&d_k2_out, SIZE * sizeof(float));
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_k2_in, h_in, SIZE * sizeof(float), cudaMemcpyHostToDevice);
int threadPerBlock2 = min(TileD,(int)N);
int numBlock2 = (int)ceil(((double)N)/((double)threadPerBlock2));
dim3 blockD2(threadPerBlock2,threadPerBlock2,threadPerBlock2);
dim3 gridD2(numBlock2,numBlock2,numBlock2);
kernel2<<<gridD2,blockD2>>>(d_k2_in,d_k2_out);
cudaMemcpy(h_k2_out, d_k2_out, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
// TODO: Adapt check_result() and invoke
cudaEventElapsedTime(&kernel_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
check_result(h_cpu_out,h_k2_out,N);
std::cout << "Kernel 2 time (ms): " << kernel_time << "\n";
// TODO: Free memory
cudaFree(d_k1_in);
cudaFree(d_k2_in);
cudaFree(d_k1_out);
cudaFree(d_k2_out);
  // these buffers were allocated with new[], so release them with delete[]
  delete[] h_cpu_out;
  delete[] h_in;
  delete[] h_k1_out;
  delete[] h_k2_out;
return EXIT_SUCCESS;
}
|
5b0a023c6da12a3b2b35cc877e712a3f17bd8995.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ unsigned int Rand(unsigned int randx)
{
randx = randx*1103515245+12345;
return randx&2147483647;
}
__global__ void setRandom(float *gpu_array, int N, int maxval )
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < N ){
gpu_array[id] = 1.0f / maxval * Rand(id) / float( RAND_MAX );
}
} | 5b0a023c6da12a3b2b35cc877e712a3f17bd8995.cu | #include "includes.h"
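// Rand() is a simple linear congruential generator; the & 2147483647 mask keeps the low 31 bits of the state.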
__device__ unsigned int Rand(unsigned int randx)
{
randx = randx*1103515245+12345;
return randx&2147483647;
}
__global__ void setRandom(float *gpu_array, int N, int maxval )
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < N ){
gpu_array[id] = 1.0f / maxval * Rand(id) / float( RAND_MAX );
}
} |
d0a1fe76dbaac360ed78b0369658e9c0a66ae44e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (C) 2014, Geometric Design and Manufacturing Lab in THE CHINESE UNIVERSITY OF HONG KONG
* All rights reserved.
*
* http://ldnibasedsolidmodeling.sourceforge.net/
*
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <malloc.h>
#include <time.h>
#include <sys/stat.h>
#include "../common/GL/glew.h"
#include "hip/hip_runtime.h"
#include "cutil.h"
#include "cuda_gl_interop.h"
#include "..\GLKLib\GLK.h"
#include "PMBody.h"
#include "LDNIcpuSolid.h"
#include "LDNIcudaSolid.h"
#include "LDNIcudaOperation.h"
#include <thrust/version.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <iostream>
#define GPU_BASED_SCAN true
extern GLK _pGLK;
extern bool _bExpandableWorkingSpace;
//--------------------------------------------------------------------------------------------
texture<float4,2> tex2DFloat4In;
extern __global__ void krLDNISuperUnion_CopySamples(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int n, int arrsize, int res, unsigned int *devIndexArrayPtr);
extern __global__ void krLDNIBoolean_SuperUnionOnRays(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr, unsigned int *devIndexArrayPtr,
unsigned int *devIndexArrayPtrRes, int arrsize);
extern __global__ void krLDNIBoolean_IdentifyEnterLeaveOnRays(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr, unsigned int *devIndexArrayPtr, int arrsize);
extern __global__ void krLDNIBoolean_BooleanOnRays(float *devNxArrayPtrA, float *devNyArrayPtrA, float *devDepthArrayPtrA, unsigned int *devIndexArrayPtrA,
float *devNxArrayPtrB, float *devNyArrayPtrB, float *devDepthArrayPtrB, unsigned int *devIndexArrayPtrB,
unsigned int *devIndexArrayPtrRes, int arrsize, short nOperationType);
extern __global__ void krLDNIBoolean_ResultSampleCollection(float *devNxArrayPtrA, float *devNyArrayPtrA, float *devDepthArrayPtrA, unsigned int *devIndexArrayPtrA,
float *devNxArrayPtrB, float *devNyArrayPtrB, float *devDepthArrayPtrB, unsigned int *devIndexArrayPtrB,
float *devNxArrayPtrRes, float *devNyArrayPtrRes, float *devDepthArrayPtrRes, unsigned int *devIndexArrayPtrRes, int arrsize);
extern __global__ void krLDNIBoolean_ResultSampleCollection(float *devNxArrayPtrA, float *devNyArrayPtrA, float *devDepthArrayPtrA, unsigned int *devIndexArrayPtrA,
float *devNxArrayPtrRes, float *devNyArrayPtrRes, float *devDepthArrayPtrRes, unsigned int *devIndexArrayPtrRes, int arrsize, float width, float gwidth);
extern __global__ void krLDNIBilateralNormalFilter_PerRay(unsigned int* xIndexArray, unsigned int* yIndexArray, unsigned int* zIndexArray,
float* xNxArray, float* yNxArray, float* zNxArray, float* xNyArray, float* yNyArray, float* zNyArray,
float* xDepthArray, float* yDepthArray, float* zDepthArray, float *buffer,
int arrsize, short nAxis, int res, float ww, float ox, float oy, float oz, unsigned int nSupportSize, float normalPara);
extern __global__ void krLDNIBilateralNormalFilter_PerSample(unsigned int* xIndexArray, unsigned int* yIndexArray, unsigned int* zIndexArray,
float* xNxArray, float* yNxArray, float* zNxArray, float* xNyArray, float* yNyArray, float* zNyArray,
float* xDepthArray, float* yDepthArray, float* zDepthArray, float *buffer,
int sampleNum, short nAxis, int res, float ww, unsigned int nSupportSize, float normalPara);
extern __global__ void krLDNINormalProcessing_PreProc(unsigned int* indexArray, float *buffer, int res, int arrsize);
extern __global__ void krLDNINormalProcessing_Update(int sampleNum, float *nxArray, float *nyArray, float *depthArray, float *buffer);
extern __global__ void krLDNINormalProcessing_OrientationCorrectionByVoting(
unsigned int* xIndexArray, unsigned int* yIndexArray, unsigned int* zIndexArray,
float* xNxArray, float* yNxArray, float* zNxArray, float* xNyArray, float* yNyArray, float* zNyArray,
float* xDepthArray, float* yDepthArray, float* zDepthArray, float *buffer,
int sampleNum, short nAxis, int res, float ww, unsigned int nSupportSize);
extern __global__ void krLDNINormalReconstruction_PerSample(unsigned int* xIndexArray, unsigned int* yIndexArray, unsigned int* zIndexArray,
float* xNxArray, float* yNxArray, float* zNxArray, float* xNyArray, float* yNyArray, float* zNyArray,
float* xDepthArray, float* yDepthArray, float* zDepthArray, float *buffer,
int sampleNum, short nAxis, int res, float ww, unsigned int nSupportSize);
extern __global__ void krLDNISampling_SortSamples(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr);
extern __global__ void krLDNISampling_CopySamples(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int n, int arrsize, float width, float sampleWidth, int res,
unsigned int *devIndexArrayPtr);
extern __global__ void krLDNISampling_CopyIndexAndFindMax(unsigned char *devStencilBufferPtr, unsigned int *devIndexArrayPtr,
unsigned int *devResArrayPtr, int arrsize );
extern __global__ void krLDNIcudaSolid_depthSampleAdd(float *depthSamples, float addValue, unsigned int sampleNum);
extern __global__ void krLDNIcudaSolid_fillNewIndexBySampleNumber(unsigned int *newIndexArray, unsigned int *indexArray, int res, int newRes, int sdi, int sdj);
extern __global__ void krLDNIRegularization_RegularizationOnRays(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
unsigned int *devIndexArrayPtr, unsigned int *devIndexArrayPtrRes, int arrsize, float eps);
extern __global__ void krLDNIRegularization_ResultSampleCollection(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
unsigned int *devIndexArrayPtr, float *devNxArrayPtrRes, float *devNyArrayPtrRes, float *devDepthArrayPtrRes,
unsigned int *devIndexArrayPtrRes, int arrsize);
extern bool initGLInteroperabilityOnCUDA(int major, int minor);
//--------------------------------------------------------------------------------------------
// adpative slicing related
extern __global__ void krLDNIAdaptiveSlicing_CalculateRayLength(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr, float *devRayLengthArrayPtr, int res);
extern __global__ void krLDNIAdaptiveSlicing_CalculateLayerArea(float *devRayLengthArrayPtr, unsigned int *devIndexArrayPtr, float *devAreaArrayPtr, int res);
extern __global__ void krLDNIAdaptiveSlicing_CalculateVolumeErrorPerRow(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr, float *devVolumeErrorMatrix, int res, int minSliceCount, int totalSliceCount, int oneLayerSliceCount, float sliceSize, float y_min, float oy, float ww);
extern __global__ void krLDNIAdaptiveSlicing_CalculateVolumeErrorPerTile(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr, float *devVolumeErrorMatrix, int res, int minSliceCount, int totalSliceCount, int oneLayerSliceCount, float sliceSize, float y_min, float oy, float ww, int tileCount);
extern __global__ void krLDNIAdaptiveSlicing_ReduceVolumeErrorByTile(float *devVolumeErrorMatrix, int oneLayerSliceCount, int totalSliceCount, int tileCount);
//--------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------
bool LDNIcudaOperation::MultiObjectSamplingInOneSolid(LDNIcudaSolid* &solid, GLKObList* meshlist, float boundingBox[], int res)
{
float origin[3],gWidth;
char fileadd[256];
long time=clock(),totalTime=clock();
//---------------------------------------------------------------------------------
solid=new LDNIcudaSolid;
solid->MallocMemory(res);
solid->SetBoundingBox(boundingBox);
gWidth=(boundingBox[1]-boundingBox[0])/(float)res;
solid->SetSampleWidth(gWidth);
origin[0]=boundingBox[0]+gWidth*0.5f;
origin[1]=boundingBox[2]+gWidth*0.5f;
origin[2]=boundingBox[4]+gWidth*0.5f;
solid->SetOrigin(origin[0],origin[1],origin[2]);
//---------------------------------------------------------------------------------
// For using OpenGL Shading Language to implement the sampling procedure
if (glewInit() != GLEW_OK) {printf("glewInit failed. Exiting...\n"); return false;}
//-----------------------------------------------------------------------------------------
GLhandleARB g_programObj, g_vertexShader, g_GeometryShader, g_FragShader;
const char *VshaderString[1],*GshaderString[1], *FshaderString[1];
GLint bCompiled = 0, bLinked = 0;
char str[4096] = "";
//-----------------------------------------------------------------------------------------
// Step 1: Setup the shaders
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"SuperUnionLDNIVertexShader.vert");
g_vertexShader = glCreateShaderObjectARB( GL_VERTEX_SHADER_ARB );
unsigned char *ShaderAssembly = _readShaderFile( fileadd );
VshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_vertexShader, 1, VshaderString, NULL );
glCompileShaderARB( g_vertexShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_vertexShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_vertexShader, sizeof(str), NULL, str);
printf("Warning: Vertex Shader Compile Error \n%s\n",str); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"SuperUnionLDNIGeometryShader.geo");
g_GeometryShader = glCreateShaderObjectARB( GL_GEOMETRY_SHADER_EXT );
ShaderAssembly = _readShaderFile( fileadd );
GshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_GeometryShader, 1, GshaderString, NULL );
glCompileShaderARB( g_GeometryShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_GeometryShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_GeometryShader, sizeof(str), NULL, str);
printf("Warning: Geo Shader Compile Error\n%s\n",str); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"SuperUnionLDNIFragmentShader.frag");
g_FragShader = glCreateShaderObjectARB( GL_FRAGMENT_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
FshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_FragShader, 1, FshaderString, NULL );
glCompileShaderARB( g_FragShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_FragShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_FragShader, sizeof(str), NULL, str);
		printf("Warning: Fragment Shader Compile Error\n%s\n",str); return false;
}
g_programObj = glCreateProgramObjectARB();
if (glGetError()!=GL_NO_ERROR) printf("Error: OpenGL!\n\n");
glAttachObjectARB( g_programObj, g_vertexShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Vertex Shader!\n\n");
glAttachObjectARB( g_programObj, g_GeometryShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Geometry Shader!\n\n");
glAttachObjectARB( g_programObj, g_FragShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Fragment Shader!\n\n");
//-----------------------------------------------------------------------------
// Configuration setting for geometry shader
glLinkProgramARB( g_programObj);
glGetObjectParameterivARB( g_programObj, GL_OBJECT_LINK_STATUS_ARB, &bLinked );
if( bLinked == false ) {
glGetInfoLogARB( g_programObj, sizeof(str), NULL, str );
printf("Linking Fail: %s\n",str); return false;
}
//-----------------------------------------------------------------------------------------
// Step 2: creating vertex and index array buffer
	glGetError();	// clear any error generated previously
int meshNum = meshlist->GetCount();
GLuint* vbo = (GLuint*)malloc(meshNum*sizeof(GLuint));
GLuint* vboInd = (GLuint*)malloc(meshNum*sizeof(GLuint));
GLKPOSITION Pos;
int nodeNum,faceNum,i=0,j=0;
float* verTex;
float* tempver;
int* inDex;
int* tempinD;
unsigned int* meshptr;
int* indexCount;
indexCount = (int*)malloc(meshNum*sizeof(int));
printf("Mesh Num : %d \n",meshNum);
verTex = (float*)malloc(sizeof(float));
inDex = (int*)malloc(sizeof(int));
glGenBuffers(meshNum, vbo);
glGenBuffers(meshNum, vboInd);
for(Pos=meshlist->GetHeadPosition();Pos!=NULL;j++) {
QuadTrglMesh *mesh=(QuadTrglMesh *)(meshlist->GetNext(Pos));
nodeNum = mesh->GetNodeNumber();
faceNum = mesh->GetFaceNumber();
		printf("node num %d, face num %d\n",nodeNum,faceNum);
tempver = (float*)realloc(verTex,nodeNum*3*sizeof(float));
if (tempver!=NULL)
verTex = tempver;
else
{
free(verTex);
			printf("realloc memory error!!");
return false;
}
tempinD = (int*)realloc(inDex,faceNum*3*sizeof(int));
if (tempinD!=NULL)
inDex = tempinD;
else
{
free(inDex);
			printf("realloc memory error!!");
return false;
}
memset(verTex,0,nodeNum*3*sizeof(float));
memcpy(verTex,mesh->GetNodeArrayPtr(),nodeNum*3*sizeof(float));
memset(inDex,0,faceNum*3*sizeof(int));
meshptr = mesh->GetFaceTablePtr();
for(i=0; i < faceNum; i++)
{ inDex[3*i] = meshptr[4*i]-1; inDex[3*i+1] = meshptr[4*i+1]-1; inDex[3*i+2] = meshptr[4*i+2]-1;
}
indexCount[j] = faceNum*3;
glBindBuffer(GL_ARRAY_BUFFER, vbo[j]);
glBufferData(GL_ARRAY_BUFFER, nodeNum*3*sizeof(GLfloat), 0, GL_STATIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, nodeNum*3*sizeof(GLfloat), verTex);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER_ARB, vboInd[j]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER_ARB, faceNum*3*sizeof(GL_UNSIGNED_INT), 0, GL_STATIC_DRAW);
glBufferSubData(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, faceNum*3*sizeof(GL_UNSIGNED_INT), inDex);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
if (glGetError()!=GL_NO_ERROR) printf("Error: buffer binding!\n\n");
}
free(verTex);
free(inDex);
//-----------------------------------------------------------------------------------------
float centerPos[3];
centerPos[0]=(boundingBox[0]+boundingBox[1])*0.5f;
centerPos[1]=(boundingBox[2]+boundingBox[3])*0.5f;
centerPos[2]=(boundingBox[4]+boundingBox[5])*0.5f;
glUseProgramObjectARB(g_programObj);
{
_decomposeLDNIByFBOPBO(solid, vbo, vboInd, meshNum, centerPos, g_programObj,indexCount);
}
glUseProgramObjectARB(0);
//-----------------------------------------------------------------------------------------
// Step 6: free the memory
time=clock();
//-----------------------------------------------------------------------------------------
glDeleteBuffers(meshNum, vboInd);
glDeleteBuffers(meshNum, vbo);
glDeleteObjectARB( g_vertexShader);
glDeleteObjectARB( g_GeometryShader);
glDeleteObjectARB( g_FragShader);
glDeleteObjectARB( g_programObj);
free(indexCount);
//------------------------------------------------------------------------
printf("\nMemory clean-up time is %ld (ms)\n",clock()-time);
printf("--------------------------------------------------------------\n");
printf("Total time for sampling is %ld (ms)\n\n",clock()-totalTime);
return true;
}
bool LDNIcudaOperation::SuperUnionOperation(LDNIcudaSolid* &solid, GLKObList* meshlist, float boundingBox[],int res)
{
long time=clock(),totalTime=clock();
float xx=(boundingBox[0]+boundingBox[1])*0.5f;
float yy=(boundingBox[2]+boundingBox[3])*0.5f;
float zz=(boundingBox[4]+boundingBox[5])*0.5f;
float ww=boundingBox[1]-boundingBox[0];
if ((boundingBox[3]-boundingBox[2])>ww) ww=boundingBox[3]-boundingBox[2];
if ((boundingBox[5]-boundingBox[4])>ww) ww=boundingBox[5]-boundingBox[4];
ww=ww*0.55+ww/(float)(res-1)*2.0;
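	// ww becomes the half-width of the cubic working space: 0.55*extent is the tight
	// half-extent plus a 10% margin, and ww/(res-1)*2 adds roughly two sample cells of padding.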
boundingBox[0]=xx-ww; boundingBox[1]=xx+ww;
boundingBox[2]=yy-ww; boundingBox[3]=yy+ww;
boundingBox[4]=zz-ww; boundingBox[5]=zz+ww;
if (!MultiObjectSamplingInOneSolid(solid, meshlist, boundingBox, res)) return false;
if (!_UnionMultiObjects(solid, res)) return false;
return true;
}
bool LDNIcudaOperation::_UnionMultiObjects(LDNIcudaSolid* &inputSolid, int res)
{
unsigned int arrsize=res*res;
float width, gwidth;
float bbox[6];
if (inputSolid->GetSampleNumber()==0) {
printf("No Samples!");
return false;
}
inputSolid->GetBoundingBox(bbox);
width = bbox[1]-bbox[0];
gwidth = inputSolid->GetSampleWidth();
//-----------------------------------------------------------------------------------
// Step 1: Initialization
long time=clock();
unsigned int *devIndexArrayResPtr;
CUDA_SAFE_CALL( hipMalloc( (void**)&devIndexArrayResPtr, (arrsize+1)*sizeof(unsigned int) ) );
//-----------------------------------------------------------------------------------
// Step 2: computing the Boolean operation results on LDNIs
for(short nAxis=0;nAxis<3;nAxis++) {
//---------------------------------------------------------------------------------------------
		// Sub-step 1: initialization
CUDA_SAFE_CALL( hipMemset( (void*)devIndexArrayResPtr, 0, (arrsize+1)*sizeof(unsigned int) ) );
//---------------------------------------------------------------------------------------------
float *devNxArrayPtr=inputSolid->GetSampleNxArrayPtr(nAxis);
float *devNyArrayPtr=inputSolid->GetSampleNyArrayPtr(nAxis);
float *devDepthArrayPtr=inputSolid->GetSampleDepthArrayPtr(nAxis); //if (devDepthArrayPtrA==NULL) printf("Empty ");
unsigned int *devIndexArrayPtr=inputSolid->GetIndexArrayPtr(nAxis);
//---------------------------------------------------------------------------------------------
// Sub-step 2: identify the entering and leaving samples ray by ray
hipLaunchKernelGGL(( krLDNIBoolean_IdentifyEnterLeaveOnRays), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, devIndexArrayPtr, arrsize);
//---------------------------------------------------------------------------------------------
// Sub-step 3: Sorting the entering and leaving samples ray by ray
hipLaunchKernelGGL(( krLDNISampling_SortSamples), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, arrsize, devIndexArrayPtr);
//---------------------------------------------------------------------------------------------
// Sub-step 4: Super - union samples ray by ray
hipLaunchKernelGGL(( krLDNIBoolean_SuperUnionOnRays), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, devIndexArrayPtr,
devIndexArrayResPtr, arrsize);
//---------------------------------------------------------------------------------------------
// Sub-step 5: compaction of index array
thrust::device_ptr<unsigned int> dev_ptr(devIndexArrayResPtr); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr, dev_ptr+(arrsize+1), dev_ptr); // in-place scan
unsigned int sampleNum=dev_ptr[arrsize];
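		// After the exclusive scan the index array holds per-ray start offsets, e.g.
		// per-ray counts [3,0,2,1,0] become [0,3,3,5,6]; the last entry (dev_ptr[arrsize])
		// is therefore the total number of resultant samples.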
//printf("max sample ----- %d\n",sampleNum);
//---------------------------------------------------------------------------------------------
// Sub-step 6: collecting the resultant samples into the sampleArray of solidTileA
float *newDevNxArrayPtr, *newDevNyArrayPtr, *newDevDepthArrayPtr;
inputSolid->MallocSampleMemory(nAxis, sampleNum);
newDevNxArrayPtr=inputSolid->GetSampleNxArrayPtr(nAxis);
newDevNyArrayPtr=inputSolid->GetSampleNyArrayPtr(nAxis);
newDevDepthArrayPtr=inputSolid->GetSampleDepthArrayPtr(nAxis);
hipLaunchKernelGGL(( krLDNIBoolean_ResultSampleCollection), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0,
devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, devIndexArrayPtr,
newDevNxArrayPtr, newDevNyArrayPtr, newDevDepthArrayPtr, devIndexArrayResPtr, arrsize, width, gwidth);
CUDA_SAFE_CALL( hipMemcpy( devIndexArrayPtr, devIndexArrayResPtr, (arrsize+1)*sizeof(unsigned int), hipMemcpyDeviceToDevice ) );
hipFree(devNxArrayPtr); hipFree(devNyArrayPtr); hipFree(devDepthArrayPtr);
}
//-----------------------------------------------------------------------------------
// Step 3: free the memory
hipFree(devIndexArrayResPtr);
printf("Boolean Operation Time (ms): %ld\n",clock()-time);
return true;
}
void LDNIcudaOperation::_decomposeLDNIByFBOPBO(LDNIcudaSolid *solid, GLuint* vbo, GLuint* vboI, int mesh_count, float Cent[], GLhandleARB g_programObj, int indexCount[])
{
unsigned int n_max,i,n,mesh_ID;
float gWidth,origin[3];
unsigned int overall_n_max=0;
long readbackTime=0, sortingTime=0, tempTime;
GLint id0,id1;
hipEvent_t startClock, stopClock;
CUDA_SAFE_CALL( hipEventCreate( &startClock ) );
CUDA_SAFE_CALL( hipEventCreate( &stopClock ) );
tempTime=clock();
//------------------------------------------------------------------------
// Preparation
int nRes=solid->GetResolution(); gWidth=solid->GetSampleWidth();
float width=gWidth*(float)nRes;
solid->GetOrigin(origin[0],origin[1],origin[2]);
int arrsize=nRes*nRes;
//------------------------------------------------------------------------
// Step 1: Setup the rendering environment
glEnable(GL_DEPTH_TEST);
glEnable(GL_STENCIL_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glDisable(GL_POLYGON_OFFSET_FILL);
glDisable(GL_POLYGON_OFFSET_LINE);
glDisable(GL_BLEND);
glDisable(GL_POLYGON_SMOOTH); // turn off anti-aliasing
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
glDisable(GL_MAP_COLOR); glDisable(GL_DITHER);
glShadeModel(GL_FLAT);
glDisable(GL_LIGHTING); glDisable(GL_LIGHT0);
glDisable(GL_LOGIC_OP);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_ALPHA_TEST);
	glGetError(); // clear any error generated previously
//------------------------------------------------------------------------
// create the FBO objects and texture for rendering
if (glewIsSupported("GL_EXT_framebuffer_object") == 0) printf("Warning: FBO is not supported!\n");
if (glGetError()!=GL_NO_ERROR) printf("Error: before framebuffer generation!\n");
//------------------------------------------------------------------------
GLuint fbo;
glGenFramebuffersEXT(1, &fbo);
if (glGetError()!=GL_NO_ERROR) printf("Error: framebuffer generation!\n");
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
if (glGetError()!=GL_NO_ERROR) printf("Error: framebuffer binding!\n");
//------------------------------------------------------------------------
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F_ARB, nRes, nRes, 0, GL_RGBA, GL_FLOAT, 0);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, tex, 0);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching texture to framebuffer generation!\n");
cudaGraphicsResource *sampleTex_resource;
CUDA_SAFE_CALL( hipGraphicsGLRegisterImage(&sampleTex_resource, tex, GL_TEXTURE_2D, hipGraphicsMapFlagsReadOnly) );
//------------------------------------------------------------------------
GLuint depth_and_stencil_rb;
glGenRenderbuffersEXT(1, &depth_and_stencil_rb);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_STENCIL_EXT, nRes, nRes);
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching renderbuffer of depth-buffer to framebuffer generation!\n");
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching renderbuffer of stencil-buffer to framebuffer generation!\n");
//------------------------------------------------------------------------
GLuint indexPBO;
glGenBuffers(1,&indexPBO); // generation of PBO for index array readback
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, indexPBO);
glBufferData(GL_PIXEL_PACK_BUFFER_ARB, nRes*nRes*sizeof(unsigned char), NULL, GL_STREAM_READ_ARB);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL( hipGLRegisterBufferObject(indexPBO) );
//------------------------------------------------------------------------
if (glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT)!=GL_FRAMEBUFFER_COMPLETE_EXT)
printf("Warning: the setting for rendering on FBO is not correct!\n");
else
printf("FBO has been created successfully!\n");
glPushAttrib(GL_VIEWPORT_BIT);
glViewport(0,0,nRes,nRes);
printf("Preparation time: %ld (ms)\n",clock()-tempTime);
id0 = glGetUniformLocationARB(g_programObj,"Cent");
glUniform3fARB(id0,Cent[0],Cent[1],Cent[2]);
id1 = glGetUniformLocationARB(g_programObj,"mesh_ID");
//------------------------------------------------------------------------
// Step 2: Rendering to get the Hermite samples
for(short nAxis=0; nAxis<3; nAxis++) {
//---------------------------------------------------------------------------------------
// Rendering step 1: setting the viewing window
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
//---------------------------------------------------------------------------------------
		// The eye is located at (0, 0, 0); with near=width*0.5 and far=-width*0.5 below,
		// the visible slab spans z in [-width*0.5, width*0.5] in eye coordinates
glOrtho(-width*0.5f,width*0.5f,-width*0.5f,width*0.5f,width*0.5f,-width*0.5f);
// Note that: in "glOrtho(left,right,bottom,top,near,far);"
// (left,right,bottom,top) are located at the boundary of pixel instead of
// the center of pixels
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
//---------------------------------------------------------------------------------------
// Rendering step 2: determine the number of layers
glClearColor( 1.0f, 1.0f, 1.0f, 1.0f );
glClearDepth(1.0);
glClearStencil(0); glColor3f(1,1,1);
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDepthFunc(GL_ALWAYS);
glStencilFunc(GL_GREATER, 1, 0xff);
glStencilOp(GL_INCR, GL_INCR, GL_INCR);
glPushMatrix();
switch(nAxis) {
case 0:{glRotatef(-90,0,1,0); glRotatef(-90,1,0,0); }break;
case 1:{glRotatef(90,0,1,0); glRotatef(90,0,0,1); }break;
}
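		// These rotations re-orient the model so that sampling along the current nAxis
		// direction happens along the viewing (z) axis; nAxis==2 needs no rotation.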
glEnableClientState( GL_VERTEX_ARRAY );
for(mesh_ID = 0; mesh_ID < mesh_count; mesh_ID++)
{
glUniform1iARB(id1,mesh_ID);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo[mesh_ID]);
glVertexPointer(3, GL_FLOAT, 0, 0);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, vboI[mesh_ID]);
glDrawElements(GL_TRIANGLES, indexCount[mesh_ID], GL_UNSIGNED_INT, 0);
}
glDisableClientState( GL_VERTEX_ARRAY );
glFlush();
//--------------------------------------------------------------------------------------------------------
// reading stencil buffer into the device memory of CUDA
tempTime=clock();
glReadBuffer(GL_COLOR_ATTACHMENT0_EXT);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, indexPBO);
GLint OldPackAlignment;
glGetIntegerv(GL_PACK_ALIGNMENT,&OldPackAlignment);
glPixelStorei(GL_PACK_ALIGNMENT,1); // Important!!! Without this, the read-back could be abnormal.
glReadPixels(0,0,nRes,nRes,GL_STENCIL_INDEX,GL_UNSIGNED_BYTE,0);
glPixelStorei(GL_PACK_ALIGNMENT,OldPackAlignment);
//--------------------------------------------------------------------------------------------------------
unsigned char *devStencilBufferPtr;
unsigned int *devResArrayPtr;
unsigned int *devIndexArrayPtr=solid->GetIndexArrayPtr(nAxis);
CUDA_SAFE_CALL( hipGLMapBufferObject__( (void **)&devStencilBufferPtr, indexPBO) );
CUDA_SAFE_CALL( hipMalloc( (void**)&devResArrayPtr, BLOCKS_PER_GRID*sizeof(unsigned int) ) );
//--------------------------------------------------------------------------------------------------------
// building the indexArray on device
hipLaunchKernelGGL(( krLDNISampling_CopyIndexAndFindMax), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, devStencilBufferPtr,
devIndexArrayPtr,devResArrayPtr,arrsize);
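		// The kernel copies each pixel's stencil count (the number of layers along that ray)
		// into devIndexArrayPtr and writes one per-block maximum into devResArrayPtr; the host
		// loop below reduces those block maxima to n_max.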
//--------------------------------------------------------------------------------------------------------
// read back the max number of layers -- "n_max"
unsigned int* resArrayPtr;
resArrayPtr=(unsigned int *)malloc(BLOCKS_PER_GRID*sizeof(unsigned int));
CUDA_SAFE_CALL( hipMemcpy( resArrayPtr, devResArrayPtr, BLOCKS_PER_GRID*sizeof(unsigned int), hipMemcpyDeviceToHost ) );
n_max=0;
for(i=0;i<BLOCKS_PER_GRID;i++) n_max = MAX(n_max,resArrayPtr[i]);
hipFree(devResArrayPtr); free(resArrayPtr);
//--------------------------------------------------------------------------------------------------------
// read back the number of samples -- "sampleNum"
unsigned int sampleNum=0;
tempTime=clock()-tempTime; //readbackTime+=tempTime;
printf("Stencil buffer processing time: %ld (ms)\n",tempTime);
long scanTime=clock();
// for debug purpose
resArrayPtr=(unsigned int *)malloc((arrsize+1)*sizeof(unsigned int));
CUDA_SAFE_CALL( hipMemcpy( resArrayPtr, devIndexArrayPtr, (arrsize+1)*sizeof(unsigned int), hipMemcpyDeviceToHost ) );
sampleNum=0;
for(int k=0;k<arrsize;k++) {sampleNum+=resArrayPtr[k]; resArrayPtr[k]=sampleNum;}
for(int k=arrsize;k>0;k--) {resArrayPtr[k]=resArrayPtr[k-1];}
resArrayPtr[0]=0;
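		// The loops above (inclusive scan, shift, and zeroing of the first entry) form an
		// exclusive prefix sum on the host, matching the thrust::exclusive_scan compaction
		// used in _booleanOperation() and _UnionMultiObjects().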
CUDA_SAFE_CALL( hipMemcpy( devIndexArrayPtr, resArrayPtr, (arrsize+1)*sizeof(unsigned int), hipMemcpyHostToDevice ) );
free(resArrayPtr);
scanTime=clock()-scanTime; printf("Scanning time: %ld (ms)\n",scanTime);
//--------------------------------------------------------------------------------------------------------
CUDA_SAFE_CALL( hipGLUnmapBufferObject( indexPBO ) );
glUnmapBuffer(GL_PIXEL_PACK_BUFFER_ARB);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
printf("n_max=%d sampleNum=%d\n",n_max,sampleNum);
if (n_max>overall_n_max) overall_n_max=n_max;
if (sampleNum==0) continue;
//---------------------------------------------------------------------------------------
// Rendering step 3: decomposing the Layered Depth Images (LDIs) and record its corresponding normals
solid->MallocSampleMemory(nAxis,sampleNum);
float* devNxArrayPtr=solid->GetSampleNxArrayPtr(nAxis);
float* devNyArrayPtr=solid->GetSampleNyArrayPtr(nAxis);
float* devDepthArrayPtr=solid->GetSampleDepthArrayPtr(nAxis);
tempTime=clock();
for(n=1;n<=n_max;n++) {
CUDA_SAFE_CALL( hipGraphicsMapResources( 1, &sampleTex_resource, NULL ) );
hipArray *in_array;
CUDA_SAFE_CALL( hipGraphicsSubResourceGetMappedArray( &in_array, sampleTex_resource, 0, 0));
CUDA_SAFE_CALL( hipBindTextureToArray(tex2DFloat4In, in_array) );
//--------------------------------------------------------------------------------------------------------
// fill the sampleArray on device
hipLaunchKernelGGL(( krLDNISuperUnion_CopySamples), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, devNxArrayPtr, devNyArrayPtr,
devDepthArrayPtr, n, arrsize, nRes, devIndexArrayPtr);
CUDA_SAFE_CALL( hipGraphicsUnmapResources( 1, &sampleTex_resource, NULL ) );
if (n==n_max) break;
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glStencilFunc(GL_GREATER, n+1, 0xff);
glStencilOp(GL_KEEP, GL_INCR, GL_INCR);
{
glEnableClientState( GL_VERTEX_ARRAY );
for(mesh_ID = 0; mesh_ID < mesh_count; mesh_ID++)
{
glUniform1iARB(id1,mesh_ID);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo[mesh_ID]);
glVertexPointer(3, GL_FLOAT, 0, 0);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, vboI[mesh_ID]);
glDrawElements(GL_TRIANGLES, indexCount[mesh_ID], GL_UNSIGNED_INT, 0);
}
glDisableClientState( GL_VERTEX_ARRAY );
}
glFlush();
}
tempTime=clock()-tempTime; readbackTime+=tempTime;
//------------------------------------------------------------------------
// Rendering step 4: sorting the samples
CUDA_SAFE_CALL( hipEventRecord( startClock, 0 ) );
CUDA_SAFE_CALL( hipEventSynchronize( startClock ) );
hipLaunchKernelGGL(( krLDNISampling_SortSamples), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, devNxArrayPtr, devNyArrayPtr,
devDepthArrayPtr, arrsize, devIndexArrayPtr);
CUDA_SAFE_CALL( hipEventRecord( stopClock, 0 ) );
CUDA_SAFE_CALL( hipEventSynchronize( stopClock ) );
float elapsedTime;
CUDA_SAFE_CALL( hipEventElapsedTime( &elapsedTime,
startClock, stopClock ) );
printf( "Sorting time is: %3.1f (ms)\n", elapsedTime );
sortingTime+=(long)elapsedTime;
}
//------------------------------------------------------------------------------------
// Step 3: Set the rendering parameters back
//------------------------------------------------------------------------------------
// detach FBO
glPopAttrib();
// release memory for PBO and cuda's map
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL( hipGLUnregisterBufferObject( indexPBO ) );
glDeleteBuffers(1, &indexPBO);
CUDA_SAFE_CALL( hipGraphicsUnregisterResource( sampleTex_resource) );
// release memory for the 2D texture
glBindTexture(GL_TEXTURE_2D, 0);
glDeleteTextures(1, &tex);
// release memory for the frame-buffer object
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
glDeleteFramebuffersEXT(1, &fbo);
// release memory for the render-buffer object
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, 0);
glDeleteRenderbuffersEXT(1, &depth_and_stencil_rb);
//------------------------------------------------------------------------------------
glEnable(GL_POLYGON_OFFSET_FILL);
glEnable(GL_POLYGON_OFFSET_LINE);
glEnable(GL_BLEND);
glEnable(GL_DITHER);
glDisable(GL_STENCIL_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_MAP_COLOR);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHTING); glEnable(GL_LIGHT0);
//	glEnable(GL_POLYGON_SMOOTH);	// enabling this causes invalid display on the Thinkpad laptop
	glEnable(GL_POINT_SMOOTH);
//	glEnable(GL_LINE_SMOOTH);	// enabling this makes the Compaq laptop fail to run
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
	printf("\nn_max=%u \n",overall_n_max);
printf("Texture Size: %f (MB)\n",(float)((float)overall_n_max*(float)nRes*(float)nRes*7.0f)/(1024.0f*1024.0f));
printf("Readback time: %ld (ms)\nSorting time: %ld (ms)\n",
readbackTime, sortingTime);
CUDA_SAFE_CALL( hipEventDestroy( startClock ) );
CUDA_SAFE_CALL( hipEventDestroy( stopClock ) );
}
bool LDNIcudaOperation::BooleanOperation(LDNIcudaSolid* &inputSolid, QuadTrglMesh *meshB, short nOperationType, float boundingBox[])
{
//float boundingBox[6];
LDNIcudaSolid *solidB;
//-----------------------------------------------------------------------------------
// Step 1: converting the mesh surface into a LDNI solid
int res=inputSolid->GetResolution();
if (nOperationType!=3) {
LDNIcudaOperation::BRepToLDNISampling( meshB, solidB, boundingBox, res );
}
else {
solidB=inputSolid; inputSolid=0;
LDNIcudaOperation::BRepToLDNISampling( meshB, inputSolid, boundingBox, res );
nOperationType=2;
}
//-----------------------------------------------------------------------------------
// Step 2: repair and truncate the sampled LDNI solid into the current working space
//-----------------------------------------------------------------------------------
// Step 3: computing the Boolean operation results on LDNIs
printf("-----------------------------------------------------------------------\n");
printf("Starting to compute Boolean operation\n");
printf("-----------------------------------------------------------------------\n");
_booleanOperation(inputSolid, solidB, nOperationType);
inputSolid->SetBoundingBox(boundingBox);
int nres = inputSolid->GetResolution();
float gWidth=(boundingBox[1]-boundingBox[0])/(float)nres;
inputSolid->SetSampleWidth(gWidth);
//-----------------------------------------------------------------------------------
// Step 4: free the memory
delete solidB;
return true;
}
bool LDNIcudaOperation::BooleanOperation(LDNIcudaSolid* &inputSolid, QuadTrglMesh *meshB, short nOperationType)
{
float boundingBox[6]; LDNIcudaSolid *solidB;
//-----------------------------------------------------------------------------------
// Step 1: converting the mesh surface into a LDNI solid
if ( _bExpandableWorkingSpace ) {
meshB->CompBoundingBox(boundingBox);
_expansionLDNIcudaSolidByNewBoundingBox(inputSolid, boundingBox);
}
int res=inputSolid->GetResolution();
if (nOperationType!=3) {
LDNIcudaOperation::BRepToLDNISampling( meshB, solidB, boundingBox, res );
}
else {
solidB=inputSolid; inputSolid=0;
LDNIcudaOperation::BRepToLDNISampling( meshB, inputSolid, boundingBox, res );
nOperationType=2;
}
//-----------------------------------------------------------------------------------
// Step 2: repair and truncate the sampled LDNI solid into the current working space
if ( !(_bExpandableWorkingSpace) ) {
//repair solidB
}
//-----------------------------------------------------------------------------------
// Step 3: computing the Boolean operation results on LDNIs
printf("-----------------------------------------------------------------------\n");
printf("Starting to compute Boolean operation\n");
printf("-----------------------------------------------------------------------\n");
_booleanOperation(inputSolid, solidB, nOperationType);
inputSolid->SetBoundingBox(boundingBox);
int nres = inputSolid->GetResolution();
float gWidth=(boundingBox[1]-boundingBox[0])/(float)nres;
inputSolid->SetSampleWidth(gWidth);
//-----------------------------------------------------------------------------------
// Step 4: free the memory
delete solidB;
return true;
}
//bool LDNIcudaOperation::BooleanOperation(LDNIcudaSolid* &solidA, LDNIcudaSolid* &solidB, short nOperationType)
//{
// float boundingBox[6],origin[3];
//
//
//
// //solidA->GetBoundingBox(boundingBox);
// //_expansionLDNIcudaSolidByNewBoundingBox(solidB, boundingBox);
//
// //if ( _bExpandableWorkingSpace ) {
// // meshB->CompBoundingBox(boundingBox);
// // _expansionLDNIcudaSolidByNewBoundingBox(inputSolid, boundingBox);
//
// //}
//
// printf("-----------------------------------------------------------------------\n");
// printf("Starting to compute Boolean operation\n");
// printf("-----------------------------------------------------------------------\n");
// _booleanOperation(solidA, solidB, nOperationType);
// solidA->SetBoundingBox(boundingBox);
// int nres = solidA->GetResolution();
// float gWidth=(boundingBox[1]-boundingBox[0])/(float)nres;
// solidA->SetSampleWidth(gWidth);
//
// delete solidB;
//
// return true;
//}
bool LDNIcudaOperation::BooleanOperation(QuadTrglMesh *meshA, QuadTrglMesh *meshB, int res, short nOperationType, LDNIcudaSolid* &outputSolid, LDNIcudaSolid* &savedSolid)
{
float boundingBox[6]; LDNIcudaSolid *solidB; //int stA,numA,stRes,numRes,stB;
//-----------------------------------------------------------------------------------
// Step 1: converting mesh surfaces into LDNIs
float bndBoxA[6],bndBoxB[6];
meshA->CompBoundingBox(bndBoxA); meshB->CompBoundingBox(bndBoxB);
_compBoundingCube(meshA, meshB, boundingBox, res);
if (savedSolid!= NULL)
{
_expansionLDNIcudaSolidByNewBoundingBox(savedSolid, boundingBox);
res = savedSolid->GetResolution();
}
if (nOperationType!=3) {
BRepToLDNISampling(meshA, outputSolid, boundingBox, res);
BRepToLDNISampling(meshB, solidB, boundingBox, res);
}
else {
BRepToLDNISampling(meshB, outputSolid, boundingBox, res);
BRepToLDNISampling(meshA, solidB, boundingBox, res);
nOperationType=2;
}
//-----------------------------------------------------------------------------------
// Step 2: boolean operations
	printf("-----------------------------------------------------------------------\n");
	printf("Starting to compute Boolean operation\n");
	printf("-----------------------------------------------------------------------\n");
_booleanOperation(outputSolid, solidB, nOperationType);
/*outputSolid->SetBoundingBox(boundingBox);
int nres = outputSolid->GetResolution();
float gWidth=(boundingBox[1]-boundingBox[0])/(float)nres;
outputSolid->SetSampleWidth(gWidth);*/
delete solidB;
return true;
}
bool LDNIcudaOperation::BooleanOperation(QuadTrglMesh *meshA, QuadTrglMesh *meshB, int res, short nOperationType, LDNIcudaSolid* &outputSolid)
{
float boundingBox[6]; LDNIcudaSolid *solidB; //int stA,numA,stRes,numRes,stB;
//-----------------------------------------------------------------------------------
// Step 1: converting mesh surfaces into LDNIs
float bndBoxA[6],bndBoxB[6];
meshA->CompBoundingBox(bndBoxA); meshB->CompBoundingBox(bndBoxB);
_compBoundingCube(meshA, meshB, boundingBox, res);
if (nOperationType!=3) {
BRepToLDNISampling(meshA, outputSolid, boundingBox, res);
BRepToLDNISampling(meshB, solidB, boundingBox, res);
}
else {
BRepToLDNISampling(meshB, outputSolid, boundingBox, res);
BRepToLDNISampling(meshA, solidB, boundingBox, res);
nOperationType=2;
}
//-----------------------------------------------------------------------------------
// Step 2: boolean operations
printf("-----------------------------------------------------------------------\n");
printf("Starting to compute Boolean operation\n");
printf("-----------------------------------------------------------------------\n");
_booleanOperation(outputSolid, solidB, nOperationType);
//outputSolid->SetBoundingBox(boundingBox);
printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n");
delete solidB;
return true;
}
bool LDNIcudaOperation::_booleanOperation(LDNIcudaSolid* outputSolid, LDNIcudaSolid* solidB, short nOperationType)
{
int res=outputSolid->GetResolution();
unsigned int arrsize=res*res;
if (outputSolid->GetSampleNumber()==0) {
if (nOperationType==0) _switchSolid(outputSolid,solidB); // Union
if (nOperationType==1) outputSolid->CleanUpSamples(); // Intersection
// Difference
if (nOperationType==3) _switchSolid(outputSolid,solidB); // Inversed Difference
return true;
}
if (solidB->GetSampleNumber()==0) {
// Union
if (nOperationType==1) outputSolid->CleanUpSamples(); // Intersection
// Difference
if (nOperationType==3) outputSolid->CleanUpSamples(); // Inversed Difference
return true;
}
//-----------------------------------------------------------------------------------
// Step 1: Initialization
long time=clock();
unsigned int *devIndexArrayResPtr;
CUDA_SAFE_CALL( hipMalloc( (void**)&devIndexArrayResPtr, (arrsize+1)*sizeof(unsigned int) ) );
//-----------------------------------------------------------------------------------
// Step 2: computing the Boolean operation results on LDNIs
for(short nAxis=0;nAxis<3;nAxis++) {
//---------------------------------------------------------------------------------------------
		// Sub-step 1: initialization
CUDA_SAFE_CALL( hipMemset( (void*)devIndexArrayResPtr, 0, (arrsize+1)*sizeof(unsigned int) ) );
//---------------------------------------------------------------------------------------------
float *devNxArrayPtrA=outputSolid->GetSampleNxArrayPtr(nAxis);
float *devNyArrayPtrA=outputSolid->GetSampleNyArrayPtr(nAxis);
float *devDepthArrayPtrA=outputSolid->GetSampleDepthArrayPtr(nAxis); //if (devDepthArrayPtrA==NULL) printf("Empty ");
unsigned int *devIndexArrayPtrA=outputSolid->GetIndexArrayPtr(nAxis);
float *devNxArrayPtrB=solidB->GetSampleNxArrayPtr(nAxis);
float *devNyArrayPtrB=solidB->GetSampleNyArrayPtr(nAxis);
float *devDepthArrayPtrB=solidB->GetSampleDepthArrayPtr(nAxis);
unsigned int *devIndexArrayPtrB=solidB->GetIndexArrayPtr(nAxis);
//---------------------------------------------------------------------------------------------
// Sub-step 2: computing the result of boolean operation ray by ray
hipLaunchKernelGGL(( krLDNIBoolean_BooleanOnRays), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, devNxArrayPtrA, devNyArrayPtrA, devDepthArrayPtrA, devIndexArrayPtrA,
devNxArrayPtrB, devNyArrayPtrB, devDepthArrayPtrB, devIndexArrayPtrB, devIndexArrayResPtr, arrsize, nOperationType);
//---------------------------------------------------------------------------------------------
// Sub-step 3: compaction of index array
thrust::device_ptr<unsigned int> dev_ptr(devIndexArrayResPtr); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr, dev_ptr+(arrsize+1), dev_ptr); // in-place scan
unsigned int sampleNum=dev_ptr[arrsize];
//---------------------------------------------------------------------------------------------
// Sub-step 4: collecting the resultant samples into the sampleArray of solidTileA
float *newDevNxArrayPtrA, *newDevNyArrayPtrA, *newDevDepthArrayPtrA;
outputSolid->MallocSampleMemory(nAxis, sampleNum);
newDevNxArrayPtrA=outputSolid->GetSampleNxArrayPtr(nAxis);
newDevNyArrayPtrA=outputSolid->GetSampleNyArrayPtr(nAxis);
newDevDepthArrayPtrA=outputSolid->GetSampleDepthArrayPtr(nAxis);
hipLaunchKernelGGL(( krLDNIBoolean_ResultSampleCollection), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0,
devNxArrayPtrA, devNyArrayPtrA, devDepthArrayPtrA, devIndexArrayPtrA,
devNxArrayPtrB, devNyArrayPtrB, devDepthArrayPtrB, devIndexArrayPtrB,
newDevNxArrayPtrA, newDevNyArrayPtrA, newDevDepthArrayPtrA, devIndexArrayResPtr, arrsize);
CUDA_SAFE_CALL( hipMemcpy( devIndexArrayPtrA, devIndexArrayResPtr, (arrsize+1)*sizeof(unsigned int), hipMemcpyDeviceToDevice ) );
hipFree(devNxArrayPtrA); hipFree(devNyArrayPtrA); hipFree(devDepthArrayPtrA);
}
//-----------------------------------------------------------------------------------
// Step 3: free the memory
hipFree(devIndexArrayResPtr);
printf("Boolean Operation Time (ms): %ld\n",clock()-time);
return true;
}
void LDNIcudaOperation::SolidRegularization(LDNIcudaSolid *solid) // Removing samples that are nearly tangentially contacted
{
int res=solid->GetResolution();
unsigned int arrsize=res*res;
//-----------------------------------------------------------------------------------
// Step 1: Initialization
long time=clock();
unsigned int *devIndexArrayPtrRes;
CUDA_SAFE_CALL( hipMalloc( (void**)&devIndexArrayPtrRes, (arrsize+1)*sizeof(unsigned int) ) );
float ww=solid->GetSampleWidth();
//-----------------------------------------------------------------------------------
// Step 2: Remove the tangentially contacted samples
for(short nAxis=0;nAxis<3;nAxis++) {
//---------------------------------------------------------------------------------------------
		// Sub-step 1: initialization
CUDA_SAFE_CALL( hipMemset( (void*)devIndexArrayPtrRes, 0, (arrsize+1)*sizeof(unsigned int) ) );
//---------------------------------------------------------------------------------------------
float *devNxArrayPtr=solid->GetSampleNxArrayPtr(nAxis);
float *devNyArrayPtr=solid->GetSampleNyArrayPtr(nAxis);
float *devDepthArrayPtr=solid->GetSampleDepthArrayPtr(nAxis);
unsigned int *devIndexArrayPtr=solid->GetIndexArrayPtr(nAxis);
//---------------------------------------------------------------------------------------------
// Sub-step 2: computing the result of regularization ray by ray
hipLaunchKernelGGL(( krLDNIRegularization_RegularizationOnRays), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0,
devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, devIndexArrayPtr, devIndexArrayPtrRes, arrsize, 0.01*ww);
//---------------------------------------------------------------------------------------------
// Sub-step 3: compaction of index array
thrust::device_ptr<unsigned int> dev_ptr(devIndexArrayPtrRes); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr, dev_ptr+(arrsize+1), dev_ptr); // in-place scan
unsigned int sampleNum=dev_ptr[arrsize];
//---------------------------------------------------------------------------------------------
// Sub-step 4: collecting the resultant samples into the sampleArray of solidTileA
float *devNxArrayPtrRes, *devNyArrayPtrRes, *devDepthArrayPtrRes;
CUDA_SAFE_CALL( hipMalloc( (void**)&devNxArrayPtrRes, sampleNum*sizeof(float) ) );
CUDA_SAFE_CALL( hipMalloc( (void**)&devNyArrayPtrRes, sampleNum*sizeof(float) ) );
CUDA_SAFE_CALL( hipMalloc( (void**)&devDepthArrayPtrRes, sampleNum*sizeof(float) ) );
hipLaunchKernelGGL(( krLDNIRegularization_ResultSampleCollection), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0,
devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, devIndexArrayPtr,
devNxArrayPtrRes, devNyArrayPtrRes, devDepthArrayPtrRes, devIndexArrayPtrRes, arrsize);
solid->SetSampleDepthArrayPtr(nAxis,devDepthArrayPtrRes);
solid->SetSampleNxArrayPtr(nAxis,devNxArrayPtrRes);
solid->SetSampleNyArrayPtr(nAxis,devNyArrayPtrRes);
solid->SetIndexArrayPtr(nAxis,devIndexArrayPtrRes); devIndexArrayPtrRes=devIndexArrayPtr;
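		// Pointer swap: the solid now owns the freshly compacted index array, while the old
		// per-axis index array is recycled as the scratch buffer (memset at the start of the
		// next iteration and freed after the loop).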
solid->SetSampleNumber(nAxis,sampleNum);
hipFree(devNxArrayPtr); hipFree(devNyArrayPtr); hipFree(devDepthArrayPtr);
}
//-----------------------------------------------------------------------------------
// Step 3: Free the memory
hipFree(devIndexArrayPtrRes);
printf("Solid Regularization Time (ms): %ld\n",clock()-time);
}
void LDNIcudaOperation::_compBoundingCube(QuadTrglMesh *meshA, QuadTrglMesh *meshB, float boundingBox[], int res)
{
float bndBoxA[6],bndBoxB[6];
meshA->CompBoundingBox(bndBoxA); meshB->CompBoundingBox(bndBoxB);
boundingBox[0]=MIN(bndBoxA[0],bndBoxB[0]);
boundingBox[1]=MAX(bndBoxA[1],bndBoxB[1]);
boundingBox[2]=MIN(bndBoxA[2],bndBoxB[2]);
boundingBox[3]=MAX(bndBoxA[3],bndBoxB[3]);
boundingBox[4]=MIN(bndBoxA[4],bndBoxB[4]);
boundingBox[5]=MAX(bndBoxA[5],bndBoxB[5]);
//------------------------------------------------------------------------
// making the working space cubic
float xx=(boundingBox[0]+boundingBox[1])*0.5f;
float yy=(boundingBox[2]+boundingBox[3])*0.5f;
float zz=(boundingBox[4]+boundingBox[5])*0.5f;
float ww=boundingBox[1]-boundingBox[0];
if ((boundingBox[3]-boundingBox[2])>ww) ww=boundingBox[3]-boundingBox[2];
if ((boundingBox[5]-boundingBox[4])>ww) ww=boundingBox[5]-boundingBox[4];
ww=ww*0.55+ww/(float)(res-1)*2.0;
boundingBox[0]=xx-ww; boundingBox[1]=xx+ww;
boundingBox[2]=yy-ww; boundingBox[3]=yy+ww;
boundingBox[4]=zz-ww; boundingBox[5]=zz+ww;
}
bool LDNIcudaOperation::BRepToLDNISampling(QuadTrglMesh *mesh, LDNIcudaSolid* &solid, float boundingBox[], int res)
{
const bool bCube=true;
float origin[3],gWidth; long time=clock(),totalTime=clock();
int i,nodeNum;
char fileadd[256];
//----------------------------------------------------------------------------------------
// Preparation
if ((boundingBox[0]==boundingBox[1]) && (boundingBox[2]==boundingBox[3]) && (boundingBox[4]==boundingBox[5])) {
mesh->CompBoundingBox(boundingBox);
if (bCube) {
float xx=(boundingBox[0]+boundingBox[1])*0.5f;
float yy=(boundingBox[2]+boundingBox[3])*0.5f;
float zz=(boundingBox[4]+boundingBox[5])*0.5f;
float ww=boundingBox[1]-boundingBox[0];
if ((boundingBox[3]-boundingBox[2])>ww) ww=boundingBox[3]-boundingBox[2];
if ((boundingBox[5]-boundingBox[4])>ww) ww=boundingBox[5]-boundingBox[4];
ww=ww*0.55+ww/(float)(res-1)*2.0;
boundingBox[0]=xx-ww; boundingBox[1]=xx+ww;
boundingBox[2]=yy-ww; boundingBox[3]=yy+ww;
boundingBox[4]=zz-ww; boundingBox[5]=zz+ww;
}
}
//---------------------------------------------------------------------------------
solid=new LDNIcudaSolid;
solid->MallocMemory(res);
gWidth=(boundingBox[1]-boundingBox[0])/(float)res;
solid->SetSampleWidth(gWidth);
origin[0]=boundingBox[0]+gWidth*0.5f;
origin[1]=boundingBox[2]+gWidth*0.5f;
origin[2]=boundingBox[4]+gWidth*0.5f;
solid->SetOrigin(origin[0],origin[1],origin[2]);
//---------------------------------------------------------------------------------
// For using OpenGL Shading Language to implement the sampling procedure
if (glewInit() != GLEW_OK) {printf("glewInit failed. Exiting...\n"); return false;}
if (glewIsSupported("GL_VERSION_2_0")) {printf("\nReady for OpenGL 2.0\n");} else {printf("OpenGL 2.0 not supported\n"); return false;}
//-----------------------------------------------------------------------------------------
int dispListIndex; GLhandleARB g_programObj, g_vertexShader, g_GeometryShader, g_FragShader;
GLenum InPrimType=GL_POINTS, OutPrimType=GL_TRIANGLES; int OutVertexNum=3;
GLuint vertexTexture;
const char *VshaderString[1],*GshaderString[1],*FshaderString[1];
GLint bCompiled = 0, bLinked = 0;
char str[4096] = ""; int xF,yF;
//-----------------------------------------------------------------------------------------
// Step 1: Setup the shaders
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"sampleLDNIVertexShader.vert");
g_vertexShader = glCreateShaderObjectARB( GL_VERTEX_SHADER_ARB );
unsigned char *ShaderAssembly = _readShaderFile( fileadd );
VshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_vertexShader, 1, VshaderString, NULL );
glCompileShaderARB( g_vertexShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_vertexShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_vertexShader, sizeof(str), NULL, str);
printf("Warning: Vertex Shader Compile Error\n\n"); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"sampleLDNIGeometryShader.geo");
g_GeometryShader = glCreateShaderObjectARB( GL_GEOMETRY_SHADER_EXT );
ShaderAssembly = _readShaderFile( fileadd );
GshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_GeometryShader, 1, GshaderString, NULL );
glCompileShaderARB( g_GeometryShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_GeometryShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_GeometryShader, sizeof(str), NULL, str);
printf("Warning: Geo Shader Compile Error\n\n"); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"sampleLDNIFragmentShader.frag");
g_FragShader = glCreateShaderObjectARB( GL_FRAGMENT_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
FshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_FragShader, 1, FshaderString, NULL );
glCompileShaderARB( g_FragShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_FragShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_FragShader, sizeof(str), NULL, str);
		printf("Warning: Fragment Shader Compile Error\n\n"); return false;
}
//-----------------------------------------------------------------------------
g_programObj = glCreateProgramObjectARB();
if (glGetError()!=GL_NO_ERROR) printf("Error: OpenGL!\n\n");
glAttachObjectARB( g_programObj, g_vertexShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Vertex Shader!\n\n");
glAttachObjectARB( g_programObj, g_GeometryShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Geometry Shader!\n\n");
glAttachObjectARB( g_programObj, g_FragShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Fragment Shader!\n\n");
//-----------------------------------------------------------------------------
// Configuration setting for geometry shader
glProgramParameteriEXT(g_programObj, GL_GEOMETRY_INPUT_TYPE_EXT, InPrimType);
glProgramParameteriEXT(g_programObj, GL_GEOMETRY_OUTPUT_TYPE_EXT, OutPrimType);
glProgramParameteriEXT(g_programObj, GL_GEOMETRY_VERTICES_OUT_EXT, OutVertexNum);
glLinkProgramARB( g_programObj);
glGetObjectParameterivARB( g_programObj, GL_OBJECT_LINK_STATUS_ARB, &bLinked );
if( bLinked == false ) {
glGetInfoLogARB( g_programObj, sizeof(str), NULL, str );
printf("Linking Fail: %s\n",str); return false;
}
//-----------------------------------------------------------------------------------------
// Step 2: creating texture for vertex array and binding
long texBindingTime=clock();
	glGetError();	// clear any error generated previously
nodeNum=mesh->GetNodeNumber(); _texCalProduct(nodeNum,xF,yF);
int temp;
for(temp=1;temp<xF;temp *= 2) {}
xF = temp; //if (xF<64) xF=64;
yF = (int)(nodeNum/xF)+1; if (yF<64) yF=64;
printf("Texture Size: xF=%d yF=%d\n",xF,yF);
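	// Node coordinates are packed into an xF x yF RGB32F rectangle texture (xF rounded up to a
	// power of two, yF chosen so xF*yF >= nodeNum); presumably the vertex shader fetches node i
	// at texel (i % xF, i / xF) using the "sizeNx" uniform, though the shader source is not shown here.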
float* verTex=(float*)malloc(xF*yF*3*sizeof(float));
memset(verTex,0,xF*yF*3*sizeof(float));
memcpy(verTex,mesh->GetNodeArrayPtr(),nodeNum*3*sizeof(float));
glEnable(GL_TEXTURE_RECTANGLE_ARB);
glGenTextures(1, &vertexTexture);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, vertexTexture);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGB32F_ARB, xF, yF, 0, GL_RGB, GL_FLOAT, verTex);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, 0);
free(verTex);
if (glGetError()!=GL_NO_ERROR) printf("Error: GL_TEXTURE_RECTANGLE_ARB texture binding!\n\n");
texBindingTime=clock()-texBindingTime;
printf("\nTime for binding texture onto the graphics memory - %ld (ms)\n\n",texBindingTime);
//-----------------------------------------------------------------------------------------
// Step 3: building GL-list for activating the geometry shader
unsigned int ver[4];
int faceNum=mesh->GetFaceNumber();
dispListIndex = glGenLists(1);
glNewList(dispListIndex, GL_COMPILE);
glBegin(GL_POINTS);
for(i=0;i<faceNum;i++) {
mesh->GetFaceNodes(i+1,ver[0],ver[1],ver[2],ver[3]);
glVertex3i(ver[0]-1,ver[1]-1,ver[2]-1);
if (mesh->IsQuadFace(i+1)) {glVertex3i(ver[0]-1,ver[2]-1,ver[3]-1);} // one more triangle
}
glEnd();
glEndList();
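	// Each glVertex3i above carries the three (0-based) node indices of a triangle rather than
	// coordinates; presumably the geometry shader expands each such point into an actual triangle
	// by fetching the node positions from vertexTexture.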
//-----------------------------------------------------------------------------------------
// Step 4: using program objects and the texture
GLint id0,id1; float centerPos[3];
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB,vertexTexture);
glUseProgramObjectARB(g_programObj);
id0 = glGetUniformLocationARB(g_programObj,"sizeNx");
glUniform1iARB(id0,xF);
centerPos[0]=(boundingBox[0]+boundingBox[1])*0.5f;
centerPos[1]=(boundingBox[2]+boundingBox[3])*0.5f;
centerPos[2]=(boundingBox[4]+boundingBox[5])*0.5f;
id1 = glGetUniformLocationARB(g_programObj,"Cent");
glUniform3fARB(id1,centerPos[0],centerPos[1],centerPos[2]);
if (glGetError()!=GL_NO_ERROR) printf("Error: vertex texture binding!\n\n");
printf("Create shader texture\n");
//-----------------------------------------------------------------------------------------
// Step 5: sampling
printf("GLList ID: %d\n",dispListIndex);
time=clock()-time; printf("GL-List building time (including uploading texture) is %ld (ms)\n",time);
_decomposeLDNIByFBOPBO(solid,dispListIndex);
//-----------------------------------------------------------------------------------------
// Step 6: free the memory
time=clock();
//-----------------------------------------------------------------------------------------
glDeleteLists(dispListIndex, 1);
glBindTexture( GL_TEXTURE_RECTANGLE_ARB, 0);
glDisable(GL_TEXTURE_RECTANGLE_ARB);
glDeleteTextures(1, &vertexTexture);
glUseProgramObjectARB(0);
glDeleteObjectARB( g_vertexShader);
glDeleteObjectARB( g_GeometryShader);
glDeleteObjectARB( g_FragShader);
glDeleteObjectARB( g_programObj);
//------------------------------------------------------------------------
printf("\nMemory clean-up time is %ld (ms)\n",clock()-time);
printf("--------------------------------------------------------------\n");
printf("Total time for sampling is %ld (ms)\n\n",clock()-totalTime);
return true;
}
void LDNIcudaOperation::_decomposeLDNIByFBOPBO(LDNIcudaSolid *solid, int displayListIndex)
{
unsigned int n_max,i,n;
float gWidth,origin[3];
unsigned int overall_n_max=0;
long readbackTime=0, sortingTime=0, tempTime;
hipEvent_t startClock, stopClock;
CUDA_SAFE_CALL( hipEventCreate( &startClock ) );
CUDA_SAFE_CALL( hipEventCreate( &stopClock ) );
tempTime=clock();
//------------------------------------------------------------------------
// Preparation
int nRes=solid->GetResolution(); gWidth=solid->GetSampleWidth();
float width=gWidth*(float)nRes;
solid->GetOrigin(origin[0],origin[1],origin[2]);
int arrsize=nRes*nRes;
//------------------------------------------------------------------------
// Step 1: Setup the rendering environment
glEnable(GL_DEPTH_TEST);
glEnable(GL_STENCIL_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glDisable(GL_POLYGON_OFFSET_FILL);
glDisable(GL_POLYGON_OFFSET_LINE);
glDisable(GL_BLEND);
glDisable(GL_POLYGON_SMOOTH); // turn off anti-aliasing
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
glDisable(GL_MAP_COLOR); glDisable(GL_DITHER);
glShadeModel(GL_FLAT);
glDisable(GL_LIGHTING); glDisable(GL_LIGHT0);
glDisable(GL_LOGIC_OP);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_ALPHA_TEST);
glGetError(); // clear any OpenGL error flag raised earlier
//------------------------------------------------------------------------
// create the FBO objects and texture for rendering
if (glewIsSupported("GL_EXT_framebuffer_object") == 0) printf("Warning: FBO is not supported!\n");
if (glGetError()!=GL_NO_ERROR) printf("Error: before framebuffer generation!\n");
//------------------------------------------------------------------------
GLuint fbo;
glGenFramebuffersEXT(1, &fbo);
if (glGetError()!=GL_NO_ERROR) printf("Error: framebuffer generation!\n");
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
if (glGetError()!=GL_NO_ERROR) printf("Error: framebuffer binding!\n");
//------------------------------------------------------------------------
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F_ARB, nRes, nRes, 0, GL_RGBA, GL_FLOAT, 0);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, tex, 0);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching texture to framebuffer generation!\n");
cudaGraphicsResource *sampleTex_resource;
CUDA_SAFE_CALL( hipGraphicsGLRegisterImage(&sampleTex_resource, tex, GL_TEXTURE_2D, hipGraphicsMapFlagsReadOnly) );
//------------------------------------------------------------------------
GLuint depth_and_stencil_rb;
glGenRenderbuffersEXT(1, &depth_and_stencil_rb);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_STENCIL_EXT, nRes, nRes);
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching renderbuffer of depth-buffer to framebuffer generation!\n");
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching renderbuffer of stencil-buffer to framebuffer generation!\n");
//------------------------------------------------------------------------
GLuint indexPBO;
glGenBuffers(1,&indexPBO); // generation of PBO for index array readback
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, indexPBO);
glBufferData(GL_PIXEL_PACK_BUFFER_ARB, nRes*nRes*sizeof(unsigned char), NULL, GL_STREAM_READ_ARB);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL( hipGLRegisterBufferObject(indexPBO) );
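// Note: registering the PBO with CUDA lets the stencil read-back below be mapped straight into
// device memory, so the per-ray crossing counts never take a round trip through host memory.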
//------------------------------------------------------------------------
if (glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT)!=GL_FRAMEBUFFER_COMPLETE_EXT)
printf("Warning: the setting for rendering on FBO is not correct!\n");
else
printf("FBO has been created successfully!\n");
glPushAttrib(GL_VIEWPORT_BIT);
glViewport(0,0,nRes,nRes);
printf("Preparation time: %ld (ms)\n",clock()-tempTime);
//------------------------------------------------------------------------
// Step 2: Rendering to get the Hermite samples
for(short nAxis=0; nAxis<3; nAxis++) {
//---------------------------------------------------------------------------------------
// Rendering step 1: setting the viewing window
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
//---------------------------------------------------------------------------------------
// The eye is located at the origin; the orthographic view volume is centered at the origin
// and spans [-width/2, width/2] along each axis (note that near and far are passed in reversed order)
glOrtho(-width*0.5f,width*0.5f,-width*0.5f,width*0.5f,width*0.5f,-width*0.5f);
// Note that: in "glOrtho(left,right,bottom,top,near,far);"
// (left,right,bottom,top) lie on pixel boundaries rather than at pixel centers
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
//---------------------------------------------------------------------------------------
// Rendering step 2: determine the number of layers
glClearColor( 1.0f, 1.0f, 1.0f, 1.0f );
glClearDepth(1.0);
glClearStencil(0); glColor3f(1,1,1);
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDepthFunc(GL_ALWAYS);
glStencilFunc(GL_GREATER, 1, 0xff);
glStencilOp(GL_INCR, GL_INCR, GL_INCR);
glPushMatrix();
switch(nAxis) {
case 0:{glRotatef(-90,0,1,0); glRotatef(-90,1,0,0); }break;
case 1:{glRotatef(90,0,1,0); glRotatef(90,0,0,1); }break;
}
glCallList(displayListIndex); glFlush();
//--------------------------------------------------------------------------------------------------------
// reading stencil buffer into the device memory of CUDA
tempTime=clock();
glReadBuffer(GL_COLOR_ATTACHMENT0_EXT);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, indexPBO);
GLint OldPackAlignment;
glGetIntegerv(GL_PACK_ALIGNMENT,&OldPackAlignment);
glPixelStorei(GL_PACK_ALIGNMENT,1); // Important: without a pack alignment of 1, rows of the 1-byte read-back may be padded and misread.
glReadPixels(0,0,nRes,nRes,GL_STENCIL_INDEX,GL_UNSIGNED_BYTE,0);
glPixelStorei(GL_PACK_ALIGNMENT,OldPackAlignment);
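// Note: with the PACK PBO bound, glReadPixels(...,0) writes the per-pixel stencil counts (the number
// of surface crossings along each ray) into the PBO rather than into client memory; GL_PACK_ALIGNMENT
// is forced to 1 because rows of this 1-byte-per-pixel read-back are not necessarily 4-byte aligned.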
//--------------------------------------------------------------------------------------------------------
unsigned char *devStencilBufferPtr;
unsigned int *devResArrayPtr;
unsigned int *devIndexArrayPtr=solid->GetIndexArrayPtr(nAxis);
CUDA_SAFE_CALL( hipGLMapBufferObject__( (void **)&devStencilBufferPtr, indexPBO) );
CUDA_SAFE_CALL( hipMalloc( (void**)&devResArrayPtr, BLOCKS_PER_GRID*sizeof(unsigned int) ) );
//--------------------------------------------------------------------------------------------------------
// building the indexArray on device
hipLaunchKernelGGL(( krLDNISampling_CopyIndexAndFindMax), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, devStencilBufferPtr,
devIndexArrayPtr,devResArrayPtr,arrsize);
//--------------------------------------------------------------------------------------------------------
// read back the max number of layers -- "n_max"
unsigned int* resArrayPtr;
resArrayPtr=(unsigned int *)malloc(BLOCKS_PER_GRID*sizeof(unsigned int));
CUDA_SAFE_CALL( hipMemcpy( resArrayPtr, devResArrayPtr, BLOCKS_PER_GRID*sizeof(unsigned int), hipMemcpyDeviceToHost ) );
n_max=0;
for(i=0;i<BLOCKS_PER_GRID;i++) n_max = MAX(n_max,resArrayPtr[i]);
hipFree(devResArrayPtr); free(resArrayPtr);
//--------------------------------------------------------------------------------------------------------
// read back the number of samples -- "sampleNum"
unsigned int sampleNum=0;
tempTime=clock()-tempTime; //readbackTime+=tempTime;
printf("Stencil buffer processing time: %ld (ms)\n",tempTime);
long scanTime=clock();
// prefix-sum the per-ray counts on the host (also handy for debugging)
resArrayPtr=(unsigned int *)malloc((arrsize+1)*sizeof(unsigned int));
CUDA_SAFE_CALL( hipMemcpy( resArrayPtr, devIndexArrayPtr, (arrsize+1)*sizeof(unsigned int), hipMemcpyDeviceToHost ) );
sampleNum=0;
for(int k=0;k<arrsize;k++) {sampleNum+=resArrayPtr[k]; resArrayPtr[k]=sampleNum;}
for(int k=arrsize;k>0;k--) {resArrayPtr[k]=resArrayPtr[k-1];}
resArrayPtr[0]=0;
CUDA_SAFE_CALL( hipMemcpy( devIndexArrayPtr, resArrayPtr, (arrsize+1)*sizeof(unsigned int), hipMemcpyHostToDevice ) );
free(resArrayPtr);
scanTime=clock()-scanTime; printf("Scanning time: %ld (ms)\n",scanTime);
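// Note: the loop above is an exclusive prefix sum over the per-ray sample counts, done on the host.
// An equivalent device-side scan (the approach used elsewhere in this file) would look roughly like:
// thrust::device_ptr<unsigned int> p(devIndexArrayPtr); // illustrative sketch only, not executed here
// thrust::exclusive_scan(p, p+(arrsize+1), p); // in-place scan; the total count lands in the last entry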
//--------------------------------------------------------------------------------------------------------
CUDA_SAFE_CALL( hipGLUnmapBufferObject( indexPBO ) );
glUnmapBuffer(GL_PIXEL_PACK_BUFFER_ARB);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
printf("n_max=%d sampleNum=%d\n",n_max,sampleNum);
if (n_max>overall_n_max) overall_n_max=n_max;
if (sampleNum==0) continue;
//---------------------------------------------------------------------------------------
// Rendering step 3: decomposing the Layered Depth Images (LDIs) and recording their corresponding normals
solid->MallocSampleMemory(nAxis,sampleNum);
float* devNxArrayPtr=solid->GetSampleNxArrayPtr(nAxis);
float* devNyArrayPtr=solid->GetSampleNyArrayPtr(nAxis);
float* devDepthArrayPtr=solid->GetSampleDepthArrayPtr(nAxis);
tempTime=clock();
for(n=1;n<=n_max;n++) {
CUDA_SAFE_CALL( hipGraphicsMapResources( 1, &sampleTex_resource, NULL ) );
hipArray *in_array;
CUDA_SAFE_CALL( hipGraphicsSubResourceGetMappedArray( &in_array, sampleTex_resource, 0, 0));
CUDA_SAFE_CALL( hipBindTextureToArray(tex2DFloat4In, in_array) );
//--------------------------------------------------------------------------------------------------------
// fill the sampleArray on device
hipLaunchKernelGGL(( krLDNISampling_CopySamples), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, devNxArrayPtr, devNyArrayPtr,
devDepthArrayPtr, n, arrsize, width, gWidth, nRes, devIndexArrayPtr);
CUDA_SAFE_CALL( hipGraphicsUnmapResources( 1, &sampleTex_resource, NULL ) );
if (n==n_max) break;
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glStencilFunc(GL_GREATER, n+1, 0xff);
glStencilOp(GL_KEEP, GL_INCR, GL_INCR);
glCallList(displayListIndex); glFlush();
}
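// Note: the loop above peels the layered depth images one layer at a time; after each CUDA copy the
// scene is re-rendered with an updated stencil reference so that the FBO color texture holds the next
// layer's Hermite sample (normal x/y in R/G, signed depth in B), which krLDNISampling_CopySamples
// scatters into the per-ray sample arrays through devIndexArrayPtr.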
tempTime=clock()-tempTime; readbackTime+=tempTime;
//------------------------------------------------------------------------
// Rendering step 4: sorting the samples
CUDA_SAFE_CALL( hipEventRecord( startClock, 0 ) );
CUDA_SAFE_CALL( hipEventSynchronize( startClock ) );
hipLaunchKernelGGL(( krLDNISampling_SortSamples), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, devNxArrayPtr, devNyArrayPtr,
devDepthArrayPtr, arrsize, devIndexArrayPtr);
CUDA_SAFE_CALL( hipEventRecord( stopClock, 0 ) );
CUDA_SAFE_CALL( hipEventSynchronize( stopClock ) );
float elapsedTime;
CUDA_SAFE_CALL( hipEventElapsedTime( &elapsedTime,
startClock, stopClock ) );
// printf( "Sorting time is: %3.1f (ms)\n", elapsedTime );
sortingTime+=(long)elapsedTime;
}
//------------------------------------------------------------------------------------
// Step 3: Set the rendering parameters back
//------------------------------------------------------------------------------------
// detach FBO
glPopAttrib();
// release memory for PBO and cuda's map
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL( hipGLUnregisterBufferObject( indexPBO ) );
glDeleteBuffers(1, &indexPBO);
CUDA_SAFE_CALL( hipGraphicsUnregisterResource( sampleTex_resource) );
// release memory for the 2D texture
glBindTexture(GL_TEXTURE_2D, 0);
glDeleteTextures(1, &tex);
// release memory for the frame-buffer object
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
glDeleteFramebuffersEXT(1, &fbo);
// release memory for the render-buffer object
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, 0);
glDeleteRenderbuffersEXT(1, &depth_and_stencil_rb);
//------------------------------------------------------------------------------------
glEnable(GL_POLYGON_OFFSET_FILL);
glEnable(GL_POLYGON_OFFSET_LINE);
glEnable(GL_BLEND);
glEnable(GL_DITHER);
glDisable(GL_STENCIL_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_MAP_COLOR);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHTING); glEnable(GL_LIGHT0);
// glEnable(GL_POLYGON_SMOOTH); // enabling this causes invalid display on the ThinkPad laptop
glEnable(GL_POINT_SMOOTH);
// glEnable(GL_LINE_SMOOTH); // enabling this causes the program to fail on the Compaq laptop
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
printf("\nn_max=%ld \n",overall_n_max);
printf("Texture Size: %f (MB)\n",(float)((float)overall_n_max*(float)nRes*(float)nRes*7.0f)/(1024.0f*1024.0f));
printf("Readback time: %ld (ms)\nSorting time: %ld (ms)\n",
readbackTime, sortingTime);
CUDA_SAFE_CALL( hipEventDestroy( startClock ) );
CUDA_SAFE_CALL( hipEventDestroy( stopClock ) );
}
unsigned char* LDNIcudaOperation::_readShaderFile( const char *fileName )
{
FILE *file = fopen( fileName, "r" );
if ( file == NULL ) {
printf("Cannot open shader file!");
return 0;
}
struct _stat fileStats;
if ( _stat( fileName, &fileStats ) != 0 ) {
printf("Cannot get file stats for shader file!");
return 0;
}
unsigned char *buffer = new unsigned char[fileStats.st_size + 1]; // +1 for the null terminator written below
int bytes = (int)(fread( buffer,1, fileStats.st_size, file ));
buffer[bytes] = 0;
fclose( file );
return buffer;
}
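// Note: _texCalProduct() searches for a factorization outx*outy >= in, preferring a factor divisible
// by 3, to serve as the vertex-texture dimensions; if no suitable pair is found it falls back to the
// degenerate in x 1 layout. The caller further rounds outx up to a power of two before use.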
void LDNIcudaOperation::_texCalProduct(int in, int &outx, int &outy)
{
int left=0,right=0,div3left=0,div3right=0;
left = int(floor(sqrt((float)in)))-1;
right = int(ceil(sqrt((float)in)));
while(left*right < in) {right++;}
if (left%3 == 0 && left*right>=in) {
div3left = left;
div3right = right;
}
else if (right%3 == 0 && left*right>=in) {
div3left = right;
div3right = left;
}
right++; left--;
if (left%3 == 0 && left*right>=in) {
div3left = left;
div3right = right;
}
else if (right%3 == 0 && left*right>=in){
div3left = right;
div3right = left;
}
while(left*right > in){
right++; left--;
if (left%3 == 0 && left*right>in){
div3left = left;
div3right = right;
}
else if (right%3 == 0 && left*right>in){
div3left = right;
div3right = left;
}
}
if (right*left < in){
right--; left++;
if (left%3 == 0 ){
div3left = left;
div3right = right;
}
else if (right%3 == 0){
div3left = right;
div3right = left;
}
}
outx=div3left; outy=div3right;
if (outx==0 || outy==0) {outx=in; outy=1;}
}
//--------------------------------------------------------------------------------------------
void LDNIcudaOperation::OrientedNormalReconstruction(LDNIcudaSolid *solid, unsigned int nSupportSize, bool bWithOrientationVoting)
{
unsigned int *indexArray[3]; float *depthArray[3],*nxArray[3],*nyArray[3];
int res; short nAxis;
float ww,origin[3];
float *buffer; int sampleNum,xNum,yNum,zNum;
//---------------------------------------------------------------------------------------------------------
// preparation
res=solid->GetResolution(); ww=solid->GetSampleWidth();
solid->GetOrigin(origin[0],origin[1],origin[2]);
for(nAxis=0;nAxis<3;nAxis++) {
nxArray[nAxis]=solid->GetSampleNxArrayPtr(nAxis);
nyArray[nAxis]=solid->GetSampleNyArrayPtr(nAxis);
depthArray[nAxis]=solid->GetSampleDepthArrayPtr(nAxis);
indexArray[nAxis]=solid->GetIndexArrayPtr(nAxis);
}
xNum=solid->GetSampleNumber(0); yNum=solid->GetSampleNumber(1); zNum=solid->GetSampleNumber(2);
sampleNum=MAX3(xNum,yNum,zNum);
CUDA_SAFE_CALL( hipMalloc( (void**)&(buffer), sampleNum*3*sizeof(float) ) );
//--------------------------------------------------------------------------------------------------------------------------
// Phase 1: estimation of the oriented normal vectors
for(nAxis=0;nAxis<3;nAxis++) {
//----------------------------------------------------------------------------------------------------------------------
// Preprocessing
hipLaunchKernelGGL(( krLDNINormalProcessing_PreProc), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, indexArray[nAxis],buffer,res,res*res);
//----------------------------------------------------------------------------------------------------------------------
// The following kernel is sample-based normal reconstruction
hipLaunchKernelGGL(( krLDNINormalReconstruction_PerSample), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, indexArray[0], indexArray[1], indexArray[2],
nxArray[0], nxArray[1], nxArray[2], nyArray[0], nyArray[1], nyArray[2],
depthArray[0], depthArray[1], depthArray[2], buffer,
solid->GetSampleNumber(nAxis), nAxis, res, ww, nSupportSize);
//----------------------------------------------------------------------------------------------------------------------
// Updating the result of computation
int sNum=solid->GetSampleNumber(nAxis);
hipLaunchKernelGGL(( krLDNINormalProcessing_Update), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0,
sNum, nxArray[nAxis], nyArray[nAxis], depthArray[nAxis], buffer);
}
//--------------------------------------------------------------------------------------------------------------------------
// Phase 2: voting based correction of normal vectors' orientation
if (bWithOrientationVoting)
for(nAxis=0;nAxis<3;nAxis++) {
//----------------------------------------------------------------------------------------------------------------------
// Preprocessing
hipLaunchKernelGGL(( krLDNINormalProcessing_PreProc), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, indexArray[nAxis],buffer,res,res*res);
//----------------------------------------------------------------------------------------------------------------------
// The following kernel is voting-based orientation correction for normal vectors
hipLaunchKernelGGL(( krLDNINormalProcessing_OrientationCorrectionByVoting), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, indexArray[0], indexArray[1], indexArray[2],
nxArray[0], nxArray[1], nxArray[2], nyArray[0], nyArray[1], nyArray[2],
depthArray[0], depthArray[1], depthArray[2], buffer,
solid->GetSampleNumber(nAxis), nAxis, res, ww, nSupportSize);
}
//-----------------------------------------------------------------------------------------
// release the memory
hipFree(buffer);
}
void LDNIcudaOperation::ParallelProcessingNormalVector(LDNIcudaSolid *solid, unsigned int nSupportSize, float normalPara)
{
// hipEvent_t startClock, stopClock;
// float elapsedTime;
// CUDA_SAFE_CALL( hipEventCreate( &startClock ) );
// CUDA_SAFE_CALL( hipEventCreate( &stopClock ) );
unsigned int *indexArray[3]; float *depthArray[3],*nxArray[3],*nyArray[3];
int res; short nAxis;
float ww,origin[3];
float *buffer; int sampleNum,xNum,yNum,zNum;
//---------------------------------------------------------------------------------------------------------
// preparation
res=solid->GetResolution(); ww=solid->GetSampleWidth();
solid->GetOrigin(origin[0],origin[1],origin[2]);
for(nAxis=0;nAxis<3;nAxis++) {
nxArray[nAxis]=solid->GetSampleNxArrayPtr(nAxis);
nyArray[nAxis]=solid->GetSampleNyArrayPtr(nAxis);
depthArray[nAxis]=solid->GetSampleDepthArrayPtr(nAxis);
indexArray[nAxis]=solid->GetIndexArrayPtr(nAxis);
}
xNum=solid->GetSampleNumber(0); yNum=solid->GetSampleNumber(1); zNum=solid->GetSampleNumber(2);
sampleNum=MAX3(xNum,yNum,zNum);
CUDA_SAFE_CALL( hipMalloc( (void**)&(buffer), sampleNum*3*sizeof(float) ) );
for(nAxis=0;nAxis<3;nAxis++)
{ //nAxis=0;
// CUDA_SAFE_CALL( hipMemset( (void*)buffer, 0, sampleNum*3*sizeof(float) ) );
// CUDA_SAFE_CALL( hipEventRecord( startClock, 0 ) );
// CUDA_SAFE_CALL( hipEventSynchronize( startClock ) );
hipLaunchKernelGGL(( krLDNINormalProcessing_PreProc), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, indexArray[nAxis],buffer,res,res*res);
// CUDA_SAFE_CALL( hipEventRecord( stopClock, 0 ) );
// CUDA_SAFE_CALL( hipEventSynchronize( stopClock ) ); // This confirms the kernel's running has completed
// CUDA_SAFE_CALL( hipEventElapsedTime( &elapsedTime, startClock, stopClock ) );
// printf("%d-direction pre-processing time: %3.1f (ms)\n",(int)nAxis,elapsedTime);
// CUDA_SAFE_CALL( hipEventRecord( startClock, 0 ) );
// CUDA_SAFE_CALL( hipEventSynchronize( startClock ) );
//----------------------------------------------------------------------------------------------------------------------
// The following kernel is ray-based filtering, which is too slow to process
/* krLDNIBilateralNormalFilter_PerRay<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(
indexArray[0], indexArray[1], indexArray[2],
nxArray[0], nxArray[1], nxArray[2],
nyArray[0], nyArray[1], nyArray[2],
depthArray[0], depthArray[1], depthArray[2], buffer,
res*res, nAxis, res, ww, origin[0], origin[1], origin[2], nSupportSize, normalPara);*/
//----------------------------------------------------------------------------------------------------------------------
// The following kernel is sample-based filtering
hipLaunchKernelGGL(( krLDNIBilateralNormalFilter_PerSample), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0,
indexArray[0], indexArray[1], indexArray[2],
nxArray[0], nxArray[1], nxArray[2],
nyArray[0], nyArray[1], nyArray[2],
depthArray[0], depthArray[1], depthArray[2], buffer,
solid->GetSampleNumber(nAxis), nAxis, res, ww, nSupportSize, normalPara);
// CUDA_SAFE_CALL( hipEventRecord( stopClock, 0 ) );
// CUDA_SAFE_CALL( hipEventSynchronize( stopClock ) ); // This confirms the kernel's running has completed
// CUDA_SAFE_CALL( hipEventElapsedTime( &elapsedTime, startClock, stopClock ) );
// printf("%d-direction processing time: %3.1f (ms)\n",(int)nAxis,elapsedTime);
int sNum=solid->GetSampleNumber(nAxis);
// CUDA_SAFE_CALL( hipEventRecord( startClock, 0 ) );
// CUDA_SAFE_CALL( hipEventSynchronize( startClock ) );
hipLaunchKernelGGL(( krLDNINormalProcessing_Update), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0,
sNum, nxArray[nAxis], nyArray[nAxis], depthArray[nAxis], buffer);
// CUDA_SAFE_CALL( hipEventRecord( stopClock, 0 ) );
// CUDA_SAFE_CALL( hipEventSynchronize( stopClock ) );
// CUDA_SAFE_CALL( hipEventElapsedTime( &elapsedTime, startClock, stopClock ) );
// printf("Buffer updating time: %3.1f (ms)\n",elapsedTime);
}
//-----------------------------------------------------------------------------------------
// release the memory
hipFree(buffer);
// CUDA_SAFE_CALL( hipEventDestroy( startClock ) );
// CUDA_SAFE_CALL( hipEventDestroy( stopClock ) );
}
//--------------------------------------------------------------------------------------------
void LDNIcudaOperation::CopyCPUSolidToCUDASolid(LDNIcpuSolid *cpuSolid, LDNIcudaSolid* &cudaSolid)
{
float ox,oy,oz,gWidth; int i,num,res; short nAxis;
LDNIcpuRay *rays; LDNIcpuSample *sampleArray;
cpuSolid->GetOrigin(ox,oy,oz);
gWidth=cpuSolid->GetSampleWidth();
res=cpuSolid->GetResolution();
cudaSolid=new LDNIcudaSolid;
cudaSolid->SetOrigin(ox,oy,oz); cudaSolid->SetSampleWidth(gWidth);
cudaSolid->MallocMemory(res);
//-----------------------------------------------------------------------------------------
// copy the index arrays
unsigned int *dev_indexArray,*indexArray;
num=res*res;
indexArray=(unsigned int *)malloc((num+1)*sizeof(unsigned int));
for(nAxis=0;nAxis<3;nAxis++) {
rays=cpuSolid->GetRayArrayPtr(nAxis);
indexArray[0]=0;
for(i=0;i<num;i++) indexArray[i+1]=rays[i].sampleIndex;
dev_indexArray=cudaSolid->GetIndexArrayPtr(nAxis);
CUDA_SAFE_CALL( hipMemcpy( dev_indexArray, indexArray, (num+1)*sizeof(unsigned int), hipMemcpyHostToDevice ) );
}
free(indexArray);
//-----------------------------------------------------------------------------------------
// copy the sample arrays
for(nAxis=0;nAxis<3;nAxis++) {
rays=cpuSolid->GetRayArrayPtr(nAxis);
int sampleNum=rays[res*res-1].sampleIndex;
float *sampleNxArray,*sampleNyArray,*sampleDepthArray;
sampleNxArray=(float*)malloc(sampleNum*sizeof(float));
sampleNyArray=(float*)malloc(sampleNum*sizeof(float));
sampleDepthArray=(float*)malloc(sampleNum*sizeof(float));
sampleArray=cpuSolid->GetSampleArrayPtr(nAxis);
for(i=0;i<sampleNum;i++) {
sampleNxArray[i]=sampleArray[i].nx;
sampleNyArray[i]=sampleArray[i].ny;
if (sampleArray[i].nz<0)
sampleDepthArray[i]=-sampleArray[i].depth;
else
sampleDepthArray[i]=sampleArray[i].depth;
}
cudaSolid->MallocSampleMemory(nAxis,sampleNum);
float *dev_sampleNxArray=cudaSolid->GetSampleNxArrayPtr(nAxis);
float *dev_sampleNyArray=cudaSolid->GetSampleNyArrayPtr(nAxis);
float *dev_sampleDepthArray=cudaSolid->GetSampleDepthArrayPtr(nAxis);
CUDA_SAFE_CALL( hipMemcpy( dev_sampleNxArray, sampleNxArray, sampleNum*sizeof(float), hipMemcpyHostToDevice ) );
CUDA_SAFE_CALL( hipMemcpy( dev_sampleNyArray, sampleNyArray, sampleNum*sizeof(float), hipMemcpyHostToDevice ) );
CUDA_SAFE_CALL( hipMemcpy( dev_sampleDepthArray, sampleDepthArray, sampleNum*sizeof(float), hipMemcpyHostToDevice ) );
free(sampleNxArray); free(sampleNyArray); free(sampleDepthArray);
}
}
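// Note on the encoding shared by the two copy routines: the cuda solid stores only nx/ny per sample
// and folds the sign of nz into the sign of the stored depth; nz is recovered as +/-sqrt(1-nx*nx-ny*ny)
// with the sign taken from the stored depth, and the unsigned depth is its absolute value.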
void LDNIcudaOperation::CopyCUDASolidToCPUSolid(LDNIcudaSolid *cudaSolid, LDNIcpuSolid* &cpuSolid)
{
float ox,oy,oz,gWidth; int i,num,res; short nAxis;
LDNIcpuRay *rays; LDNIcpuSample *sampleArray;
cudaSolid->GetOrigin(ox,oy,oz); gWidth=cudaSolid->GetSampleWidth();
res=cudaSolid->GetResolution();
cpuSolid=new LDNIcpuSolid; cpuSolid->SetOrigin(ox,oy,oz);
cpuSolid->SetSampleWidth(gWidth); cpuSolid->MallocMemory(res);
//-----------------------------------------------------------------------------------------
// copy the index arrays
unsigned int *dev_indexArray,*indexArray;
num=res*res;
indexArray=(unsigned int *)malloc((num+1)*sizeof(unsigned int));
for(nAxis=0;nAxis<3;nAxis++) {
rays=cpuSolid->GetRayArrayPtr(nAxis);
dev_indexArray=cudaSolid->GetIndexArrayPtr(nAxis);
CUDA_SAFE_CALL( hipMemcpy( indexArray, dev_indexArray, (num+1)*sizeof(unsigned int), hipMemcpyDeviceToHost ) );
for(i=0;i<num;i++) rays[i].sampleIndex=indexArray[i+1];
}
free(indexArray);
//-----------------------------------------------------------------------------------------
// copy the sample arrays
for(nAxis=0;nAxis<3;nAxis++) {
rays=cpuSolid->GetRayArrayPtr(nAxis);
int sampleNum=rays[res*res-1].sampleIndex;
float *sampleNxArray,*sampleNyArray,*sampleDepthArray;
sampleNxArray=(float*)malloc(sampleNum*sizeof(float));
sampleNyArray=(float*)malloc(sampleNum*sizeof(float));
sampleDepthArray=(float*)malloc(sampleNum*sizeof(float));
float *dev_sampleNxArray=cudaSolid->GetSampleNxArrayPtr(nAxis);
float *dev_sampleNyArray=cudaSolid->GetSampleNyArrayPtr(nAxis);
float *dev_sampleDepthArray=cudaSolid->GetSampleDepthArrayPtr(nAxis);
CUDA_SAFE_CALL( hipMemcpy( sampleNxArray, dev_sampleNxArray, sampleNum*sizeof(float), hipMemcpyDeviceToHost ) );
CUDA_SAFE_CALL( hipMemcpy( sampleNyArray, dev_sampleNyArray, sampleNum*sizeof(float), hipMemcpyDeviceToHost ) );
CUDA_SAFE_CALL( hipMemcpy( sampleDepthArray, dev_sampleDepthArray, sampleNum*sizeof(float), hipMemcpyDeviceToHost ) );
cpuSolid->MallocSampleMemory(nAxis,sampleNum);
sampleArray=cpuSolid->GetSampleArrayPtr(nAxis);
for(i=0;i<sampleNum;i++) {
sampleArray[i].nx=sampleNxArray[i];
sampleArray[i].ny=sampleNyArray[i];
double dd=1.0-sampleArray[i].nx*sampleArray[i].nx-sampleArray[i].ny*sampleArray[i].ny;
if (dd<0.0) dd=0.0; if (dd>1.0) dd=1.0;
if (sampleDepthArray[i]<0) sampleArray[i].nz=-sqrt(dd); else sampleArray[i].nz=sqrt(dd);
sampleArray[i].depth=fabs(sampleDepthArray[i]);
}
free(sampleNxArray); free(sampleNyArray); free(sampleDepthArray);
}
}
void LDNIcudaOperation::_switchSolid(LDNIcudaSolid* solidA, LDNIcudaSolid* solidB)
{
unsigned int *dev_indexArrayA[3];
float *dev_sampleNxArrayA[3];
float *dev_sampleNyArrayA[3];
float *dev_sampleDepthArrayA[3];
float originA[3],sampleWidthA;
int res,xSampleNum,ySampleNum,zSampleNum;
float originB[3];
dev_indexArrayA[0]=solidA->GetIndexArrayPtr(0); dev_indexArrayA[1]=solidA->GetIndexArrayPtr(1); dev_indexArrayA[2]=solidA->GetIndexArrayPtr(2);
dev_sampleNxArrayA[0]=solidA->GetSampleNxArrayPtr(0); dev_sampleNxArrayA[1]=solidA->GetSampleNxArrayPtr(1); dev_sampleNxArrayA[2]=solidA->GetSampleNxArrayPtr(2);
dev_sampleNyArrayA[0]=solidA->GetSampleNyArrayPtr(0); dev_sampleNyArrayA[1]=solidA->GetSampleNyArrayPtr(1); dev_sampleNyArrayA[2]=solidA->GetSampleNyArrayPtr(2);
dev_sampleDepthArrayA[0]=solidA->GetSampleDepthArrayPtr(0); dev_sampleDepthArrayA[1]=solidA->GetSampleDepthArrayPtr(1); dev_sampleDepthArrayA[2]=solidA->GetSampleDepthArrayPtr(2);
solidA->GetOrigin(originA[0],originA[1],originA[2]); sampleWidthA=solidA->GetSampleWidth(); res=solidA->GetResolution();
xSampleNum=solidA->GetSampleNumber(0); ySampleNum=solidA->GetSampleNumber(1); zSampleNum=solidA->GetSampleNumber(2);
solidA->SetIndexArrayPtr(0,solidB->GetIndexArrayPtr(0)); solidA->SetIndexArrayPtr(1,solidB->GetIndexArrayPtr(1)); solidA->SetIndexArrayPtr(2,solidB->GetIndexArrayPtr(2));
solidA->SetSampleNxArrayPtr(0,solidB->GetSampleNxArrayPtr(0)); solidA->SetSampleNxArrayPtr(1,solidB->GetSampleNxArrayPtr(1)); solidA->SetSampleNxArrayPtr(2,solidB->GetSampleNxArrayPtr(2));
solidA->SetSampleNyArrayPtr(0,solidB->GetSampleNyArrayPtr(0)); solidA->SetSampleNyArrayPtr(1,solidB->GetSampleNyArrayPtr(1)); solidA->SetSampleNyArrayPtr(2,solidB->GetSampleNyArrayPtr(2));
solidA->SetSampleDepthArrayPtr(0,solidB->GetSampleDepthArrayPtr(0)); solidA->SetSampleDepthArrayPtr(1,solidB->GetSampleDepthArrayPtr(1)); solidA->SetSampleDepthArrayPtr(2,solidB->GetSampleDepthArrayPtr(2));
solidB->GetOrigin(originB[0],originB[1],originB[2]); solidA->SetOrigin(originB[0],originB[1],originB[2]);
solidA->SetSampleWidth(solidB->GetSampleWidth()); solidA->SetResolution(solidB->GetResolution());
solidA->SetSampleNumber(0,solidB->GetSampleNumber(0)); solidA->SetSampleNumber(1,solidB->GetSampleNumber(1)); solidA->SetSampleNumber(2,solidB->GetSampleNumber(2));
solidB->SetIndexArrayPtr(0,dev_indexArrayA[0]); solidB->SetIndexArrayPtr(1,dev_indexArrayA[1]); solidB->SetIndexArrayPtr(2,dev_indexArrayA[2]);
solidB->SetSampleNxArrayPtr(0,dev_sampleNxArrayA[0]); solidB->SetSampleNxArrayPtr(1,dev_sampleNxArrayA[1]); solidB->SetSampleNxArrayPtr(2,dev_sampleNxArrayA[2]);
solidB->SetSampleNyArrayPtr(0,dev_sampleNyArrayA[0]); solidB->SetSampleNyArrayPtr(1,dev_sampleNyArrayA[1]); solidB->SetSampleNyArrayPtr(2,dev_sampleNyArrayA[2]);
solidB->SetSampleDepthArrayPtr(0,dev_sampleDepthArrayA[0]); solidB->SetSampleDepthArrayPtr(1,dev_sampleDepthArrayA[1]); solidB->SetSampleDepthArrayPtr(2,dev_sampleDepthArrayA[2]);
solidB->SetOrigin(originA[0],originA[1],originA[2]); solidB->SetSampleWidth(sampleWidthA); solidB->SetResolution(res);
solidB->SetSampleNumber(0,xSampleNum); solidB->SetSampleNumber(1,ySampleNum); solidB->SetSampleNumber(2,zSampleNum);
}
void LDNIcudaOperation::_expansionLDNIcudaSolidByNewBoundingBox(LDNIcudaSolid *cudaSolid, float boundingBox[])
{
unsigned int sd[3],ed[3],total; float wx,wy,wz,origin[3],gWidth;
unsigned int *dev_indexArray;
float *dev_sampleDepthArray;
long time=clock();
cudaSolid->GetOrigin(origin[0],origin[1],origin[2]);
gWidth=cudaSolid->GetSampleWidth();
int res=cudaSolid->GetResolution();
origin[0]=origin[0]-gWidth*0.5f;
origin[1]=origin[1]-gWidth*0.5f;
origin[2]=origin[2]-gWidth*0.5f;
//------------------------------------------------------------------------------
// Step 1: determine the number of expansion
boundingBox[0]=boundingBox[0]-gWidth*2.0f;
boundingBox[2]=boundingBox[2]-gWidth*2.0f;
boundingBox[4]=boundingBox[4]-gWidth*2.0f;
boundingBox[1]=boundingBox[1]+gWidth*2.0f;
boundingBox[3]=boundingBox[3]+gWidth*2.0f;
boundingBox[5]=boundingBox[5]+gWidth*2.0f;
//------------------------------------------------------------------------------
sd[0]=sd[1]=sd[2]=0;
if (boundingBox[0]<origin[0]) sd[0]=(unsigned int)((origin[0]-boundingBox[0])/gWidth)+1;
if (boundingBox[2]<origin[1]) sd[1]=(unsigned int)((origin[1]-boundingBox[2])/gWidth)+1;
if (boundingBox[4]<origin[2]) sd[2]=(unsigned int)((origin[2]-boundingBox[4])/gWidth)+1;
//------------------------------------------------------------------------------
wx=origin[0]+gWidth*(float)(res);
wy=origin[1]+gWidth*(float)(res);
wz=origin[2]+gWidth*(float)(res);
ed[0]=ed[1]=ed[2]=0;
if (boundingBox[1]>wx) ed[0]=(int)((boundingBox[1]-wx)/gWidth+0.5);
if (boundingBox[3]>wy) ed[1]=(int)((boundingBox[3]-wy)/gWidth+0.5);
if (boundingBox[5]>wz) ed[2]=(int)((boundingBox[5]-wz)/gWidth+0.5);
//------------------------------------------------------------------------------
total=sd[0]+ed[0];
if ((sd[1]+ed[1])>total) total=sd[1]+ed[1];
if ((sd[2]+ed[2])>total) total=sd[2]+ed[2];
ed[0]=total-sd[0]; ed[1]=total-sd[1]; ed[2]=total-sd[2];
//------------------------------------------------------------------------------
// Step 2: create new index Arrays of LDNISolidNode
unsigned int newArrsize;
newArrsize=(unsigned int)(res+total)*(res+total);
unsigned int *tempIndexArray;
CUDA_SAFE_CALL( hipMalloc( (void**)&tempIndexArray, (newArrsize+1)*sizeof(unsigned int) ) );
for(short nAxis=0; nAxis<3; nAxis++) {
dev_indexArray=cudaSolid->GetIndexArrayPtr(nAxis);
CUDA_SAFE_CALL( hipMemset( (void*)tempIndexArray, 0, (newArrsize+1)*sizeof(unsigned int) ) );
//------------------------------------------------------------------
// fill the temporary index array by number of samples on each ray
hipLaunchKernelGGL(( krLDNIcudaSolid_fillNewIndexBySampleNumber), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0,
tempIndexArray, dev_indexArray, res, res+total, sd[(nAxis+1)%3], sd[(nAxis+2)%3]);
//------------------------------------------------------------------
// scan the index array
thrust::device_ptr<unsigned int> dev_ptr(tempIndexArray); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr, dev_ptr+(newArrsize+1), dev_ptr); // in-place scan
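// e.g. per-ray counts [3,0,2,...,0] become offsets [0,3,3,...,total]; entry [newArrsize] then holds
// the total sample count, which is read back in Step 3 below.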
//------------------------------------------------------------------
// update the temporary index array
hipFree(dev_indexArray);
CUDA_SAFE_CALL( hipMalloc( (void**)&(dev_indexArray), (newArrsize+1)*sizeof(unsigned int) ) );
cudaSolid->SetIndexArrayPtr(nAxis,dev_indexArray);
CUDA_SAFE_CALL( hipMemcpy( dev_indexArray, tempIndexArray, (newArrsize+1)*sizeof(unsigned int), hipMemcpyDeviceToDevice ) );
}
hipFree(tempIndexArray);
//------------------------------------------------------------------------------
// Step 3: update the depth-values of samples when necessary
origin[0]=origin[0]-gWidth*(float)(sd[0])+gWidth*0.5;
origin[1]=origin[1]-gWidth*(float)(sd[1])+gWidth*0.5;
origin[2]=origin[2]-gWidth*(float)(sd[2])+gWidth*0.5;
cudaSolid->SetOrigin(origin[0],origin[1],origin[2]);
res+=total; cudaSolid->SetResolution(res);
for(short nAxis=0; nAxis<3; nAxis++) {
if (sd[nAxis]==0) continue;
float updateDepth=gWidth*(float)sd[nAxis];
dev_indexArray=cudaSolid->GetIndexArrayPtr(nAxis);
dev_sampleDepthArray=cudaSolid->GetSampleDepthArrayPtr(nAxis);
unsigned int sampleNum;
CUDA_SAFE_CALL( hipMemcpy( &sampleNum, &(dev_indexArray[newArrsize]), sizeof(unsigned int), hipMemcpyDeviceToHost ) );
hipLaunchKernelGGL(( krLDNIcudaSolid_depthSampleAdd), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, dev_sampleDepthArray, updateDepth, sampleNum);
}
//------------------------------------------------------------------------------
// Step 4: update the boundingBox[] for the sampling of mesh surface bounded by it
boundingBox[0]=origin[0]-gWidth*0.5;
boundingBox[2]=origin[1]-gWidth*0.5;
boundingBox[4]=origin[2]-gWidth*0.5;
boundingBox[1]=boundingBox[0]+gWidth*((float)res);
boundingBox[3]=boundingBox[2]+gWidth*((float)res);
boundingBox[5]=boundingBox[4]+gWidth*((float)res);
printf("-----------------------------------------------------------------------\n");
printf("Expanding the working space of existing cuda solid takes: %ld (ms)\n",clock()-time);
printf("The resolution is extended from %d to %d\n",res-total,res);
printf("-----------------------------------------------------------------------\n");
}
//--------------------------------------------------------------------------------------------
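// Usage sketch (illustrative only): call initGLInteroperabilityOnCUDA(2,0) once, after the OpenGL
// context has been created, to pick a CUDA device of compute capability >= 2.0 and bind it for GL interop.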
bool initGLInteroperabilityOnCUDA(int major, int minor) {
hipDeviceProp_t prop;
int dev;
memset( &prop, 0, sizeof( hipDeviceProp_t ) );
prop.major = major;
prop.minor = minor;
CUDA_SAFE_CALL( hipChooseDevice( &dev, &prop ) );
// tell CUDA which dev we will be using for graphic interop
// from the programming guide: Interoperability with OpenGL
// requires that the CUDA device be specified by
// hipGLSetGLDevice() before any other runtime calls.
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if (deviceProp.major < 2)
{
return false;
}
else
{
printf("Current device support compute capability 2.0 \n");
}
CUDA_SAFE_CALL( hipGLSetGLDevice( dev ) );
return true;
}
//--------------------------------------------------------------------------------------------
void LDNIcudaOperation::GetCudaDeviceProperty()
{
hipDeviceProp_t prop;
int count;
CUDA_SAFE_CALL( hipGetDeviceCount( &count ) );
for (int i=0; i< count; i++) {
CUDA_SAFE_CALL( hipGetDeviceProperties( &prop, i ) );
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
//--------------------------------------------------------------------------------------------------------
// adaptive slicing related
bool LDNIcudaOperation::AdaptiveSlicing_CalculateLayerArea(LDNIcudaSolid* &cudaSolid, float layerAreaArray[]) {
int nAxis = 0; // meaning the x axis
float* devNxArrayPtr = cudaSolid->GetSampleNxArrayPtr(nAxis);
float* devNyArrayPtr = cudaSolid->GetSampleNyArrayPtr(nAxis);
float* devDepthArrayPtr = cudaSolid->GetSampleDepthArrayPtr(nAxis);
unsigned int* devIndexArrayPtr = cudaSolid->GetIndexArrayPtr(nAxis);
int res = cudaSolid->GetResolution();
int numRay = res*res;
float* devRayLengthArrayPtr;
CUDA_SAFE_CALL(hipMalloc((void**)&(devRayLengthArrayPtr), (numRay) * sizeof(float)));
float* devLayerAreaArrayPtr;
CUDA_SAFE_CALL(hipMalloc((void**)&(devLayerAreaArrayPtr), (res) * sizeof(float)));
//-----------------------------------------------------------------
printf("begin calculating area\n");
hipLaunchKernelGGL(( krLDNIAdaptiveSlicing_CalculateRayLength), dim3(BLOCKS_PER_GRID), dim3(THREADS_PER_BLOCK) , 0, 0, devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, numRay, devIndexArrayPtr, devRayLengthArrayPtr, res);
hipLaunchKernelGGL(( krLDNIAdaptiveSlicing_CalculateLayerArea), dim3(BLOCKS_PER_GRID), dim3(THREADS_PER_BLOCK), 0, 0, devRayLengthArrayPtr, devIndexArrayPtr, devLayerAreaArrayPtr, res);
CUDA_SAFE_CALL(hipMemcpy(layerAreaArray, devLayerAreaArrayPtr, (res) * sizeof(float), hipMemcpyDeviceToHost));
hipFree(devRayLengthArrayPtr);
hipFree(devLayerAreaArrayPtr);
printf("end calculating area\n");
return true;
}
bool LDNIcudaOperation::AdaptiveSlicing_CalculateVolumeError(LDNIcudaSolid* &cudaSolid, float volumeErrorMatrix[], int minSliceCount, int totalSliceCount, int oneLayerSliceCount, float sliceSize, float boundingBox[]) {
int nAxis = 1; // meaning the y axis, pointing up axis
float* devNxArrayPtr = cudaSolid->GetSampleNxArrayPtr(nAxis);
float* devNyArrayPtr = cudaSolid->GetSampleNyArrayPtr(nAxis);
float* devDepthArrayPtr = cudaSolid->GetSampleDepthArrayPtr(nAxis);
unsigned int* devIndexArrayPtr = cudaSolid->GetIndexArrayPtr(nAxis);
float ox, oy, oz, ww;
cudaSolid->GetOrigin(ox, oy, oz); ww = cudaSolid->GetSampleWidth();
float origin[3] = { ox, oy, oz };
int res = cudaSolid->GetResolution();
float* devVolumeErrorMatrix;
int thread_per_block_volume = 256;
int blocks_per_grid_volume = 32;
int tileCount = blocks_per_grid_volume*thread_per_block_volume;
CUDA_SAFE_CALL(hipMalloc((void**)&(devVolumeErrorMatrix), (oneLayerSliceCount*totalSliceCount*tileCount) * sizeof(float)));
//CUDA_SAFE_CALL(hipMemset((void*)devVolumeErrorMatrix, 1, (oneLayerSliceCount*totalSliceCount*tileCount) * sizeof(float)));
//-----------------------------------------------------------------
printf("begin calculating volume error\n");
printf("oy %.6f\n", origin[0]);
float y_min = boundingBox[2];
printf("ymin %.6f\n", y_min);
hipLaunchKernelGGL(( krLDNIAdaptiveSlicing_CalculateVolumeErrorPerTile), dim3(blocks_per_grid_volume), dim3(thread_per_block_volume), 0, 0, devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, res*res, devIndexArrayPtr, devVolumeErrorMatrix, res, minSliceCount, totalSliceCount, oneLayerSliceCount, sliceSize, y_min, oy, ww, tileCount);
hipLaunchKernelGGL(( krLDNIAdaptiveSlicing_ReduceVolumeErrorByTile), dim3(BLOCKS_PER_GRID), dim3(THREADS_PER_BLOCK), 0, 0, devVolumeErrorMatrix, oneLayerSliceCount, totalSliceCount, tileCount);
CUDA_SAFE_CALL(hipMemcpy(volumeErrorMatrix, devVolumeErrorMatrix, (oneLayerSliceCount*totalSliceCount) * sizeof(float), hipMemcpyDeviceToHost));
hipFree(devVolumeErrorMatrix);
printf("Now %.3f\n", volumeErrorMatrix[0]);
printf("end calculating volume error\n");
return true;
}
//
//--------------------------------------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////////////
//
// The following functions are running on the graphics hardware by CUDA
//
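// Note on krLDNIRegularization_RegularizationOnRays below: samples along a ray come as enter/leave
// pairs sorted by |depth|. The first pass drops any interior (leave,enter) pair whose gap is thinner
// than eps (closing tiny gaps); the second pass drops any (enter,leave) pair whose thickness is below
// eps (removing super-thin sheets). For example, with eps=0.01 the intervals (1.0,2.0) and (2.005,3.0)
// are merged into the single interval (1.0,3.0).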
__global__ void krLDNIRegularization_RegularizationOnRays(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
unsigned int *devIndexArrayPtr, unsigned int *devIndexArrayPtrRes, int arrsize, float eps)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int stIndex,sampleNum,i,resSampleNum;
float resNx[MAX_NUM_OF_SAMPLES_ON_RAY],resNy[MAX_NUM_OF_SAMPLES_ON_RAY],resDepth[MAX_NUM_OF_SAMPLES_ON_RAY];
while(index<arrsize) {
stIndex=devIndexArrayPtr[index]; sampleNum=devIndexArrayPtr[index+1]-stIndex;
// if (sampleNum>0) sampleNum=sampleNum-2;
{
//------------------------------------------------------------------------------
// Eliminating gaps
resSampleNum=0;
if (sampleNum>0) {
resNx[0]=devNxArrayPtr[stIndex]; resNy[0]=devNyArrayPtr[stIndex]; resDepth[0]=devDepthArrayPtr[stIndex]; resSampleNum++;
for(i=1;i<sampleNum;i+=2) {
if (fabs(devDepthArrayPtr[stIndex+i+1])-fabs(devDepthArrayPtr[stIndex+i])<eps) continue;
resNx[resSampleNum]=devNxArrayPtr[stIndex+i];
resNy[resSampleNum]=devNyArrayPtr[stIndex+i];
resDepth[resSampleNum]=devDepthArrayPtr[stIndex+i];
resSampleNum++;
resNx[resSampleNum]=devNxArrayPtr[stIndex+i+1];
resNy[resSampleNum]=devNyArrayPtr[stIndex+i+1];
resDepth[resSampleNum]=devDepthArrayPtr[stIndex+i+1];
resSampleNum++;
}
resNx[resSampleNum]=devNxArrayPtr[stIndex+sampleNum-1];
resNy[resSampleNum]=devNyArrayPtr[stIndex+sampleNum-1];
resDepth[resSampleNum]=devDepthArrayPtr[stIndex+sampleNum-1];
resSampleNum++;
}
//------------------------------------------------------------------------------
// Eliminating super-thin sheets
sampleNum=0;
for(i=0;i<resSampleNum;i+=2) {
if (fabs(resDepth[i+1])-fabs(resDepth[i])<eps) continue;
devNxArrayPtr[stIndex+sampleNum]=resNx[i];
devNyArrayPtr[stIndex+sampleNum]=resNy[i];
devDepthArrayPtr[stIndex+sampleNum]=resDepth[i];
sampleNum++;
devNxArrayPtr[stIndex+sampleNum]=resNx[i+1];
devNyArrayPtr[stIndex+sampleNum]=resNy[i+1];
devDepthArrayPtr[stIndex+sampleNum]=resDepth[i+1];
sampleNum++;
}
}
devIndexArrayPtrRes[index]=sampleNum;
index += blockDim.x * gridDim.x;
}
}
__global__ void krLDNIRegularization_ResultSampleCollection(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
unsigned int *devIndexArrayPtr,
float *devNxArrayPtrRes, float *devNyArrayPtrRes, float *devDepthArrayPtrRes,
unsigned int *devIndexArrayPtrRes, int arrsize)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int st,num,stRes,numRes,k;
while(index<arrsize) {
st=devIndexArrayPtr[index]; num=devIndexArrayPtr[index+1]-st;
stRes=devIndexArrayPtrRes[index]; numRes=devIndexArrayPtrRes[index+1]-stRes;
if (numRes<=num) {
for(k=0;k<numRes;k++) {
devNxArrayPtrRes[stRes+k]=devNxArrayPtr[st+k];
devNyArrayPtrRes[stRes+k]=devNyArrayPtr[st+k];
devDepthArrayPtrRes[stRes+k]=devDepthArrayPtr[st+k];
}
}
else { // This rarely occurs.
for(k=0;k<num;k++) {
devNxArrayPtrRes[stRes+k]=devNxArrayPtr[st+k];
devNyArrayPtrRes[stRes+k]=devNyArrayPtr[st+k];
devDepthArrayPtrRes[stRes+k]=devDepthArrayPtr[st+k];
}
}
index += blockDim.x * gridDim.x;
}
}
#define S_EPS 1.0e-6
__global__ void krLDNIBoolean_SuperUnionOnRays(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr, unsigned int *devIndexArrayPtr,
unsigned int *devIndexArrayPtrRes, int arrsize)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int k,st,num,count_start,count_end,count;
float resNx[MAX_NUM_OF_SAMPLES_ON_RAY],resNy[MAX_NUM_OF_SAMPLES_ON_RAY],resDepth[MAX_NUM_OF_SAMPLES_ON_RAY],s_depth, e_depth;
while(index<arrsize) {
st=devIndexArrayPtr[index]; num=devIndexArrayPtr[index+1]-st;
count_start = 0;
count_end = 0;
count = 0;
if (num%2 == 1)
{
for(k=0; k <num ; k++)
{
devNxArrayPtr[st+k]=0;
devNyArrayPtr[st+k]=0;
devDepthArrayPtr[st+k]=0;
devIndexArrayPtrRes[index]=0;
}
}
if (num > 0 && num%2==0)
{
resDepth[0] = s_depth = START_DEPTH(devDepthArrayPtr[st]);
resNx[0] = devNxArrayPtr[st];
resNy[0] = devNyArrayPtr[st];
count_start++;
count++;
e_depth = END_DEPTH(devDepthArrayPtr[num/2+st]);
count_end++;
for(k=1; k < num/2; k++)
{
s_depth = START_DEPTH(devDepthArrayPtr[k+st]);
if (((fabs(s_depth)- fabs(e_depth))>S_EPS) && (count_start == count_end))
{
resDepth[count] = e_depth;
resNx[count] = devNxArrayPtr[st+(k-1)+num/2];
resNy[count] = devNyArrayPtr[st+(k-1)+num/2];
count++;
resDepth[count] = s_depth;
resNx[count] = devNxArrayPtr[st+k];
resNy[count] = devNyArrayPtr[st+k];
count_start++;
count++;
}
//else if (fabs(s_depth) <= fabs(e_depth))
else if ((fabs(s_depth)- fabs(e_depth))<=S_EPS)
{
count_start++;
}
e_depth = END_DEPTH(devDepthArrayPtr[num/2+k+st]);
count_end++;
}
if ((fabs(e_depth)-fabs(s_depth))<S_EPS)
{
count--;
}
else
{
resDepth[count] = e_depth;
resNx[count] = devNxArrayPtr[st+(k-1)+num/2];
resNy[count] = devNyArrayPtr[st+(k-1)+num/2];
count++;
}
devIndexArrayPtrRes[index]=count;
for(k=0; k <count ; k++)
{
devNxArrayPtr[st+k]=resNx[k];
devNyArrayPtr[st+k]=resNy[k];
devDepthArrayPtr[st+k]=resDepth[k];
}
}
index += blockDim.x * gridDim.x;
}
}
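// Note on krLDNIBoolean_IdentifyEnterLeaveOnRays below: the integer part of |depth| identifies which
// input mesh a sample came from. Within each mesh's run of samples the kernel rewrites the integer
// part so that entering samples read 1.xxx and leaving samples read 2.xxx, keeping the fractional
// (positional) part and the sign of the depth unchanged.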
__global__ void krLDNIBoolean_IdentifyEnterLeaveOnRays(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr, unsigned int *devIndexArrayPtr, int arrsize)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int k,st,num;
unsigned int prev_mesh,count;
float depth, fdepth;
float resDepth[MAX_NUM_OF_SAMPLES_ON_RAY];
while(index<arrsize) {
st=devIndexArrayPtr[index]; num=devIndexArrayPtr[index+1]-st;
prev_mesh = 0;
count = 0;
if (num > 0)
{
prev_mesh = floor(fabs(devDepthArrayPtr[st]));
for(k=0; k<num; k++)
{
depth = devDepthArrayPtr[k+st];
fdepth = fabs(depth);
//if (floor(fdepth) != prev_mesh)
if (fabs(floor(fdepth)-prev_mesh) >= 1.0)
{
prev_mesh = floor(fdepth); count=0;
}
if (count%2 == 0)
{
fdepth = fdepth - floor(fdepth) + 1; // all starting pos : 1.xxx
}
else
{
fdepth = fdepth - floor(fdepth) + 2; // all ending pos : 2.xxx
}
if (depth < 0) resDepth[k] = -fdepth;
else resDepth[k] = fdepth;
count++;
}
for(k=0; k <num; k++)
{
devDepthArrayPtr[st+k]=resDepth[k];
}
}
index += blockDim.x * gridDim.x;
}
}
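// Worked 1D example for the merge below: along one ray, solid A occupies [1,4] and solid B occupies [3,6]
// (each interval is an enter/leave sample pair). Scanning the merged samples in depth order yields
// union -> [1,6] (samples kept where insideA||insideB changes),
// intersection -> [3,4] (insideA&&insideB), and
// difference -> [1,3] (insideA&&!insideB, with solid B's samples reversed in orientation).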
__global__ void krLDNIBoolean_BooleanOnRays(float *devNxArrayPtrA, float *devNyArrayPtrA, float *devDepthArrayPtrA, unsigned int *devIndexArrayPtrA,
float *devNxArrayPtrB, float *devNyArrayPtrB, float *devDepthArrayPtrB, unsigned int *devIndexArrayPtrB,
unsigned int *devIndexArrayPtrRes, int arrsize, short nOperationType)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int k,stA,stB,numA,numB,numRes,aIndex,bIndex;
bool last_op,op,insideA,insideB;
float lastNx,lastNy,lastDepth;
float resNx[MAX_NUM_OF_SAMPLES_ON_RAY],resNy[MAX_NUM_OF_SAMPLES_ON_RAY],resDepth[MAX_NUM_OF_SAMPLES_ON_RAY];
while(index<arrsize) {
stA=devIndexArrayPtrA[index]; numA=devIndexArrayPtrA[index+1]-stA;
stB=devIndexArrayPtrB[index]; numB=devIndexArrayPtrB[index+1]-stB;
last_op=insideA=insideB=false; numRes=0;
//-------------------------------------------------------------------------------------------------------
// Generate the temporary resultant samples
if (numA>0 && numB>0) {
aIndex=bIndex=0;
while( (aIndex<numA) || (bIndex<numB) ) { // scaning the samples on solidA and solidB together
if ((bIndex==numB) || (aIndex<numA && fabs(devDepthArrayPtrA[aIndex+stA])<fabs(devDepthArrayPtrB[bIndex+stB])))
{
// advancing on ray-A
lastDepth=devDepthArrayPtrA[aIndex+stA];
lastNx=devNxArrayPtrA[aIndex+stA];
lastNy=devNyArrayPtrA[aIndex+stA];
insideA=!insideA; aIndex++;
}
else {
// advancing on ray-B
lastDepth=devDepthArrayPtrB[bIndex+stB];
lastNx=devNxArrayPtrB[bIndex+stB];
lastNy=devNyArrayPtrB[bIndex+stB];
if (nOperationType==2) {lastNx=-lastNx; lastNy=-lastNy; lastDepth=-lastDepth;} // inverse the normal
insideB=!insideB; bIndex++;
}
switch(nOperationType) {
case 0:{op=LOGIC_UNION(insideA,insideB); }break;
case 1:{op=LOGIC_INTER(insideA,insideB); }break;
case 2:{op=LOGIC_SUBTR(insideA,insideB); }break;
}
if (op!=last_op)
{
if (numRes>0 && fabs(fabs(lastDepth)-fabs(resDepth[numRes-1]))<0.00001f)
{numRes--;}
else {
resDepth[numRes]=lastDepth;
resNx[numRes]=lastNx; resNy[numRes]=lastNy;
numRes++;
}
last_op=op;
}
}
}
else if ((numA==0) && (numB>0)) { // scaning the samples on solidB
if (nOperationType==0) {
for(k=0;k<numB;k++) {
resNx[k]=devNxArrayPtrB[stB+k];
resNy[k]=devNyArrayPtrB[stB+k];
resDepth[k]=devDepthArrayPtrB[stB+k];
}
numRes=numB;
}
// for "intersect" and "difference", keeping NULL will be fine
}
else if ((numA>0) && (numB==0)) { // scaning the samples on solidA
if (nOperationType==0 || nOperationType==2) { // union and difference
for(k=0;k<numA;k++) {
resNx[k]=devNxArrayPtrA[stA+k];
resNy[k]=devNyArrayPtrA[stA+k];
resDepth[k]=devDepthArrayPtrA[stA+k];
}
numRes=numA;
}
}
//-------------------------------------------------------------------------------------------------------
// Copy the resultant samples into solidA and solidB
if (numRes>numA) {
for(k=0;k<numA;k++) {
devNxArrayPtrA[stA+k]=resNx[k];
devNyArrayPtrA[stA+k]=resNy[k];
devDepthArrayPtrA[stA+k]=resDepth[k];
}
for(k=numA;k<numRes;k++) {
devNxArrayPtrB[stB+k-numA]=resNx[k];
devNyArrayPtrB[stB+k-numA]=resNy[k];
devDepthArrayPtrB[stB+k-numA]=resDepth[k];
}
}
else {
for(k=0;k<numRes;k++) {
devNxArrayPtrA[stA+k]=resNx[k];
devNyArrayPtrA[stA+k]=resNy[k];
devDepthArrayPtrA[stA+k]=resDepth[k];
}
}
devIndexArrayPtrRes[index]=numRes;
index += blockDim.x * gridDim.x;
}
}
__global__ void krLDNIBoolean_ResultSampleCollection(float *devNxArrayPtrA, float *devNyArrayPtrA, float *devDepthArrayPtrA, unsigned int *devIndexArrayPtrA,
float *devNxArrayPtrRes, float *devNyArrayPtrRes, float *devDepthArrayPtrRes, unsigned int *devIndexArrayPtrRes, int arrsize, float width, float gwidth)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int stA,stRes,numRes,k,numA;
float depth, temp;
while(index<arrsize) {
stA=devIndexArrayPtrA[index]; numA=devIndexArrayPtrA[index+1]-stA;
stRes=devIndexArrayPtrRes[index]; numRes=devIndexArrayPtrRes[index+1]-stRes;
if (numRes>0) {
for(k=0;k<numRes;k++) {
devNxArrayPtrRes[stRes+k]=devNxArrayPtrA[stA+k];
devNyArrayPtrRes[stRes+k]=devNyArrayPtrA[stA+k];
depth = devDepthArrayPtrA[stA+k];
temp = fabs(depth)*width-gwidth*0.5f;
if (depth < 0)
devDepthArrayPtrRes[stRes+k]=-temp;
else
devDepthArrayPtrRes[stRes+k]=temp;
}
}
index += blockDim.x * gridDim.x;
}
}
__global__ void krLDNIBoolean_ResultSampleCollection(float *devNxArrayPtrA, float *devNyArrayPtrA, float *devDepthArrayPtrA, unsigned int *devIndexArrayPtrA,
float *devNxArrayPtrB, float *devNyArrayPtrB, float *devDepthArrayPtrB, unsigned int *devIndexArrayPtrB,
float *devNxArrayPtrRes, float *devNyArrayPtrRes, float *devDepthArrayPtrRes, unsigned int *devIndexArrayPtrRes, int arrsize)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int stA,numA,stB,stRes,numRes,k;
while(index<arrsize) {
stA=devIndexArrayPtrA[index]; numA=devIndexArrayPtrA[index+1]-stA;
stRes=devIndexArrayPtrRes[index]; numRes=devIndexArrayPtrRes[index+1]-stRes;
if (numRes>0) {
if (numRes>numA) {
for(k=0;k<numA;k++) {
devNxArrayPtrRes[stRes+k]=devNxArrayPtrA[stA+k];
devNyArrayPtrRes[stRes+k]=devNyArrayPtrA[stA+k];
devDepthArrayPtrRes[stRes+k]=devDepthArrayPtrA[stA+k];
}
stB=devIndexArrayPtrB[index];
for(k=numA;k<numRes;k++) {
devNxArrayPtrRes[stRes+k]=devNxArrayPtrB[stB+(k-numA)];
devNyArrayPtrRes[stRes+k]=devNyArrayPtrB[stB+(k-numA)];
devDepthArrayPtrRes[stRes+k]=devDepthArrayPtrB[stB+(k-numA)];
}
}
else {
for(k=0;k<numRes;k++) {
devNxArrayPtrRes[stRes+k]=devNxArrayPtrA[stA+k];
devNyArrayPtrRes[stRes+k]=devNyArrayPtrA[stA+k];
devDepthArrayPtrRes[stRes+k]=devDepthArrayPtrA[stA+k];
}
}
}
index += blockDim.x * gridDim.x;
}
}
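// Note: the per-ray sort below is a simple O(n^2) exchange sort on |depth|; n is bounded by
// MAX_NUM_OF_SAMPLES_ON_RAY, so the cost is acceptable (the commented-out buffers inside were
// presumably left over from an earlier merge-sort variant).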
__global__ void krLDNISampling_SortSamples(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int st,ed,i,j,n;
float nx[MAX_NUM_OF_SAMPLES_ON_RAY],ny[MAX_NUM_OF_SAMPLES_ON_RAY],depth[MAX_NUM_OF_SAMPLES_ON_RAY];
float tempnx,tempny,tempdepth;
// float auxNx[MAX_NUM_OF_SAMPLES_ON_RAY/2+1],auxNy[MAX_NUM_OF_SAMPLES_ON_RAY/2+1],auxDepth[MAX_NUM_OF_SAMPLES_ON_RAY/2+1]; // for merge-sort
// int lo,hi,m,k; // for merge-sort
while(index<arrsize) {
st=devIndexArrayPtr[index]; ed=devIndexArrayPtr[index+1]; n=ed-st;
//-----------------------------------------------------------------------------------------------------------
// Download data set
for(i=0;i<n;i++) nx[i]=devNxArrayPtr[st+i];
for(i=0;i<n;i++) ny[i]=devNyArrayPtr[st+i];
for(i=0;i<n;i++) depth[i]=devDepthArrayPtr[st+i];
//-----------------------------------------------------------------------------------------------------------
for(i=0;i<n;i++) {
for(j=i+1;j<n;j++) {
if (fabs(depth[i])>fabs(depth[j])) {
tempnx=nx[i]; nx[i]=nx[j]; nx[j]=tempnx;
tempny=ny[i]; ny[i]=ny[j]; ny[j]=tempny;
tempdepth=depth[i]; depth[i]=depth[j]; depth[j]=tempdepth;
}
}
}
//-----------------------------------------------------------------------------------------------------------
// Upload data set
for(i=0;i<n;i++) devNxArrayPtr[st+i]=nx[i];
for(i=0;i<n;i++) devNyArrayPtr[st+i]=ny[i];
for(i=0;i<n;i++) devDepthArrayPtr[st+i]=depth[i];
index += blockDim.x * gridDim.x;
}
}
__global__ void krLDNISampling_CopySamples(float *devNxArrayPtr,
float *devNyArrayPtr, float *devDepthArrayPtr,
int n, int arrsize, float width, float sampleWidth, int res,
unsigned int *devIndexArrayPtr)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int arrindex, num, ix, iy;
float4 rgb; float temp;
while(index<arrsize) {
num=devIndexArrayPtr[index+1]-devIndexArrayPtr[index];
if (num>=n) {
arrindex=(int)(devIndexArrayPtr[index])+n-1;
ix=index%res; iy=(index/res);
rgb = tex2D(tex2DFloat4In, ix, iy);
temp=fabs(rgb.z)*width-sampleWidth*0.5f;
devNxArrayPtr[arrindex]=rgb.x; // x-component of normal
devNyArrayPtr[arrindex]=rgb.y; // y-component of normal
if (rgb.z<0) devDepthArrayPtr[arrindex]=-temp; else devDepthArrayPtr[arrindex]=temp;
}
index += blockDim.x * gridDim.x;
}
}
__global__ void krLDNISuperUnion_CopySamples(float *devNxArrayPtr,
float *devNyArrayPtr, float *devDepthArrayPtr,
int n, int arrsize, int res,
unsigned int *devIndexArrayPtr)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int arrindex, num, ix, iy;
float4 rgb; //float temp;
while(index<arrsize) {
num=devIndexArrayPtr[index+1]-devIndexArrayPtr[index];
if (num>=n) {
arrindex=(int)(devIndexArrayPtr[index])+n-1;
ix=index%res; iy=(index/res);
rgb = tex2D(tex2DFloat4In, ix, iy);
devNxArrayPtr[arrindex]=rgb.x; // x-component of normal
devNyArrayPtr[arrindex]=rgb.y; // y-component of normal
devDepthArrayPtr[arrindex]=rgb.z;
}
index += blockDim.x * gridDim.x;
}
}
__global__ void krLDNISampling_CopyIndexAndFindMax(unsigned char *devStencilBufferPtr, unsigned int *devIndexArrayPtr,
unsigned int *devResArrayPtr, int arrsize )
{
__shared__ unsigned int cache[THREADS_PER_BLOCK];
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int cacheIndex=threadIdx.x;
unsigned int temp=0,temp2;
while(tid<arrsize) {
temp2=(unsigned int)(devStencilBufferPtr[tid]);
devIndexArrayPtr[tid]=temp2;
temp= MAX(temp, temp2);
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex]=temp;
// synchronize threads in this block
__syncthreads();
// for reductions, THREADS_PER_BLOCK must be a power of 2 because of the following code
int i = blockDim.x/2;
while (i!=0) {
if (cacheIndex < i) {cache[cacheIndex] = MAX(cache[cacheIndex], cache[cacheIndex+i]);}
__syncthreads();
i /= 2;
}
if (cacheIndex==0) devResArrayPtr[blockIdx.x] = cache[0];
}
///////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////
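// ScaffoldBooleanOperation: builds a periodic scaffold solid by sampling the unit mesh
// four times with the different row/column instancing patterns (the bsingleRow/bsingleCol
// flags below) and merging the four LDNI solids with _booleanOperation (operation type 0).
// The bounding box is first expanded to cover all UnitNum[] repetitions plus the per-unit
// offsets UnitOff[], and is re-used from savedSolid when one is supplied.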
bool LDNIcudaOperation::ScaffoldBooleanOperation(LDNIcudaSolid* &outputSolid,QuadTrglMesh *UnitMesh, int UnitNum[], float UnitOff[], int UnitFlip[], int nRes, LDNIcudaSolid* savedSolid)
{
LDNIcudaSolid *solidB;
int res=nRes;
float boundingbox[6];
float UnitWidth[3];
boundingbox[0]=boundingbox[1]=boundingbox[2]=boundingbox[3]=boundingbox[4]=boundingbox[5]=0;
UnitMesh->CompBoundingBox(boundingbox);
UnitWidth[0] = boundingbox[1] - boundingbox[0] ;
UnitWidth[1] = boundingbox[3] - boundingbox[2] ;
UnitWidth[2] = boundingbox[5] - boundingbox[4] ;
boundingbox[1] = boundingbox[1] + (UnitNum[0]-1)*(UnitWidth[0]+UnitOff[0]);
boundingbox[3] = boundingbox[3] + (UnitNum[1]-1)*(UnitWidth[1]+UnitOff[1]);
boundingbox[5] = boundingbox[5] + (UnitNum[2]-1)*(UnitWidth[2]+UnitOff[2]);
float xx=(boundingbox[0]+boundingbox[1])*0.5f;
float yy=(boundingbox[2]+boundingbox[3])*0.5f;
float zz=(boundingbox[4]+boundingbox[5])*0.5f;
float ww=boundingbox[1]-boundingbox[0];
if ((boundingbox[3]-boundingbox[2])>ww) ww=boundingbox[3]-boundingbox[2];
if ((boundingbox[5]-boundingbox[4])>ww) ww=boundingbox[5]-boundingbox[4];
ww=ww*0.55+ww/(float)(res-1)*2.0;
boundingbox[0]=xx-ww; boundingbox[1]=xx+ww;
boundingbox[2]=yy-ww; boundingbox[3]=yy+ww;
boundingbox[4]=zz-ww; boundingbox[5]=zz+ww;
if (savedSolid!= NULL)
{
_expansionLDNIcudaSolidByNewBoundingBox(savedSolid, boundingbox);
res = savedSolid->GetResolution();
}
//even row + even column
InstancedBRepToLDNISampling(UnitMesh, outputSolid, boundingbox, res, UnitOff, UnitNum, UnitWidth, UnitFlip, false, true);
//even row + single column
InstancedBRepToLDNISampling(UnitMesh, solidB, boundingbox, res, UnitOff, UnitNum, UnitWidth, UnitFlip, false, false);
printf("-----------------------------------------------------------------------\n");
printf("Starting to compute Boolean operation\n");
printf("-----------------------------------------------------------------------\n");
_booleanOperation(outputSolid, solidB, 0);
//LDNIcudaSolid *solidA;
InstancedBRepToLDNISampling(UnitMesh, solidB, boundingbox, res, UnitOff, UnitNum, UnitWidth, UnitFlip, true, true);
_booleanOperation(outputSolid, solidB, 0);
InstancedBRepToLDNISampling(UnitMesh, solidB, boundingbox, res, UnitOff, UnitNum, UnitWidth, UnitFlip, true, false);
_booleanOperation(outputSolid, solidB, 0);
//even row + single column
//InstancedBRepToLDNISampling(UnitMesh, solidA, boundingbox, res, UnitOff, UnitNum, UnitWidth, UnitFlip, false, false);
outputSolid->SetBoundingBox(boundingbox);
//-----------------------------------------------------------------------------------
// Step 4: free the memory
delete solidB;
return true;
}
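// InstancedBRepToLDNISampling: rasterizes the instanced B-rep mesh into an LDNI solid.
// Step 1 compiles and links the scaffold GLSL shaders, Step 2 uploads the mesh into a
// vertex/index VBO pair, then the instancing parameters (Unum/UOff/UWidth/UFlip, the cell
// centre and the row/column flags) are passed as uniforms before handing over to
// _decomposeLDNIByFBOPBO() for the actual layer-peeling sampling.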
bool LDNIcudaOperation::InstancedBRepToLDNISampling(QuadTrglMesh *mesh, LDNIcudaSolid* &solid, float boundingBox[], int res, float UnitOff[], int UnitNum[], float UnitWidth[], int UnitFlip[], bool bsingleRow, bool bsingleCol)
{
const bool bCube=true;
float origin[3],gWidth; long time=clock(),totalTime=clock();
int i,nodeNum,faceNum;
char fileadd[256];
solid=new LDNIcudaSolid;
solid->MallocMemory(res);
gWidth=(boundingBox[1]-boundingBox[0])/(float)res;
solid->SetSampleWidth(gWidth);
origin[0]=boundingBox[0]+gWidth*0.5f;
origin[1]=boundingBox[2]+gWidth*0.5f;
origin[2]=boundingBox[4]+gWidth*0.5f;
solid->SetOrigin(origin[0],origin[1],origin[2]);
//---------------------------------------------------------------------------------
// For using OpenGL Shading Language to implement the sampling procedure
if (glewInit() != GLEW_OK) {printf("glewInit failed. Exiting...\n"); return false;}
if (glewIsSupported("GL_VERSION_2_0")) {printf("\nReady for OpenGL 2.0\n");} else {printf("OpenGL 2.0 not supported\n"); return false;}
//-----------------------------------------------------------------------------------------
GLhandleARB g_programObj, g_vertexShader, g_GeometryShader, g_FragShader;
const char *VshaderString[1],*GshaderString[1], *FshaderString[1];
GLint bCompiled = 0, bLinked = 0;
GLuint vbo, vboInd;
char str[4096] = "";
//-----------------------------------------------------------------------------------------
// Step 1: Setup the shaders
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"ScaffoldLDNIVertexShader.vert");
g_vertexShader = glCreateShaderObjectARB( GL_VERTEX_SHADER_ARB );
unsigned char *ShaderAssembly = _readShaderFile( fileadd );
VshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_vertexShader, 1, VshaderString, NULL );
glCompileShaderARB( g_vertexShader);
	delete [] ShaderAssembly;
glGetObjectParameterivARB( g_vertexShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_vertexShader, sizeof(str), NULL, str);
printf("Warning: Vertex Shader Compile Error \n%s\n",str); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"ScaffoldLDNIGeometryShader.geo");
g_GeometryShader = glCreateShaderObjectARB( GL_GEOMETRY_SHADER_EXT );
ShaderAssembly = _readShaderFile( fileadd );
GshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_GeometryShader, 1, GshaderString, NULL );
glCompileShaderARB( g_GeometryShader);
	delete [] ShaderAssembly;
glGetObjectParameterivARB( g_GeometryShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_GeometryShader, sizeof(str), NULL, str);
printf("Warning: Geo Shader Compile Error\n%s\n",str); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"ScaffoldLDNIFragmentShader.frag");
g_FragShader = glCreateShaderObjectARB( GL_FRAGMENT_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
FshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_FragShader, 1, FshaderString, NULL );
glCompileShaderARB( g_FragShader);
	delete [] ShaderAssembly;
glGetObjectParameterivARB( g_FragShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_FragShader, sizeof(str), NULL, str);
printf("Warning: Vertex Shader Compile Error\n\n"); return false;
}
g_programObj = glCreateProgramObjectARB();
if (glGetError()!=GL_NO_ERROR) printf("Error: OpenGL!\n\n");
glAttachObjectARB( g_programObj, g_vertexShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Vertex Shader!\n\n");
glAttachObjectARB( g_programObj, g_GeometryShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Geometry Shader!\n\n");
glAttachObjectARB( g_programObj, g_FragShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Fragment Shader!\n\n");
//-----------------------------------------------------------------------------
// Configuration setting for geometry shader
glLinkProgramARB( g_programObj);
glGetObjectParameterivARB( g_programObj, GL_OBJECT_LINK_STATUS_ARB, &bLinked );
if( bLinked == false ) {
glGetInfoLogARB( g_programObj, sizeof(str), NULL, str );
printf("Linking Fail: %s\n",str); return false;
}
//-----------------------------------------------------------------------------------------
// Step 2: creating vertex and index array buffer
glGetError(); // for clean-up the error generated before
nodeNum=mesh->GetNodeNumber();
faceNum=mesh->GetFaceNumber();
float* verTex=(float*)malloc(nodeNum*3*sizeof(float));
memset(verTex,0,nodeNum*3*sizeof(float));
memcpy(verTex,mesh->GetNodeArrayPtr(),nodeNum*3*sizeof(float));
int* inDex=(int*)malloc(faceNum*3*sizeof(int));
memset(inDex,0,faceNum*3*sizeof(int));
unsigned int* meshptr = mesh->GetFaceTablePtr();
for(int i=0; i < faceNum; i++)
{ inDex[3*i] = meshptr[4*i]-1; inDex[3*i+1] = meshptr[4*i+1]-1; inDex[3*i+2] = meshptr[4*i+2]-1;
}
//memcpy(inDex,mesh->GetFaceTablePtr(),faceNum*3*sizeof(int));
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, nodeNum*3*sizeof(GLfloat), 0, GL_STATIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, nodeNum*3*sizeof(GLfloat), verTex);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glGenBuffers(1, &vboInd);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER_ARB, vboInd);
	glBufferData(GL_ELEMENT_ARRAY_BUFFER_ARB, faceNum*3*sizeof(GLuint), 0, GL_STATIC_DRAW);
	glBufferSubData(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, faceNum*3*sizeof(GLuint), inDex);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
if (glGetError()!=GL_NO_ERROR) printf("Error: buffer binding!\n\n");
free(verTex);
free(inDex);
//-----------------------------------------------------------------------------------------
GLint id0,id1,id2,id3,id4,id5,id6;
float centerPos[3];
centerPos[0]=(boundingBox[0]+boundingBox[1])*0.5f;
centerPos[1]=(boundingBox[2]+boundingBox[3])*0.5f;
centerPos[2]=(boundingBox[4]+boundingBox[5])*0.5f;
glUseProgramObjectARB(g_programObj);
{
id0 = glGetUniformLocationARB(g_programObj,"Unum");
glUniform3iARB(id0,UnitNum[0],UnitNum[1],UnitNum[2]);
id1 = glGetUniformLocationARB(g_programObj,"UOff");
glUniform3fARB(id1,UnitOff[0],UnitOff[1],UnitOff[2]);
id2 = glGetUniformLocationARB(g_programObj,"UWidth");
glUniform3fARB(id2,UnitWidth[0],UnitWidth[1],UnitWidth[2]);
id3 = glGetUniformLocationARB(g_programObj,"UFlip");
glUniform3iARB(id3,UnitFlip[0],UnitFlip[1],UnitFlip[2]);
id4 = glGetUniformLocationARB(g_programObj,"Cent");
glUniform3fARB(id4,centerPos[0],centerPos[1],centerPos[2]);
id5 = glGetUniformLocationARB(g_programObj,"bsingleCol");
glUniform1iARB(id5,bsingleCol);
id6 = glGetUniformLocationARB(g_programObj,"bsingleRow");
glUniform1iARB(id6,bsingleRow);
if (glGetError()!=GL_NO_ERROR) printf("Error: Unit Constant !\n\n");
_decomposeLDNIByFBOPBO(solid, vbo, vboInd, UnitNum[0]*UnitNum[1]*UnitNum[2], faceNum*3);
}
glUseProgramObjectARB(0);
//-----------------------------------------------------------------------------------------
// Step 6: free the memory
time=clock();
//-----------------------------------------------------------------------------------------
glDeleteBuffers(1, &vboInd);
glDeleteBuffers(1, &vbo);
glDeleteObjectARB( g_vertexShader);
glDeleteObjectARB( g_GeometryShader);
glDeleteObjectARB( g_FragShader);
glDeleteObjectARB( g_programObj);
//------------------------------------------------------------------------
printf("\nMemory clean-up time is %ld (ms)\n",clock()-time);
printf("--------------------------------------------------------------\n");
printf("Total time for sampling is %ld (ms)\n\n",clock()-totalTime);
return true;
}
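// _decomposeLDNIByFBOPBO: the core sampling loop. For each of the three axes it
// (1) renders all instances once with stencil-increment to count the layers per pixel
// (= samples per ray), (2) reads the stencil buffer back through a PBO and builds the
// prefix-sum index array, and (3) peels the depth layers one by one -- re-rendering with
// glStencilFunc(GL_GREATER, n+1, ...) -- copying each layer from the FBO texture into the
// sample arrays, and finally sorts every ray by depth.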
void LDNIcudaOperation::_decomposeLDNIByFBOPBO(LDNIcudaSolid *solid, GLuint vbo, GLuint vboI, int instanceCount, int indexCount)
{
unsigned int n_max,i,n;
float gWidth,origin[3];
unsigned int overall_n_max=0;
long readbackTime=0, sortingTime=0, tempTime;
hipEvent_t startClock, stopClock;
CUDA_SAFE_CALL( hipEventCreate( &startClock ) );
CUDA_SAFE_CALL( hipEventCreate( &stopClock ) );
tempTime=clock();
//------------------------------------------------------------------------
// Preparation
int nRes=solid->GetResolution(); gWidth=solid->GetSampleWidth();
float width=gWidth*(float)nRes;
solid->GetOrigin(origin[0],origin[1],origin[2]);
int arrsize=nRes*nRes;
//------------------------------------------------------------------------
// Step 1: Setup the rendering environment
glEnable(GL_DEPTH_TEST);
glEnable(GL_STENCIL_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glDisable(GL_POLYGON_OFFSET_FILL);
glDisable(GL_POLYGON_OFFSET_LINE);
glDisable(GL_BLEND);
glDisable(GL_POLYGON_SMOOTH); // turn off anti-aliasing
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
glDisable(GL_MAP_COLOR); glDisable(GL_DITHER);
glShadeModel(GL_FLAT);
glDisable(GL_LIGHTING); glDisable(GL_LIGHT0);
glDisable(GL_LOGIC_OP);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_ALPHA_TEST);
glGetError(); // for clean-up the error generated before
//------------------------------------------------------------------------
// create the FBO objects and texture for rendering
if (glewIsSupported("GL_EXT_framebuffer_object") == 0) printf("Warning: FBO is not supported!\n");
if (glGetError()!=GL_NO_ERROR) printf("Error: before framebuffer generation!\n");
//------------------------------------------------------------------------
GLuint fbo;
glGenFramebuffersEXT(1, &fbo);
if (glGetError()!=GL_NO_ERROR) printf("Error: framebuffer generation!\n");
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
if (glGetError()!=GL_NO_ERROR) printf("Error: framebuffer binding!\n");
//------------------------------------------------------------------------
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F_ARB, nRes, nRes, 0, GL_RGBA, GL_FLOAT, 0);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, tex, 0);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching texture to framebuffer generation!\n");
cudaGraphicsResource *sampleTex_resource;
CUDA_SAFE_CALL( hipGraphicsGLRegisterImage(&sampleTex_resource, tex, GL_TEXTURE_2D, hipGraphicsMapFlagsReadOnly) );
//------------------------------------------------------------------------
GLuint depth_and_stencil_rb;
glGenRenderbuffersEXT(1, &depth_and_stencil_rb);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_STENCIL_EXT, nRes, nRes);
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching renderbuffer of depth-buffer to framebuffer generation!\n");
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching renderbuffer of stencil-buffer to framebuffer generation!\n");
//------------------------------------------------------------------------
GLuint indexPBO;
glGenBuffers(1,&indexPBO); // generation of PBO for index array readback
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, indexPBO);
glBufferData(GL_PIXEL_PACK_BUFFER_ARB, nRes*nRes*sizeof(unsigned char), NULL, GL_STREAM_READ_ARB);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL( hipGLRegisterBufferObject(indexPBO) );
//------------------------------------------------------------------------
if (glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT)!=GL_FRAMEBUFFER_COMPLETE_EXT)
printf("Warning: the setting for rendering on FBO is not correct!\n");
else
printf("FBO has been created successfully!\n");
glPushAttrib(GL_VIEWPORT_BIT);
glViewport(0,0,nRes,nRes);
printf("Preparation time: %ld (ms)\n",clock()-tempTime);
//------------------------------------------------------------------------
// Step 2: Rendering to get the Hermite samples
for(short nAxis=0; nAxis<3; nAxis++) {
//---------------------------------------------------------------------------------------
// Rendering step 1: setting the viewing window
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
//---------------------------------------------------------------------------------------
		// The eye is located at (0, 0, 0); with near=width*0.5 and far=-width*0.5 in the
		// glOrtho() call below, the view volume spans z in [-width*0.5, width*0.5], so the whole sampling cube is enclosed
glOrtho(-width*0.5f,width*0.5f,-width*0.5f,width*0.5f,width*0.5f,-width*0.5f);
// Note that: in "glOrtho(left,right,bottom,top,near,far);"
// (left,right,bottom,top) are located at the boundary of pixel instead of
// the center of pixels
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
//---------------------------------------------------------------------------------------
// Rendering step 2: determine the number of layers
glClearColor( 1.0f, 1.0f, 1.0f, 1.0f );
glClearDepth(1.0);
glClearStencil(0); glColor3f(1,1,1);
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDepthFunc(GL_ALWAYS);
glStencilFunc(GL_GREATER, 1, 0xff);
glStencilOp(GL_INCR, GL_INCR, GL_INCR);
glPushMatrix();
switch(nAxis) {
case 0:{glRotatef(-90,0,1,0); glRotatef(-90,1,0,0); }break;
case 1:{glRotatef(90,0,1,0); glRotatef(90,0,0,1); }break;
}
glEnableClientState( GL_VERTEX_ARRAY );
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo);
glVertexPointer(3, GL_FLOAT, 0, 0);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, vboI);
glDrawElementsInstanced(GL_TRIANGLES,indexCount,GL_UNSIGNED_INT, 0 ,instanceCount);
glDisableClientState( GL_VERTEX_ARRAY );
glFlush();
//--------------------------------------------------------------------------------------------------------
// reading stencil buffer into the device memory of CUDA
tempTime=clock();
glReadBuffer(GL_COLOR_ATTACHMENT0_EXT);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, indexPBO);
GLint OldPackAlignment;
glGetIntegerv(GL_PACK_ALIGNMENT,&OldPackAlignment);
glPixelStorei(GL_PACK_ALIGNMENT,1); // Important!!! Without this, the read-back could be abnormal.
glReadPixels(0,0,nRes,nRes,GL_STENCIL_INDEX,GL_UNSIGNED_BYTE,0);
glPixelStorei(GL_PACK_ALIGNMENT,OldPackAlignment);
//--------------------------------------------------------------------------------------------------------
unsigned char *devStencilBufferPtr;
unsigned int *devResArrayPtr;
unsigned int *devIndexArrayPtr=solid->GetIndexArrayPtr(nAxis);
CUDA_SAFE_CALL( hipGLMapBufferObject__( (void **)&devStencilBufferPtr, indexPBO) );
CUDA_SAFE_CALL( hipMalloc( (void**)&devResArrayPtr, BLOCKS_PER_GRID*sizeof(unsigned int) ) );
//--------------------------------------------------------------------------------------------------------
// building the indexArray on device
hipLaunchKernelGGL(( krLDNISampling_CopyIndexAndFindMax), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, devStencilBufferPtr,
devIndexArrayPtr,devResArrayPtr,arrsize);
//--------------------------------------------------------------------------------------------------------
// read back the max number of layers -- "n_max"
unsigned int* resArrayPtr;
resArrayPtr=(unsigned int *)malloc(BLOCKS_PER_GRID*sizeof(unsigned int));
CUDA_SAFE_CALL( hipMemcpy( resArrayPtr, devResArrayPtr, BLOCKS_PER_GRID*sizeof(unsigned int), hipMemcpyDeviceToHost ) );
n_max=0;
for(i=0;i<BLOCKS_PER_GRID;i++) n_max = MAX(n_max,resArrayPtr[i]);
hipFree(devResArrayPtr); free(resArrayPtr);
//--------------------------------------------------------------------------------------------------------
// read back the number of samples -- "sampleNum"
unsigned int sampleNum=0;
tempTime=clock()-tempTime; //readbackTime+=tempTime;
printf("Stencil buffer processing time: %ld (ms)\n",tempTime);
long scanTime=clock();
		// exclusive scan of the per-ray sample counts (performed on the host here)
resArrayPtr=(unsigned int *)malloc((arrsize+1)*sizeof(unsigned int));
CUDA_SAFE_CALL( hipMemcpy( resArrayPtr, devIndexArrayPtr, (arrsize+1)*sizeof(unsigned int), hipMemcpyDeviceToHost ) );
sampleNum=0;
for(int k=0;k<arrsize;k++) {sampleNum+=resArrayPtr[k]; resArrayPtr[k]=sampleNum;}
for(int k=arrsize;k>0;k--) {resArrayPtr[k]=resArrayPtr[k-1];}
resArrayPtr[0]=0;
CUDA_SAFE_CALL( hipMemcpy( devIndexArrayPtr, resArrayPtr, (arrsize+1)*sizeof(unsigned int), hipMemcpyHostToDevice ) );
free(resArrayPtr);
scanTime=clock()-scanTime; printf("Scanning time: %ld (ms)\n",scanTime);
//--------------------------------------------------------------------------------------------------------
CUDA_SAFE_CALL( hipGLUnmapBufferObject( indexPBO ) );
glUnmapBuffer(GL_PIXEL_PACK_BUFFER_ARB);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
printf("n_max=%d sampleNum=%d\n",n_max,sampleNum);
if (n_max>overall_n_max) overall_n_max=n_max;
if (sampleNum==0) continue;
//---------------------------------------------------------------------------------------
// Rendering step 3: decomposing the Layered Depth Images (LDIs) and record its corresponding normals
solid->MallocSampleMemory(nAxis,sampleNum);
float* devNxArrayPtr=solid->GetSampleNxArrayPtr(nAxis);
float* devNyArrayPtr=solid->GetSampleNyArrayPtr(nAxis);
float* devDepthArrayPtr=solid->GetSampleDepthArrayPtr(nAxis);
tempTime=clock();
for(n=1;n<=n_max;n++) {
CUDA_SAFE_CALL( hipGraphicsMapResources( 1, &sampleTex_resource, NULL ) );
hipArray *in_array;
CUDA_SAFE_CALL( hipGraphicsSubResourceGetMappedArray( &in_array, sampleTex_resource, 0, 0));
CUDA_SAFE_CALL( hipBindTextureToArray(tex2DFloat4In, in_array) );
//--------------------------------------------------------------------------------------------------------
// fill the sampleArray on device
hipLaunchKernelGGL(( krLDNISampling_CopySamples), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, devNxArrayPtr, devNyArrayPtr,
devDepthArrayPtr, n, arrsize, width, gWidth, nRes, devIndexArrayPtr);
CUDA_SAFE_CALL( hipGraphicsUnmapResources( 1, &sampleTex_resource, NULL ) );
if (n==n_max) break;
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glStencilFunc(GL_GREATER, n+1, 0xff);
glStencilOp(GL_KEEP, GL_INCR, GL_INCR);
{
glEnableClientState( GL_VERTEX_ARRAY );
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo);
glVertexPointer(3, GL_FLOAT, 0, 0);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, vboI);
glDrawElementsInstanced(GL_TRIANGLES,indexCount,GL_UNSIGNED_INT, 0 ,instanceCount);
glDisableClientState( GL_VERTEX_ARRAY );
}
glFlush();
}
tempTime=clock()-tempTime; readbackTime+=tempTime;
//------------------------------------------------------------------------
// Rendering step 4: sorting the samples
CUDA_SAFE_CALL( hipEventRecord( startClock, 0 ) );
CUDA_SAFE_CALL( hipEventSynchronize( startClock ) );
hipLaunchKernelGGL(( krLDNISampling_SortSamples), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, devNxArrayPtr, devNyArrayPtr,
devDepthArrayPtr, arrsize, devIndexArrayPtr);
CUDA_SAFE_CALL( hipEventRecord( stopClock, 0 ) );
CUDA_SAFE_CALL( hipEventSynchronize( stopClock ) );
float elapsedTime;
CUDA_SAFE_CALL( hipEventElapsedTime( &elapsedTime,
startClock, stopClock ) );
// printf( "Sorting time is: %3.1f (ms)\n", elapsedTime );
sortingTime+=(long)elapsedTime;
}
//------------------------------------------------------------------------------------
// Step 3: Set the rendering parameters back
//------------------------------------------------------------------------------------
// detach FBO
glPopAttrib();
// release memory for PBO and cuda's map
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL( hipGLUnregisterBufferObject( indexPBO ) );
glDeleteBuffers(1, &indexPBO);
CUDA_SAFE_CALL( hipGraphicsUnregisterResource( sampleTex_resource) );
// release memory for the 2D texture
glBindTexture(GL_TEXTURE_2D, 0);
glDeleteTextures(1, &tex);
// release memory for the frame-buffer object
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
glDeleteFramebuffersEXT(1, &fbo);
// release memory for the render-buffer object
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, 0);
glDeleteRenderbuffersEXT(1, &depth_and_stencil_rb);
//------------------------------------------------------------------------------------
glEnable(GL_POLYGON_OFFSET_FILL);
glEnable(GL_POLYGON_OFFSET_LINE);
glEnable(GL_BLEND);
glEnable(GL_DITHER);
glDisable(GL_STENCIL_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_MAP_COLOR);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHTING); glEnable(GL_LIGHT0);
// glEnable(GL_POLYGON_SMOOTH);// adding this will make the invalid display on the Thinkpad laptop
glEnable(GL_POINT_SMOOTH);
// glEnable(GL_LINE_SMOOTH); // adding this will make the Compaq laptop's running fail
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
printf("\nn_max=%ld \n",overall_n_max);
printf("Texture Size: %f (MB)\n",(float)((float)overall_n_max*(float)nRes*(float)nRes*7.0f)/(1024.0f*1024.0f));
printf("Readback time: %ld (ms)\nSorting time: %ld (ms)\n",
readbackTime, sortingTime);
CUDA_SAFE_CALL( hipEventDestroy( startClock ) );
CUDA_SAFE_CALL( hipEventDestroy( stopClock ) );
}
//--------------------------------------------------------------------------------------------
// adaptive slicing related
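// krLDNIAdaptiveSlicing_CalculateRayLength: for every ray, accumulates the length of the
// solid intervals by pairing consecutive entering/leaving samples
// (|depth[k+1]| - |depth[k]| for k = 0, 2, 4, ...), giving the material length along that ray.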
extern __global__ void krLDNIAdaptiveSlicing_CalculateRayLength(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr, float *devRayLengthArrayPtr, int res) {
int index = threadIdx.x + blockIdx.x*blockDim.x;
int st, num, stRes, numRes, k;
int ix, iy;
float length = 0.0f;
while (index<arrsize) {
st = devIndexArrayPtr[index]; num = devIndexArrayPtr[index + 1] - st;
ix = index%res; iy = (index / res);
for (int k = 0; k < num - 1; k = k + 2) {
length += fabs(devDepthArrayPtr[k +1 + st]) - fabs(devDepthArrayPtr[k + st]);
}
devRayLengthArrayPtr[index] = length;
index += blockDim.x * gridDim.x;
}
}
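// krLDNIAdaptiveSlicing_CalculateLayerArea: each thread (index < res) sums the ray lengths
// devRayLengthArrayPtr[k*res+index] over k = 0..res-1 and stores the total in
// devAreaArrayPtr[index] -- an area estimate accumulated across all rays that share the
// same in-layer coordinate.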
extern __global__ void krLDNIAdaptiveSlicing_CalculateLayerArea(float *devRayLengthArrayPtr, unsigned int *devIndexArrayPtr, float *devAreaArrayPtr, int res) {
int index = threadIdx.x + blockIdx.x*blockDim.x;
float area = 0.0f;
while (index < res) {
for (int k = 0; k < res; k++) {
area += devRayLengthArrayPtr[k*res+index];
}
devAreaArrayPtr[index] = area;
index += blockDim.x * gridDim.x;
}
}
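// krLDNIAdaptiveSlicing_CalculateVolumeErrorPerRow: one thread per candidate layer
// thickness (iRow). For every ray it builds a 0/1 slice-occupancy profile (raySliceArray)
// plus its running sum (raySliceCumArray), then accumulates into
// devVolumeErrorMatrix[iSlice + iRow*totalSliceCount] the number of voxels that disagree
// with printing the slab [iSlice - iRow - minSliceCount, iSlice] as a single layer.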
extern __global__ void krLDNIAdaptiveSlicing_CalculateVolumeErrorPerRow(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr, float *devVolumeErrorMatrix, int res, int minSliceCount, int totalSliceCount, int oneLayerSliceCount, float sliceSize, float y_min, float oy, float ww) {
int index = threadIdx.x + blockIdx.x*blockDim.x;
int iRow = index;
float raySliceArray[MAX_NUM_OF_SLICE_ON_RAY]; // temp array to record the 0/1 value for each slice on a ray
	float raySliceCumArray[MAX_NUM_OF_SLICE_ON_RAY]; // temp array to record the cumulative 0/1 value from the first slice through each slice on a ray
while (index < oneLayerSliceCount) {
int buildingAxis = 1;
int i = 0;
int j = 0;
float temp = oy;
for (int j = 0; j<res; j++) {
for (int i = 0; i<res; i++) {
// process each ray
int index_ray = j*res + i;
float inside = 0.0f;
int pre_slice = 0;
unsigned int now_slice = 0;
raySliceArray[0] = 0.0f;
for (int is = 0; is < totalSliceCount; is++) {
raySliceArray[is] = raySliceArray[0] + 1.0f;
raySliceCumArray[is] = 1.0f;
}
for (int k = devIndexArrayPtr[index_ray]; k<devIndexArrayPtr[index_ray + 1]; k++) {
					now_slice = floor((oy + fabs(devDepthArrayPtr[k]) - y_min) / sliceSize);	// same slice-index formula as the per-tile kernel below
now_slice = MIN(now_slice, totalSliceCount - 1); now_slice = MAX(now_slice, 0);
for (int is = pre_slice + 1; is <= now_slice; is++) {
raySliceArray[is] = inside;
raySliceCumArray[is] = raySliceCumArray[is - 1] + inside;
}
pre_slice = now_slice;
inside = 1 - inside;
}
// the last point on the ray to the top slice
for (int is = pre_slice; is+1 <totalSliceCount; is++) {
raySliceArray[is] = 0;
raySliceCumArray[is] = raySliceCumArray[is - 1];
}
//// add to the volume error matrix
for (int iSlice = 0; iSlice < totalSliceCount; iSlice++) {
int ivol = iSlice + iRow*totalSliceCount;
if (iSlice - iRow - minSliceCount >= 0) {
bool isThisLayerSolid = raySliceArray[iSlice - iRow - minSliceCount + 1] == 1;
if (isThisLayerSolid) {
// the error is number of non-solid voxels
devVolumeErrorMatrix[ivol] += (iRow + minSliceCount) - (raySliceCumArray[iSlice] - raySliceCumArray[iSlice - iRow - minSliceCount]);
}
else {
devVolumeErrorMatrix[ivol] += raySliceCumArray[iSlice] - raySliceCumArray[iSlice - iRow - minSliceCount];
}
}
}
}
}
index += blockDim.x * gridDim.x;
}
}
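// krLDNIAdaptiveSlicing_CalculateVolumeErrorPerTile: same volume-error computation as
// above, but parallelized over tiles of rays instead of over rows -- each thread owns a
// contiguous block of rayCountPerTile rays and writes into its own
// totalSliceCount x oneLayerSliceCount block of devVolumeErrorMatrix, which is then
// reduced across tiles by the kernel below.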
extern __global__ void krLDNIAdaptiveSlicing_CalculateVolumeErrorPerTile(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr, float *devVolumeErrorMatrix, int res, int minSliceCount, int totalSliceCount, int oneLayerSliceCount, float sliceSize, float y_min, float oy, float ww, int tileCount) {
int index = threadIdx.x + blockIdx.x*blockDim.x;
int iTile = index;
float raySliceArray[MAX_NUM_OF_SLICE_ON_RAY]; // temp array to record the 0/1 value for each slice on a ray
	float raySliceCumArray[MAX_NUM_OF_SLICE_ON_RAY]; // temp array to record the cumulative 0/1 value from the first slice through each slice on a ray
int rayCountPerTile = arrsize / tileCount + 1;
while (index < tileCount) {
if (rayCountPerTile*index >= arrsize)
break;
//initialize;
for (int ivol = iTile*totalSliceCount*oneLayerSliceCount; ivol < (iTile + 1)*totalSliceCount*oneLayerSliceCount; ivol++) {
devVolumeErrorMatrix[ivol] = 0.0f;
}
for (int index_ray = iTile*rayCountPerTile; index_ray < MIN((iTile+1)*rayCountPerTile,arrsize); index_ray++) {
// process each ray
float inside = 0.0f;
int pre_slice = 0;
int now_slice = 0;
for (int is = 0; is < totalSliceCount; is++) {
raySliceArray[is] = 0.0f;
raySliceCumArray[is] = 0.0f;
}
for (int k = devIndexArrayPtr[index_ray]; k<devIndexArrayPtr[index_ray + 1]; k++) {
now_slice = floor((oy + fabs(devDepthArrayPtr[k]) - y_min) / sliceSize);
now_slice = MIN(now_slice, totalSliceCount - 1); now_slice = MAX(now_slice, 0);
for (int is = pre_slice + 1; is <= now_slice; is++) {
raySliceArray[is] = inside;
raySliceCumArray[is] = raySliceCumArray[is - 1] + inside;
}
pre_slice = now_slice;
inside = 1 - inside;
}
// the last point on the ray to the top slice
for (int is = pre_slice + 1; is < totalSliceCount; is++) {
raySliceArray[is] = 0;
raySliceCumArray[is] = raySliceCumArray[is - 1];
}
//// add to the volume error matrix
for (int ivol = iTile*totalSliceCount*oneLayerSliceCount; ivol < (iTile + 1)*totalSliceCount*oneLayerSliceCount; ivol++) {
int iSlice = (ivol % (totalSliceCount*oneLayerSliceCount)) % totalSliceCount;
int iRow = (ivol % (totalSliceCount*oneLayerSliceCount)) / totalSliceCount;
if (iSlice - iRow - minSliceCount >= 0) {
bool isThisLayerSolid = raySliceArray[iSlice - iRow - minSliceCount + 1] == 1;
if (isThisLayerSolid) {
// the error is number of non-solid voxels
devVolumeErrorMatrix[ivol] += (iRow + minSliceCount) - (raySliceCumArray[iSlice] - raySliceCumArray[iSlice - iRow - minSliceCount]);
}
else {
devVolumeErrorMatrix[ivol] += raySliceCumArray[iSlice] - raySliceCumArray[iSlice - iRow - minSliceCount];
}
}
}
}
index += blockDim.x * gridDim.x;
}
}
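// krLDNIAdaptiveSlicing_ReduceVolumeErrorByTile: adds the per-tile partial volume-error
// matrices (computed by the kernel above) into the first tile's block, producing the
// final oneLayerSliceCount x totalSliceCount error matrix at the start of
// devVolumeErrorMatrix.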
extern __global__ void krLDNIAdaptiveSlicing_ReduceVolumeErrorByTile(float *devVolumeErrorMatrix, int oneLayerSliceCount, int totalSliceCount, int tileCount) {
int index = threadIdx.x + blockIdx.x*blockDim.x;
while (index < oneLayerSliceCount*totalSliceCount) {
for (int i = 1; i < tileCount; i++) {
devVolumeErrorMatrix[index] += devVolumeErrorMatrix[i*oneLayerSliceCount*totalSliceCount + index];
}
index += blockDim.x * gridDim.x;
}
} | d0a1fe76dbaac360ed78b0369658e9c0a66ae44e.cu | /*
* Copyright (C) 2014, Geometric Design and Manufacturing Lab in THE CHINESE UNIVERSITY OF HONG KONG
* All rights reserved.
*
* http://ldnibasedsolidmodeling.sourceforge.net/
*
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <malloc.h>
#include <time.h>
#include <sys/stat.h>
#include "../common/GL/glew.h"
#include "cuda.h"
#include "cutil.h"
#include "cuda_gl_interop.h"
#include "..\GLKLib\GLK.h"
#include "PMBody.h"
#include "LDNIcpuSolid.h"
#include "LDNIcudaSolid.h"
#include "LDNIcudaOperation.h"
#include <thrust/version.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <iostream>
#define GPU_BASED_SCAN true
extern GLK _pGLK;
extern bool _bExpandableWorkingSpace;
//--------------------------------------------------------------------------------------------
texture<float4,2> tex2DFloat4In;
extern __global__ void krLDNISuperUnion_CopySamples(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int n, int arrsize, int res, unsigned int *devIndexArrayPtr);
extern __global__ void krLDNIBoolean_SuperUnionOnRays(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr, unsigned int *devIndexArrayPtr,
unsigned int *devIndexArrayPtrRes, int arrsize);
extern __global__ void krLDNIBoolean_IdentifyEnterLeaveOnRays(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr, unsigned int *devIndexArrayPtr, int arrsize);
extern __global__ void krLDNIBoolean_BooleanOnRays(float *devNxArrayPtrA, float *devNyArrayPtrA, float *devDepthArrayPtrA, unsigned int *devIndexArrayPtrA,
float *devNxArrayPtrB, float *devNyArrayPtrB, float *devDepthArrayPtrB, unsigned int *devIndexArrayPtrB,
unsigned int *devIndexArrayPtrRes, int arrsize, short nOperationType);
extern __global__ void krLDNIBoolean_ResultSampleCollection(float *devNxArrayPtrA, float *devNyArrayPtrA, float *devDepthArrayPtrA, unsigned int *devIndexArrayPtrA,
float *devNxArrayPtrB, float *devNyArrayPtrB, float *devDepthArrayPtrB, unsigned int *devIndexArrayPtrB,
float *devNxArrayPtrRes, float *devNyArrayPtrRes, float *devDepthArrayPtrRes, unsigned int *devIndexArrayPtrRes, int arrsize);
extern __global__ void krLDNIBoolean_ResultSampleCollection(float *devNxArrayPtrA, float *devNyArrayPtrA, float *devDepthArrayPtrA, unsigned int *devIndexArrayPtrA,
float *devNxArrayPtrRes, float *devNyArrayPtrRes, float *devDepthArrayPtrRes, unsigned int *devIndexArrayPtrRes, int arrsize, float width, float gwidth);
extern __global__ void krLDNIBilateralNormalFilter_PerRay(unsigned int* xIndexArray, unsigned int* yIndexArray, unsigned int* zIndexArray,
float* xNxArray, float* yNxArray, float* zNxArray, float* xNyArray, float* yNyArray, float* zNyArray,
float* xDepthArray, float* yDepthArray, float* zDepthArray, float *buffer,
int arrsize, short nAxis, int res, float ww, float ox, float oy, float oz, unsigned int nSupportSize, float normalPara);
extern __global__ void krLDNIBilateralNormalFilter_PerSample(unsigned int* xIndexArray, unsigned int* yIndexArray, unsigned int* zIndexArray,
float* xNxArray, float* yNxArray, float* zNxArray, float* xNyArray, float* yNyArray, float* zNyArray,
float* xDepthArray, float* yDepthArray, float* zDepthArray, float *buffer,
int sampleNum, short nAxis, int res, float ww, unsigned int nSupportSize, float normalPara);
extern __global__ void krLDNINormalProcessing_PreProc(unsigned int* indexArray, float *buffer, int res, int arrsize);
extern __global__ void krLDNINormalProcessing_Update(int sampleNum, float *nxArray, float *nyArray, float *depthArray, float *buffer);
extern __global__ void krLDNINormalProcessing_OrientationCorrectionByVoting(
unsigned int* xIndexArray, unsigned int* yIndexArray, unsigned int* zIndexArray,
float* xNxArray, float* yNxArray, float* zNxArray, float* xNyArray, float* yNyArray, float* zNyArray,
float* xDepthArray, float* yDepthArray, float* zDepthArray, float *buffer,
int sampleNum, short nAxis, int res, float ww, unsigned int nSupportSize);
extern __global__ void krLDNINormalReconstruction_PerSample(unsigned int* xIndexArray, unsigned int* yIndexArray, unsigned int* zIndexArray,
float* xNxArray, float* yNxArray, float* zNxArray, float* xNyArray, float* yNyArray, float* zNyArray,
float* xDepthArray, float* yDepthArray, float* zDepthArray, float *buffer,
int sampleNum, short nAxis, int res, float ww, unsigned int nSupportSize);
extern __global__ void krLDNISampling_SortSamples(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr);
extern __global__ void krLDNISampling_CopySamples(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int n, int arrsize, float width, float sampleWidth, int res,
unsigned int *devIndexArrayPtr);
extern __global__ void krLDNISampling_CopyIndexAndFindMax(unsigned char *devStencilBufferPtr, unsigned int *devIndexArrayPtr,
unsigned int *devResArrayPtr, int arrsize );
extern __global__ void krLDNIcudaSolid_depthSampleAdd(float *depthSamples, float addValue, unsigned int sampleNum);
extern __global__ void krLDNIcudaSolid_fillNewIndexBySampleNumber(unsigned int *newIndexArray, unsigned int *indexArray, int res, int newRes, int sdi, int sdj);
extern __global__ void krLDNIRegularization_RegularizationOnRays(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
unsigned int *devIndexArrayPtr, unsigned int *devIndexArrayPtrRes, int arrsize, float eps);
extern __global__ void krLDNIRegularization_ResultSampleCollection(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
unsigned int *devIndexArrayPtr, float *devNxArrayPtrRes, float *devNyArrayPtrRes, float *devDepthArrayPtrRes,
unsigned int *devIndexArrayPtrRes, int arrsize);
extern bool initGLInteroperabilityOnCUDA(int major, int minor);
//--------------------------------------------------------------------------------------------
// adpative slicing related
extern __global__ void krLDNIAdaptiveSlicing_CalculateRayLength(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr, float *devRayLengthArrayPtr, int res);
extern __global__ void krLDNIAdaptiveSlicing_CalculateLayerArea(float *devRayLengthArrayPtr, unsigned int *devIndexArrayPtr, float *devAreaArrayPtr, int res);
extern __global__ void krLDNIAdaptiveSlicing_CalculateVolumeErrorPerRow(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr, float *devVolumeErrorMatrix, int res, int minSliceCount, int totalSliceCount, int oneLayerSliceCount, float sliceSize, float y_min, float oy, float ww);
extern __global__ void krLDNIAdaptiveSlicing_CalculateVolumeErrorPerTile(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr, float *devVolumeErrorMatrix, int res, int minSliceCount, int totalSliceCount, int oneLayerSliceCount, float sliceSize, float y_min, float oy, float ww, int tileCount);
extern __global__ void krLDNIAdaptiveSlicing_ReduceVolumeErrorByTile(float *devVolumeErrorMatrix, int oneLayerSliceCount, int totalSliceCount, int tileCount);
//--------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------
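// MultiObjectSamplingInOneSolid: samples every mesh in the list into a single LDNI solid.
// It compiles the super-union shaders, uploads one VBO/IBO pair per mesh, and renders all
// of them through the multi-mesh _decomposeLDNIByFBOPBO(), which sets a per-mesh "mesh_ID"
// uniform before each draw; the accumulated samples are resolved later by
// _UnionMultiObjects().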
bool LDNIcudaOperation::MultiObjectSamplingInOneSolid(LDNIcudaSolid* &solid, GLKObList* meshlist, float boundingBox[], int res)
{
float origin[3],gWidth;
char fileadd[256];
long time=clock(),totalTime=clock();
//---------------------------------------------------------------------------------
solid=new LDNIcudaSolid;
solid->MallocMemory(res);
solid->SetBoundingBox(boundingBox);
gWidth=(boundingBox[1]-boundingBox[0])/(float)res;
solid->SetSampleWidth(gWidth);
origin[0]=boundingBox[0]+gWidth*0.5f;
origin[1]=boundingBox[2]+gWidth*0.5f;
origin[2]=boundingBox[4]+gWidth*0.5f;
solid->SetOrigin(origin[0],origin[1],origin[2]);
//---------------------------------------------------------------------------------
// For using OpenGL Shading Language to implement the sampling procedure
if (glewInit() != GLEW_OK) {printf("glewInit failed. Exiting...\n"); return false;}
//-----------------------------------------------------------------------------------------
GLhandleARB g_programObj, g_vertexShader, g_GeometryShader, g_FragShader;
const char *VshaderString[1],*GshaderString[1], *FshaderString[1];
GLint bCompiled = 0, bLinked = 0;
char str[4096] = "";
//-----------------------------------------------------------------------------------------
// Step 1: Setup the shaders
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"SuperUnionLDNIVertexShader.vert");
g_vertexShader = glCreateShaderObjectARB( GL_VERTEX_SHADER_ARB );
unsigned char *ShaderAssembly = _readShaderFile( fileadd );
VshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_vertexShader, 1, VshaderString, NULL );
glCompileShaderARB( g_vertexShader);
	delete [] ShaderAssembly;
glGetObjectParameterivARB( g_vertexShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_vertexShader, sizeof(str), NULL, str);
printf("Warning: Vertex Shader Compile Error \n%s\n",str); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"SuperUnionLDNIGeometryShader.geo");
g_GeometryShader = glCreateShaderObjectARB( GL_GEOMETRY_SHADER_EXT );
ShaderAssembly = _readShaderFile( fileadd );
GshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_GeometryShader, 1, GshaderString, NULL );
glCompileShaderARB( g_GeometryShader);
	delete [] ShaderAssembly;
glGetObjectParameterivARB( g_GeometryShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_GeometryShader, sizeof(str), NULL, str);
printf("Warning: Geo Shader Compile Error\n%s\n",str); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"SuperUnionLDNIFragmentShader.frag");
g_FragShader = glCreateShaderObjectARB( GL_FRAGMENT_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
FshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_FragShader, 1, FshaderString, NULL );
glCompileShaderARB( g_FragShader);
	delete [] ShaderAssembly;
glGetObjectParameterivARB( g_FragShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_FragShader, sizeof(str), NULL, str);
printf("Warning: Vertex Shader Compile Error\n\n"); return false;
}
g_programObj = glCreateProgramObjectARB();
if (glGetError()!=GL_NO_ERROR) printf("Error: OpenGL!\n\n");
glAttachObjectARB( g_programObj, g_vertexShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Vertex Shader!\n\n");
glAttachObjectARB( g_programObj, g_GeometryShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Geometry Shader!\n\n");
glAttachObjectARB( g_programObj, g_FragShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Fragment Shader!\n\n");
//-----------------------------------------------------------------------------
// Configuration setting for geometry shader
glLinkProgramARB( g_programObj);
glGetObjectParameterivARB( g_programObj, GL_OBJECT_LINK_STATUS_ARB, &bLinked );
if( bLinked == false ) {
glGetInfoLogARB( g_programObj, sizeof(str), NULL, str );
printf("Linking Fail: %s\n",str); return false;
}
//-----------------------------------------------------------------------------------------
// Step 2: creating vertex and index array buffer
glGetError(); // for clean-up the error generated before
int meshNum = meshlist->GetCount();
GLuint* vbo = (GLuint*)malloc(meshNum*sizeof(GLuint));
GLuint* vboInd = (GLuint*)malloc(meshNum*sizeof(GLuint));
GLKPOSITION Pos;
int nodeNum,faceNum,i=0,j=0;
float* verTex;
float* tempver;
int* inDex;
int* tempinD;
unsigned int* meshptr;
int* indexCount;
indexCount = (int*)malloc(meshNum*sizeof(int));
printf("Mesh Num : %d \n",meshNum);
verTex = (float*)malloc(sizeof(float));
inDex = (int*)malloc(sizeof(int));
glGenBuffers(meshNum, vbo);
glGenBuffers(meshNum, vboInd);
for(Pos=meshlist->GetHeadPosition();Pos!=NULL;j++) {
QuadTrglMesh *mesh=(QuadTrglMesh *)(meshlist->GetNext(Pos));
nodeNum = mesh->GetNodeNumber();
faceNum = mesh->GetFaceNumber();
printf("node num %d %d\n",nodeNum,faceNum);
tempver = (float*)realloc(verTex,nodeNum*3*sizeof(float));
if (tempver!=NULL)
verTex = tempver;
else
{
free(verTex);
printf("realloc memeory error!!");
return false;
}
tempinD = (int*)realloc(inDex,faceNum*3*sizeof(int));
if (tempinD!=NULL)
inDex = tempinD;
else
{
free(inDex);
printf("realloc memeory error!!");
return false;
}
memset(verTex,0,nodeNum*3*sizeof(float));
memcpy(verTex,mesh->GetNodeArrayPtr(),nodeNum*3*sizeof(float));
memset(inDex,0,faceNum*3*sizeof(int));
meshptr = mesh->GetFaceTablePtr();
for(i=0; i < faceNum; i++)
{ inDex[3*i] = meshptr[4*i]-1; inDex[3*i+1] = meshptr[4*i+1]-1; inDex[3*i+2] = meshptr[4*i+2]-1;
}
indexCount[j] = faceNum*3;
glBindBuffer(GL_ARRAY_BUFFER, vbo[j]);
glBufferData(GL_ARRAY_BUFFER, nodeNum*3*sizeof(GLfloat), 0, GL_STATIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, nodeNum*3*sizeof(GLfloat), verTex);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER_ARB, vboInd[j]);
		glBufferData(GL_ELEMENT_ARRAY_BUFFER_ARB, faceNum*3*sizeof(GLuint), 0, GL_STATIC_DRAW);
		glBufferSubData(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, faceNum*3*sizeof(GLuint), inDex);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
if (glGetError()!=GL_NO_ERROR) printf("Error: buffer binding!\n\n");
}
free(verTex);
free(inDex);
//-----------------------------------------------------------------------------------------
float centerPos[3];
centerPos[0]=(boundingBox[0]+boundingBox[1])*0.5f;
centerPos[1]=(boundingBox[2]+boundingBox[3])*0.5f;
centerPos[2]=(boundingBox[4]+boundingBox[5])*0.5f;
glUseProgramObjectARB(g_programObj);
{
_decomposeLDNIByFBOPBO(solid, vbo, vboInd, meshNum, centerPos, g_programObj,indexCount);
}
glUseProgramObjectARB(0);
//-----------------------------------------------------------------------------------------
// Step 6: free the memory
time=clock();
//-----------------------------------------------------------------------------------------
glDeleteBuffers(meshNum, vboInd);
glDeleteBuffers(meshNum, vbo);
glDeleteObjectARB( g_vertexShader);
glDeleteObjectARB( g_GeometryShader);
glDeleteObjectARB( g_FragShader);
glDeleteObjectARB( g_programObj);
free(indexCount);
//------------------------------------------------------------------------
printf("\nMemory clean-up time is %ld (ms)\n",clock()-time);
printf("--------------------------------------------------------------\n");
printf("Total time for sampling is %ld (ms)\n\n",clock()-totalTime);
return true;
}
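// SuperUnionOperation: top-level entry for unioning a list of meshes -- it squares and
// pads the shared bounding box, samples all meshes into one LDNI solid and then resolves
// the union directly on the rays.
//
// Usage sketch (illustrative only; the real call sites live in the application code, and
// the mesh list and resolution shown here are placeholders):
//   GLKObList meshList;              // filled with QuadTrglMesh* entries elsewhere
//   LDNIcudaSolid *solid = NULL;
//   float bbox[6];                   // common bounding box of all meshes in the list
//   LDNIcudaOperation::SuperUnionOperation(solid, &meshList, bbox, 1024);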
bool LDNIcudaOperation::SuperUnionOperation(LDNIcudaSolid* &solid, GLKObList* meshlist, float boundingBox[],int res)
{
long time=clock(),totalTime=clock();
float xx=(boundingBox[0]+boundingBox[1])*0.5f;
float yy=(boundingBox[2]+boundingBox[3])*0.5f;
float zz=(boundingBox[4]+boundingBox[5])*0.5f;
float ww=boundingBox[1]-boundingBox[0];
if ((boundingBox[3]-boundingBox[2])>ww) ww=boundingBox[3]-boundingBox[2];
if ((boundingBox[5]-boundingBox[4])>ww) ww=boundingBox[5]-boundingBox[4];
ww=ww*0.55+ww/(float)(res-1)*2.0;
boundingBox[0]=xx-ww; boundingBox[1]=xx+ww;
boundingBox[2]=yy-ww; boundingBox[3]=yy+ww;
boundingBox[4]=zz-ww; boundingBox[5]=zz+ww;
if (!MultiObjectSamplingInOneSolid(solid, meshlist, boundingBox, res)) return false;
if (!_UnionMultiObjects(solid, res)) return false;
return true;
}
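// _UnionMultiObjects: resolves the union of all sampled objects on the rays of the LDNI
// solid produced by MultiObjectSamplingInOneSolid(). Per axis it marks entering/leaving
// samples, sorts each ray by depth, counts the surviving samples per ray
// (krLDNIBoolean_SuperUnionOnRays), compacts the index array with a Thrust exclusive
// scan, and finally gathers the surviving samples into freshly allocated arrays.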
bool LDNIcudaOperation::_UnionMultiObjects(LDNIcudaSolid* &inputSolid, int res)
{
unsigned int arrsize=res*res;
float width, gwidth;
float bbox[6];
if (inputSolid->GetSampleNumber()==0) {
printf("No Samples!");
return false;
}
inputSolid->GetBoundingBox(bbox);
width = bbox[1]-bbox[0];
gwidth = inputSolid->GetSampleWidth();
//-----------------------------------------------------------------------------------
// Step 1: Initialization
long time=clock();
unsigned int *devIndexArrayResPtr;
CUDA_SAFE_CALL( cudaMalloc( (void**)&devIndexArrayResPtr, (arrsize+1)*sizeof(unsigned int) ) );
//-----------------------------------------------------------------------------------
// Step 2: computing the Boolean operation results on LDNIs
for(short nAxis=0;nAxis<3;nAxis++) {
//---------------------------------------------------------------------------------------------
		// Sub-step 1: initialization
CUDA_SAFE_CALL( cudaMemset( (void*)devIndexArrayResPtr, 0, (arrsize+1)*sizeof(unsigned int) ) );
//---------------------------------------------------------------------------------------------
float *devNxArrayPtr=inputSolid->GetSampleNxArrayPtr(nAxis);
float *devNyArrayPtr=inputSolid->GetSampleNyArrayPtr(nAxis);
float *devDepthArrayPtr=inputSolid->GetSampleDepthArrayPtr(nAxis); //if (devDepthArrayPtrA==NULL) printf("Empty ");
unsigned int *devIndexArrayPtr=inputSolid->GetIndexArrayPtr(nAxis);
//---------------------------------------------------------------------------------------------
// Sub-step 2: identify the entering and leaving samples ray by ray
krLDNIBoolean_IdentifyEnterLeaveOnRays<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, devIndexArrayPtr, arrsize);
//---------------------------------------------------------------------------------------------
// Sub-step 3: Sorting the entering and leaving samples ray by ray
krLDNISampling_SortSamples<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, arrsize, devIndexArrayPtr);
//---------------------------------------------------------------------------------------------
		// Sub-step 4: super-union of samples ray by ray
krLDNIBoolean_SuperUnionOnRays<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, devIndexArrayPtr,
devIndexArrayResPtr, arrsize);
//---------------------------------------------------------------------------------------------
// Sub-step 5: compaction of index array
thrust::device_ptr<unsigned int> dev_ptr(devIndexArrayResPtr); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr, dev_ptr+(arrsize+1), dev_ptr); // in-place scan
unsigned int sampleNum=dev_ptr[arrsize];
//printf("max sample ----- %d\n",sampleNum);
//---------------------------------------------------------------------------------------------
// Sub-step 6: collecting the resultant samples into the sampleArray of solidTileA
float *newDevNxArrayPtr, *newDevNyArrayPtr, *newDevDepthArrayPtr;
inputSolid->MallocSampleMemory(nAxis, sampleNum);
newDevNxArrayPtr=inputSolid->GetSampleNxArrayPtr(nAxis);
newDevNyArrayPtr=inputSolid->GetSampleNyArrayPtr(nAxis);
newDevDepthArrayPtr=inputSolid->GetSampleDepthArrayPtr(nAxis);
krLDNIBoolean_ResultSampleCollection<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(
devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, devIndexArrayPtr,
newDevNxArrayPtr, newDevNyArrayPtr, newDevDepthArrayPtr, devIndexArrayResPtr, arrsize, width, gwidth);
CUDA_SAFE_CALL( cudaMemcpy( devIndexArrayPtr, devIndexArrayResPtr, (arrsize+1)*sizeof(unsigned int), cudaMemcpyDeviceToDevice ) );
cudaFree(devNxArrayPtr); cudaFree(devNyArrayPtr); cudaFree(devDepthArrayPtr);
}
//-----------------------------------------------------------------------------------
// Step 3: free the memory
cudaFree(devIndexArrayResPtr);
printf("Boolean Operation Time (ms): %ld\n",clock()-time);
return true;
}
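// _decomposeLDNIByFBOPBO (multi-mesh variant): same FBO/PBO counting-and-readback setup
// as the instanced version above, but instead of instanced drawing it iterates over the
// per-mesh VBO/IBO pairs and sets the "mesh_ID" uniform (used by the super-union shaders)
// before each draw call.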
void LDNIcudaOperation::_decomposeLDNIByFBOPBO(LDNIcudaSolid *solid, GLuint* vbo, GLuint* vboI, int mesh_count, float Cent[], GLhandleARB g_programObj, int indexCount[])
{
unsigned int n_max,i,n,mesh_ID;
float gWidth,origin[3];
unsigned int overall_n_max=0;
long readbackTime=0, sortingTime=0, tempTime;
GLint id0,id1;
cudaEvent_t startClock, stopClock;
CUDA_SAFE_CALL( cudaEventCreate( &startClock ) );
CUDA_SAFE_CALL( cudaEventCreate( &stopClock ) );
tempTime=clock();
//------------------------------------------------------------------------
// Preparation
int nRes=solid->GetResolution(); gWidth=solid->GetSampleWidth();
float width=gWidth*(float)nRes;
solid->GetOrigin(origin[0],origin[1],origin[2]);
int arrsize=nRes*nRes;
//------------------------------------------------------------------------
// Step 1: Setup the rendering environment
glEnable(GL_DEPTH_TEST);
glEnable(GL_STENCIL_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glDisable(GL_POLYGON_OFFSET_FILL);
glDisable(GL_POLYGON_OFFSET_LINE);
glDisable(GL_BLEND);
glDisable(GL_POLYGON_SMOOTH); // turn off anti-aliasing
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
glDisable(GL_MAP_COLOR); glDisable(GL_DITHER);
glShadeModel(GL_FLAT);
glDisable(GL_LIGHTING); glDisable(GL_LIGHT0);
glDisable(GL_LOGIC_OP);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_ALPHA_TEST);
glGetError(); // for clean-up the error generated before
//------------------------------------------------------------------------
// create the FBO objects and texture for rendering
if (glewIsSupported("GL_EXT_framebuffer_object") == 0) printf("Warning: FBO is not supported!\n");
if (glGetError()!=GL_NO_ERROR) printf("Error: before framebuffer generation!\n");
//------------------------------------------------------------------------
GLuint fbo;
glGenFramebuffersEXT(1, &fbo);
if (glGetError()!=GL_NO_ERROR) printf("Error: framebuffer generation!\n");
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
if (glGetError()!=GL_NO_ERROR) printf("Error: framebuffer binding!\n");
//------------------------------------------------------------------------
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F_ARB, nRes, nRes, 0, GL_RGBA, GL_FLOAT, 0);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, tex, 0);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching texture to framebuffer generation!\n");
cudaGraphicsResource *sampleTex_resource;
CUDA_SAFE_CALL( cudaGraphicsGLRegisterImage(&sampleTex_resource, tex, GL_TEXTURE_2D, cudaGraphicsMapFlagsReadOnly) );
//------------------------------------------------------------------------
GLuint depth_and_stencil_rb;
glGenRenderbuffersEXT(1, &depth_and_stencil_rb);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_STENCIL_EXT, nRes, nRes);
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching renderbuffer of depth-buffer to framebuffer generation!\n");
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching renderbuffer of stencil-buffer to framebuffer generation!\n");
//------------------------------------------------------------------------
GLuint indexPBO;
glGenBuffers(1,&indexPBO); // generation of PBO for index array readback
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, indexPBO);
glBufferData(GL_PIXEL_PACK_BUFFER_ARB, nRes*nRes*sizeof(unsigned char), NULL, GL_STREAM_READ_ARB);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL( cudaGLRegisterBufferObject(indexPBO) );
//------------------------------------------------------------------------
if (glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT)!=GL_FRAMEBUFFER_COMPLETE_EXT)
printf("Warning: the setting for rendering on FBO is not correct!\n");
else
printf("FBO has been created successfully!\n");
glPushAttrib(GL_VIEWPORT_BIT);
glViewport(0,0,nRes,nRes);
printf("Preparation time: %ld (ms)\n",clock()-tempTime);
id0 = glGetUniformLocationARB(g_programObj,"Cent");
glUniform3fARB(id0,Cent[0],Cent[1],Cent[2]);
id1 = glGetUniformLocationARB(g_programObj,"mesh_ID");
//------------------------------------------------------------------------
// Step 2: Rendering to get the Hermite samples
for(short nAxis=0; nAxis<3; nAxis++) {
//---------------------------------------------------------------------------------------
// Rendering step 1: setting the viewing window
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
//---------------------------------------------------------------------------------------
		// The eye is located at (0, 0, 0); with near=width*0.5 and far=-width*0.5 in the
		// glOrtho() call below, the view volume spans z in [-width*0.5, width*0.5], so the whole sampling cube is enclosed
glOrtho(-width*0.5f,width*0.5f,-width*0.5f,width*0.5f,width*0.5f,-width*0.5f);
// Note that: in "glOrtho(left,right,bottom,top,near,far);"
// (left,right,bottom,top) are located at the boundary of pixel instead of
// the center of pixels
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
//---------------------------------------------------------------------------------------
// Rendering step 2: determine the number of layers
glClearColor( 1.0f, 1.0f, 1.0f, 1.0f );
glClearDepth(1.0);
glClearStencil(0); glColor3f(1,1,1);
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDepthFunc(GL_ALWAYS);
glStencilFunc(GL_GREATER, 1, 0xff);
glStencilOp(GL_INCR, GL_INCR, GL_INCR);
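// counting pass: all three stencil operations are GL_INCR and the depth test always passes, so every
// fragment increments the stencil value and the stencil buffer ends up holding, per pixel, the number
// of surface fragments (i.e. the sample count) along that ray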
glPushMatrix();
switch(nAxis) {
case 0:{glRotatef(-90,0,1,0); glRotatef(-90,1,0,0); }break;
case 1:{glRotatef(90,0,1,0); glRotatef(90,0,0,1); }break;
}
glEnableClientState( GL_VERTEX_ARRAY );
for(mesh_ID = 0; mesh_ID < mesh_count; mesh_ID++)
{
glUniform1iARB(id1,mesh_ID);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo[mesh_ID]);
glVertexPointer(3, GL_FLOAT, 0, 0);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, vboI[mesh_ID]);
glDrawElements(GL_TRIANGLES, indexCount[mesh_ID], GL_UNSIGNED_INT, 0);
}
glDisableClientState( GL_VERTEX_ARRAY );
glFlush();
//--------------------------------------------------------------------------------------------------------
// reading stencil buffer into the device memory of CUDA
tempTime=clock();
glReadBuffer(GL_COLOR_ATTACHMENT0_EXT);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, indexPBO);
GLint OldPackAlignment;
glGetIntegerv(GL_PACK_ALIGNMENT,&OldPackAlignment);
glPixelStorei(GL_PACK_ALIGNMENT,1); // Important: without a pack alignment of 1 the read-back rows may be padded and the data garbled.
glReadPixels(0,0,nRes,nRes,GL_STENCIL_INDEX,GL_UNSIGNED_BYTE,0);
glPixelStorei(GL_PACK_ALIGNMENT,OldPackAlignment);
//--------------------------------------------------------------------------------------------------------
unsigned char *devStencilBufferPtr;
unsigned int *devResArrayPtr;
unsigned int *devIndexArrayPtr=solid->GetIndexArrayPtr(nAxis);
CUDA_SAFE_CALL( cudaGLMapBufferObject( (void **)&devStencilBufferPtr, indexPBO) );
CUDA_SAFE_CALL( cudaMalloc( (void**)&devResArrayPtr, BLOCKS_PER_GRID*sizeof(unsigned int) ) );
//--------------------------------------------------------------------------------------------------------
// building the indexArray on device
krLDNISampling_CopyIndexAndFindMax<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(devStencilBufferPtr,
devIndexArrayPtr,devResArrayPtr,arrsize);
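// the kernel copies the per-pixel layer counts of the stencil read-back into the per-ray index array
// and writes one partial maximum per block into devResArrayPtr; n_max is reduced on the host below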
//--------------------------------------------------------------------------------------------------------
// read back the max number of layers -- "n_max"
unsigned int* resArrayPtr;
resArrayPtr=(unsigned int *)malloc(BLOCKS_PER_GRID*sizeof(unsigned int));
CUDA_SAFE_CALL( cudaMemcpy( resArrayPtr, devResArrayPtr, BLOCKS_PER_GRID*sizeof(unsigned int), cudaMemcpyDeviceToHost ) );
n_max=0;
for(i=0;i<BLOCKS_PER_GRID;i++) n_max = MAX(n_max,resArrayPtr[i]);
cudaFree(devResArrayPtr); free(resArrayPtr);
//--------------------------------------------------------------------------------------------------------
// read back the number of samples -- "sampleNum"
unsigned int sampleNum=0;
tempTime=clock()-tempTime; //readbackTime+=tempTime;
printf("Stencil buffer processing time: %ld (ms)\n",tempTime);
long scanTime=clock();
// host-side exclusive scan: convert the per-ray sample counts into start offsets (the running total gives sampleNum)
resArrayPtr=(unsigned int *)malloc((arrsize+1)*sizeof(unsigned int));
CUDA_SAFE_CALL( cudaMemcpy( resArrayPtr, devIndexArrayPtr, (arrsize+1)*sizeof(unsigned int), cudaMemcpyDeviceToHost ) );
sampleNum=0;
for(int k=0;k<arrsize;k++) {sampleNum+=resArrayPtr[k]; resArrayPtr[k]=sampleNum;}
for(int k=arrsize;k>0;k--) {resArrayPtr[k]=resArrayPtr[k-1];}
resArrayPtr[0]=0;
CUDA_SAFE_CALL( cudaMemcpy( devIndexArrayPtr, resArrayPtr, (arrsize+1)*sizeof(unsigned int), cudaMemcpyHostToDevice ) );
free(resArrayPtr);
scanTime=clock()-scanTime; printf("Scanning time: %ld (ms)\n",scanTime);
//--------------------------------------------------------------------------------------------------------
CUDA_SAFE_CALL( cudaGLUnmapBufferObject( indexPBO ) );
glUnmapBuffer(GL_PIXEL_PACK_BUFFER_ARB);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
printf("n_max=%d sampleNum=%d\n",n_max,sampleNum);
if (n_max>overall_n_max) overall_n_max=n_max;
if (sampleNum==0) continue;
//---------------------------------------------------------------------------------------
// Rendering step 3: decomposing the Layered Depth Images (LDIs) and recording their corresponding normals
solid->MallocSampleMemory(nAxis,sampleNum);
float* devNxArrayPtr=solid->GetSampleNxArrayPtr(nAxis);
float* devNyArrayPtr=solid->GetSampleNyArrayPtr(nAxis);
float* devDepthArrayPtr=solid->GetSampleDepthArrayPtr(nAxis);
tempTime=clock();
for(n=1;n<=n_max;n++) {
CUDA_SAFE_CALL( cudaGraphicsMapResources( 1, &sampleTex_resource, NULL ) );
cudaArray *in_array;
CUDA_SAFE_CALL( cudaGraphicsSubResourceGetMappedArray( &in_array, sampleTex_resource, 0, 0));
CUDA_SAFE_CALL( cudaBindTextureToArray(tex2DFloat4In, in_array) );
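// bind the mapped colour attachment (the RGBA32F Hermite-sample texture) to the CUDA texture
// reference read by the sample-copy kernel below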
//--------------------------------------------------------------------------------------------------------
// fill the sampleArray on device
krLDNISuperUnion_CopySamples<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(devNxArrayPtr, devNyArrayPtr,
devDepthArrayPtr, n, arrsize, nRes, devIndexArrayPtr);
CUDA_SAFE_CALL( cudaGraphicsUnmapResources( 1, &sampleTex_resource, NULL ) );
if (n==n_max) break;
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glStencilFunc(GL_GREATER, n+1, 0xff);
glStencilOp(GL_KEEP, GL_INCR, GL_INCR);
{
glEnableClientState( GL_VERTEX_ARRAY );
for(mesh_ID = 0; mesh_ID < mesh_count; mesh_ID++)
{
glUniform1iARB(id1,mesh_ID);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo[mesh_ID]);
glVertexPointer(3, GL_FLOAT, 0, 0);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, vboI[mesh_ID]);
glDrawElements(GL_TRIANGLES, indexCount[mesh_ID], GL_UNSIGNED_INT, 0);
}
glDisableClientState( GL_VERTEX_ARRAY );
}
glFlush();
}
tempTime=clock()-tempTime; readbackTime+=tempTime;
//------------------------------------------------------------------------
// Rendering step 4: sorting the samples
CUDA_SAFE_CALL( cudaEventRecord( startClock, 0 ) );
CUDA_SAFE_CALL( cudaEventSynchronize( startClock ) );
krLDNISampling_SortSamples<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(devNxArrayPtr, devNyArrayPtr,
devDepthArrayPtr, arrsize, devIndexArrayPtr);
CUDA_SAFE_CALL( cudaEventRecord( stopClock, 0 ) );
CUDA_SAFE_CALL( cudaEventSynchronize( stopClock ) );
float elapsedTime;
CUDA_SAFE_CALL( cudaEventElapsedTime( &elapsedTime,
startClock, stopClock ) );
printf( "Sorting time is: %3.1f (ms)\n", elapsedTime );
sortingTime+=(long)elapsedTime;
}
//------------------------------------------------------------------------------------
// Step 3: Set the rendering parameters back
//------------------------------------------------------------------------------------
// detach FBO
glPopAttrib();
// release memory for PBO and cuda's map
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL( cudaGLUnregisterBufferObject( indexPBO ) );
glDeleteBuffers(1, &indexPBO);
CUDA_SAFE_CALL( cudaGraphicsUnregisterResource( sampleTex_resource) );
// release memory for the 2D texture
glBindTexture(GL_TEXTURE_2D, 0);
glDeleteTextures(1, &tex);
// release memory for the frame-buffer object
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
glDeleteFramebuffersEXT(1, &fbo);
// release memory for the render-buffer object
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, 0);
glDeleteRenderbuffersEXT(1, &depth_and_stencil_rb);
//------------------------------------------------------------------------------------
glEnable(GL_POLYGON_OFFSET_FILL);
glEnable(GL_POLYGON_OFFSET_LINE);
glEnable(GL_BLEND);
glEnable(GL_DITHER);
glDisable(GL_STENCIL_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_MAP_COLOR);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHTING); glEnable(GL_LIGHT0);
// glEnable(GL_POLYGON_SMOOTH); // enabling this makes the display invalid on the Thinkpad laptop
glEnable(GL_POINT_SMOOTH);
// glEnable(GL_LINE_SMOOTH); // enabling this makes the program fail on the Compaq laptop
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
printf("\nn_max=%ld \n",overall_n_max);
printf("Texture Size: %f (MB)\n",(float)((float)overall_n_max*(float)nRes*(float)nRes*7.0f)/(1024.0f*1024.0f));
printf("Readback time: %ld (ms)\nSorting time: %ld (ms)\n",
readbackTime, sortingTime);
CUDA_SAFE_CALL( cudaEventDestroy( startClock ) );
CUDA_SAFE_CALL( cudaEventDestroy( stopClock ) );
}
bool LDNIcudaOperation::BooleanOperation(LDNIcudaSolid* &inputSolid, QuadTrglMesh *meshB, short nOperationType, float boundingBox[])
{
//float boundingBox[6];
LDNIcudaSolid *solidB;
//-----------------------------------------------------------------------------------
// Step 1: converting the mesh surface into a LDNI solid
int res=inputSolid->GetResolution();
if (nOperationType!=3) {
LDNIcudaOperation::BRepToLDNISampling( meshB, solidB, boundingBox, res );
}
else {
solidB=inputSolid; inputSolid=0;
LDNIcudaOperation::BRepToLDNISampling( meshB, inputSolid, boundingBox, res );
nOperationType=2;	// the inversed difference (B-A) is computed as an ordinary difference with the operands swapped
}
//-----------------------------------------------------------------------------------
// Step 2: repair and truncate the sampled LDNI solid into the current working space
//-----------------------------------------------------------------------------------
// Step 3: computing the Boolean operation results on LDNIs
printf("-----------------------------------------------------------------------\n");
printf("Starting to compute Boolean operation\n");
printf("-----------------------------------------------------------------------\n");
_booleanOperation(inputSolid, solidB, nOperationType);
inputSolid->SetBoundingBox(boundingBox);
int nres = inputSolid->GetResolution();
float gWidth=(boundingBox[1]-boundingBox[0])/(float)nres;
inputSolid->SetSampleWidth(gWidth);
//-----------------------------------------------------------------------------------
// Step 4: free the memory
delete solidB;
return true;
}
bool LDNIcudaOperation::BooleanOperation(LDNIcudaSolid* &inputSolid, QuadTrglMesh *meshB, short nOperationType)
{
float boundingBox[6]; LDNIcudaSolid *solidB;
//-----------------------------------------------------------------------------------
// Step 1: converting the mesh surface into a LDNI solid
if ( _bExpandableWorkingSpace ) {
meshB->CompBoundingBox(boundingBox);
_expansionLDNIcudaSolidByNewBoundingBox(inputSolid, boundingBox);
}
int res=inputSolid->GetResolution();
if (nOperationType!=3) {
LDNIcudaOperation::BRepToLDNISampling( meshB, solidB, boundingBox, res );
}
else {
solidB=inputSolid; inputSolid=0;
LDNIcudaOperation::BRepToLDNISampling( meshB, inputSolid, boundingBox, res );
nOperationType=2;
}
//-----------------------------------------------------------------------------------
// Step 2: repair and truncate the sampled LDNI solid into the current working space
if ( !(_bExpandableWorkingSpace) ) {
//repair solidB
}
//-----------------------------------------------------------------------------------
// Step 3: computing the Boolean operation results on LDNIs
printf("-----------------------------------------------------------------------\n");
printf("Starting to compute Boolean operation\n");
printf("-----------------------------------------------------------------------\n");
_booleanOperation(inputSolid, solidB, nOperationType);
inputSolid->SetBoundingBox(boundingBox);
int nres = inputSolid->GetResolution();
float gWidth=(boundingBox[1]-boundingBox[0])/(float)nres;
inputSolid->SetSampleWidth(gWidth);
//-----------------------------------------------------------------------------------
// Step 4: free the memory
delete solidB;
return true;
}
//bool LDNIcudaOperation::BooleanOperation(LDNIcudaSolid* &solidA, LDNIcudaSolid* &solidB, short nOperationType)
//{
// float boundingBox[6],origin[3];
//
//
//
// //solidA->GetBoundingBox(boundingBox);
// //_expansionLDNIcudaSolidByNewBoundingBox(solidB, boundingBox);
//
// //if ( _bExpandableWorkingSpace ) {
// // meshB->CompBoundingBox(boundingBox);
// // _expansionLDNIcudaSolidByNewBoundingBox(inputSolid, boundingBox);
//
// //}
//
// printf("-----------------------------------------------------------------------\n");
// printf("Starting to compute Boolean operation\n");
// printf("-----------------------------------------------------------------------\n");
// _booleanOperation(solidA, solidB, nOperationType);
// solidA->SetBoundingBox(boundingBox);
// int nres = solidA->GetResolution();
// float gWidth=(boundingBox[1]-boundingBox[0])/(float)nres;
// solidA->SetSampleWidth(gWidth);
//
// delete solidB;
//
// return true;
//}
bool LDNIcudaOperation::BooleanOperation(QuadTrglMesh *meshA, QuadTrglMesh *meshB, int res, short nOperationType, LDNIcudaSolid* &outputSolid, LDNIcudaSolid* &savedSolid)
{
float boundingBox[6]; LDNIcudaSolid *solidB; //int stA,numA,stRes,numRes,stB;
//-----------------------------------------------------------------------------------
// Step 1: converting mesh surfaces into LDNIs
float bndBoxA[6],bndBoxB[6];
meshA->CompBoundingBox(bndBoxA); meshB->CompBoundingBox(bndBoxB);
_compBoundingCube(meshA, meshB, boundingBox, res);
if (savedSolid!= NULL)
{
_expansionLDNIcudaSolidByNewBoundingBox(savedSolid, boundingBox);
res = savedSolid->GetResolution();
}
if (nOperationType!=3) {
BRepToLDNISampling(meshA, outputSolid, boundingBox, res);
BRepToLDNISampling(meshB, solidB, boundingBox, res);
}
else {
BRepToLDNISampling(meshB, outputSolid, boundingBox, res);
BRepToLDNISampling(meshA, solidB, boundingBox, res);
nOperationType=2;
}
//-----------------------------------------------------------------------------------
// Step 2: boolean operations
printf("-----------------------------------------------------------------------%d\n");
printf("Starting to compute Boolean operation\n");
printf("-----------------------------------------------------------------------%d\n");
_booleanOperation(outputSolid, solidB, nOperationType);
/*outputSolid->SetBoundingBox(boundingBox);
int nres = outputSolid->GetResolution();
float gWidth=(boundingBox[1]-boundingBox[0])/(float)nres;
outputSolid->SetSampleWidth(gWidth);*/
delete solidB;
return true;
}
bool LDNIcudaOperation::BooleanOperation(QuadTrglMesh *meshA, QuadTrglMesh *meshB, int res, short nOperationType, LDNIcudaSolid* &outputSolid)
{
float boundingBox[6]; LDNIcudaSolid *solidB; //int stA,numA,stRes,numRes,stB;
//-----------------------------------------------------------------------------------
// Step 1: converting mesh surfaces into LDNIs
float bndBoxA[6],bndBoxB[6];
meshA->CompBoundingBox(bndBoxA); meshB->CompBoundingBox(bndBoxB);
_compBoundingCube(meshA, meshB, boundingBox, res);
if (nOperationType!=3) {
BRepToLDNISampling(meshA, outputSolid, boundingBox, res);
BRepToLDNISampling(meshB, solidB, boundingBox, res);
}
else {
BRepToLDNISampling(meshB, outputSolid, boundingBox, res);
BRepToLDNISampling(meshA, solidB, boundingBox, res);
nOperationType=2;
}
//-----------------------------------------------------------------------------------
// Step 2: boolean operations
printf("-----------------------------------------------------------------------\n");
printf("Starting to compute Boolean operation\n");
printf("-----------------------------------------------------------------------\n");
_booleanOperation(outputSolid, solidB, nOperationType);
//outputSolid->SetBoundingBox(boundingBox);
printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n");
delete solidB;
return true;
}
bool LDNIcudaOperation::_booleanOperation(LDNIcudaSolid* outputSolid, LDNIcudaSolid* solidB, short nOperationType)
{
int res=outputSolid->GetResolution();
unsigned int arrsize=res*res;
if (outputSolid->GetSampleNumber()==0) {	// solid A is empty
if (nOperationType==0) _switchSolid(outputSolid,solidB);	// Union: the result is B
if (nOperationType==1) outputSolid->CleanUpSamples();	// Intersection: the result is empty
// Difference (A-B): the result is already empty, nothing to do
if (nOperationType==3) _switchSolid(outputSolid,solidB);	// Inversed Difference (B-A): the result is B
return true;
}
if (solidB->GetSampleNumber()==0) {	// solid B is empty
// Union: the result is A, nothing to do
if (nOperationType==1) outputSolid->CleanUpSamples();	// Intersection: the result is empty
// Difference (A-B): the result is A, nothing to do
if (nOperationType==3) outputSolid->CleanUpSamples();	// Inversed Difference (B-A): the result is empty
return true;
}
//-----------------------------------------------------------------------------------
// Step 1: Initialization
long time=clock();
unsigned int *devIndexArrayResPtr;
CUDA_SAFE_CALL( cudaMalloc( (void**)&devIndexArrayResPtr, (arrsize+1)*sizeof(unsigned int) ) );
//-----------------------------------------------------------------------------------
// Step 2: computing the Boolean operation results on LDNIs
for(short nAxis=0;nAxis<3;nAxis++) {
//---------------------------------------------------------------------------------------------
// Sub-step 1: initialization
CUDA_SAFE_CALL( cudaMemset( (void*)devIndexArrayResPtr, 0, (arrsize+1)*sizeof(unsigned int) ) );
//---------------------------------------------------------------------------------------------
float *devNxArrayPtrA=outputSolid->GetSampleNxArrayPtr(nAxis);
float *devNyArrayPtrA=outputSolid->GetSampleNyArrayPtr(nAxis);
float *devDepthArrayPtrA=outputSolid->GetSampleDepthArrayPtr(nAxis); //if (devDepthArrayPtrA==NULL) printf("Empty ");
unsigned int *devIndexArrayPtrA=outputSolid->GetIndexArrayPtr(nAxis);
float *devNxArrayPtrB=solidB->GetSampleNxArrayPtr(nAxis);
float *devNyArrayPtrB=solidB->GetSampleNyArrayPtr(nAxis);
float *devDepthArrayPtrB=solidB->GetSampleDepthArrayPtr(nAxis);
unsigned int *devIndexArrayPtrB=solidB->GetIndexArrayPtr(nAxis);
//---------------------------------------------------------------------------------------------
// Sub-step 2: computing the result of boolean operation ray by ray
krLDNIBoolean_BooleanOnRays<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(devNxArrayPtrA, devNyArrayPtrA, devDepthArrayPtrA, devIndexArrayPtrA,
devNxArrayPtrB, devNyArrayPtrB, devDepthArrayPtrB, devIndexArrayPtrB, devIndexArrayResPtr, arrsize, nOperationType);
//---------------------------------------------------------------------------------------------
// Sub-step 3: compaction of index array
thrust::device_ptr<unsigned int> dev_ptr(devIndexArrayResPtr); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr, dev_ptr+(arrsize+1), dev_ptr); // in-place scan
unsigned int sampleNum=dev_ptr[arrsize];
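// after the exclusive scan, devIndexArrayResPtr[i] holds the start offset of ray i's samples and
// devIndexArrayResPtr[arrsize] the total sample number, e.g. per-ray counts {2,0,3,1} -> {0,2,2,5,6}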
//---------------------------------------------------------------------------------------------
// Sub-step 4: collecting the resultant samples into the sampleArray of solidTileA
float *newDevNxArrayPtrA, *newDevNyArrayPtrA, *newDevDepthArrayPtrA;
outputSolid->MallocSampleMemory(nAxis, sampleNum);
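// note: MallocSampleMemory allocates fresh sample arrays inside outputSolid; devNx/Ny/DepthArrayPtrA
// above still reference the old arrays, which serve as operand A below and are freed at the end of this iteration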
newDevNxArrayPtrA=outputSolid->GetSampleNxArrayPtr(nAxis);
newDevNyArrayPtrA=outputSolid->GetSampleNyArrayPtr(nAxis);
newDevDepthArrayPtrA=outputSolid->GetSampleDepthArrayPtr(nAxis);
krLDNIBoolean_ResultSampleCollection<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(
devNxArrayPtrA, devNyArrayPtrA, devDepthArrayPtrA, devIndexArrayPtrA,
devNxArrayPtrB, devNyArrayPtrB, devDepthArrayPtrB, devIndexArrayPtrB,
newDevNxArrayPtrA, newDevNyArrayPtrA, newDevDepthArrayPtrA, devIndexArrayResPtr, arrsize);
CUDA_SAFE_CALL( cudaMemcpy( devIndexArrayPtrA, devIndexArrayResPtr, (arrsize+1)*sizeof(unsigned int), cudaMemcpyDeviceToDevice ) );
cudaFree(devNxArrayPtrA); cudaFree(devNyArrayPtrA); cudaFree(devDepthArrayPtrA);
}
//-----------------------------------------------------------------------------------
// Step 3: free the memory
cudaFree(devIndexArrayResPtr);
printf("Boolean Operation Time (ms): %ld\n",clock()-time);
return true;
}
void LDNIcudaOperation::SolidRegularization(LDNIcudaSolid *solid) // Removing samples that are nearly tangentially contacted
{
int res=solid->GetResolution();
unsigned int arrsize=res*res;
//-----------------------------------------------------------------------------------
// Step 1: Initialization
long time=clock();
unsigned int *devIndexArrayPtrRes;
CUDA_SAFE_CALL( cudaMalloc( (void**)&devIndexArrayPtrRes, (arrsize+1)*sizeof(unsigned int) ) );
float ww=solid->GetSampleWidth();
//-----------------------------------------------------------------------------------
// Step 2: Remove the tangentially contacted samples
for(short nAxis=0;nAxis<3;nAxis++) {
//---------------------------------------------------------------------------------------------
// Sub-step 1: initialization
CUDA_SAFE_CALL( cudaMemset( (void*)devIndexArrayPtrRes, 0, (arrsize+1)*sizeof(unsigned int) ) );
//---------------------------------------------------------------------------------------------
float *devNxArrayPtr=solid->GetSampleNxArrayPtr(nAxis);
float *devNyArrayPtr=solid->GetSampleNyArrayPtr(nAxis);
float *devDepthArrayPtr=solid->GetSampleDepthArrayPtr(nAxis);
unsigned int *devIndexArrayPtr=solid->GetIndexArrayPtr(nAxis);
//---------------------------------------------------------------------------------------------
// Sub-step 2: computing the result of regularization ray by ray
krLDNIRegularization_RegularizationOnRays<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(
devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, devIndexArrayPtr, devIndexArrayPtrRes, arrsize, 0.01*ww);
//---------------------------------------------------------------------------------------------
// Sub-step 3: compaction of index array
thrust::device_ptr<unsigned int> dev_ptr(devIndexArrayPtrRes); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr, dev_ptr+(arrsize+1), dev_ptr); // in-place scan
unsigned int sampleNum=dev_ptr[arrsize];
//---------------------------------------------------------------------------------------------
// Sub-step 4: collecting the resultant samples into the sampleArray of solidTileA
float *devNxArrayPtrRes, *devNyArrayPtrRes, *devDepthArrayPtrRes;
CUDA_SAFE_CALL( cudaMalloc( (void**)&devNxArrayPtrRes, sampleNum*sizeof(float) ) );
CUDA_SAFE_CALL( cudaMalloc( (void**)&devNyArrayPtrRes, sampleNum*sizeof(float) ) );
CUDA_SAFE_CALL( cudaMalloc( (void**)&devDepthArrayPtrRes, sampleNum*sizeof(float) ) );
krLDNIRegularization_ResultSampleCollection<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(
devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, devIndexArrayPtr,
devNxArrayPtrRes, devNyArrayPtrRes, devDepthArrayPtrRes, devIndexArrayPtrRes, arrsize);
solid->SetSampleDepthArrayPtr(nAxis,devDepthArrayPtrRes);
solid->SetSampleNxArrayPtr(nAxis,devNxArrayPtrRes);
solid->SetSampleNyArrayPtr(nAxis,devNyArrayPtrRes);
solid->SetIndexArrayPtr(nAxis,devIndexArrayPtrRes); devIndexArrayPtrRes=devIndexArrayPtr;	// recycle the replaced index array as the scratch buffer for the next axis (it is freed after the loop)
solid->SetSampleNumber(nAxis,sampleNum);
cudaFree(devNxArrayPtr); cudaFree(devNyArrayPtr); cudaFree(devDepthArrayPtr);
}
//-----------------------------------------------------------------------------------
// Step 3: Free the memory
cudaFree(devIndexArrayPtrRes);
printf("Solid Regularization Time (ms): %ld\n",clock()-time);
}
void LDNIcudaOperation::_compBoundingCube(QuadTrglMesh *meshA, QuadTrglMesh *meshB, float boundingBox[], int res)
{
float bndBoxA[6],bndBoxB[6];
meshA->CompBoundingBox(bndBoxA); meshB->CompBoundingBox(bndBoxB);
boundingBox[0]=MIN(bndBoxA[0],bndBoxB[0]);
boundingBox[1]=MAX(bndBoxA[1],bndBoxB[1]);
boundingBox[2]=MIN(bndBoxA[2],bndBoxB[2]);
boundingBox[3]=MAX(bndBoxA[3],bndBoxB[3]);
boundingBox[4]=MIN(bndBoxA[4],bndBoxB[4]);
boundingBox[5]=MAX(bndBoxA[5],bndBoxB[5]);
//------------------------------------------------------------------------
// making the working space cubic
float xx=(boundingBox[0]+boundingBox[1])*0.5f;
float yy=(boundingBox[2]+boundingBox[3])*0.5f;
float zz=(boundingBox[4]+boundingBox[5])*0.5f;
float ww=boundingBox[1]-boundingBox[0];
if ((boundingBox[3]-boundingBox[2])>ww) ww=boundingBox[3]-boundingBox[2];
if ((boundingBox[5]-boundingBox[4])>ww) ww=boundingBox[5]-boundingBox[4];
ww=ww*0.55+ww/(float)(res-1)*2.0;
boundingBox[0]=xx-ww; boundingBox[1]=xx+ww;
boundingBox[2]=yy-ww; boundingBox[3]=yy+ww;
boundingBox[4]=zz-ww; boundingBox[5]=zz+ww;
}
bool LDNIcudaOperation::BRepToLDNISampling(QuadTrglMesh *mesh, LDNIcudaSolid* &solid, float boundingBox[], int res)
{
const bool bCube=true;
float origin[3],gWidth; long time=clock(),totalTime=clock();
int i,nodeNum;
char fileadd[256];
//----------------------------------------------------------------------------------------
// Preparation
if ((boundingBox[0]==boundingBox[1]) && (boundingBox[2]==boundingBox[3]) && (boundingBox[4]==boundingBox[5])) {
mesh->CompBoundingBox(boundingBox);
if (bCube) {
float xx=(boundingBox[0]+boundingBox[1])*0.5f;
float yy=(boundingBox[2]+boundingBox[3])*0.5f;
float zz=(boundingBox[4]+boundingBox[5])*0.5f;
float ww=boundingBox[1]-boundingBox[0];
if ((boundingBox[3]-boundingBox[2])>ww) ww=boundingBox[3]-boundingBox[2];
if ((boundingBox[5]-boundingBox[4])>ww) ww=boundingBox[5]-boundingBox[4];
ww=ww*0.55+ww/(float)(res-1)*2.0;
boundingBox[0]=xx-ww; boundingBox[1]=xx+ww;
boundingBox[2]=yy-ww; boundingBox[3]=yy+ww;
boundingBox[4]=zz-ww; boundingBox[5]=zz+ww;
}
}
//---------------------------------------------------------------------------------
solid=new LDNIcudaSolid;
solid->MallocMemory(res);
gWidth=(boundingBox[1]-boundingBox[0])/(float)res;
solid->SetSampleWidth(gWidth);
origin[0]=boundingBox[0]+gWidth*0.5f;
origin[1]=boundingBox[2]+gWidth*0.5f;
origin[2]=boundingBox[4]+gWidth*0.5f;
solid->SetOrigin(origin[0],origin[1],origin[2]);
//---------------------------------------------------------------------------------
// For using OpenGL Shading Language to implement the sampling procedure
if (glewInit() != GLEW_OK) {printf("glewInit failed. Exiting...\n"); return false;}
if (glewIsSupported("GL_VERSION_2_0")) {printf("\nReady for OpenGL 2.0\n");} else {printf("OpenGL 2.0 not supported\n"); return false;}
//-----------------------------------------------------------------------------------------
int dispListIndex; GLhandleARB g_programObj, g_vertexShader, g_GeometryShader, g_FragShader;
GLenum InPrimType=GL_POINTS, OutPrimType=GL_TRIANGLES; int OutVertexNum=3;
GLuint vertexTexture;
const char *VshaderString[1],*GshaderString[1],*FshaderString[1];
GLint bCompiled = 0, bLinked = 0;
char str[4096] = ""; int xF,yF;
//-----------------------------------------------------------------------------------------
// Step 1: Setup the shaders
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"sampleLDNIVertexShader.vert");
g_vertexShader = glCreateShaderObjectARB( GL_VERTEX_SHADER_ARB );
unsigned char *ShaderAssembly = _readShaderFile( fileadd );
VshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_vertexShader, 1, VshaderString, NULL );
glCompileShaderARB( g_vertexShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_vertexShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_vertexShader, sizeof(str), NULL, str);
printf("Warning: Vertex Shader Compile Error\n\n"); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"sampleLDNIGeometryShader.geo");
g_GeometryShader = glCreateShaderObjectARB( GL_GEOMETRY_SHADER_EXT );
ShaderAssembly = _readShaderFile( fileadd );
GshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_GeometryShader, 1, GshaderString, NULL );
glCompileShaderARB( g_GeometryShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_GeometryShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_GeometryShader, sizeof(str), NULL, str);
printf("Warning: Geo Shader Compile Error\n\n"); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"sampleLDNIFragmentShader.frag");
g_FragShader = glCreateShaderObjectARB( GL_FRAGMENT_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
FshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_FragShader, 1, FshaderString, NULL );
glCompileShaderARB( g_FragShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_FragShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_FragShader, sizeof(str), NULL, str);
printf("Warning: Vertex Shader Compile Error\n\n"); return false;
}
//-----------------------------------------------------------------------------
g_programObj = glCreateProgramObjectARB();
if (glGetError()!=GL_NO_ERROR) printf("Error: OpenGL!\n\n");
glAttachObjectARB( g_programObj, g_vertexShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Vertex Shader!\n\n");
glAttachObjectARB( g_programObj, g_GeometryShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Geometry Shader!\n\n");
glAttachObjectARB( g_programObj, g_FragShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Fragment Shader!\n\n");
//-----------------------------------------------------------------------------
// Configuration setting for geometry shader
glProgramParameteriEXT(g_programObj, GL_GEOMETRY_INPUT_TYPE_EXT, InPrimType);
glProgramParameteriEXT(g_programObj, GL_GEOMETRY_OUTPUT_TYPE_EXT, OutPrimType);
glProgramParameteriEXT(g_programObj, GL_GEOMETRY_VERTICES_OUT_EXT, OutVertexNum);
glLinkProgramARB( g_programObj);
glGetObjectParameterivARB( g_programObj, GL_OBJECT_LINK_STATUS_ARB, &bLinked );
if( bLinked == false ) {
glGetInfoLogARB( g_programObj, sizeof(str), NULL, str );
printf("Linking Fail: %s\n",str); return false;
}
//-----------------------------------------------------------------------------------------
// Step 2: creating texture for vertex array and binding
long texBindingTime=clock();
glGetError(); // clear any error generated before this point
nodeNum=mesh->GetNodeNumber(); _texCalProduct(nodeNum,xF,yF);
int temp;
for(temp=1;temp<xF;temp *= 2) {}	// round xF up to the next power of two
xF = temp; //if (xF<64) xF=64;
yF = (int)(nodeNum/xF)+1; if (yF<64) yF=64;
printf("Texture Size: xF=%d yF=%d\n",xF,yF);
float* verTex=(float*)malloc(xF*yF*3*sizeof(float));
memset(verTex,0,xF*yF*3*sizeof(float));
memcpy(verTex,mesh->GetNodeArrayPtr(),nodeNum*3*sizeof(float));
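// pack the nodal coordinates into an xF x yF RGB32F rectangle texture so that a vertex position
// can be fetched by its index inside the shaders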
glEnable(GL_TEXTURE_RECTANGLE_ARB);
glGenTextures(1, &vertexTexture);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, vertexTexture);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGB32F_ARB, xF, yF, 0, GL_RGB, GL_FLOAT, verTex);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, 0);
free(verTex);
if (glGetError()!=GL_NO_ERROR) printf("Error: GL_TEXTURE_RECTANGLE_ARB texture binding!\n\n");
texBindingTime=clock()-texBindingTime;
printf("\nTime for binding texture onto the graphics memory - %ld (ms)\n\n",texBindingTime);
//-----------------------------------------------------------------------------------------
// Step 3: building GL-list for activating the geometry shader
unsigned int ver[4];
int faceNum=mesh->GetFaceNumber();
dispListIndex = glGenLists(1);
glNewList(dispListIndex, GL_COMPILE);
glBegin(GL_POINTS);
for(i=0;i<faceNum;i++) {
mesh->GetFaceNodes(i+1,ver[0],ver[1],ver[2],ver[3]);
glVertex3i(ver[0]-1,ver[1]-1,ver[2]-1);
if (mesh->IsQuadFace(i+1)) {glVertex3i(ver[0]-1,ver[2]-1,ver[3]-1);} // one more triangle
}
glEnd();
glEndList();
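// Each GL_POINTS vertex of the display list carries the (0-based) indices of one triangle's corners
// (quad faces were split into two triangles above); the geometry shader is expected to look the actual
// vertex positions up in the vertex texture bound below and emit the corresponding triangle.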
//-----------------------------------------------------------------------------------------
// Step 4: using program objects and the texture
GLint id0,id1; float centerPos[3];
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB,vertexTexture);
glUseProgramObjectARB(g_programObj);
id0 = glGetUniformLocationARB(g_programObj,"sizeNx");
glUniform1iARB(id0,xF);
centerPos[0]=(boundingBox[0]+boundingBox[1])*0.5f;
centerPos[1]=(boundingBox[2]+boundingBox[3])*0.5f;
centerPos[2]=(boundingBox[4]+boundingBox[5])*0.5f;
id1 = glGetUniformLocationARB(g_programObj,"Cent");
glUniform3fARB(id1,centerPos[0],centerPos[1],centerPos[2]);
if (glGetError()!=GL_NO_ERROR) printf("Error: vertex texture binding!\n\n");
printf("Create shader texture\n");
//-----------------------------------------------------------------------------------------
// Step 5: sampling
printf("GLList ID: %d\n",dispListIndex);
time=clock()-time; printf("GL-List building time (including uploading texture) is %ld (ms)\n",time);
_decomposeLDNIByFBOPBO(solid,dispListIndex);
//-----------------------------------------------------------------------------------------
// Step 6: free the memory
time=clock();
//-----------------------------------------------------------------------------------------
glDeleteLists(dispListIndex, 1);
glBindTexture( GL_TEXTURE_RECTANGLE_ARB, 0);
glDisable(GL_TEXTURE_RECTANGLE_ARB);
glDeleteTextures(1, &vertexTexture);
glUseProgramObjectARB(0);
glDeleteObjectARB( g_vertexShader);
glDeleteObjectARB( g_GeometryShader);
glDeleteObjectARB( g_FragShader);
glDeleteObjectARB( g_programObj);
//------------------------------------------------------------------------
printf("\nMemory clean-up time is %ld (ms)\n",clock()-time);
printf("--------------------------------------------------------------\n");
printf("Total time for sampling is %ld (ms)\n\n",clock()-totalTime);
return true;
}
void LDNIcudaOperation::_decomposeLDNIByFBOPBO(LDNIcudaSolid *solid, int displayListIndex)
{
unsigned int n_max,i,n;
float gWidth,origin[3];
unsigned int overall_n_max=0;
long readbackTime=0, sortingTime=0, tempTime;
cudaEvent_t startClock, stopClock;
CUDA_SAFE_CALL( cudaEventCreate( &startClock ) );
CUDA_SAFE_CALL( cudaEventCreate( &stopClock ) );
tempTime=clock();
//------------------------------------------------------------------------
// Preparation
int nRes=solid->GetResolution(); gWidth=solid->GetSampleWidth();
float width=gWidth*(float)nRes;
solid->GetOrigin(origin[0],origin[1],origin[2]);
int arrsize=nRes*nRes;
//------------------------------------------------------------------------
// Step 1: Setup the rendering environment
glEnable(GL_DEPTH_TEST);
glEnable(GL_STENCIL_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glDisable(GL_POLYGON_OFFSET_FILL);
glDisable(GL_POLYGON_OFFSET_LINE);
glDisable(GL_BLEND);
glDisable(GL_POLYGON_SMOOTH); // turn off anti-aliasing
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
glDisable(GL_MAP_COLOR); glDisable(GL_DITHER);
glShadeModel(GL_FLAT);
glDisable(GL_LIGHTING); glDisable(GL_LIGHT0);
glDisable(GL_LOGIC_OP);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_ALPHA_TEST);
glGetError(); // clear any error generated before this point
//------------------------------------------------------------------------
// create the FBO objects and texture for rendering
if (glewIsSupported("GL_EXT_framebuffer_object") == 0) printf("Warning: FBO is not supported!\n");
if (glGetError()!=GL_NO_ERROR) printf("Error: before framebuffer generation!\n");
//------------------------------------------------------------------------
GLuint fbo;
glGenFramebuffersEXT(1, &fbo);
if (glGetError()!=GL_NO_ERROR) printf("Error: framebuffer generation!\n");
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
if (glGetError()!=GL_NO_ERROR) printf("Error: framebuffer binding!\n");
//------------------------------------------------------------------------
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F_ARB, nRes, nRes, 0, GL_RGBA, GL_FLOAT, 0);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, tex, 0);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching texture to framebuffer generation!\n");
cudaGraphicsResource *sampleTex_resource;
CUDA_SAFE_CALL( cudaGraphicsGLRegisterImage(&sampleTex_resource, tex, GL_TEXTURE_2D, cudaGraphicsMapFlagsReadOnly) );
//------------------------------------------------------------------------
GLuint depth_and_stencil_rb;
glGenRenderbuffersEXT(1, &depth_and_stencil_rb);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_STENCIL_EXT, nRes, nRes);
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching renderbuffer of depth-buffer to framebuffer generation!\n");
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching renderbuffer of stencil-buffer to framebuffer generation!\n");
//------------------------------------------------------------------------
GLuint indexPBO;
glGenBuffers(1,&indexPBO); // generation of PBO for index array readback
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, indexPBO);
glBufferData(GL_PIXEL_PACK_BUFFER_ARB, nRes*nRes*sizeof(unsigned char), NULL, GL_STREAM_READ_ARB);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL( cudaGLRegisterBufferObject(indexPBO) );
//------------------------------------------------------------------------
if (glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT)!=GL_FRAMEBUFFER_COMPLETE_EXT)
printf("Warning: the setting for rendering on FBO is not correct!\n");
else
printf("FBO has been created successfully!\n");
glPushAttrib(GL_VIEWPORT_BIT);
glViewport(0,0,nRes,nRes);
printf("Preparation time: %ld (ms)\n",clock()-tempTime);
//------------------------------------------------------------------------
// Step 2: Rendering to get the Hermite samples
for(short nAxis=0; nAxis<3; nAxis++) {
//---------------------------------------------------------------------------------------
// Rendering step 1: setting the viewing window
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
//---------------------------------------------------------------------------------------
// The eye is located at (0, 0, 0); the orthographic viewing volume is a cube of edge 'width'
// centred at the origin, so the near and far clipping planes lie at z=-0.5*width and z=+0.5*width
glOrtho(-width*0.5f,width*0.5f,-width*0.5f,width*0.5f,width*0.5f,-width*0.5f);
// Note that in "glOrtho(left,right,bottom,top,near,far)" the values (left,right,bottom,top)
// lie on pixel boundaries rather than at pixel centers
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
//---------------------------------------------------------------------------------------
// Rendering step 2: determine the number of layers
glClearColor( 1.0f, 1.0f, 1.0f, 1.0f );
glClearDepth(1.0);
glClearStencil(0); glColor3f(1,1,1);
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDepthFunc(GL_ALWAYS);
glStencilFunc(GL_GREATER, 1, 0xff);
glStencilOp(GL_INCR, GL_INCR, GL_INCR);
glPushMatrix();
switch(nAxis) {
case 0:{glRotatef(-90,0,1,0); glRotatef(-90,1,0,0); }break;
case 1:{glRotatef(90,0,1,0); glRotatef(90,0,0,1); }break;
}
glCallList(displayListIndex); glFlush();
//--------------------------------------------------------------------------------------------------------
// reading stencil buffer into the device memory of CUDA
tempTime=clock();
glReadBuffer(GL_COLOR_ATTACHMENT0_EXT);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, indexPBO);
GLint OldPackAlignment;
glGetIntegerv(GL_PACK_ALIGNMENT,&OldPackAlignment);
glPixelStorei(GL_PACK_ALIGNMENT,1); // Important: without a pack alignment of 1 the read-back rows may be padded and the data garbled.
glReadPixels(0,0,nRes,nRes,GL_STENCIL_INDEX,GL_UNSIGNED_BYTE,0);
glPixelStorei(GL_PACK_ALIGNMENT,OldPackAlignment);
//--------------------------------------------------------------------------------------------------------
unsigned char *devStencilBufferPtr;
unsigned int *devResArrayPtr;
unsigned int *devIndexArrayPtr=solid->GetIndexArrayPtr(nAxis);
CUDA_SAFE_CALL( cudaGLMapBufferObject( (void **)&devStencilBufferPtr, indexPBO) );
CUDA_SAFE_CALL( cudaMalloc( (void**)&devResArrayPtr, BLOCKS_PER_GRID*sizeof(unsigned int) ) );
//--------------------------------------------------------------------------------------------------------
// building the indexArray on device
krLDNISampling_CopyIndexAndFindMax<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(devStencilBufferPtr,
devIndexArrayPtr,devResArrayPtr,arrsize);
//--------------------------------------------------------------------------------------------------------
// read back the max number of layers -- "n_max"
unsigned int* resArrayPtr;
resArrayPtr=(unsigned int *)malloc(BLOCKS_PER_GRID*sizeof(unsigned int));
CUDA_SAFE_CALL( cudaMemcpy( resArrayPtr, devResArrayPtr, BLOCKS_PER_GRID*sizeof(unsigned int), cudaMemcpyDeviceToHost ) );
n_max=0;
for(i=0;i<BLOCKS_PER_GRID;i++) n_max = MAX(n_max,resArrayPtr[i]);
cudaFree(devResArrayPtr); free(resArrayPtr);
//--------------------------------------------------------------------------------------------------------
// read back the number of samples -- "sampleNum"
unsigned int sampleNum=0;
tempTime=clock()-tempTime; //readbackTime+=tempTime;
printf("Stencil buffer processing time: %ld (ms)\n",tempTime);
long scanTime=clock();
// host-side exclusive scan: convert the per-ray sample counts into start offsets (the running total gives sampleNum)
resArrayPtr=(unsigned int *)malloc((arrsize+1)*sizeof(unsigned int));
CUDA_SAFE_CALL( cudaMemcpy( resArrayPtr, devIndexArrayPtr, (arrsize+1)*sizeof(unsigned int), cudaMemcpyDeviceToHost ) );
sampleNum=0;
for(int k=0;k<arrsize;k++) {sampleNum+=resArrayPtr[k]; resArrayPtr[k]=sampleNum;}
for(int k=arrsize;k>0;k--) {resArrayPtr[k]=resArrayPtr[k-1];}
resArrayPtr[0]=0;
CUDA_SAFE_CALL( cudaMemcpy( devIndexArrayPtr, resArrayPtr, (arrsize+1)*sizeof(unsigned int), cudaMemcpyHostToDevice ) );
free(resArrayPtr);
scanTime=clock()-scanTime; printf("Scanning time: %ld (ms)\n",scanTime);
//--------------------------------------------------------------------------------------------------------
CUDA_SAFE_CALL( cudaGLUnmapBufferObject( indexPBO ) );
glUnmapBuffer(GL_PIXEL_PACK_BUFFER_ARB);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
printf("n_max=%d sampleNum=%d\n",n_max,sampleNum);
if (n_max>overall_n_max) overall_n_max=n_max;
if (sampleNum==0) continue;
//---------------------------------------------------------------------------------------
// Rendering step 3: decomposing the Layered Depth Images (LDIs) and recording their corresponding normals
solid->MallocSampleMemory(nAxis,sampleNum);
float* devNxArrayPtr=solid->GetSampleNxArrayPtr(nAxis);
float* devNyArrayPtr=solid->GetSampleNyArrayPtr(nAxis);
float* devDepthArrayPtr=solid->GetSampleDepthArrayPtr(nAxis);
tempTime=clock();
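// Layer-peeling loop: each pass maps the FBO colour texture into CUDA, copies the samples of the
// current pass into the solid's arrays, then re-renders with stencil reference n+1 so that the next
// fragment of every pixel reaches the colour buffer (the samples are depth-sorted afterwards).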
for(n=1;n<=n_max;n++) {
CUDA_SAFE_CALL( cudaGraphicsMapResources( 1, &sampleTex_resource, NULL ) );
cudaArray *in_array;
CUDA_SAFE_CALL( cudaGraphicsSubResourceGetMappedArray( &in_array, sampleTex_resource, 0, 0));
CUDA_SAFE_CALL( cudaBindTextureToArray(tex2DFloat4In, in_array) );
//--------------------------------------------------------------------------------------------------------
// fill the sampleArray on device
krLDNISampling_CopySamples<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(devNxArrayPtr, devNyArrayPtr,
devDepthArrayPtr, n, arrsize, width, gWidth, nRes, devIndexArrayPtr);
CUDA_SAFE_CALL( cudaGraphicsUnmapResources( 1, &sampleTex_resource, NULL ) );
if (n==n_max) break;
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glStencilFunc(GL_GREATER, n+1, 0xff);
glStencilOp(GL_KEEP, GL_INCR, GL_INCR);
glCallList(displayListIndex); glFlush();
}
tempTime=clock()-tempTime; readbackTime+=tempTime;
//------------------------------------------------------------------------
// Rendering step 4: sorting the samples
CUDA_SAFE_CALL( cudaEventRecord( startClock, 0 ) );
CUDA_SAFE_CALL( cudaEventSynchronize( startClock ) );
krLDNISampling_SortSamples<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(devNxArrayPtr, devNyArrayPtr,
devDepthArrayPtr, arrsize, devIndexArrayPtr);
CUDA_SAFE_CALL( cudaEventRecord( stopClock, 0 ) );
CUDA_SAFE_CALL( cudaEventSynchronize( stopClock ) );
float elapsedTime;
CUDA_SAFE_CALL( cudaEventElapsedTime( &elapsedTime,
startClock, stopClock ) );
// printf( "Sorting time is: %3.1f (ms)\n", elapsedTime );
sortingTime+=(long)elapsedTime;
}
//------------------------------------------------------------------------------------
// Step 3: Set the rendering parameters back
//------------------------------------------------------------------------------------
// detach FBO
glPopAttrib();
// release memory for PBO and cuda's map
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL( cudaGLUnregisterBufferObject( indexPBO ) );
glDeleteBuffers(1, &indexPBO);
CUDA_SAFE_CALL( cudaGraphicsUnregisterResource( sampleTex_resource) );
// release memory for the 2D texture
glBindTexture(GL_TEXTURE_2D, 0);
glDeleteTextures(1, &tex);
// release memory for the frame-buffer object
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
glDeleteFramebuffersEXT(1, &fbo);
// release memory for the render-buffer object
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, 0);
glDeleteRenderbuffersEXT(1, &depth_and_stencil_rb);
//------------------------------------------------------------------------------------
glEnable(GL_POLYGON_OFFSET_FILL);
glEnable(GL_POLYGON_OFFSET_LINE);
glEnable(GL_BLEND);
glEnable(GL_DITHER);
glDisable(GL_STENCIL_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_MAP_COLOR);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHTING); glEnable(GL_LIGHT0);
// glEnable(GL_POLYGON_SMOOTH); // enabling this makes the display invalid on the Thinkpad laptop
glEnable(GL_POINT_SMOOTH);
// glEnable(GL_LINE_SMOOTH); // enabling this makes the program fail on the Compaq laptop
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
printf("\nn_max=%ld \n",overall_n_max);
printf("Texture Size: %f (MB)\n",(float)((float)overall_n_max*(float)nRes*(float)nRes*7.0f)/(1024.0f*1024.0f));
printf("Readback time: %ld (ms)\nSorting time: %ld (ms)\n",
readbackTime, sortingTime);
CUDA_SAFE_CALL( cudaEventDestroy( startClock ) );
CUDA_SAFE_CALL( cudaEventDestroy( stopClock ) );
}
unsigned char* LDNIcudaOperation::_readShaderFile( const char *fileName )
{
FILE *file = fopen( fileName, "r" );
if ( file == NULL ) {
printf("Cannot open shader file!");
return 0;
}
struct _stat fileStats;
if ( _stat( fileName, &fileStats ) != 0 ) {
printf("Cannot get file stats for shader file!");
return 0;
}
unsigned char *buffer = new unsigned char[fileStats.st_size + 1];	// +1 for the terminating zero written below
int bytes = (int)(fread( buffer,1, fileStats.st_size, file ));
buffer[bytes] = 0;
fclose( file );
return buffer;
}
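// The helper below appears to search around sqrt(in) for a factor pair (outx,outy) with
// outx*outy>=in and outx divisible by 3, falling back to (in,1); note that the caller rounds
// the resulting texture width up to a power of two afterwards anyway.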
void LDNIcudaOperation::_texCalProduct(int in, int &outx, int &outy)
{
int left=0,right=0,div3left=0,div3right=0;
left = int(floor(sqrt((float)in)))-1;
right = int(ceil(sqrt((float)in)));
while(left*right < in) {right++;}
if (left%3 == 0 && left*right>=in) {
div3left = left;
div3right = right;
}
else if (right%3 == 0 && left*right>=in) {
div3left = right;
div3right = left;
}
right++; left--;
if (left%3 == 0 && left*right>=in) {
div3left = left;
div3right = right;
}
else if (right%3 == 0 && left*right>=in){
div3left = right;
div3right = left;
}
while(left*right > in){
right++; left--;
if (left%3 == 0 && left*right>in){
div3left = left;
div3right = right;
}
else if (right%3 == 0 && left*right>in){
div3left = right;
div3right = left;
}
}
if (right*left < in){
right--; left++;
if (left%3 == 0 ){
div3left = left;
div3right = right;
}
else if (right%3 == 0){
div3left = right;
div3right = left;
}
}
outx=div3left; outy=div3right;
if (outx==0 || outy==0) {outx=in; outy=1;}
}
//--------------------------------------------------------------------------------------------
void LDNIcudaOperation::OrientedNormalReconstruction(LDNIcudaSolid *solid, unsigned int nSupportSize, bool bWithOrientationVoting)
{
unsigned int *indexArray[3]; float *depthArray[3],*nxArray[3],*nyArray[3];
int res; short nAxis;
float ww,origin[3];
float *buffer; int sampleNum,xNum,yNum,zNum;
//---------------------------------------------------------------------------------------------------------
// preparation
res=solid->GetResolution(); ww=solid->GetSampleWidth();
solid->GetOrigin(origin[0],origin[1],origin[2]);
for(nAxis=0;nAxis<3;nAxis++) {
nxArray[nAxis]=solid->GetSampleNxArrayPtr(nAxis);
nyArray[nAxis]=solid->GetSampleNyArrayPtr(nAxis);
depthArray[nAxis]=solid->GetSampleDepthArrayPtr(nAxis);
indexArray[nAxis]=solid->GetIndexArrayPtr(nAxis);
}
xNum=solid->GetSampleNumber(0); yNum=solid->GetSampleNumber(1); zNum=solid->GetSampleNumber(2);
sampleNum=MAX3(xNum,yNum,zNum);
CUDA_SAFE_CALL( cudaMalloc( (void**)&(buffer), sampleNum*3*sizeof(float) ) );
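// per-sample scratch buffer (3 floats per sample), sized for the largest of the three directional
// sample sets and reused for every axis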
//--------------------------------------------------------------------------------------------------------------------------
// Phase 1: estimation of the oriented normal vectors
for(nAxis=0;nAxis<3;nAxis++) {
//----------------------------------------------------------------------------------------------------------------------
// Preprocessing
krLDNINormalProcessing_PreProc<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(indexArray[nAxis],buffer,res,res*res);
//----------------------------------------------------------------------------------------------------------------------
// The following kernel is sample-based normal reconstruction
krLDNINormalReconstruction_PerSample<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(indexArray[0], indexArray[1], indexArray[2],
nxArray[0], nxArray[1], nxArray[2], nyArray[0], nyArray[1], nyArray[2],
depthArray[0], depthArray[1], depthArray[2], buffer,
solid->GetSampleNumber(nAxis), nAxis, res, ww, nSupportSize);
//----------------------------------------------------------------------------------------------------------------------
// Updating the result of computation
int sNum=solid->GetSampleNumber(nAxis);
krLDNINormalProcessing_Update<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(
sNum, nxArray[nAxis], nyArray[nAxis], depthArray[nAxis], buffer);
}
//--------------------------------------------------------------------------------------------------------------------------
// Phase 2: voting based correction of normal vectors' orientation
if (bWithOrientationVoting)
for(nAxis=0;nAxis<3;nAxis++) {
//----------------------------------------------------------------------------------------------------------------------
// Preprocessing
krLDNINormalProcessing_PreProc<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(indexArray[nAxis],buffer,res,res*res);
//----------------------------------------------------------------------------------------------------------------------
// The following kernel is voting-based orientation correction for normal vectors
krLDNINormalProcessing_OrientationCorrectionByVoting<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(indexArray[0], indexArray[1], indexArray[2],
nxArray[0], nxArray[1], nxArray[2], nyArray[0], nyArray[1], nyArray[2],
depthArray[0], depthArray[1], depthArray[2], buffer,
solid->GetSampleNumber(nAxis), nAxis, res, ww, nSupportSize);
}
//-----------------------------------------------------------------------------------------
// release the memory
cudaFree(buffer);
}
void LDNIcudaOperation::ParallelProcessingNormalVector(LDNIcudaSolid *solid, unsigned int nSupportSize, float normalPara)
{
// cudaEvent_t startClock, stopClock;
// float elapsedTime;
// CUDA_SAFE_CALL( cudaEventCreate( &startClock ) );
// CUDA_SAFE_CALL( cudaEventCreate( &stopClock ) );
unsigned int *indexArray[3]; float *depthArray[3],*nxArray[3],*nyArray[3];
int res; short nAxis;
float ww,origin[3];
float *buffer; int sampleNum,xNum,yNum,zNum;
//---------------------------------------------------------------------------------------------------------
// preparation
res=solid->GetResolution(); ww=solid->GetSampleWidth();
solid->GetOrigin(origin[0],origin[1],origin[2]);
for(nAxis=0;nAxis<3;nAxis++) {
nxArray[nAxis]=solid->GetSampleNxArrayPtr(nAxis);
nyArray[nAxis]=solid->GetSampleNyArrayPtr(nAxis);
depthArray[nAxis]=solid->GetSampleDepthArrayPtr(nAxis);
indexArray[nAxis]=solid->GetIndexArrayPtr(nAxis);
}
xNum=solid->GetSampleNumber(0); yNum=solid->GetSampleNumber(1); zNum=solid->GetSampleNumber(2);
sampleNum=MAX3(xNum,yNum,zNum);
CUDA_SAFE_CALL( cudaMalloc( (void**)&(buffer), sampleNum*3*sizeof(float) ) );
for(nAxis=0;nAxis<3;nAxis++)
{ //nAxis=0;
// CUDA_SAFE_CALL( cudaMemset( (void*)buffer, 0, sampleNum*3*sizeof(float) ) );
// CUDA_SAFE_CALL( cudaEventRecord( startClock, 0 ) );
// CUDA_SAFE_CALL( cudaEventSynchronize( startClock ) );
krLDNINormalProcessing_PreProc<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(indexArray[nAxis],buffer,res,res*res);
// CUDA_SAFE_CALL( cudaEventRecord( stopClock, 0 ) );
// CUDA_SAFE_CALL( cudaEventSynchronize( stopClock ) ); // This confirms the kernel's running has completed
// CUDA_SAFE_CALL( cudaEventElapsedTime( &elapsedTime, startClock, stopClock ) );
// printf("%d-direction pre-processing time: %3.1f (ms)\n",(int)nAxis,elapsedTime);
// CUDA_SAFE_CALL( cudaEventRecord( startClock, 0 ) );
// CUDA_SAFE_CALL( cudaEventSynchronize( startClock ) );
//----------------------------------------------------------------------------------------------------------------------
// The following kernel is ray-based filtering, which is too slow to process
/* krLDNIBilateralNormalFilter_PerRay<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(
indexArray[0], indexArray[1], indexArray[2],
nxArray[0], nxArray[1], nxArray[2],
nyArray[0], nyArray[1], nyArray[2],
depthArray[0], depthArray[1], depthArray[2], buffer,
res*res, nAxis, res, ww, origin[0], origin[1], origin[2], nSupportSize, normalPara);*/
//----------------------------------------------------------------------------------------------------------------------
// The following kernel is sample-based filtering
krLDNIBilateralNormalFilter_PerSample<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(
indexArray[0], indexArray[1], indexArray[2],
nxArray[0], nxArray[1], nxArray[2],
nyArray[0], nyArray[1], nyArray[2],
depthArray[0], depthArray[1], depthArray[2], buffer,
solid->GetSampleNumber(nAxis), nAxis, res, ww, nSupportSize, normalPara);
// CUDA_SAFE_CALL( cudaEventRecord( stopClock, 0 ) );
// CUDA_SAFE_CALL( cudaEventSynchronize( stopClock ) ); // This confirms the kernel's running has completed
// CUDA_SAFE_CALL( cudaEventElapsedTime( &elapsedTime, startClock, stopClock ) );
// printf("%d-direction processing time: %3.1f (ms)\n",(int)nAxis,elapsedTime);
int sNum=solid->GetSampleNumber(nAxis);
// CUDA_SAFE_CALL( cudaEventRecord( startClock, 0 ) );
// CUDA_SAFE_CALL( cudaEventSynchronize( startClock ) );
krLDNINormalProcessing_Update<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(
sNum, nxArray[nAxis], nyArray[nAxis], depthArray[nAxis], buffer);
// CUDA_SAFE_CALL( cudaEventRecord( stopClock, 0 ) );
// CUDA_SAFE_CALL( cudaEventSynchronize( stopClock ) );
// CUDA_SAFE_CALL( cudaEventElapsedTime( &elapsedTime, startClock, stopClock ) );
// printf("Buffer updating time: %3.1f (ms)\n",elapsedTime);
}
//-----------------------------------------------------------------------------------------
// release the memory
cudaFree(buffer);
// CUDA_SAFE_CALL( cudaEventDestroy( startClock ) );
// CUDA_SAFE_CALL( cudaEventDestroy( stopClock ) );
}
//--------------------------------------------------------------------------------------------
void LDNIcudaOperation::CopyCPUSolidToCUDASolid(LDNIcpuSolid *cpuSolid, LDNIcudaSolid* &cudaSolid)
{
float ox,oy,oz,gWidth; int i,num,res; short nAxis;
LDNIcpuRay *rays; LDNIcpuSample *sampleArray;
cpuSolid->GetOrigin(ox,oy,oz);
gWidth=cpuSolid->GetSampleWidth();
res=cpuSolid->GetResolution();
cudaSolid=new LDNIcudaSolid;
cudaSolid->SetOrigin(ox,oy,oz); cudaSolid->SetSampleWidth(gWidth);
cudaSolid->MallocMemory(res);
//-----------------------------------------------------------------------------------------
// copy the index arrays
unsigned int *dev_indexArray,*indexArray;
num=res*res;
indexArray=(unsigned int *)malloc((num+1)*sizeof(unsigned int));
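// the cpu solid stores a cumulative sample index per ray, so copying rays[i].sampleIndex into
// indexArray[i+1] (with indexArray[0]=0) reproduces the prefix-sum offset layout used on the GPU side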
for(nAxis=0;nAxis<3;nAxis++) {
rays=cpuSolid->GetRayArrayPtr(nAxis);
indexArray[0]=0;
for(i=0;i<num;i++) indexArray[i+1]=rays[i].sampleIndex;
dev_indexArray=cudaSolid->GetIndexArrayPtr(nAxis);
CUDA_SAFE_CALL( cudaMemcpy( dev_indexArray, indexArray, (num+1)*sizeof(unsigned int), cudaMemcpyHostToDevice ) );
}
free(indexArray);
//-----------------------------------------------------------------------------------------
// copy the sample arrays
for(nAxis=0;nAxis<3;nAxis++) {
rays=cpuSolid->GetRayArrayPtr(nAxis);
int sampleNum=rays[res*res-1].sampleIndex;
float *sampleNxArray,*sampleNyArray,*sampleDepthArray;
sampleNxArray=(float*)malloc(sampleNum*sizeof(float));
sampleNyArray=(float*)malloc(sampleNum*sizeof(float));
sampleDepthArray=(float*)malloc(sampleNum*sizeof(float));
sampleArray=cpuSolid->GetSampleArrayPtr(nAxis);
for(i=0;i<sampleNum;i++) {
sampleNxArray[i]=sampleArray[i].nx;
sampleNyArray[i]=sampleArray[i].ny;
// the cuda solid encodes the sign of nz in the sign of the depth value
if (sampleArray[i].nz<0)
sampleDepthArray[i]=-sampleArray[i].depth;
else
sampleDepthArray[i]=sampleArray[i].depth;
}
cudaSolid->MallocSampleMemory(nAxis,sampleNum);
float *dev_sampleNxArray=cudaSolid->GetSampleNxArrayPtr(nAxis);
float *dev_sampleNyArray=cudaSolid->GetSampleNyArrayPtr(nAxis);
float *dev_sampleDepthArray=cudaSolid->GetSampleDepthArrayPtr(nAxis);
CUDA_SAFE_CALL( cudaMemcpy( dev_sampleNxArray, sampleNxArray, sampleNum*sizeof(float), cudaMemcpyHostToDevice ) );
CUDA_SAFE_CALL( cudaMemcpy( dev_sampleNyArray, sampleNyArray, sampleNum*sizeof(float), cudaMemcpyHostToDevice ) );
CUDA_SAFE_CALL( cudaMemcpy( dev_sampleDepthArray, sampleDepthArray, sampleNum*sizeof(float), cudaMemcpyHostToDevice ) );
free(sampleNxArray); free(sampleNyArray); free(sampleDepthArray);
}
}
void LDNIcudaOperation::CopyCUDASolidToCPUSolid(LDNIcudaSolid *cudaSolid, LDNIcpuSolid* &cpuSolid)
{
float ox,oy,oz,gWidth; int i,num,res; short nAxis;
LDNIcpuRay *rays; LDNIcpuSample *sampleArray;
cudaSolid->GetOrigin(ox,oy,oz); gWidth=cudaSolid->GetSampleWidth();
res=cudaSolid->GetResolution();
cpuSolid=new LDNIcpuSolid; cpuSolid->SetOrigin(ox,oy,oz);
cpuSolid->SetSampleWidth(gWidth); cpuSolid->MallocMemory(res);
//-----------------------------------------------------------------------------------------
// copy the index arrays
unsigned int *dev_indexArray,*indexArray;
num=res*res;
indexArray=(unsigned int *)malloc((num+1)*sizeof(unsigned int));
for(nAxis=0;nAxis<3;nAxis++) {
rays=cpuSolid->GetRayArrayPtr(nAxis);
dev_indexArray=cudaSolid->GetIndexArrayPtr(nAxis);
CUDA_SAFE_CALL( cudaMemcpy( indexArray, dev_indexArray, (num+1)*sizeof(unsigned int), cudaMemcpyDeviceToHost ) );
for(i=0;i<num;i++) rays[i].sampleIndex=indexArray[i+1];
}
free(indexArray);
//-----------------------------------------------------------------------------------------
// copy the sample arrays
for(nAxis=0;nAxis<3;nAxis++) {
rays=cpuSolid->GetRayArrayPtr(nAxis);
int sampleNum=rays[res*res-1].sampleIndex;
float *sampleNxArray,*sampleNyArray,*sampleDepthArray;
sampleNxArray=(float*)malloc(sampleNum*sizeof(float));
sampleNyArray=(float*)malloc(sampleNum*sizeof(float));
sampleDepthArray=(float*)malloc(sampleNum*sizeof(float));
float *dev_sampleNxArray=cudaSolid->GetSampleNxArrayPtr(nAxis);
float *dev_sampleNyArray=cudaSolid->GetSampleNyArrayPtr(nAxis);
float *dev_sampleDepthArray=cudaSolid->GetSampleDepthArrayPtr(nAxis);
CUDA_SAFE_CALL( cudaMemcpy( sampleNxArray, dev_sampleNxArray, sampleNum*sizeof(float), cudaMemcpyDeviceToHost ) );
CUDA_SAFE_CALL( cudaMemcpy( sampleNyArray, dev_sampleNyArray, sampleNum*sizeof(float), cudaMemcpyDeviceToHost ) );
CUDA_SAFE_CALL( cudaMemcpy( sampleDepthArray, dev_sampleDepthArray, sampleNum*sizeof(float), cudaMemcpyDeviceToHost ) );
cpuSolid->MallocSampleMemory(nAxis,sampleNum);
sampleArray=cpuSolid->GetSampleArrayPtr(nAxis);
for(i=0;i<sampleNum;i++) {
sampleArray[i].nx=sampleNxArray[i];
sampleArray[i].ny=sampleNyArray[i];
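			// recover nz from the unit-length constraint: nz = +/- sqrt(1 - nx*nx - ny*ny),
			// where the sign is taken from the sign of the stored depth value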
double dd=1.0-sampleArray[i].nx*sampleArray[i].nx-sampleArray[i].ny*sampleArray[i].ny;
if (dd<0.0) dd=0.0; if (dd>1.0) dd=1.0;
if (sampleDepthArray[i]<0) sampleArray[i].nz=-sqrt(dd); else sampleArray[i].nz=sqrt(dd);
sampleArray[i].depth=fabs(sampleDepthArray[i]);
}
free(sampleNxArray); free(sampleNyArray); free(sampleDepthArray);
}
}
void LDNIcudaOperation::_switchSolid(LDNIcudaSolid* solidA, LDNIcudaSolid* solidB)
{
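	// Swap the two solids by exchanging every per-axis device pointer together with the origin,
	// sample width, resolution and sample numbers; no device memory is copied.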
unsigned int *dev_indexArrayA[3];
float *dev_sampleNxArrayA[3];
float *dev_sampleNyArrayA[3];
float *dev_sampleDepthArrayA[3];
float originA[3],sampleWidthA;
int res,xSampleNum,ySampleNum,zSampleNum;
float originB[3];
dev_indexArrayA[0]=solidA->GetIndexArrayPtr(0); dev_indexArrayA[1]=solidA->GetIndexArrayPtr(1); dev_indexArrayA[2]=solidA->GetIndexArrayPtr(2);
dev_sampleNxArrayA[0]=solidA->GetSampleNxArrayPtr(0); dev_sampleNxArrayA[1]=solidA->GetSampleNxArrayPtr(1); dev_sampleNxArrayA[2]=solidA->GetSampleNxArrayPtr(2);
dev_sampleNyArrayA[0]=solidA->GetSampleNyArrayPtr(0); dev_sampleNyArrayA[1]=solidA->GetSampleNyArrayPtr(1); dev_sampleNyArrayA[2]=solidA->GetSampleNyArrayPtr(2);
dev_sampleDepthArrayA[0]=solidA->GetSampleDepthArrayPtr(0); dev_sampleDepthArrayA[1]=solidA->GetSampleDepthArrayPtr(1); dev_sampleDepthArrayA[2]=solidA->GetSampleDepthArrayPtr(2);
solidA->GetOrigin(originA[0],originA[1],originA[2]); sampleWidthA=solidA->GetSampleWidth(); res=solidA->GetResolution();
xSampleNum=solidA->GetSampleNumber(0); ySampleNum=solidA->GetSampleNumber(1); zSampleNum=solidA->GetSampleNumber(2);
solidA->SetIndexArrayPtr(0,solidB->GetIndexArrayPtr(0)); solidA->SetIndexArrayPtr(1,solidB->GetIndexArrayPtr(1)); solidA->SetIndexArrayPtr(2,solidB->GetIndexArrayPtr(2));
solidA->SetSampleNxArrayPtr(0,solidB->GetSampleNxArrayPtr(0)); solidA->SetSampleNxArrayPtr(1,solidB->GetSampleNxArrayPtr(1)); solidA->SetSampleNxArrayPtr(2,solidB->GetSampleNxArrayPtr(2));
solidA->SetSampleNyArrayPtr(0,solidB->GetSampleNyArrayPtr(0)); solidA->SetSampleNyArrayPtr(1,solidB->GetSampleNyArrayPtr(1)); solidA->SetSampleNyArrayPtr(2,solidB->GetSampleNyArrayPtr(2));
solidA->SetSampleDepthArrayPtr(0,solidB->GetSampleDepthArrayPtr(0)); solidA->SetSampleDepthArrayPtr(1,solidB->GetSampleDepthArrayPtr(1)); solidA->SetSampleDepthArrayPtr(2,solidB->GetSampleDepthArrayPtr(2));
solidB->GetOrigin(originB[0],originB[1],originB[2]); solidA->SetOrigin(originB[0],originB[1],originB[2]);
solidA->SetSampleWidth(solidB->GetSampleWidth()); solidA->SetResolution(solidB->GetResolution());
solidA->SetSampleNumber(0,solidB->GetSampleNumber(0)); solidA->SetSampleNumber(1,solidB->GetSampleNumber(1)); solidA->SetSampleNumber(2,solidB->GetSampleNumber(2));
solidB->SetIndexArrayPtr(0,dev_indexArrayA[0]); solidB->SetIndexArrayPtr(1,dev_indexArrayA[1]); solidB->SetIndexArrayPtr(2,dev_indexArrayA[2]);
solidB->SetSampleNxArrayPtr(0,dev_sampleNxArrayA[0]); solidB->SetSampleNxArrayPtr(1,dev_sampleNxArrayA[1]); solidB->SetSampleNxArrayPtr(2,dev_sampleNxArrayA[2]);
solidB->SetSampleNyArrayPtr(0,dev_sampleNyArrayA[0]); solidB->SetSampleNyArrayPtr(1,dev_sampleNyArrayA[1]); solidB->SetSampleNyArrayPtr(2,dev_sampleNyArrayA[2]);
solidB->SetSampleDepthArrayPtr(0,dev_sampleDepthArrayA[0]); solidB->SetSampleDepthArrayPtr(1,dev_sampleDepthArrayA[1]); solidB->SetSampleDepthArrayPtr(2,dev_sampleDepthArrayA[2]);
solidB->SetOrigin(originA[0],originA[1],originA[2]); solidB->SetSampleWidth(sampleWidthA); solidB->SetResolution(res);
solidB->SetSampleNumber(0,xSampleNum); solidB->SetSampleNumber(1,ySampleNum); solidB->SetSampleNumber(2,zSampleNum);
}
void LDNIcudaOperation::_expansionLDNIcudaSolidByNewBoundingBox(LDNIcudaSolid *cudaSolid, float boundingBox[])
{
unsigned int sd[3],ed[3],total; float wx,wy,wz,origin[3],gWidth;
unsigned int *dev_indexArray;
float *dev_sampleDepthArray;
long time=clock();
cudaSolid->GetOrigin(origin[0],origin[1],origin[2]);
gWidth=cudaSolid->GetSampleWidth();
int res=cudaSolid->GetResolution();
origin[0]=origin[0]-gWidth*0.5f;
origin[1]=origin[1]-gWidth*0.5f;
origin[2]=origin[2]-gWidth*0.5f;
//------------------------------------------------------------------------------
	// Step 1: determine the amount of expansion needed on each side
boundingBox[0]=boundingBox[0]-gWidth*2.0f;
boundingBox[2]=boundingBox[2]-gWidth*2.0f;
boundingBox[4]=boundingBox[4]-gWidth*2.0f;
boundingBox[1]=boundingBox[1]+gWidth*2.0f;
boundingBox[3]=boundingBox[3]+gWidth*2.0f;
boundingBox[5]=boundingBox[5]+gWidth*2.0f;
//------------------------------------------------------------------------------
sd[0]=sd[1]=sd[2]=0;
if (boundingBox[0]<origin[0]) sd[0]=(unsigned int)((origin[0]-boundingBox[0])/gWidth)+1;
if (boundingBox[2]<origin[1]) sd[1]=(unsigned int)((origin[1]-boundingBox[2])/gWidth)+1;
if (boundingBox[4]<origin[2]) sd[2]=(unsigned int)((origin[2]-boundingBox[4])/gWidth)+1;
//------------------------------------------------------------------------------
wx=origin[0]+gWidth*(float)(res);
wy=origin[1]+gWidth*(float)(res);
wz=origin[2]+gWidth*(float)(res);
ed[0]=ed[1]=ed[2]=0;
if (boundingBox[1]>wx) ed[0]=(int)((boundingBox[1]-wx)/gWidth+0.5);
if (boundingBox[3]>wy) ed[1]=(int)((boundingBox[3]-wy)/gWidth+0.5);
if (boundingBox[5]>wz) ed[2]=(int)((boundingBox[5]-wz)/gWidth+0.5);
//------------------------------------------------------------------------------
total=sd[0]+ed[0];
if ((sd[1]+ed[1])>total) total=sd[1]+ed[1];
if ((sd[2]+ed[2])>total) total=sd[2]+ed[2];
ed[0]=total-sd[0]; ed[1]=total-sd[1]; ed[2]=total-sd[2];
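	// make the total expansion identical along all three axes so that the working space
	// remains a cube of resolution (res+total)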
//------------------------------------------------------------------------------
// Step 2: create new index Arrays of LDNISolidNode
unsigned int newArrsize;
newArrsize=(unsigned int)(res+total)*(res+total);
unsigned int *tempIndexArray;
CUDA_SAFE_CALL( cudaMalloc( (void**)&tempIndexArray, (newArrsize+1)*sizeof(unsigned int) ) );
for(short nAxis=0; nAxis<3; nAxis++) {
dev_indexArray=cudaSolid->GetIndexArrayPtr(nAxis);
CUDA_SAFE_CALL( cudaMemset( (void*)tempIndexArray, 0, (newArrsize+1)*sizeof(unsigned int) ) );
//------------------------------------------------------------------
		// fill the temporary index array with the number of samples on each ray
krLDNIcudaSolid_fillNewIndexBySampleNumber<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(
tempIndexArray, dev_indexArray, res, res+total, sd[(nAxis+1)%3], sd[(nAxis+2)%3]);
//------------------------------------------------------------------
// scan the index array
thrust::device_ptr<unsigned int> dev_ptr(tempIndexArray); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr, dev_ptr+(newArrsize+1), dev_ptr); // in-place scan
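		// after the scan, tempIndexArray[i] holds the starting sample offset of the i-th ray
		// (e.g. counts {3,0,2,...} become offsets {0,3,3,5,...}) and the last entry holds the
		// total number of samples along this axis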
//------------------------------------------------------------------
// update the temporary index array
cudaFree(dev_indexArray);
CUDA_SAFE_CALL( cudaMalloc( (void**)&(dev_indexArray), (newArrsize+1)*sizeof(unsigned int) ) );
cudaSolid->SetIndexArrayPtr(nAxis,dev_indexArray);
CUDA_SAFE_CALL( cudaMemcpy( dev_indexArray, tempIndexArray, (newArrsize+1)*sizeof(unsigned int), cudaMemcpyDeviceToDevice ) );
}
cudaFree(tempIndexArray);
//------------------------------------------------------------------------------
// Step 3: update the depth-values of samples when necessary
origin[0]=origin[0]-gWidth*(float)(sd[0])+gWidth*0.5;
origin[1]=origin[1]-gWidth*(float)(sd[1])+gWidth*0.5;
origin[2]=origin[2]-gWidth*(float)(sd[2])+gWidth*0.5;
cudaSolid->SetOrigin(origin[0],origin[1],origin[2]);
res+=total; cudaSolid->SetResolution(res);
for(short nAxis=0; nAxis<3; nAxis++) {
if (sd[nAxis]==0) continue;
float updateDepth=gWidth*(float)sd[nAxis];
dev_indexArray=cudaSolid->GetIndexArrayPtr(nAxis);
dev_sampleDepthArray=cudaSolid->GetSampleDepthArrayPtr(nAxis);
unsigned int sampleNum;
CUDA_SAFE_CALL( cudaMemcpy( &sampleNum, &(dev_indexArray[newArrsize]), sizeof(unsigned int), cudaMemcpyDeviceToHost ) );
krLDNIcudaSolid_depthSampleAdd<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(dev_sampleDepthArray, updateDepth, sampleNum);
}
//------------------------------------------------------------------------------
// Step 4: update the boundingBox[] for the sampling of mesh surface bounded by it
boundingBox[0]=origin[0]-gWidth*0.5;
boundingBox[2]=origin[1]-gWidth*0.5;
boundingBox[4]=origin[2]-gWidth*0.5;
boundingBox[1]=boundingBox[0]+gWidth*((float)res);
boundingBox[3]=boundingBox[2]+gWidth*((float)res);
boundingBox[5]=boundingBox[4]+gWidth*((float)res);
printf("-----------------------------------------------------------------------\n");
printf("Expanding the working space of existing cuda solid takes: %ld (ms)\n",clock()-time);
printf("The resolution is extended from %d to %d\n",res-total,res);
printf("-----------------------------------------------------------------------\n");
}
//--------------------------------------------------------------------------------------------
bool initGLInteroperabilityOnCUDA(int major, int minor) {
cudaDeviceProp prop;
int dev;
memset( &prop, 0, sizeof( cudaDeviceProp ) );
prop.major = major;
prop.minor = minor;
CUDA_SAFE_CALL( cudaChooseDevice( &dev, &prop ) );
// tell CUDA which dev we will be using for graphic interop
// from the programming guide: Interoperability with OpenGL
// requires that the CUDA device be specified by
// cudaGLSetGLDevice() before any other runtime calls.
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if (deviceProp.major < 2)
{
return false;
}
else
{
printf("Current device support compute capability 2.0 \n");
}
CUDA_SAFE_CALL( cudaGLSetGLDevice( dev ) );
return true;
}
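// A minimal usage sketch (this call is not made in this file): the interoperability device
// must be selected before any other CUDA runtime call that touches GL resources, e.g.
//     if (!initGLInteroperabilityOnCUDA(2, 0)) { /* no capable device: fall back or abort */ }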
//--------------------------------------------------------------------------------------------
void LDNIcudaOperation::GetCudaDeviceProperty()
{
cudaDeviceProp prop;
int count;
CUDA_SAFE_CALL( cudaGetDeviceCount( &count ) );
for (int i=0; i< count; i++) {
CUDA_SAFE_CALL( cudaGetDeviceProperties( &prop, i ) );
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
//--------------------------------------------------------------------------------------------------------
// adaptive slicing related
bool LDNIcudaOperation::AdaptiveSlicing_CalculateLayerArea(LDNIcudaSolid* &cudaSolid, float layerAreaArray[]) {
int nAxis = 0; // meaning the x axis
float* devNxArrayPtr = cudaSolid->GetSampleNxArrayPtr(nAxis);
float* devNyArrayPtr = cudaSolid->GetSampleNyArrayPtr(nAxis);
float* devDepthArrayPtr = cudaSolid->GetSampleDepthArrayPtr(nAxis);
unsigned int* devIndexArrayPtr = cudaSolid->GetIndexArrayPtr(nAxis);
int res = cudaSolid->GetResolution();
int numRay = res*res;
float* devRayLengthArrayPtr;
CUDA_SAFE_CALL(cudaMalloc((void**)&(devRayLengthArrayPtr), (numRay) * sizeof(float)));
float* devLayerAreaArrayPtr;
CUDA_SAFE_CALL(cudaMalloc((void**)&(devLayerAreaArrayPtr), (res) * sizeof(float)));
//-----------------------------------------------------------------
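	// Two kernels are used: the first accumulates, for every ray along x, the total length of
	// its "inside" intervals; the second sums those lengths over all rays belonging to the same
	// layer.  No scaling by the sample width is applied here, so the values are sums of depths;
	// if a true area is needed, the caller presumably rescales them.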
printf("begin calculating area\n");
krLDNIAdaptiveSlicing_CalculateRayLength<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK >>> (devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, numRay, devIndexArrayPtr, devRayLengthArrayPtr, res);
krLDNIAdaptiveSlicing_CalculateLayerArea << <BLOCKS_PER_GRID, THREADS_PER_BLOCK >> > (devRayLengthArrayPtr, devIndexArrayPtr, devLayerAreaArrayPtr, res);
CUDA_SAFE_CALL(cudaMemcpy(layerAreaArray, devLayerAreaArrayPtr, (res) * sizeof(float), cudaMemcpyDeviceToHost));
cudaFree(devRayLengthArrayPtr);
cudaFree(devLayerAreaArrayPtr);
printf("end calculating area\n");
return true;
}
bool LDNIcudaOperation::AdaptiveSlicing_CalculateVolumeError(LDNIcudaSolid* &cudaSolid, float volumeErrorMatrix[], int minSliceCount, int totalSliceCount, int oneLayerSliceCount, float sliceSize, float boundingBox[]) {
	int nAxis = 1; // the y axis, i.e. the building (up) direction
float* devNxArrayPtr = cudaSolid->GetSampleNxArrayPtr(nAxis);
float* devNyArrayPtr = cudaSolid->GetSampleNyArrayPtr(nAxis);
float* devDepthArrayPtr = cudaSolid->GetSampleDepthArrayPtr(nAxis);
unsigned int* devIndexArrayPtr = cudaSolid->GetIndexArrayPtr(nAxis);
float ox, oy, oz, ww;
cudaSolid->GetOrigin(ox, oy, oz); ww = cudaSolid->GetSampleWidth();
float origin[3] = { ox,oy,oz };
origin[0] = ox;
origin[1] = oy;
origin[2] = oz;
int res = cudaSolid->GetResolution();
float* devVolumeErrorMatrix;
int thread_per_block_volume = 256;
int blocks_per_grid_volume = 32;
int tileCount = blocks_per_grid_volume*thread_per_block_volume;
CUDA_SAFE_CALL(cudaMalloc((void**)&(devVolumeErrorMatrix), (oneLayerSliceCount*totalSliceCount*tileCount) * sizeof(float)));
//CUDA_SAFE_CALL(cudaMemset((void*)devVolumeErrorMatrix, 1, (oneLayerSliceCount*totalSliceCount*tileCount) * sizeof(float)));
//-----------------------------------------------------------------
printf("begin calculating volume error\n");
printf("oy %.6f\n", origin[0]);
float y_min = boundingBox[2];
printf("ymin %.6f\n", y_min);
krLDNIAdaptiveSlicing_CalculateVolumeErrorPerTile << <blocks_per_grid_volume, thread_per_block_volume >> > (devNxArrayPtr, devNyArrayPtr, devDepthArrayPtr, res*res, devIndexArrayPtr, devVolumeErrorMatrix, res, minSliceCount, totalSliceCount, oneLayerSliceCount, sliceSize, y_min, oy, ww, tileCount);
krLDNIAdaptiveSlicing_ReduceVolumeErrorByTile << <BLOCKS_PER_GRID, THREADS_PER_BLOCK >> > (devVolumeErrorMatrix, oneLayerSliceCount, totalSliceCount, tileCount);
CUDA_SAFE_CALL(cudaMemcpy(volumeErrorMatrix, devVolumeErrorMatrix, (oneLayerSliceCount*totalSliceCount) * sizeof(float), cudaMemcpyDeviceToHost));
cudaFree(devVolumeErrorMatrix);
printf("Now %.3f\n", volumeErrorMatrix[0]);
printf("end calculating volume error\n");
return true;
}
//
//--------------------------------------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////////////
//
// The following functions run on the graphics hardware (GPU) as CUDA kernels
//
__global__ void krLDNIRegularization_RegularizationOnRays(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
unsigned int *devIndexArrayPtr, unsigned int *devIndexArrayPtrRes, int arrsize, float eps)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int stIndex,sampleNum,i,resSampleNum;
float resNx[MAX_NUM_OF_SAMPLES_ON_RAY],resNy[MAX_NUM_OF_SAMPLES_ON_RAY],resDepth[MAX_NUM_OF_SAMPLES_ON_RAY];
while(index<arrsize) {
stIndex=devIndexArrayPtr[index]; sampleNum=devIndexArrayPtr[index+1]-stIndex;
// if (sampleNum>0) sampleNum=sampleNum-2;
{
//------------------------------------------------------------------------------
// Eliminating gaps
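			// two consecutive solid intervals are merged into one when the gap between them
			// (distance from an exit sample to the next entry sample) is smaller than eps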
resSampleNum=0;
if (sampleNum>0) {
resNx[0]=devNxArrayPtr[stIndex]; resNy[0]=devNyArrayPtr[stIndex]; resDepth[0]=devDepthArrayPtr[stIndex]; resSampleNum++;
for(i=1;i<sampleNum;i+=2) {
if (fabs(devDepthArrayPtr[stIndex+i+1])-fabs(devDepthArrayPtr[stIndex+i])<eps) continue;
resNx[resSampleNum]=devNxArrayPtr[stIndex+i];
resNy[resSampleNum]=devNyArrayPtr[stIndex+i];
resDepth[resSampleNum]=devDepthArrayPtr[stIndex+i];
resSampleNum++;
resNx[resSampleNum]=devNxArrayPtr[stIndex+i+1];
resNy[resSampleNum]=devNyArrayPtr[stIndex+i+1];
resDepth[resSampleNum]=devDepthArrayPtr[stIndex+i+1];
resSampleNum++;
}
resNx[resSampleNum]=devNxArrayPtr[stIndex+sampleNum-1];
resNy[resSampleNum]=devNyArrayPtr[stIndex+sampleNum-1];
resDepth[resSampleNum]=devDepthArrayPtr[stIndex+sampleNum-1];
resSampleNum++;
}
//------------------------------------------------------------------------------
// Eliminating super-thin sheets
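			// a solid interval thinner than eps (|exit depth| - |entry depth| < eps) is dropped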
sampleNum=0;
for(i=0;i<resSampleNum;i+=2) {
if (fabs(resDepth[i+1])-fabs(resDepth[i])<eps) continue;
devNxArrayPtr[stIndex+sampleNum]=resNx[i];
devNyArrayPtr[stIndex+sampleNum]=resNy[i];
devDepthArrayPtr[stIndex+sampleNum]=resDepth[i];
sampleNum++;
devNxArrayPtr[stIndex+sampleNum]=resNx[i+1];
devNyArrayPtr[stIndex+sampleNum]=resNy[i+1];
devDepthArrayPtr[stIndex+sampleNum]=resDepth[i+1];
sampleNum++;
}
}
devIndexArrayPtrRes[index]=sampleNum;
index += blockDim.x * gridDim.x;
}
}
__global__ void krLDNIRegularization_ResultSampleCollection(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
unsigned int *devIndexArrayPtr,
float *devNxArrayPtrRes, float *devNyArrayPtrRes, float *devDepthArrayPtrRes,
unsigned int *devIndexArrayPtrRes, int arrsize)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int st,num,stRes,numRes,k;
while(index<arrsize) {
st=devIndexArrayPtr[index]; num=devIndexArrayPtr[index+1]-st;
stRes=devIndexArrayPtrRes[index]; numRes=devIndexArrayPtrRes[index+1]-stRes;
if (numRes<=num) {
for(k=0;k<numRes;k++) {
devNxArrayPtrRes[stRes+k]=devNxArrayPtr[st+k];
devNyArrayPtrRes[stRes+k]=devNyArrayPtr[st+k];
devDepthArrayPtrRes[stRes+k]=devDepthArrayPtr[st+k];
}
}
else { // This rarely occurs.
for(k=0;k<num;k++) {
devNxArrayPtrRes[stRes+k]=devNxArrayPtr[st+k];
devNyArrayPtrRes[stRes+k]=devNyArrayPtr[st+k];
devDepthArrayPtrRes[stRes+k]=devDepthArrayPtr[st+k];
}
}
index += blockDim.x * gridDim.x;
}
}
#define S_EPS 1.0e-6
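// Super-union of the sample lists produced by rasterizing several meshes together: on each ray
// the first num/2 samples are read as interval start depths (via START_DEPTH) and the last
// num/2 samples as the corresponding end depths (via END_DEPTH); intervals whose gap is within
// S_EPS are merged, and rays with an odd sample count are treated as invalid and cleared.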
__global__ void krLDNIBoolean_SuperUnionOnRays(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr, unsigned int *devIndexArrayPtr,
unsigned int *devIndexArrayPtrRes, int arrsize)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int k,st,num,count_start,count_end,count;
float resNx[MAX_NUM_OF_SAMPLES_ON_RAY],resNy[MAX_NUM_OF_SAMPLES_ON_RAY],resDepth[MAX_NUM_OF_SAMPLES_ON_RAY],s_depth, e_depth;
while(index<arrsize) {
st=devIndexArrayPtr[index]; num=devIndexArrayPtr[index+1]-st;
count_start = 0;
count_end = 0;
count = 0;
if (num%2 == 1)
{
for(k=0; k <num ; k++)
{
devNxArrayPtr[st+k]=0;
devNyArrayPtr[st+k]=0;
devDepthArrayPtr[st+k]=0;
devIndexArrayPtrRes[index]=0;
}
}
if (num > 0 && num%2==0)
{
resDepth[0] = s_depth = START_DEPTH(devDepthArrayPtr[st]);
resNx[0] = devNxArrayPtr[st];
resNy[0] = devNyArrayPtr[st];
count_start++;
count++;
e_depth = END_DEPTH(devDepthArrayPtr[num/2+st]);
count_end++;
for(k=1; k < num/2; k++)
{
s_depth = START_DEPTH(devDepthArrayPtr[k+st]);
if (((fabs(s_depth)- fabs(e_depth))>S_EPS) && (count_start == count_end))
{
resDepth[count] = e_depth;
resNx[count] = devNxArrayPtr[st+(k-1)+num/2];
resNy[count] = devNyArrayPtr[st+(k-1)+num/2];
count++;
resDepth[count] = s_depth;
resNx[count] = devNxArrayPtr[st+k];
resNy[count] = devNyArrayPtr[st+k];
count_start++;
count++;
}
//else if (fabs(s_depth) <= fabs(e_depth))
else if ((fabs(s_depth)- fabs(e_depth))<=S_EPS)
{
count_start++;
}
e_depth = END_DEPTH(devDepthArrayPtr[num/2+k+st]);
count_end++;
}
if ((fabs(e_depth)-fabs(s_depth))<S_EPS)
{
count--;
}
else
{
resDepth[count] = e_depth;
resNx[count] = devNxArrayPtr[st+(k-1)+num/2];
resNy[count] = devNyArrayPtr[st+(k-1)+num/2];
count++;
}
devIndexArrayPtrRes[index]=count;
for(k=0; k <count ; k++)
{
devNxArrayPtr[st+k]=resNx[k];
devNyArrayPtr[st+k]=resNy[k];
devDepthArrayPtr[st+k]=resDepth[k];
}
}
index += blockDim.x * gridDim.x;
}
}
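// The integer part of |depth| appears to identify the source mesh of a sample (prev_mesh
// below); within each mesh the samples alternate between entering and leaving, and this kernel
// re-encodes them so that entering samples carry a fractional depth of the form 1.xxx and
// leaving samples one of the form 2.xxx, while preserving the original sign of the depth.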
__global__ void krLDNIBoolean_IdentifyEnterLeaveOnRays(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr, unsigned int *devIndexArrayPtr, int arrsize)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int k,st,num;
unsigned int prev_mesh,count;
float depth, fdepth;
float resDepth[MAX_NUM_OF_SAMPLES_ON_RAY];
while(index<arrsize) {
st=devIndexArrayPtr[index]; num=devIndexArrayPtr[index+1]-st;
prev_mesh = 0;
count = 0;
if (num > 0)
{
prev_mesh = floor(fabs(devDepthArrayPtr[st]));
for(k=0; k<num; k++)
{
depth = devDepthArrayPtr[k+st];
fdepth = fabs(depth);
//if (floor(fdepth) != prev_mesh)
if (fabs(floor(fdepth)-prev_mesh) >= 1.0)
{
prev_mesh = floor(fdepth); count=0;
}
if (count%2 == 0)
{
fdepth = fdepth - floor(fdepth) + 1; // all starting pos : 1.xxx
}
else
{
fdepth = fdepth - floor(fdepth) + 2; // all ending pos : 2.xxx
}
if (depth < 0) resDepth[k] = -fdepth;
else resDepth[k] = fdepth;
count++;
}
for(k=0; k <num; k++)
{
devDepthArrayPtr[st+k]=resDepth[k];
}
}
index += blockDim.x * gridDim.x;
}
}
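// Per-ray Boolean operation: the samples of solid-A and solid-B are merge-walked in order of
// |depth|, toggling the insideA/insideB flags at every sample; a resultant sample is emitted
// whenever the Boolean predicate (nOperationType: 0 = union, 1 = intersection, 2 = difference)
// changes its value.  The result is written back into the arrays of solid-A and, if it does not
// fit there, the overflow goes into the arrays of solid-B.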
__global__ void krLDNIBoolean_BooleanOnRays(float *devNxArrayPtrA, float *devNyArrayPtrA, float *devDepthArrayPtrA, unsigned int *devIndexArrayPtrA,
float *devNxArrayPtrB, float *devNyArrayPtrB, float *devDepthArrayPtrB, unsigned int *devIndexArrayPtrB,
unsigned int *devIndexArrayPtrRes, int arrsize, short nOperationType)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int k,stA,stB,numA,numB,numRes,aIndex,bIndex;
bool last_op,op,insideA,insideB;
float lastNx,lastNy,lastDepth;
float resNx[MAX_NUM_OF_SAMPLES_ON_RAY],resNy[MAX_NUM_OF_SAMPLES_ON_RAY],resDepth[MAX_NUM_OF_SAMPLES_ON_RAY];
while(index<arrsize) {
stA=devIndexArrayPtrA[index]; numA=devIndexArrayPtrA[index+1]-stA;
stB=devIndexArrayPtrB[index]; numB=devIndexArrayPtrB[index+1]-stB;
last_op=insideA=insideB=false; numRes=0;
//-------------------------------------------------------------------------------------------------------
// Generate the temporary resultant samples
if (numA>0 && numB>0) {
aIndex=bIndex=0;
			while( (aIndex<numA) || (bIndex<numB) ) {	// scanning the samples on solidA and solidB together
if ((bIndex==numB) || (aIndex<numA && fabs(devDepthArrayPtrA[aIndex+stA])<fabs(devDepthArrayPtrB[bIndex+stB])))
{
// advancing on ray-A
lastDepth=devDepthArrayPtrA[aIndex+stA];
lastNx=devNxArrayPtrA[aIndex+stA];
lastNy=devNyArrayPtrA[aIndex+stA];
insideA=!insideA; aIndex++;
}
else {
// advancing on ray-B
lastDepth=devDepthArrayPtrB[bIndex+stB];
lastNx=devNxArrayPtrB[bIndex+stB];
lastNy=devNyArrayPtrB[bIndex+stB];
					if (nOperationType==2) {lastNx=-lastNx; lastNy=-lastNy; lastDepth=-lastDepth;}	// invert the normal (and the depth sign) for subtraction
insideB=!insideB; bIndex++;
}
switch(nOperationType) {
case 0:{op=LOGIC_UNION(insideA,insideB); }break;
case 1:{op=LOGIC_INTER(insideA,insideB); }break;
case 2:{op=LOGIC_SUBTR(insideA,insideB); }break;
}
if (op!=last_op)
{
if (numRes>0 && fabs(fabs(lastDepth)-fabs(resDepth[numRes-1]))<0.00001f)
{numRes--;}
else {
resDepth[numRes]=lastDepth;
resNx[numRes]=lastNx; resNy[numRes]=lastNy;
numRes++;
}
last_op=op;
}
}
}
		else if ((numA==0) && (numB>0)) {	// scanning the samples on solidB only
if (nOperationType==0) {
for(k=0;k<numB;k++) {
resNx[k]=devNxArrayPtrB[stB+k];
resNy[k]=devNyArrayPtrB[stB+k];
resDepth[k]=devDepthArrayPtrB[stB+k];
}
numRes=numB;
}
// for "intersect" and "difference", keeping NULL will be fine
}
		else if ((numA>0) && (numB==0)) {	// scanning the samples on solidA only
if (nOperationType==0 || nOperationType==2) { // union and difference
for(k=0;k<numA;k++) {
resNx[k]=devNxArrayPtrA[stA+k];
resNy[k]=devNyArrayPtrA[stA+k];
resDepth[k]=devDepthArrayPtrA[stA+k];
}
numRes=numA;
}
}
//-------------------------------------------------------------------------------------------------------
// Copy the resultant samples into solidA and solidB
if (numRes>numA) {
for(k=0;k<numA;k++) {
devNxArrayPtrA[stA+k]=resNx[k];
devNyArrayPtrA[stA+k]=resNy[k];
devDepthArrayPtrA[stA+k]=resDepth[k];
}
for(k=numA;k<numRes;k++) {
devNxArrayPtrB[stB+k-numA]=resNx[k];
devNyArrayPtrB[stB+k-numA]=resNy[k];
devDepthArrayPtrB[stB+k-numA]=resDepth[k];
}
}
else {
for(k=0;k<numRes;k++) {
devNxArrayPtrA[stA+k]=resNx[k];
devNyArrayPtrA[stA+k]=resNy[k];
devDepthArrayPtrA[stA+k]=resDepth[k];
}
}
devIndexArrayPtrRes[index]=numRes;
index += blockDim.x * gridDim.x;
}
}
__global__ void krLDNIBoolean_ResultSampleCollection(float *devNxArrayPtrA, float *devNyArrayPtrA, float *devDepthArrayPtrA, unsigned int *devIndexArrayPtrA,
float *devNxArrayPtrRes, float *devNyArrayPtrRes, float *devDepthArrayPtrRes, unsigned int *devIndexArrayPtrRes, int arrsize, float width, float gwidth)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int stA,stRes,numRes,k,numA;
float depth, temp;
while(index<arrsize) {
stA=devIndexArrayPtrA[index]; numA=devIndexArrayPtrA[index+1]-stA;
stRes=devIndexArrayPtrRes[index]; numRes=devIndexArrayPtrRes[index+1]-stRes;
if (numRes>0) {
for(k=0;k<numRes;k++) {
devNxArrayPtrRes[stRes+k]=devNxArrayPtrA[stA+k];
devNyArrayPtrRes[stRes+k]=devNyArrayPtrA[stA+k];
depth = devDepthArrayPtrA[stA+k];
temp = fabs(depth)*width-gwidth*0.5f;
if (depth < 0)
devDepthArrayPtrRes[stRes+k]=-temp;
else
devDepthArrayPtrRes[stRes+k]=temp;
}
}
index += blockDim.x * gridDim.x;
}
}
__global__ void krLDNIBoolean_ResultSampleCollection(float *devNxArrayPtrA, float *devNyArrayPtrA, float *devDepthArrayPtrA, unsigned int *devIndexArrayPtrA,
float *devNxArrayPtrB, float *devNyArrayPtrB, float *devDepthArrayPtrB, unsigned int *devIndexArrayPtrB,
float *devNxArrayPtrRes, float *devNyArrayPtrRes, float *devDepthArrayPtrRes, unsigned int *devIndexArrayPtrRes, int arrsize)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int stA,numA,stB,stRes,numRes,k;
while(index<arrsize) {
stA=devIndexArrayPtrA[index]; numA=devIndexArrayPtrA[index+1]-stA;
stRes=devIndexArrayPtrRes[index]; numRes=devIndexArrayPtrRes[index+1]-stRes;
if (numRes>0) {
if (numRes>numA) {
for(k=0;k<numA;k++) {
devNxArrayPtrRes[stRes+k]=devNxArrayPtrA[stA+k];
devNyArrayPtrRes[stRes+k]=devNyArrayPtrA[stA+k];
devDepthArrayPtrRes[stRes+k]=devDepthArrayPtrA[stA+k];
}
stB=devIndexArrayPtrB[index];
for(k=numA;k<numRes;k++) {
devNxArrayPtrRes[stRes+k]=devNxArrayPtrB[stB+(k-numA)];
devNyArrayPtrRes[stRes+k]=devNyArrayPtrB[stB+(k-numA)];
devDepthArrayPtrRes[stRes+k]=devDepthArrayPtrB[stB+(k-numA)];
}
}
else {
for(k=0;k<numRes;k++) {
devNxArrayPtrRes[stRes+k]=devNxArrayPtrA[stA+k];
devNyArrayPtrRes[stRes+k]=devNyArrayPtrA[stA+k];
devDepthArrayPtrRes[stRes+k]=devDepthArrayPtrA[stA+k];
}
}
}
index += blockDim.x * gridDim.x;
}
}
__global__ void krLDNISampling_SortSamples(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int st,ed,i,j,n;
float nx[MAX_NUM_OF_SAMPLES_ON_RAY],ny[MAX_NUM_OF_SAMPLES_ON_RAY],depth[MAX_NUM_OF_SAMPLES_ON_RAY];
float tempnx,tempny,tempdepth;
// float auxNx[MAX_NUM_OF_SAMPLES_ON_RAY/2+1],auxNy[MAX_NUM_OF_SAMPLES_ON_RAY/2+1],auxDepth[MAX_NUM_OF_SAMPLES_ON_RAY/2+1]; // for merge-sort
// int lo,hi,m,k; // for merge-sort
while(index<arrsize) {
st=devIndexArrayPtr[index]; ed=devIndexArrayPtr[index+1]; n=ed-st;
//-----------------------------------------------------------------------------------------------------------
		// Load this ray's samples from global memory into local arrays
for(i=0;i<n;i++) nx[i]=devNxArrayPtr[st+i];
for(i=0;i<n;i++) ny[i]=devNyArrayPtr[st+i];
for(i=0;i<n;i++) depth[i]=devDepthArrayPtr[st+i];
//-----------------------------------------------------------------------------------------------------------
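		// simple in-place exchange sort by |depth|; n is bounded by MAX_NUM_OF_SAMPLES_ON_RAY,
		// so the O(n^2) cost per ray is acceptable here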
for(i=0;i<n;i++) {
for(j=i+1;j<n;j++) {
if (fabs(depth[i])>fabs(depth[j])) {
tempnx=nx[i]; nx[i]=nx[j]; nx[j]=tempnx;
tempny=ny[i]; ny[i]=ny[j]; ny[j]=tempny;
tempdepth=depth[i]; depth[i]=depth[j]; depth[j]=tempdepth;
}
}
}
//-----------------------------------------------------------------------------------------------------------
		// Write the sorted samples back to global memory
for(i=0;i<n;i++) devNxArrayPtr[st+i]=nx[i];
for(i=0;i<n;i++) devNyArrayPtr[st+i]=ny[i];
for(i=0;i<n;i++) devDepthArrayPtr[st+i]=depth[i];
index += blockDim.x * gridDim.x;
}
}
__global__ void krLDNISampling_CopySamples(float *devNxArrayPtr,
float *devNyArrayPtr, float *devDepthArrayPtr,
int n, int arrsize, float width, float sampleWidth, int res,
unsigned int *devIndexArrayPtr)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int arrindex, num, ix, iy;
float4 rgb; float temp;
while(index<arrsize) {
num=devIndexArrayPtr[index+1]-devIndexArrayPtr[index];
if (num>=n) {
arrindex=(int)(devIndexArrayPtr[index])+n-1;
ix=index%res; iy=(index/res);
rgb = tex2D(tex2DFloat4In, ix, iy);
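			// |rgb.z| appears to hold the fragment depth as a fraction of the viewing-volume
			// width (its sign carrying orientation information); it is rescaled to world units
			// and shifted by half a sample below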
temp=fabs(rgb.z)*width-sampleWidth*0.5f;
devNxArrayPtr[arrindex]=rgb.x; // x-component of normal
devNyArrayPtr[arrindex]=rgb.y; // y-component of normal
if (rgb.z<0) devDepthArrayPtr[arrindex]=-temp; else devDepthArrayPtr[arrindex]=temp;
}
index += blockDim.x * gridDim.x;
}
}
__global__ void krLDNISuperUnion_CopySamples(float *devNxArrayPtr,
float *devNyArrayPtr, float *devDepthArrayPtr,
int n, int arrsize, int res,
unsigned int *devIndexArrayPtr)
{
int index=threadIdx.x+blockIdx.x*blockDim.x;
int arrindex, num, ix, iy;
float4 rgb; //float temp;
while(index<arrsize) {
num=devIndexArrayPtr[index+1]-devIndexArrayPtr[index];
if (num>=n) {
arrindex=(int)(devIndexArrayPtr[index])+n-1;
ix=index%res; iy=(index/res);
rgb = tex2D(tex2DFloat4In, ix, iy);
devNxArrayPtr[arrindex]=rgb.x; // x-component of normal
devNyArrayPtr[arrindex]=rgb.y; // y-component of normal
devDepthArrayPtr[arrindex]=rgb.z;
}
index += blockDim.x * gridDim.x;
}
}
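// Copies the per-ray sample counts from the stencil buffer into the index array and, at the
// same time, reduces the maximum count within each block into devResArrayPtr (one entry per
// block); the host then takes the maximum over the blocks to obtain n_max.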
__global__ void krLDNISampling_CopyIndexAndFindMax(unsigned char *devStencilBufferPtr, unsigned int *devIndexArrayPtr,
unsigned int *devResArrayPtr, int arrsize )
{
__shared__ unsigned int cache[THREADS_PER_BLOCK];
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int cacheIndex=threadIdx.x;
unsigned int temp=0,temp2;
while(tid<arrsize) {
temp2=(unsigned int)(devStencilBufferPtr[tid]);
devIndexArrayPtr[tid]=temp2;
temp= MAX(temp, temp2);
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex]=temp;
// synchronize threads in this block
__syncthreads();
// for reductions, THREADS_PER_BLOCK must be a power of 2 because of the following code
int i = blockDim.x/2;
while (i!=0) {
if (cacheIndex < i) {cache[cacheIndex] = MAX(cache[cacheIndex], cache[cacheIndex+i]);}
__syncthreads();
i /= 2;
}
if (cacheIndex==0) devResArrayPtr[blockIdx.x] = cache[0];
}
///////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////
bool LDNIcudaOperation::ScaffoldBooleanOperation(LDNIcudaSolid* &outputSolid,QuadTrglMesh *UnitMesh, int UnitNum[], float UnitOff[], int UnitFlip[], int nRes, LDNIcudaSolid* savedSolid)
{
LDNIcudaSolid *solidB;
int res=nRes;
float boundingbox[6];
float UnitWidth[3];
boundingbox[0]=boundingbox[1]=boundingbox[2]=boundingbox[3]=boundingbox[4]=boundingbox[5]=0;
UnitMesh->CompBoundingBox(boundingbox);
UnitWidth[0] = boundingbox[1] - boundingbox[0] ;
UnitWidth[1] = boundingbox[3] - boundingbox[2] ;
UnitWidth[2] = boundingbox[5] - boundingbox[4] ;
boundingbox[1] = boundingbox[1] + (UnitNum[0]-1)*(UnitWidth[0]+UnitOff[0]);
boundingbox[3] = boundingbox[3] + (UnitNum[1]-1)*(UnitWidth[1]+UnitOff[1]);
boundingbox[5] = boundingbox[5] + (UnitNum[2]-1)*(UnitWidth[2]+UnitOff[2]);
float xx=(boundingbox[0]+boundingbox[1])*0.5f;
float yy=(boundingbox[2]+boundingbox[3])*0.5f;
float zz=(boundingbox[4]+boundingbox[5])*0.5f;
float ww=boundingbox[1]-boundingbox[0];
if ((boundingbox[3]-boundingbox[2])>ww) ww=boundingbox[3]-boundingbox[2];
if ((boundingbox[5]-boundingbox[4])>ww) ww=boundingbox[5]-boundingbox[4];
ww=ww*0.55+ww/(float)(res-1)*2.0;
boundingbox[0]=xx-ww; boundingbox[1]=xx+ww;
boundingbox[2]=yy-ww; boundingbox[3]=yy+ww;
boundingbox[4]=zz-ww; boundingbox[5]=zz+ww;
if (savedSolid!= NULL)
{
_expansionLDNIcudaSolidByNewBoundingBox(savedSolid, boundingbox);
res = savedSolid->GetResolution();
}
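	// The scaffold is sampled in four instanced passes that differ only in the
	// (bsingleRow, bsingleCol) flags passed to the shaders; the four partial solids are
	// combined into outputSolid by Boolean union.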
//even row + even column
InstancedBRepToLDNISampling(UnitMesh, outputSolid, boundingbox, res, UnitOff, UnitNum, UnitWidth, UnitFlip, false, true);
//even row + single column
InstancedBRepToLDNISampling(UnitMesh, solidB, boundingbox, res, UnitOff, UnitNum, UnitWidth, UnitFlip, false, false);
printf("-----------------------------------------------------------------------\n");
printf("Starting to compute Boolean operation\n");
printf("-----------------------------------------------------------------------\n");
_booleanOperation(outputSolid, solidB, 0);
//LDNIcudaSolid *solidA;
InstancedBRepToLDNISampling(UnitMesh, solidB, boundingbox, res, UnitOff, UnitNum, UnitWidth, UnitFlip, true, true);
_booleanOperation(outputSolid, solidB, 0);
InstancedBRepToLDNISampling(UnitMesh, solidB, boundingbox, res, UnitOff, UnitNum, UnitWidth, UnitFlip, true, false);
_booleanOperation(outputSolid, solidB, 0);
//even row + single column
//InstancedBRepToLDNISampling(UnitMesh, solidA, boundingbox, res, UnitOff, UnitNum, UnitWidth, UnitFlip, false, false);
outputSolid->SetBoundingBox(boundingbox);
//-----------------------------------------------------------------------------------
// Step 4: free the memory
delete solidB;
return true;
}
bool LDNIcudaOperation::InstancedBRepToLDNISampling(QuadTrglMesh *mesh, LDNIcudaSolid* &solid, float boundingBox[], int res, float UnitOff[], int UnitNum[], float UnitWidth[], int UnitFlip[], bool bsingleRow, bool bsingleCol)
{
const bool bCube=true;
float origin[3],gWidth; long time=clock(),totalTime=clock();
int i,nodeNum,faceNum;
char fileadd[256];
solid=new LDNIcudaSolid;
solid->MallocMemory(res);
gWidth=(boundingBox[1]-boundingBox[0])/(float)res;
solid->SetSampleWidth(gWidth);
origin[0]=boundingBox[0]+gWidth*0.5f;
origin[1]=boundingBox[2]+gWidth*0.5f;
origin[2]=boundingBox[4]+gWidth*0.5f;
solid->SetOrigin(origin[0],origin[1],origin[2]);
//---------------------------------------------------------------------------------
// For using OpenGL Shading Language to implement the sampling procedure
if (glewInit() != GLEW_OK) {printf("glewInit failed. Exiting...\n"); return false;}
if (glewIsSupported("GL_VERSION_2_0")) {printf("\nReady for OpenGL 2.0\n");} else {printf("OpenGL 2.0 not supported\n"); return false;}
//-----------------------------------------------------------------------------------------
GLhandleARB g_programObj, g_vertexShader, g_GeometryShader, g_FragShader;
const char *VshaderString[1],*GshaderString[1], *FshaderString[1];
GLint bCompiled = 0, bLinked = 0;
GLuint vbo, vboInd;
char str[4096] = "";
//-----------------------------------------------------------------------------------------
// Step 1: Setup the shaders
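	// the ScaffoldLDNI* vertex/geometry/fragment shader sources are read from files with
	// relative names by _readShaderFile() and compiled/linked at run time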
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"ScaffoldLDNIVertexShader.vert");
g_vertexShader = glCreateShaderObjectARB( GL_VERTEX_SHADER_ARB );
unsigned char *ShaderAssembly = _readShaderFile( fileadd );
VshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_vertexShader, 1, VshaderString, NULL );
glCompileShaderARB( g_vertexShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_vertexShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_vertexShader, sizeof(str), NULL, str);
printf("Warning: Vertex Shader Compile Error \n%s\n",str); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"ScaffoldLDNIGeometryShader.geo");
g_GeometryShader = glCreateShaderObjectARB( GL_GEOMETRY_SHADER_EXT );
ShaderAssembly = _readShaderFile( fileadd );
GshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_GeometryShader, 1, GshaderString, NULL );
glCompileShaderARB( g_GeometryShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_GeometryShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_GeometryShader, sizeof(str), NULL, str);
printf("Warning: Geo Shader Compile Error\n%s\n",str); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"ScaffoldLDNIFragmentShader.frag");
g_FragShader = glCreateShaderObjectARB( GL_FRAGMENT_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
FshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_FragShader, 1, FshaderString, NULL );
glCompileShaderARB( g_FragShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_FragShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_FragShader, sizeof(str), NULL, str);
printf("Warning: Vertex Shader Compile Error\n\n"); return false;
}
g_programObj = glCreateProgramObjectARB();
if (glGetError()!=GL_NO_ERROR) printf("Error: OpenGL!\n\n");
glAttachObjectARB( g_programObj, g_vertexShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Vertex Shader!\n\n");
glAttachObjectARB( g_programObj, g_GeometryShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Geometry Shader!\n\n");
glAttachObjectARB( g_programObj, g_FragShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Fragment Shader!\n\n");
//-----------------------------------------------------------------------------
// Configuration setting for geometry shader
glLinkProgramARB( g_programObj);
glGetObjectParameterivARB( g_programObj, GL_OBJECT_LINK_STATUS_ARB, &bLinked );
if( bLinked == false ) {
glGetInfoLogARB( g_programObj, sizeof(str), NULL, str );
printf("Linking Fail: %s\n",str); return false;
}
//-----------------------------------------------------------------------------------------
// Step 2: creating vertex and index array buffer
	glGetError();	// clear any error generated earlier
nodeNum=mesh->GetNodeNumber();
faceNum=mesh->GetFaceNumber();
float* verTex=(float*)malloc(nodeNum*3*sizeof(float));
memset(verTex,0,nodeNum*3*sizeof(float));
memcpy(verTex,mesh->GetNodeArrayPtr(),nodeNum*3*sizeof(float));
int* inDex=(int*)malloc(faceNum*3*sizeof(int));
memset(inDex,0,faceNum*3*sizeof(int));
unsigned int* meshptr = mesh->GetFaceTablePtr();
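	// the face table stores four 1-based vertex indices per face; only the first three are used
	// here to build a 0-based triangle index buffer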
for(int i=0; i < faceNum; i++)
{ inDex[3*i] = meshptr[4*i]-1; inDex[3*i+1] = meshptr[4*i+1]-1; inDex[3*i+2] = meshptr[4*i+2]-1;
}
//memcpy(inDex,mesh->GetFaceTablePtr(),faceNum*3*sizeof(int));
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, nodeNum*3*sizeof(GLfloat), 0, GL_STATIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, nodeNum*3*sizeof(GLfloat), verTex);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glGenBuffers(1, &vboInd);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER_ARB, vboInd);
glBufferData(GL_ELEMENT_ARRAY_BUFFER_ARB, faceNum*3*sizeof(GL_UNSIGNED_INT), 0, GL_STATIC_DRAW);
glBufferSubData(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, faceNum*3*sizeof(GL_UNSIGNED_INT), inDex);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
if (glGetError()!=GL_NO_ERROR) printf("Error: buffer binding!\n\n");
free(verTex);
free(inDex);
//-----------------------------------------------------------------------------------------
GLint id0,id1,id2,id3,id4,id5,id6;
float centerPos[3];
centerPos[0]=(boundingBox[0]+boundingBox[1])*0.5f;
centerPos[1]=(boundingBox[2]+boundingBox[3])*0.5f;
centerPos[2]=(boundingBox[4]+boundingBox[5])*0.5f;
glUseProgramObjectARB(g_programObj);
{
id0 = glGetUniformLocationARB(g_programObj,"Unum");
glUniform3iARB(id0,UnitNum[0],UnitNum[1],UnitNum[2]);
id1 = glGetUniformLocationARB(g_programObj,"UOff");
glUniform3fARB(id1,UnitOff[0],UnitOff[1],UnitOff[2]);
id2 = glGetUniformLocationARB(g_programObj,"UWidth");
glUniform3fARB(id2,UnitWidth[0],UnitWidth[1],UnitWidth[2]);
id3 = glGetUniformLocationARB(g_programObj,"UFlip");
glUniform3iARB(id3,UnitFlip[0],UnitFlip[1],UnitFlip[2]);
id4 = glGetUniformLocationARB(g_programObj,"Cent");
glUniform3fARB(id4,centerPos[0],centerPos[1],centerPos[2]);
id5 = glGetUniformLocationARB(g_programObj,"bsingleCol");
glUniform1iARB(id5,bsingleCol);
id6 = glGetUniformLocationARB(g_programObj,"bsingleRow");
glUniform1iARB(id6,bsingleRow);
if (glGetError()!=GL_NO_ERROR) printf("Error: Unit Constant !\n\n");
_decomposeLDNIByFBOPBO(solid, vbo, vboInd, UnitNum[0]*UnitNum[1]*UnitNum[2], faceNum*3);
}
glUseProgramObjectARB(0);
//-----------------------------------------------------------------------------------------
// Step 6: free the memory
time=clock();
//-----------------------------------------------------------------------------------------
glDeleteBuffers(1, &vboInd);
glDeleteBuffers(1, &vbo);
glDeleteObjectARB( g_vertexShader);
glDeleteObjectARB( g_GeometryShader);
glDeleteObjectARB( g_FragShader);
glDeleteObjectARB( g_programObj);
//------------------------------------------------------------------------
printf("\nMemory clean-up time is %ld (ms)\n",clock()-time);
printf("--------------------------------------------------------------\n");
printf("Total time for sampling is %ld (ms)\n\n",clock()-totalTime);
return true;
}
void LDNIcudaOperation::_decomposeLDNIByFBOPBO(LDNIcudaSolid *solid, GLuint vbo, GLuint vboI, int instanceCount, int indexCount)
{
unsigned int n_max,i,n;
float gWidth,origin[3];
unsigned int overall_n_max=0;
long readbackTime=0, sortingTime=0, tempTime;
cudaEvent_t startClock, stopClock;
CUDA_SAFE_CALL( cudaEventCreate( &startClock ) );
CUDA_SAFE_CALL( cudaEventCreate( &stopClock ) );
tempTime=clock();
//------------------------------------------------------------------------
// Preparation
int nRes=solid->GetResolution(); gWidth=solid->GetSampleWidth();
float width=gWidth*(float)nRes;
solid->GetOrigin(origin[0],origin[1],origin[2]);
int arrsize=nRes*nRes;
//------------------------------------------------------------------------
// Step 1: Setup the rendering environment
glEnable(GL_DEPTH_TEST);
glEnable(GL_STENCIL_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glDisable(GL_POLYGON_OFFSET_FILL);
glDisable(GL_POLYGON_OFFSET_LINE);
glDisable(GL_BLEND);
glDisable(GL_POLYGON_SMOOTH); // turn off anti-aliasing
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
glDisable(GL_MAP_COLOR); glDisable(GL_DITHER);
glShadeModel(GL_FLAT);
glDisable(GL_LIGHTING); glDisable(GL_LIGHT0);
glDisable(GL_LOGIC_OP);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_ALPHA_TEST);
	glGetError();	// clear any error generated earlier
//------------------------------------------------------------------------
// create the FBO objects and texture for rendering
if (glewIsSupported("GL_EXT_framebuffer_object") == 0) printf("Warning: FBO is not supported!\n");
if (glGetError()!=GL_NO_ERROR) printf("Error: before framebuffer generation!\n");
//------------------------------------------------------------------------
GLuint fbo;
glGenFramebuffersEXT(1, &fbo);
if (glGetError()!=GL_NO_ERROR) printf("Error: framebuffer generation!\n");
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
if (glGetError()!=GL_NO_ERROR) printf("Error: framebuffer binding!\n");
//------------------------------------------------------------------------
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F_ARB, nRes, nRes, 0, GL_RGBA, GL_FLOAT, 0);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, tex, 0);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching texture to framebuffer generation!\n");
cudaGraphicsResource *sampleTex_resource;
CUDA_SAFE_CALL( cudaGraphicsGLRegisterImage(&sampleTex_resource, tex, GL_TEXTURE_2D, cudaGraphicsMapFlagsReadOnly) );
//------------------------------------------------------------------------
GLuint depth_and_stencil_rb;
glGenRenderbuffersEXT(1, &depth_and_stencil_rb);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_STENCIL_EXT, nRes, nRes);
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching renderbuffer of depth-buffer to framebuffer generation!\n");
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, depth_and_stencil_rb);
if (glGetError()!=GL_NO_ERROR) printf("Error: attaching renderbuffer of stencil-buffer to framebuffer generation!\n");
//------------------------------------------------------------------------
GLuint indexPBO;
glGenBuffers(1,&indexPBO); // generation of PBO for index array readback
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, indexPBO);
glBufferData(GL_PIXEL_PACK_BUFFER_ARB, nRes*nRes*sizeof(unsigned char), NULL, GL_STREAM_READ_ARB);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL( cudaGLRegisterBufferObject(indexPBO) );
//------------------------------------------------------------------------
if (glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT)!=GL_FRAMEBUFFER_COMPLETE_EXT)
printf("Warning: the setting for rendering on FBO is not correct!\n");
else
printf("FBO has been created successfully!\n");
glPushAttrib(GL_VIEWPORT_BIT);
glViewport(0,0,nRes,nRes);
printf("Preparation time: %ld (ms)\n",clock()-tempTime);
//------------------------------------------------------------------------
// Step 2: Rendering to get the Hermite samples
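	// Each axis is processed by stencil-controlled peeling: the first pass counts the fragments
	// per pixel (the per-ray sample counts read back below), and every later pass lets only the
	// first n fragments of a pixel through so that the colour attachment ends up holding the
	// n-th fragment of every ray; since fragments arrive in primitive order rather than depth
	// order, the samples are depth-sorted afterwards by krLDNISampling_SortSamples().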
for(short nAxis=0; nAxis<3; nAxis++) {
//---------------------------------------------------------------------------------------
// Rendering step 1: setting the viewing window
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
//---------------------------------------------------------------------------------------
		// The eye is located at (0,0,0) and the orthographic viewing volume spans
		// [-width*0.5, +width*0.5] along all three axes (see the glOrtho() call below)
glOrtho(-width*0.5f,width*0.5f,-width*0.5f,width*0.5f,width*0.5f,-width*0.5f);
		// Note that in "glOrtho(left,right,bottom,top,near,far)"
		// (left,right,bottom,top) are located at the pixel boundaries instead of
		// the pixel centers
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
//---------------------------------------------------------------------------------------
// Rendering step 2: determine the number of layers
glClearColor( 1.0f, 1.0f, 1.0f, 1.0f );
glClearDepth(1.0);
glClearStencil(0); glColor3f(1,1,1);
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDepthFunc(GL_ALWAYS);
glStencilFunc(GL_GREATER, 1, 0xff);
glStencilOp(GL_INCR, GL_INCR, GL_INCR);
glPushMatrix();
switch(nAxis) {
case 0:{glRotatef(-90,0,1,0); glRotatef(-90,1,0,0); }break;
case 1:{glRotatef(90,0,1,0); glRotatef(90,0,0,1); }break;
}
glEnableClientState( GL_VERTEX_ARRAY );
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo);
glVertexPointer(3, GL_FLOAT, 0, 0);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, vboI);
glDrawElementsInstanced(GL_TRIANGLES,indexCount,GL_UNSIGNED_INT, 0 ,instanceCount);
glDisableClientState( GL_VERTEX_ARRAY );
glFlush();
//--------------------------------------------------------------------------------------------------------
// reading stencil buffer into the device memory of CUDA
tempTime=clock();
glReadBuffer(GL_COLOR_ATTACHMENT0_EXT);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, indexPBO);
GLint OldPackAlignment;
glGetIntegerv(GL_PACK_ALIGNMENT,&OldPackAlignment);
glPixelStorei(GL_PACK_ALIGNMENT,1); // Important!!! Without this, the read-back could be abnormal.
glReadPixels(0,0,nRes,nRes,GL_STENCIL_INDEX,GL_UNSIGNED_BYTE,0);
glPixelStorei(GL_PACK_ALIGNMENT,OldPackAlignment);
//--------------------------------------------------------------------------------------------------------
unsigned char *devStencilBufferPtr;
unsigned int *devResArrayPtr;
unsigned int *devIndexArrayPtr=solid->GetIndexArrayPtr(nAxis);
CUDA_SAFE_CALL( cudaGLMapBufferObject( (void **)&devStencilBufferPtr, indexPBO) );
CUDA_SAFE_CALL( cudaMalloc( (void**)&devResArrayPtr, BLOCKS_PER_GRID*sizeof(unsigned int) ) );
//--------------------------------------------------------------------------------------------------------
// building the indexArray on device
krLDNISampling_CopyIndexAndFindMax<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(devStencilBufferPtr,
devIndexArrayPtr,devResArrayPtr,arrsize);
//--------------------------------------------------------------------------------------------------------
// read back the max number of layers -- "n_max"
unsigned int* resArrayPtr;
resArrayPtr=(unsigned int *)malloc(BLOCKS_PER_GRID*sizeof(unsigned int));
CUDA_SAFE_CALL( cudaMemcpy( resArrayPtr, devResArrayPtr, BLOCKS_PER_GRID*sizeof(unsigned int), cudaMemcpyDeviceToHost ) );
n_max=0;
for(i=0;i<BLOCKS_PER_GRID;i++) n_max = MAX(n_max,resArrayPtr[i]);
cudaFree(devResArrayPtr); free(resArrayPtr);
//--------------------------------------------------------------------------------------------------------
// read back the number of samples -- "sampleNum"
unsigned int sampleNum=0;
tempTime=clock()-tempTime; //readbackTime+=tempTime;
printf("Stencil buffer processing time: %ld (ms)\n",tempTime);
long scanTime=clock();
		// exclusive prefix-sum (scan) of the per-ray sample counts, performed on the CPU here
resArrayPtr=(unsigned int *)malloc((arrsize+1)*sizeof(unsigned int));
CUDA_SAFE_CALL( cudaMemcpy( resArrayPtr, devIndexArrayPtr, (arrsize+1)*sizeof(unsigned int), cudaMemcpyDeviceToHost ) );
sampleNum=0;
for(int k=0;k<arrsize;k++) {sampleNum+=resArrayPtr[k]; resArrayPtr[k]=sampleNum;}
for(int k=arrsize;k>0;k--) {resArrayPtr[k]=resArrayPtr[k-1];}
resArrayPtr[0]=0;
CUDA_SAFE_CALL( cudaMemcpy( devIndexArrayPtr, resArrayPtr, (arrsize+1)*sizeof(unsigned int), cudaMemcpyHostToDevice ) );
free(resArrayPtr);
scanTime=clock()-scanTime; printf("Scanning time: %ld (ms)\n",scanTime);
//--------------------------------------------------------------------------------------------------------
CUDA_SAFE_CALL( cudaGLUnmapBufferObject( indexPBO ) );
glUnmapBuffer(GL_PIXEL_PACK_BUFFER_ARB);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
printf("n_max=%d sampleNum=%d\n",n_max,sampleNum);
if (n_max>overall_n_max) overall_n_max=n_max;
if (sampleNum==0) continue;
//---------------------------------------------------------------------------------------
// Rendering step 3: decomposing the Layered Depth Images (LDIs) and record its corresponding normals
solid->MallocSampleMemory(nAxis,sampleNum);
float* devNxArrayPtr=solid->GetSampleNxArrayPtr(nAxis);
float* devNyArrayPtr=solid->GetSampleNyArrayPtr(nAxis);
float* devDepthArrayPtr=solid->GetSampleDepthArrayPtr(nAxis);
tempTime=clock();
for(n=1;n<=n_max;n++) {
CUDA_SAFE_CALL( cudaGraphicsMapResources( 1, &sampleTex_resource, NULL ) );
cudaArray *in_array;
CUDA_SAFE_CALL( cudaGraphicsSubResourceGetMappedArray( &in_array, sampleTex_resource, 0, 0));
CUDA_SAFE_CALL( cudaBindTextureToArray(tex2DFloat4In, in_array) );
//--------------------------------------------------------------------------------------------------------
// fill the sampleArray on device
krLDNISampling_CopySamples<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(devNxArrayPtr, devNyArrayPtr,
devDepthArrayPtr, n, arrsize, width, gWidth, nRes, devIndexArrayPtr);
CUDA_SAFE_CALL( cudaGraphicsUnmapResources( 1, &sampleTex_resource, NULL ) );
if (n==n_max) break;
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glStencilFunc(GL_GREATER, n+1, 0xff);
glStencilOp(GL_KEEP, GL_INCR, GL_INCR);
{
glEnableClientState( GL_VERTEX_ARRAY );
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo);
glVertexPointer(3, GL_FLOAT, 0, 0);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, vboI);
glDrawElementsInstanced(GL_TRIANGLES,indexCount,GL_UNSIGNED_INT, 0 ,instanceCount);
glDisableClientState( GL_VERTEX_ARRAY );
}
glFlush();
}
tempTime=clock()-tempTime; readbackTime+=tempTime;
//------------------------------------------------------------------------
// Rendering step 4: sorting the samples
CUDA_SAFE_CALL( cudaEventRecord( startClock, 0 ) );
CUDA_SAFE_CALL( cudaEventSynchronize( startClock ) );
krLDNISampling_SortSamples<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(devNxArrayPtr, devNyArrayPtr,
devDepthArrayPtr, arrsize, devIndexArrayPtr);
CUDA_SAFE_CALL( cudaEventRecord( stopClock, 0 ) );
CUDA_SAFE_CALL( cudaEventSynchronize( stopClock ) );
float elapsedTime;
CUDA_SAFE_CALL( cudaEventElapsedTime( &elapsedTime,
startClock, stopClock ) );
// printf( "Sorting time is: %3.1f (ms)\n", elapsedTime );
sortingTime+=(long)elapsedTime;
}
//------------------------------------------------------------------------------------
// Step 3: Set the rendering parameters back
//------------------------------------------------------------------------------------
// detach FBO
glPopAttrib();
// release memory for PBO and cuda's map
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL( cudaGLUnregisterBufferObject( indexPBO ) );
glDeleteBuffers(1, &indexPBO);
CUDA_SAFE_CALL( cudaGraphicsUnregisterResource( sampleTex_resource) );
// release memory for the 2D texture
glBindTexture(GL_TEXTURE_2D, 0);
glDeleteTextures(1, &tex);
// release memory for the frame-buffer object
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
glDeleteFramebuffersEXT(1, &fbo);
// release memory for the render-buffer object
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, 0);
glDeleteRenderbuffersEXT(1, &depth_and_stencil_rb);
//------------------------------------------------------------------------------------
glEnable(GL_POLYGON_OFFSET_FILL);
glEnable(GL_POLYGON_OFFSET_LINE);
glEnable(GL_BLEND);
glEnable(GL_DITHER);
glDisable(GL_STENCIL_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_MAP_COLOR);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHTING); glEnable(GL_LIGHT0);
// glEnable(GL_POLYGON_SMOOTH);// adding this will make the invalid display on the Thinkpad laptop
glEnable(GL_POINT_SMOOTH);
// glEnable(GL_LINE_SMOOTH); // adding this will make the Compaq laptop's running fail
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
printf("\nn_max=%ld \n",overall_n_max);
printf("Texture Size: %f (MB)\n",(float)((float)overall_n_max*(float)nRes*(float)nRes*7.0f)/(1024.0f*1024.0f));
printf("Readback time: %ld (ms)\nSorting time: %ld (ms)\n",
readbackTime, sortingTime);
CUDA_SAFE_CALL( cudaEventDestroy( startClock ) );
CUDA_SAFE_CALL( cudaEventDestroy( stopClock ) );
}
//--------------------------------------------------------------------------------------------
// adaptive slicing related
extern __global__ void krLDNIAdaptiveSlicing_CalculateRayLength(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr, float *devRayLengthArrayPtr, int res) {
int index = threadIdx.x + blockIdx.x*blockDim.x;
	int st, num;
float length = 0.0f;
while (index<arrsize) {
st = devIndexArrayPtr[index]; num = devIndexArrayPtr[index + 1] - st;
for (int k = 0; k < num - 1; k = k + 2) {
length += fabs(devDepthArrayPtr[k +1 + st]) - fabs(devDepthArrayPtr[k + st]);
}
devRayLengthArrayPtr[index] = length;
index += blockDim.x * gridDim.x;
}
}
extern __global__ void krLDNIAdaptiveSlicing_CalculateLayerArea(float *devRayLengthArrayPtr, unsigned int *devIndexArrayPtr, float *devAreaArrayPtr, int res) {
int index = threadIdx.x + blockIdx.x*blockDim.x;
float area = 0.0f;
while (index < res) {
for (int k = 0; k < res; k++) {
area += devRayLengthArrayPtr[k*res+index];
}
devAreaArrayPtr[index] = area;
index += blockDim.x * gridDim.x;
}
}
extern __global__ void krLDNIAdaptiveSlicing_CalculateVolumeErrorPerRow(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr, float *devVolumeErrorMatrix, int res, int minSliceCount, int totalSliceCount, int oneLayerSliceCount, float sliceSize, float y_min, float oy, float ww) {
int index = threadIdx.x + blockIdx.x*blockDim.x;
int iRow = index;
float raySliceArray[MAX_NUM_OF_SLICE_ON_RAY]; // temp array to record the 0/1 value for each slice on a ray
	float raySliceCumArray[MAX_NUM_OF_SLICE_ON_RAY]; // temp array to record the cumulative 0/1 value from the first slice through each slice on a ray
while (index < oneLayerSliceCount) {
int buildingAxis = 1;
int i = 0;
int j = 0;
float temp = oy;
for (int j = 0; j<res; j++) {
for (int i = 0; i<res; i++) {
// process each ray
int index_ray = j*res + i;
float inside = 0.0f;
int pre_slice = 0;
unsigned int now_slice = 0;
raySliceArray[0] = 0.0f;
for (int is = 0; is < totalSliceCount; is++) {
raySliceArray[is] = raySliceArray[0] + 1.0f;
raySliceCumArray[is] = 1.0f;
}
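				// Walk the sorted samples on this ray: each sample toggles the inside/outside
				// state, and every slice between consecutive samples inherits that state
				// (raySliceCumArray keeps a running count of solid slices).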
for (int k = devIndexArrayPtr[index_ray]; k<devIndexArrayPtr[index_ray + 1]; k++) {
					now_slice = floor((oy + fabs(devDepthArrayPtr[k]) - y_min) / sliceSize);
now_slice = MIN(now_slice, totalSliceCount - 1); now_slice = MAX(now_slice, 0);
for (int is = pre_slice + 1; is <= now_slice; is++) {
raySliceArray[is] = inside;
raySliceCumArray[is] = raySliceCumArray[is - 1] + inside;
}
pre_slice = now_slice;
inside = 1 - inside;
}
// the last point on the ray to the top slice
for (int is = pre_slice; is+1 <totalSliceCount; is++) {
raySliceArray[is] = 0;
raySliceCumArray[is] = raySliceCumArray[is - 1];
}
//// add to the volume error matrix
for (int iSlice = 0; iSlice < totalSliceCount; iSlice++) {
int ivol = iSlice + iRow*totalSliceCount;
if (iSlice - iRow - minSliceCount >= 0) {
bool isThisLayerSolid = raySliceArray[iSlice - iRow - minSliceCount + 1] == 1;
if (isThisLayerSolid) {
// the error is number of non-solid voxels
devVolumeErrorMatrix[ivol] += (iRow + minSliceCount) - (raySliceCumArray[iSlice] - raySliceCumArray[iSlice - iRow - minSliceCount]);
}
else {
devVolumeErrorMatrix[ivol] += raySliceCumArray[iSlice] - raySliceCumArray[iSlice - iRow - minSliceCount];
}
}
}
}
}
index += blockDim.x * gridDim.x;
}
}
extern __global__ void krLDNIAdaptiveSlicing_CalculateVolumeErrorPerTile(float *devNxArrayPtr, float *devNyArrayPtr, float *devDepthArrayPtr,
int arrsize, unsigned int *devIndexArrayPtr, float *devVolumeErrorMatrix, int res, int minSliceCount, int totalSliceCount, int oneLayerSliceCount, float sliceSize, float y_min, float oy, float ww, int tileCount) {
int index = threadIdx.x + blockIdx.x*blockDim.x;
int iTile = index;
float raySliceArray[MAX_NUM_OF_SLICE_ON_RAY]; // temp array to record the 0/1 value for each slice on a ray
	float raySliceCumArray[MAX_NUM_OF_SLICE_ON_RAY]; // temp array to record the cumulative 0/1 value from the first slice through each slice on a ray
int rayCountPerTile = arrsize / tileCount + 1;
while (index < tileCount) {
if (rayCountPerTile*index >= arrsize)
break;
//initialize;
for (int ivol = iTile*totalSliceCount*oneLayerSliceCount; ivol < (iTile + 1)*totalSliceCount*oneLayerSliceCount; ivol++) {
devVolumeErrorMatrix[ivol] = 0.0f;
}
for (int index_ray = iTile*rayCountPerTile; index_ray < MIN((iTile+1)*rayCountPerTile,arrsize); index_ray++) {
// process each ray
float inside = 0.0f;
int pre_slice = 0;
int now_slice = 0;
for (int is = 0; is < totalSliceCount; is++) {
raySliceArray[is] = 0.0f;
raySliceCumArray[is] = 0.0f;
}
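			// Walk the sorted samples on this ray: each sample toggles the inside/outside
			// state, and every slice between consecutive samples inherits that state
			// (raySliceCumArray keeps a running count of solid slices).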
for (int k = devIndexArrayPtr[index_ray]; k<devIndexArrayPtr[index_ray + 1]; k++) {
now_slice = floor((oy + fabs(devDepthArrayPtr[k]) - y_min) / sliceSize);
now_slice = MIN(now_slice, totalSliceCount - 1); now_slice = MAX(now_slice, 0);
for (int is = pre_slice + 1; is <= now_slice; is++) {
raySliceArray[is] = inside;
raySliceCumArray[is] = raySliceCumArray[is - 1] + inside;
}
pre_slice = now_slice;
inside = 1 - inside;
}
// the last point on the ray to the top slice
for (int is = pre_slice + 1; is < totalSliceCount; is++) {
raySliceArray[is] = 0;
raySliceCumArray[is] = raySliceCumArray[is - 1];
}
//// add to the volume error matrix
for (int ivol = iTile*totalSliceCount*oneLayerSliceCount; ivol < (iTile + 1)*totalSliceCount*oneLayerSliceCount; ivol++) {
int iSlice = (ivol % (totalSliceCount*oneLayerSliceCount)) % totalSliceCount;
int iRow = (ivol % (totalSliceCount*oneLayerSliceCount)) / totalSliceCount;
if (iSlice - iRow - minSliceCount >= 0) {
bool isThisLayerSolid = raySliceArray[iSlice - iRow - minSliceCount + 1] == 1;
if (isThisLayerSolid) {
// the error is number of non-solid voxels
devVolumeErrorMatrix[ivol] += (iRow + minSliceCount) - (raySliceCumArray[iSlice] - raySliceCumArray[iSlice - iRow - minSliceCount]);
}
else {
devVolumeErrorMatrix[ivol] += raySliceCumArray[iSlice] - raySliceCumArray[iSlice - iRow - minSliceCount];
}
}
}
}
index += blockDim.x * gridDim.x;
}
}
extern __global__ void krLDNIAdaptiveSlicing_ReduceVolumeErrorByTile(float *devVolumeErrorMatrix, int oneLayerSliceCount, int totalSliceCount, int tileCount) {
int index = threadIdx.x + blockIdx.x*blockDim.x;
while (index < oneLayerSliceCount*totalSliceCount) {
for (int i = 1; i < tileCount; i++) {
devVolumeErrorMatrix[index] += devVolumeErrorMatrix[i*oneLayerSliceCount*totalSliceCount + index];
}
index += blockDim.x * gridDim.x;
}
} |
b95c4810f3c0f6bbdeeb0cb1e25a18bf3b55ec66.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <system/pointercast.h>
#include <types/types.h>
#include <types/float16.h>
#include <system/op_boilerplate.h>
#include <loops/summarystatsreduce.h>
#include <helpers/shape.h>
#include <helpers/TAD.h>
#include <system/dll.h>
#include <system/Environment.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helpers/DebugHelper.h>
#include <ops/specials_cuda.h>
using namespace simdOps;
namespace functions {
namespace summarystats {
template <typename X, typename Z>
void _CUDA_G summaryStatsReduceT(int op, void const* dx, Nd4jLong const* xShapeInfo, int xRank, void *extraParams, void *z, Nd4jLong const* zShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets) {
functions::summarystats::SummaryStatsReduce<X,Z>::transform(op,dx,xShapeInfo,extraParams,z,zShapeInfo,dimension,dimensionLength,biasCorrected,allocationBuffer,reductionBuffer,tadOnlyShapeInfo,tadOffsets);
}
/**
*
* @param sPartialsRef
* @param tid
* @param extraParams
*/
template<typename X, typename Z>
template<typename OpType>
_CUDA_D void SummaryStatsReduce<X,Z>::aggregatePartials(SummaryStatsData<X> *sPartials, Nd4jLong tid, Nd4jLong numElements, void *vextraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
auto extraParams = static_cast<Z*>(vextraParams);
Nd4jLong floorPow2 = blockDim.x;
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1)) {
floorPow2 &= floorPow2 - 1;
}
if (tid >= floorPow2) {
SummaryStatsData<X> prev = sPartials[tid - floorPow2];
SummaryStatsData<X> curr = sPartials[tid];
sPartials[tid - floorPow2] = update(prev, curr, extraParams);
}
__syncthreads();
}
for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numElements) {
SummaryStatsData<X> curr = sPartials[tid];
SummaryStatsData<X> next = sPartials[tid + activeThreads];
sPartials[tid] = update(curr, next, extraParams);
}
__syncthreads();
}
};
/**
* @param n n is the number of
* elements to loop through
* @param dx the data to operate on
* @param xVectorInfo the meta data for the vector:
* 0 is the offset
* 1 is the increment/stride
* 2 is the real length of the buffer (n and dx.length won't always be the same)
* 3 is the element wise stride for the buffer
* 4 is the number of elements it takes to get to the next row/column/tensor
* @param gpuInformation
* 0 is the block size
* 1 is the grid size
* 2 is the shared memory size
* @param problemDefinition
* 0 is the number of elements per vector
* 1 is the number of vectors
*/
template<typename X, typename Z>
template<typename OpType>
_CUDA_D void SummaryStatsReduce<X,Z>::transform(void const* vx, Nd4jLong const* xShapeInfo,
void *vextraParams,
void *vz, Nd4jLong const* zShapeInfo,
int *dimension, int dimensionLength,
int postProcessOrNot,
int *allocationBuffer, void *vreductionBuffer,
Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets) {
auto dx = static_cast<X const*>(vx);
auto z = static_cast<Z*>(vz);
auto extraParams = static_cast<Z*>(vextraParams);
auto reductionBuffer = static_cast<Z*>(vreductionBuffer);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ volatile int resultScalar;
__shared__ int xElementWiseStride;
int numElements = blockDim.x;
//shared memory space for storing intermediate results
__shared__ SummaryStatsData<X> sPartials[CUDA_BLOCK_SIZE];
Z startingVal = startingValue(dx);
SummaryStatsData<X> val;
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
//length for the tad
__shared__ volatile int xLength;
__shared__ volatile int resultLength;
SummaryStatsData<X> reduction;
reduction.initWithValue(0.0);
reduction.n = 0;
if (threadIdx.x == 0) {
if (zShapeInfo != nullptr)
resultLength = shape::length(zShapeInfo);
else resultLength = 1;
if (dimensionLength == 1) {
if (resultLength == 1 && (dimension == nullptr || dimension[0] == MAX_DIMENSION))
resultScalar = 1;
else
resultScalar = 0;
}
else
resultScalar = 0;
if (resultLength == 1)
resultScalar = 1;
auto xStride = shape::stride(xShapeInfo);
auto xOrder = shape::order(xShapeInfo);
if (dimension != nullptr && (dimension[0] != MAX_DIMENSION && dimensionLength == 1)) {
xElementWiseStride = xStride[dimension[0]];
}
else {
xElementWiseStride = shape::elementWiseStride(xShapeInfo);
}
xLength = shape::length(xShapeInfo);
}
__syncthreads();
if (!resultScalar) {
__shared__ int tadLength;
__shared__ int tadEWS;
__shared__ int numTads;
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
}
__syncthreads();
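            // tadEWS == 0 means the TAD has no constant element-wise stride, so each
            // element offset has to be resolved through shape::getIndexOffset().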
if (tadEWS == 0) {
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto tadOffsetForBlock = tadOffsets[r];
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo);
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[xOffset]);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[r] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]);
}
__syncthreads();
}
}
else {
for (int i = blockIdx.x; i < numTads; i += gridDim.x) {
auto tadOffsetForBlock = tadOffsets[i];
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int x = threadIdx.x; x < tadLength; x += blockDim.x) {
auto indexX = tadOffsetForBlock + x * tadEWS;
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[indexX]);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[i] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); //postProcess(sPartials[0],tadLength ,extraParams);
}
}
}
}
else if (resultScalar) {
__shared__ int n;
if (threadIdx.x == 0) {
xElementWiseStride = shape::elementWiseStride(xShapeInfo);
n = shape::length(xShapeInfo);
}
__syncthreads();
if (xElementWiseStride >= 1) {
for (Nd4jLong i = tid; i < n; i += (blockDim.x * gridDim.x)) {
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[i * xElementWiseStride]);
reduction = update(reduction, indexVal2, extraParams);
}
}
else {
for (Nd4jLong i = tid; i < n; i += blockDim.x * gridDim.x) {
auto offset = shape::getIndexOffset(i, xShapeInfo);
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[offset]);
reduction = update(reduction, indexVal2, extraParams);
}
}
sPartials[threadIdx.x] = reduction;
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, blockDim.x, extraParams);
__syncthreads();
if (gridDim.x > 1) {
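                // Multi-block scalar reduction: every block writes its partial into
                // reductionBuffer, takes a ticket via atomicInc, and the block that draws
                // the last ticket combines all per-block partials into the final value.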
__shared__ bool amLast;
unsigned int *tc = (unsigned int *)reductionBuffer;
tid = threadIdx.x;
if (threadIdx.x == 0) {
SummaryStatsData<X> *pBuffer = (SummaryStatsData<X>*) reductionBuffer;
pBuffer[blockIdx.x] = sPartials[0];
}
__threadfence();
__syncthreads();
if (tid == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*) reductionBuffer;
Z startingVal = startingValue(dx);
SummaryStatsData<X> val;
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], pBuffer[i], extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, gridDim.x, extraParams);
__syncthreads();
if (tid == 0) {
z[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
}
}
}
else {
if (tid == 0) {
unsigned int *tc = (unsigned *)reductionBuffer;
tc[16384] = 0;
                    z[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
}
}
}
};
template <typename X, typename Y>
_CUDA_D void SummaryStatsReduce<X,Y>::transform(const int opNum, void const* dx, Nd4jLong const* xShapeInfo, void *extraParams, void *z, Nd4jLong const* zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets) {
DISPATCH_BY_OPNUM_TT(transform, PARAMS(dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets), SUMMARY_STATS_OPS);
};
template <typename X, typename Z>
_CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduceScalar(dim3& launchDims, hipStream_t *stream, int opNum, void const* vx, Nd4jLong const* xShapeInfo, Nd4jLong const* hxShapeInfo, void *vextraParams, void *vz, Nd4jLong const* zShapeInfo, Nd4jLong const* hzShapeInfo, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, bool biasCorrected, void *reductionBuffer) {
auto x = static_cast<X const*>(vx);
auto extraParams = static_cast<Z*>(vextraParams);
auto z = reinterpret_cast<Z*>(vz);
auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer);
if (sd::Environment::getInstance().isDebugAndVerbose())
printf("D16 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( summaryStatsReduceT<X,Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hxShapeInfo),
extraParams,
z,
zShapeInfo, shape::rank(hzShapeInfo),
nullptr,
1,
1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets);
// this is blocking method since method should return scalar
sd::DebugHelper::checkErrorCode(stream, "execSSReduceScalar(...) failed");
}
template <typename X, typename Z>
_CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, hipStream_t *stream, int opNum, void const* vx, Nd4jLong const* xShapeInfo, Nd4jLong const* hxShapeInfo, void *vextraParams, void *vz, Nd4jLong const* zShapeInfo, Nd4jLong const* hzShapeInfo, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, bool biasCorrected, void *reductionBuffer) {
auto x = static_cast<X const*>(vx);
auto z = static_cast<Z*>(vz);
auto extraParams = static_cast<Z*>(vextraParams);
if (sd::Environment::getInstance().isDebugAndVerbose())
printf("F17 opNum:[%i]\n", opNum);
auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer);
hipLaunchKernelGGL(( summaryStatsReduceT<X,Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hxShapeInfo),
extraParams,
z,
zShapeInfo, shape::rank(hzShapeInfo),
nullptr,
1,
1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets);
DEBUG_KERNEL(stream, opNum);
}
template<typename X, typename Z>
_CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, hipStream_t *stream, int opNum, void const* vx, Nd4jLong const* xShapeInfo, Nd4jLong const* hxShapeInfo, void *vextraParams, void *vz, Nd4jLong const* zShapeInfo, Nd4jLong const* hzShapeInfo, int *dimension, int dimensionLength, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, bool biasCorrected, void *reductionBuffer) {
auto x = static_cast<X const*>(vx);
auto z = static_cast<Z*>(vz);
auto extraParams = static_cast<Z*>(vextraParams);
if (sd::Environment::getInstance().isDebugAndVerbose())
printf("D18 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( summaryStatsReduceT<X, Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hxShapeInfo),
extraParams,
z,
zShapeInfo, shape::rank(hzShapeInfo),
dimension,
dimensionLength,
1, biasCorrected, nullptr, reinterpret_cast<Z*>(reductionBuffer), tadShapeInfo, tadOffsets);
DEBUG_KERNEL(stream, opNum);
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_LOCAL SummaryStatsReduce, , LIBND4J_TYPES, FLOAT_TYPES);
}
} | b95c4810f3c0f6bbdeeb0cb1e25a18bf3b55ec66.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <system/pointercast.h>
#include <types/types.h>
#include <types/float16.h>
#include <system/op_boilerplate.h>
#include <loops/summarystatsreduce.h>
#include <helpers/shape.h>
#include <helpers/TAD.h>
#include <system/dll.h>
#include <system/Environment.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <helpers/DebugHelper.h>
#include <ops/specials_cuda.h>
using namespace simdOps;
namespace functions {
namespace summarystats {
template <typename X, typename Z>
void _CUDA_G summaryStatsReduceT(int op, void const* dx, Nd4jLong const* xShapeInfo, int xRank, void *extraParams, void *z, Nd4jLong const* zShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets) {
functions::summarystats::SummaryStatsReduce<X,Z>::transform(op,dx,xShapeInfo,extraParams,z,zShapeInfo,dimension,dimensionLength,biasCorrected,allocationBuffer,reductionBuffer,tadOnlyShapeInfo,tadOffsets);
}
/**
*
* @param sPartialsRef
* @param tid
* @param extraParams
*/
template<typename X, typename Z>
template<typename OpType>
_CUDA_D void SummaryStatsReduce<X,Z>::aggregatePartials(SummaryStatsData<X> *sPartials, Nd4jLong tid, Nd4jLong numElements, void *vextraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
auto extraParams = static_cast<Z*>(vextraParams);
Nd4jLong floorPow2 = blockDim.x;
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1)) {
floorPow2 &= floorPow2 - 1;
}
if (tid >= floorPow2) {
SummaryStatsData<X> prev = sPartials[tid - floorPow2];
SummaryStatsData<X> curr = sPartials[tid];
sPartials[tid - floorPow2] = update(prev, curr, extraParams);
}
__syncthreads();
}
for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numElements) {
SummaryStatsData<X> curr = sPartials[tid];
SummaryStatsData<X> next = sPartials[tid + activeThreads];
sPartials[tid] = update(curr, next, extraParams);
}
__syncthreads();
}
};
/**
* @param n n is the number of
* elements to loop through
* @param dx the data to operate on
* @param xVectorInfo the meta data for the vector:
* 0 is the offset
* 1 is the increment/stride
* 2 is the real length of the buffer (n and dx.length won't always be the same)
* 3 is the element wise stride for the buffer
* 4 is the number of elements it takes to get to the next row/column/tensor
* @param gpuInformation
* 0 is the block size
* 1 is the grid size
* 2 is the shared memory size
* @param problemDefinition
* 0 is the number of elements per vector
* 1 is the number of vectors
*/
template<typename X, typename Z>
template<typename OpType>
_CUDA_D void SummaryStatsReduce<X,Z>::transform(void const* vx, Nd4jLong const* xShapeInfo,
void *vextraParams,
void *vz, Nd4jLong const* zShapeInfo,
int *dimension, int dimensionLength,
int postProcessOrNot,
int *allocationBuffer, void *vreductionBuffer,
Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets) {
auto dx = static_cast<X const*>(vx);
auto z = static_cast<Z*>(vz);
auto extraParams = static_cast<Z*>(vextraParams);
auto reductionBuffer = static_cast<Z*>(vreductionBuffer);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ volatile int resultScalar;
__shared__ int xElementWiseStride;
int numElements = blockDim.x;
//shared memory space for storing intermediate results
__shared__ SummaryStatsData<X> sPartials[CUDA_BLOCK_SIZE];
Z startingVal = startingValue(dx);
SummaryStatsData<X> val;
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
//length for the tad
__shared__ volatile int xLength;
__shared__ volatile int resultLength;
SummaryStatsData<X> reduction;
reduction.initWithValue(0.0);
reduction.n = 0;
if (threadIdx.x == 0) {
if (zShapeInfo != nullptr)
resultLength = shape::length(zShapeInfo);
else resultLength = 1;
if (dimensionLength == 1) {
if (resultLength == 1 && (dimension == nullptr || dimension[0] == MAX_DIMENSION))
resultScalar = 1;
else
resultScalar = 0;
}
else
resultScalar = 0;
if (resultLength == 1)
resultScalar = 1;
auto xStride = shape::stride(xShapeInfo);
auto xOrder = shape::order(xShapeInfo);
if (dimension != nullptr && (dimension[0] != MAX_DIMENSION && dimensionLength == 1)) {
xElementWiseStride = xStride[dimension[0]];
}
else {
xElementWiseStride = shape::elementWiseStride(xShapeInfo);
}
xLength = shape::length(xShapeInfo);
}
__syncthreads();
if (!resultScalar) {
__shared__ int tadLength;
__shared__ int tadEWS;
__shared__ int numTads;
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
}
__syncthreads();
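            // tadEWS == 0 means the TAD has no constant element-wise stride, so each
            // element offset has to be resolved through shape::getIndexOffset().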
if (tadEWS == 0) {
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto tadOffsetForBlock = tadOffsets[r];
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo);
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[xOffset]);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[r] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]);
}
__syncthreads();
}
}
else {
for (int i = blockIdx.x; i < numTads; i += gridDim.x) {
auto tadOffsetForBlock = tadOffsets[i];
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int x = threadIdx.x; x < tadLength; x += blockDim.x) {
auto indexX = tadOffsetForBlock + x * tadEWS;
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[indexX]);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[i] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); //postProcess(sPartials[0],tadLength ,extraParams);
}
}
}
}
else if (resultScalar) {
__shared__ int n;
if (threadIdx.x == 0) {
xElementWiseStride = shape::elementWiseStride(xShapeInfo);
n = shape::length(xShapeInfo);
}
__syncthreads();
if (xElementWiseStride >= 1) {
for (Nd4jLong i = tid; i < n; i += (blockDim.x * gridDim.x)) {
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[i * xElementWiseStride]);
reduction = update(reduction, indexVal2, extraParams);
}
}
else {
for (Nd4jLong i = tid; i < n; i += blockDim.x * gridDim.x) {
auto offset = shape::getIndexOffset(i, xShapeInfo);
SummaryStatsData<X> indexVal2;
indexVal2.initWithValue(dx[offset]);
reduction = update(reduction, indexVal2, extraParams);
}
}
sPartials[threadIdx.x] = reduction;
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, blockDim.x, extraParams);
__syncthreads();
if (gridDim.x > 1) {
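                // Multi-block scalar reduction: every block writes its partial into
                // reductionBuffer, takes a ticket via atomicInc, and the block that draws
                // the last ticket combines all per-block partials into the final value.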
__shared__ bool amLast;
unsigned int *tc = (unsigned int *)reductionBuffer;
tid = threadIdx.x;
if (threadIdx.x == 0) {
SummaryStatsData<X> *pBuffer = (SummaryStatsData<X>*) reductionBuffer;
pBuffer[blockIdx.x] = sPartials[0];
}
__threadfence();
__syncthreads();
if (tid == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*) reductionBuffer;
Z startingVal = startingValue(dx);
SummaryStatsData<X> val;
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], pBuffer[i], extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, gridDim.x, extraParams);
__syncthreads();
if (tid == 0) {
z[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
}
}
}
else {
if (tid == 0) {
unsigned int *tc = (unsigned *)reductionBuffer;
tc[16384] = 0;
                    z[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
}
}
}
};
template <typename X, typename Y>
_CUDA_D void SummaryStatsReduce<X,Y>::transform(const int opNum, void const* dx, Nd4jLong const* xShapeInfo, void *extraParams, void *z, Nd4jLong const* zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets) {
DISPATCH_BY_OPNUM_TT(transform, PARAMS(dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets), SUMMARY_STATS_OPS);
};
template <typename X, typename Z>
_CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduceScalar(dim3& launchDims, cudaStream_t *stream, int opNum, void const* vx, Nd4jLong const* xShapeInfo, Nd4jLong const* hxShapeInfo, void *vextraParams, void *vz, Nd4jLong const* zShapeInfo, Nd4jLong const* hzShapeInfo, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, bool biasCorrected, void *reductionBuffer) {
auto x = static_cast<X const*>(vx);
auto extraParams = static_cast<Z*>(vextraParams);
auto z = reinterpret_cast<Z*>(vz);
auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer);
if (sd::Environment::getInstance().isDebugAndVerbose())
printf("D16 opNum:[%i]\n", opNum);
summaryStatsReduceT<X,Z><<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hxShapeInfo),
extraParams,
z,
zShapeInfo, shape::rank(hzShapeInfo),
nullptr,
1,
1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets);
// this is blocking method since method should return scalar
sd::DebugHelper::checkErrorCode(stream, "execSSReduceScalar(...) failed");
}
template <typename X, typename Z>
_CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, cudaStream_t *stream, int opNum, void const* vx, Nd4jLong const* xShapeInfo, Nd4jLong const* hxShapeInfo, void *vextraParams, void *vz, Nd4jLong const* zShapeInfo, Nd4jLong const* hzShapeInfo, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, bool biasCorrected, void *reductionBuffer) {
auto x = static_cast<X const*>(vx);
auto z = static_cast<Z*>(vz);
auto extraParams = static_cast<Z*>(vextraParams);
if (sd::Environment::getInstance().isDebugAndVerbose())
printf("F17 opNum:[%i]\n", opNum);
auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer);
summaryStatsReduceT<X,Z><<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hxShapeInfo),
extraParams,
z,
zShapeInfo, shape::rank(hzShapeInfo),
nullptr,
1,
1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets);
DEBUG_KERNEL(stream, opNum);
}
template<typename X, typename Z>
_CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, cudaStream_t *stream, int opNum, void const* vx, Nd4jLong const* xShapeInfo, Nd4jLong const* hxShapeInfo, void *vextraParams, void *vz, Nd4jLong const* zShapeInfo, Nd4jLong const* hzShapeInfo, int *dimension, int dimensionLength, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, bool biasCorrected, void *reductionBuffer) {
auto x = static_cast<X const*>(vx);
auto z = static_cast<Z*>(vz);
auto extraParams = static_cast<Z*>(vextraParams);
if (sd::Environment::getInstance().isDebugAndVerbose())
printf("D18 opNum:[%i]\n", opNum);
summaryStatsReduceT<X, Z><<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hxShapeInfo),
extraParams,
z,
zShapeInfo, shape::rank(hzShapeInfo),
dimension,
dimensionLength,
1, biasCorrected, nullptr, reinterpret_cast<Z*>(reductionBuffer), tadShapeInfo, tadOffsets);
DEBUG_KERNEL(stream, opNum);
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_LOCAL SummaryStatsReduce, , LIBND4J_TYPES, FLOAT_TYPES);
}
} |
276a108c4d025f5cf17874aa0e87bf3fe33594d4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "InvolveVector.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
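// Benchmark harness: for every matrix size / block shape pair, pad the grid to cover the
// matrix, warm the kernel up with 10 launches, then time 1000 launches with steady_clock
// and print [time_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].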
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
int inputSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
InvolveVector), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,inputSize);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
InvolveVector), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,inputSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
InvolveVector), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,inputSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 276a108c4d025f5cf17874aa0e87bf3fe33594d4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "InvolveVector.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
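// Benchmark harness: for every matrix size / block shape pair, pad the grid to cover the
// matrix, warm the kernel up with 10 launches, then time 1000 launches with steady_clock
// and print [time_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].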
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
int inputSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
InvolveVector<<<gridBlock,threadBlock>>>(input,output,inputSize);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
InvolveVector<<<gridBlock,threadBlock>>>(input,output,inputSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
InvolveVector<<<gridBlock,threadBlock>>>(input,output,inputSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
4e81180b4b9fa89b5f25ac88902ee4fbc2e76097.hip | // !!! This is a file automatically generated by hipify!!!
// Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
#include<stdio.h>
#include "hip/hip_runtime.h"
#include<string.h>
#include<stdlib.h>
#define BLOCK_SIZE 1024 //@@ You can change this
char *inputFile,*outputFile;
void _errorCheck(hipError_t e){
if(e != hipSuccess){
printf("Failed to run statement \n");
}
}
__global__ void totalSequential(float *input, float *output, int len) {
//@@ Compute reduction for a segment of the input vector
int tid = threadIdx.x, i = blockIdx.x * blockDim.x;
if(tid == 0) {
		float sum = 0.0f;
for(unsigned int j = 0; j <blockDim.x; j++)
{
sum += input[i + j];
}
output[blockIdx.x] = sum;
}
}
__global__ void totalSequentialSharedMem(float *input, float *output, int len) {
//@@ Compute reduction for a segment of the input vector
int tid = threadIdx.x, i = blockIdx.x * blockDim.x;
__shared__ float sdata[BLOCK_SIZE];
sdata[tid] = i + tid < len ? input[i+tid] : 0.0;
if(tid == 0) {
for(unsigned int j = 1; j <blockDim.x; j++)
{
sdata[0] += sdata[j];
}
output[blockIdx.x] = sdata[0];
}
}
__global__ void totalWithThreadSyncInterleaved(float *input, float *output, int len) {
//@@ Compute reduction for a segment of the input vector
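	// Interleaved addressing: the stride doubles each pass (1, 2, 4, ...) and only threads
	// whose id is a multiple of 2*j add, so the active threads are scattered across each
	// warp and cause divergence.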
int tid = threadIdx.x, i = blockIdx.x * blockDim.x + threadIdx.x;
for(unsigned int j = 1; j <blockDim.x; j *= 2)
{
if (tid % (2 * j) == 0)
input[i] += input[i+j];
__syncthreads();
}
if(tid == 0)
{
output[blockIdx.x] = input[i];
}
}
__global__ void totalWithThreadSyncAndSharedMemInterleaved(float *input, float *output, int len) {
//@@ Compute reduction for a segment of the input vector
__shared__ float sdata[BLOCK_SIZE];
int tid = threadIdx.x, i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < len)
sdata[tid] = input[i];
else
sdata[tid] = 0.0;
for(unsigned int j = 1; j < blockDim.x; j *= 2)
{
if (tid % (2 * j) == 0)
sdata[tid] += sdata[tid+j];
__syncthreads();
}
if(tid == 0)
{
output[blockIdx.x] = sdata[0];
}
}
__global__ void totalWithThreadSync(float *input, float *output, int len) {
//@@ Compute reduction for a segment of the input vector
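	// Sequential addressing: the stride starts at blockDim.x/2 and halves each pass, so the
	// active threads (tid < j) stay contiguous and warp divergence is reduced.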
int tid = threadIdx.x, i = blockIdx.x * blockDim.x + threadIdx.x;
for(unsigned int j = blockDim.x/2; j > 0; j = j/2)
{
if(tid < j)
{
if ((i + j) < len)
input[i] += input[i+j];
else
input [i] += 0.0;
}
__syncthreads();
}
if(tid == 0)
{
output[blockIdx.x] = input[i];
}
}
__global__ void totalWithThreadSyncAndSharedMem(float *input, float *output, int len) {
//@@ Compute reduction for a segment of the input vector
__shared__ float sdata[BLOCK_SIZE];
int tid = threadIdx.x, i = blockIdx.x * blockDim.x + threadIdx.x;
	if(i < len)
sdata[tid] = input[i];
else
sdata[tid] = 0.0;
__syncthreads();
for(unsigned int j = blockDim.x/2; j > 0; j = j/2)
{
if(tid < j)
{
sdata[tid] += sdata[tid+j];
}
__syncthreads();
}
if(tid == 0)
{
output[blockIdx.x] = sdata[0];
}
}
void parseInput(int argc, char **argv){
if(argc < 2){
printf("Not enough arguments\n");
printf("Usage: reduction -i inputFile -o outputFile\n");
exit(1);
}
int i=1;
while(i<argc){
if(!strcmp(argv[i],"-i")){
++i;
inputFile = argv[i];
}
else if(!strcmp(argv[i],"-o")){
++i;
outputFile = argv[i];
}
else{
printf("Wrong input");
exit(1);
}
i++;
}
}
void getSize(int &size, char *file){
FILE *fp;
fp = fopen(file,"r");
if(fp == NULL){
perror("Error opening File\n");
exit(1);
}
if(fscanf(fp,"%d",&size)==EOF){
printf("Error reading file\n");
exit(1);
}
fclose(fp);
}
void readFromFile(int &size,float *v, char *file){
FILE *fp;
fp = fopen(file,"r");
if(fp == NULL){
printf("Error opening File %s\n",file);
exit(1);
}
if(fscanf(fp,"%d",&size)==EOF){
printf("Error reading file\n");
exit(1);
}
int i=0;
float t;
while(i < size){
if(fscanf(fp,"%f",&t)==EOF){
printf("Error reading file\n");
exit(1);
}
v[i++]=t;
//printf("%lf\t", t);
}
fclose(fp);
}
int main(int argc, char **argv) {
int ii;
float *hostInput; // The input 1D list
float *hostOutput; // The output list
float *deviceInput;
float *deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
float *solution;
// Read arguments and input files
parseInput(argc,argv);
// Read input from data
getSize(numInputElements,inputFile);
hostInput = (float*) malloc(numInputElements*sizeof(float));
readFromFile(numInputElements,hostInput,inputFile);
printf("Data size: %d\tBlock Size: %d\n", numInputElements, BLOCK_SIZE);
int opsz;
getSize(opsz,outputFile);
solution = (float*) malloc(opsz*sizeof(float));
readFromFile(opsz,solution,outputFile);
//@@ You can change this, but assumes output element per block
numOutputElements = numInputElements / (BLOCK_SIZE);
if (numInputElements % (BLOCK_SIZE)) {
numOutputElements++;
}
hostOutput = (float *)malloc(numOutputElements * sizeof(float));
//@@ ------------------------------------------------------------------------------------------------@@//
//@@ Allocate GPU memory here
hipMalloc((void**) &deviceInput, numInputElements * sizeof(float));
hipMalloc((void**) &deviceOutput, numOutputElements * sizeof(float));
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), hipMemcpyHostToDevice);
//@@ Initialize the grid and block dimensions here
dim3 grid(numOutputElements, 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
// Initialize timer
hipEvent_t start,stop;
float elapsed_time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
//@@ Launch the GPU Kernel here, you may want multiple implementations to compare
hipLaunchKernelGGL(( totalWithThreadSync) , dim3(grid), dim3(block), 0, 0, deviceInput, deviceOutput, numInputElements);
hipDeviceSynchronize();
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), hipMemcpyDeviceToHost);
/*
* Reduce any remaining output on host
*/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time,start, stop);
if(solution[0] == hostOutput[0]){
printf("SUCCESSFUL: with just thread sync:- time = %2.6f\n",elapsed_time);
}
else{
printf("The operation failed \n");
}
//@@ Free the GPU memory here
hipFree(deviceInput);
hipFree(deviceOutput);
//@@ ------------------------------------------------------------------------------------------------@@//
//@@ Allocate GPU memory here
hipMalloc((void**) &deviceInput, numInputElements * sizeof(float));
hipMalloc((void**) &deviceOutput, numOutputElements * sizeof(float));
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), hipMemcpyHostToDevice);
elapsed_time = 0.0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
//@@ Launch the GPU Kernel here, you may want multiple implementations to compare
hipLaunchKernelGGL(( totalWithThreadSyncInterleaved), dim3(grid), dim3(block), 0, 0, deviceInput, deviceOutput, numInputElements);
hipDeviceSynchronize();
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), hipMemcpyDeviceToHost);
/*
* Reduce any remaining output on host
*/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time,start, stop);
if(solution[0] == hostOutput[0]){
printf("SUCCESSFUL: with just thread sync with Interleaved Access:- time = %2.6f\n",elapsed_time);
}
else{
printf("The operation failed \n");
}
//@@ Free the GPU memory here
hipFree(deviceInput);
hipFree(deviceOutput);
//@@ ------------------------------------------------------------------------------------------------@@//
//@@ Allocate GPU memory here
hipMalloc((void**) &deviceInput, numInputElements * sizeof(float));
hipMalloc((void**) &deviceOutput, numOutputElements * sizeof(float));
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), hipMemcpyHostToDevice);
elapsed_time = 0.0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
//@@ Launch the GPU Kernel here, you may want multiple implementations to compare
hipLaunchKernelGGL(( totalWithThreadSyncAndSharedMem) , dim3(grid), dim3(block), 0, 0, deviceInput, deviceOutput, numInputElements);
hipDeviceSynchronize();
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), hipMemcpyDeviceToHost);
/*
* Reduce any remaining output on host
*/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time,start, stop);
if(solution[0] == hostOutput[0]){
printf("SUCCESSFUL: with just thread sync and shared memory:- time = %2.6f\n",elapsed_time);
}
else{
printf("The operation failed \n");
}
//@@ Free the GPU memory here
hipFree(deviceInput);
hipFree(deviceOutput);
//@@ ------------------------------------------------------------------------------------------------@@//
//@@ Allocate GPU memory here
hipMalloc((void**) &deviceInput, numInputElements * sizeof(float));
hipMalloc((void**) &deviceOutput, numOutputElements * sizeof(float));
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), hipMemcpyHostToDevice);
elapsed_time = 0.0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
//@@ Launch the GPU Kernel here, you may want multiple implementations to compare
hipLaunchKernelGGL(( totalWithThreadSyncAndSharedMemInterleaved), dim3(grid), dim3(block), 0, 0, deviceInput, deviceOutput, numInputElements);
hipDeviceSynchronize();
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), hipMemcpyDeviceToHost);
/*
* Reduce any remaining output on host
*/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time,start, stop);
if(solution[0] == hostOutput[0]){
printf("SUCCESSFUL: with just thread sync and shared memory with Interleaved Access:- time = %2.6f\n",elapsed_time);
}
else{
printf("The operation failed \n");
}
//@@ Free the GPU memory here
hipFree(deviceInput);
hipFree(deviceOutput);
//@@ ------------------------------------------------------------------------------------------------@@//
//@@ Allocate GPU memory here
hipMalloc((void**) &deviceInput, numInputElements * sizeof(float));
hipMalloc((void**) &deviceOutput, numOutputElements * sizeof(float));
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), hipMemcpyHostToDevice);
elapsed_time = 0.0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
//@@ Launch the GPU Kernel here, you may want multiple implementations to compare
hipLaunchKernelGGL(( totalSequential) , dim3(grid), dim3(block), 0, 0, deviceInput, deviceOutput, numInputElements);
hipDeviceSynchronize();
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), hipMemcpyDeviceToHost);
/*
* Reduce any remaining output on host
*/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time,start, stop);
if(solution[0] == hostOutput[0]){
printf("SUCCESSFUL: with just thread 0 of every block computing:- time = %2.6f\n",elapsed_time);
}
else{
printf("The operation failed \n");
}
//@@ Free the GPU memory here
hipFree(deviceInput);
hipFree(deviceOutput);
//@@ ------------------------------------------------------------------------------------------------@@//
//@@ Allocate GPU memory here
hipMalloc((void**) &deviceInput, numInputElements * sizeof(float));
hipMalloc((void**) &deviceOutput, numOutputElements * sizeof(float));
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), hipMemcpyHostToDevice);
elapsed_time = 0.0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
//@@ Launch the GPU Kernel here, you may want multiple implementations to compare
hipLaunchKernelGGL(( totalSequentialSharedMem) , dim3(grid), dim3(block), 0, 0, deviceInput, deviceOutput, numInputElements);
hipDeviceSynchronize();
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), hipMemcpyDeviceToHost);
/*
* Reduce any remaining output on host
*/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time,start, stop);
if(solution[0] == hostOutput[0]){
printf("SUCCESSFUL: with just thread 0 of every block computing using shared memory:- time = %2.6f\n",elapsed_time);
}
else{
printf("The operation failed \n");
}
//@@ Free the GPU memory here
hipFree(deviceInput);
hipFree(deviceOutput);
printf("____________________________________________________________________\n\n\n");
free(hostInput);
free(hostOutput);
return 0;
}
| 4e81180b4b9fa89b5f25ac88902ee4fbc2e76097.cu | // Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
#include<stdio.h>
#include "cuda.h"
#include<string.h>
#include<stdlib.h>
#define BLOCK_SIZE 1024 //@@ You can change this
char *inputFile,*outputFile;
void _errorCheck(cudaError_t e){
if(e != cudaSuccess){
printf("Failed to run statement \n");
}
}
__global__ void totalSequential(float *input, float *output, int len) {
//@@ Compute reduction for a segment of the input vector
int tid = threadIdx.x, i = blockIdx.x * blockDim.x;
if(tid == 0) {
		float sum = 0.0f;
for(unsigned int j = 0; j <blockDim.x; j++)
{
sum += input[i + j];
}
output[blockIdx.x] = sum;
}
}
__global__ void totalSequentialSharedMem(float *input, float *output, int len) {
//@@ Compute reduction for a segment of the input vector
int tid = threadIdx.x, i = blockIdx.x * blockDim.x;
__shared__ float sdata[BLOCK_SIZE];
sdata[tid] = i + tid < len ? input[i+tid] : 0.0;
if(tid == 0) {
for(unsigned int j = 1; j <blockDim.x; j++)
{
sdata[0] += sdata[j];
}
output[blockIdx.x] = sdata[0];
}
}
__global__ void totalWithThreadSyncInterleaved(float *input, float *output, int len) {
//@@ Compute reduction for a segment of the input vector
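	// Interleaved addressing: the stride doubles each pass (1, 2, 4, ...) and only threads
	// whose id is a multiple of 2*j add, so the active threads are scattered across each
	// warp and cause divergence.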
int tid = threadIdx.x, i = blockIdx.x * blockDim.x + threadIdx.x;
for(unsigned int j = 1; j <blockDim.x; j *= 2)
{
if (tid % (2 * j) == 0)
input[i] += input[i+j];
__syncthreads();
}
if(tid == 0)
{
output[blockIdx.x] = input[i];
}
}
__global__ void totalWithThreadSyncAndSharedMemInterleaved(float *input, float *output, int len) {
//@@ Compute reduction for a segment of the input vector
__shared__ float sdata[BLOCK_SIZE];
int tid = threadIdx.x, i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < len)
sdata[tid] = input[i];
else
sdata[tid] = 0.0;
for(unsigned int j = 1; j < blockDim.x; j *= 2)
{
if (tid % (2 * j) == 0)
sdata[tid] += sdata[tid+j];
__syncthreads();
}
if(tid == 0)
{
output[blockIdx.x] = sdata[0];
}
}
__global__ void totalWithThreadSync(float *input, float *output, int len) {
//@@ Compute reduction for a segment of the input vector
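	// Sequential addressing: the stride starts at blockDim.x/2 and halves each pass, so the
	// active threads (tid < j) stay contiguous and warp divergence is reduced.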
int tid = threadIdx.x, i = blockIdx.x * blockDim.x + threadIdx.x;
for(unsigned int j = blockDim.x/2; j > 0; j = j/2)
{
if(tid < j)
{
if ((i + j) < len)
input[i] += input[i+j];
else
input [i] += 0.0;
}
__syncthreads();
}
if(tid == 0)
{
output[blockIdx.x] = input[i];
}
}
__global__ void totalWithThreadSyncAndSharedMem(float *input, float *output, int len) {
//@@ Compute reduction for a segment of the input vector
__shared__ float sdata[BLOCK_SIZE];
int tid = threadIdx.x, i = blockIdx.x * blockDim.x + threadIdx.x;
	if(i < len)
sdata[tid] = input[i];
else
sdata[tid] = 0.0;
__syncthreads();
for(unsigned int j = blockDim.x/2; j > 0; j = j/2)
{
if(tid < j)
{
sdata[tid] += sdata[tid+j];
}
__syncthreads();
}
if(tid == 0)
{
output[blockIdx.x] = sdata[0];
}
}
void parseInput(int argc, char **argv){
if(argc < 2){
printf("Not enough arguments\n");
printf("Usage: reduction -i inputFile -o outputFile\n");
exit(1);
}
int i=1;
while(i<argc){
if(!strcmp(argv[i],"-i")){
++i;
inputFile = argv[i];
}
else if(!strcmp(argv[i],"-o")){
++i;
outputFile = argv[i];
}
else{
printf("Wrong input");
exit(1);
}
i++;
}
}
void getSize(int &size, char *file){
FILE *fp;
fp = fopen(file,"r");
if(fp == NULL){
perror("Error opening File\n");
exit(1);
}
if(fscanf(fp,"%d",&size)==EOF){
printf("Error reading file\n");
exit(1);
}
fclose(fp);
}
void readFromFile(int &size,float *v, char *file){
FILE *fp;
fp = fopen(file,"r");
if(fp == NULL){
printf("Error opening File %s\n",file);
exit(1);
}
if(fscanf(fp,"%d",&size)==EOF){
printf("Error reading file\n");
exit(1);
}
int i=0;
float t;
while(i < size){
if(fscanf(fp,"%f",&t)==EOF){
printf("Error reading file\n");
exit(1);
}
v[i++]=t;
//printf("%lf\t", t);
}
fclose(fp);
}
int main(int argc, char **argv) {
int ii;
float *hostInput; // The input 1D list
float *hostOutput; // The output list
float *deviceInput;
float *deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
float *solution;
// Read arguments and input files
parseInput(argc,argv);
// Read input from data
getSize(numInputElements,inputFile);
hostInput = (float*) malloc(numInputElements*sizeof(float));
readFromFile(numInputElements,hostInput,inputFile);
printf("Data size: %d\tBlock Size: %d\n", numInputElements, BLOCK_SIZE);
int opsz;
getSize(opsz,outputFile);
solution = (float*) malloc(opsz*sizeof(float));
readFromFile(opsz,solution,outputFile);
//@@ You can change this, but assumes output element per block
numOutputElements = numInputElements / (BLOCK_SIZE);
if (numInputElements % (BLOCK_SIZE)) {
numOutputElements++;
}
hostOutput = (float *)malloc(numOutputElements * sizeof(float));
//@@ ------------------------------------------------------------------------------------------------@@//
//@@ Allocate GPU memory here
cudaMalloc((void**) &deviceInput, numInputElements * sizeof(float));
cudaMalloc((void**) &deviceOutput, numOutputElements * sizeof(float));
//@@ Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), cudaMemcpyHostToDevice);
//@@ Initialize the grid and block dimensions here
dim3 grid(numOutputElements, 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
// Initialize timer
cudaEvent_t start,stop;
float elapsed_time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//@@ Launch the GPU Kernel here, you may want multiple implementations to compare
totalWithThreadSync <<<grid, block>>> (deviceInput, deviceOutput, numInputElements);
cudaDeviceSynchronize();
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), cudaMemcpyDeviceToHost);
/*
* Reduce any remaining output on host
*/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time,start, stop);
if(solution[0] == hostOutput[0]){
printf("SUCCESSFUL: with just thread sync:- time = %2.6f\n",elapsed_time);
}
else{
printf("The operation failed \n");
}
//@@ Free the GPU memory here
cudaFree(deviceInput);
cudaFree(deviceOutput);
//@@ ------------------------------------------------------------------------------------------------@@//
//@@ Allocate GPU memory here
cudaMalloc((void**) &deviceInput, numInputElements * sizeof(float));
cudaMalloc((void**) &deviceOutput, numOutputElements * sizeof(float));
//@@ Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), cudaMemcpyHostToDevice);
elapsed_time = 0.0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//@@ Launch the GPU Kernel here, you may want multiple implementations to compare
totalWithThreadSyncInterleaved<<<grid, block>>> (deviceInput, deviceOutput, numInputElements);
cudaDeviceSynchronize();
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), cudaMemcpyDeviceToHost);
/*
* Reduce any remaining output on host
*/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time,start, stop);
if(solution[0] == hostOutput[0]){
printf("SUCCESSFUL: with just thread sync with Interleaved Access:- time = %2.6f\n",elapsed_time);
}
else{
printf("The operation failed \n");
}
//@@ Free the GPU memory here
cudaFree(deviceInput);
cudaFree(deviceOutput);
//@@ ------------------------------------------------------------------------------------------------@@//
//@@ Allocate GPU memory here
cudaMalloc((void**) &deviceInput, numInputElements * sizeof(float));
cudaMalloc((void**) &deviceOutput, numOutputElements * sizeof(float));
//@@ Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), cudaMemcpyHostToDevice);
elapsed_time = 0.0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//@@ Launch the GPU Kernel here, you may want multiple implementations to compare
totalWithThreadSyncAndSharedMem <<<grid, block>>> (deviceInput, deviceOutput, numInputElements);
cudaDeviceSynchronize();
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), cudaMemcpyDeviceToHost);
/*
* Reduce any remaining output on host
*/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time,start, stop);
if(solution[0] == hostOutput[0]){
printf("SUCCESSFUL: with just thread sync and shared memory:- time = %2.6f\n",elapsed_time);
}
else{
printf("The operation failed \n");
}
//@@ Free the GPU memory here
cudaFree(deviceInput);
cudaFree(deviceOutput);
//@@ ------------------------------------------------------------------------------------------------@@//
//@@ Allocate GPU memory here
cudaMalloc((void**) &deviceInput, numInputElements * sizeof(float));
cudaMalloc((void**) &deviceOutput, numOutputElements * sizeof(float));
//@@ Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), cudaMemcpyHostToDevice);
elapsed_time = 0.0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//@@ Launch the GPU Kernel here, you may want multiple implementations to compare
totalWithThreadSyncAndSharedMemInterleaved<<<grid, block>>> (deviceInput, deviceOutput, numInputElements);
cudaDeviceSynchronize();
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), cudaMemcpyDeviceToHost);
/*
* Reduce any remaining output on host
*/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time,start, stop);
if(solution[0] == hostOutput[0]){
printf("SUCCESSFUL: with just thread sync and shared memory with Interleaved Access:- time = %2.6f\n",elapsed_time);
}
else{
printf("The operation failed \n");
}
//@@ Free the GPU memory here
cudaFree(deviceInput);
cudaFree(deviceOutput);
//@@ ------------------------------------------------------------------------------------------------@@//
//@@ Allocate GPU memory here
cudaMalloc((void**) &deviceInput, numInputElements * sizeof(float));
cudaMalloc((void**) &deviceOutput, numOutputElements * sizeof(float));
//@@ Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), cudaMemcpyHostToDevice);
elapsed_time = 0.0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//@@ Launch the GPU Kernel here, you may want multiple implementations to compare
totalSequential <<<grid, block>>> (deviceInput, deviceOutput, numInputElements);
cudaDeviceSynchronize();
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), cudaMemcpyDeviceToHost);
/*
* Reduce any remaining output on host
*/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time,start, stop);
if(solution[0] == hostOutput[0]){
printf("SUCCESSFUL: with just thread 0 of every block computing:- time = %2.6f\n",elapsed_time);
}
else{
printf("The operation failed \n");
}
//@@ Free the GPU memory here
cudaFree(deviceInput);
cudaFree(deviceOutput);
//@@ ------------------------------------------------------------------------------------------------@@//
//@@ Allocate GPU memory here
cudaMalloc((void**) &deviceInput, numInputElements * sizeof(float));
cudaMalloc((void**) &deviceOutput, numOutputElements * sizeof(float));
//@@ Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), cudaMemcpyHostToDevice);
elapsed_time = 0.0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//@@ Launch the GPU Kernel here, you may want multiple implementations to compare
totalSequentialSharedMem <<<grid, block>>> (deviceInput, deviceOutput, numInputElements);
cudaDeviceSynchronize();
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), cudaMemcpyDeviceToHost);
/*
* Reduce any remaining output on host
*/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time,start, stop);
if(solution[0] == hostOutput[0]){
printf("SUCCESSFUL: with just thread 0 of every block computing using shared memory:- time = %2.6f\n",elapsed_time);
}
else{
printf("The operation failed \n");
}
//@@ Free the GPU memory here
cudaFree(deviceInput);
cudaFree(deviceOutput);
printf("____________________________________________________________________\n\n\n");
free(hostInput);
free(hostOutput);
free(solution);
return 0;
}
|
0da2c0c3d294e705179f22332bdbe5e527cf46ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "timer.h"
#include <iostream>
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(hipGetLastError());
*/
static void checkCudaCall(hipError_t result) {
if (result != hipSuccess) {
cerr << "cuda error: " << hipGetErrorString(result) << endl;
exit(1);
}
}
__global__ void vectorAddKernel(float* A, float* B, float* Result) {
// Get the global thread id, which we use as the index into the arrays.
int i = threadIdx.x + blockDim.x * blockIdx.x;
// Element-wise add; there is no bounds check, so the launch must cover exactly n elements.
Result[i] = A[i] + B[i];
}
void vectorAddCuda(int n, float* a, float* b, float* result) {
int threadBlockSize = 512;
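// The kernel has no bounds check, so n must be a multiple of threadBlockSize (it is for the default n = 65536).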
// allocate the vectors on the GPU
float* deviceA = NULL;
checkCudaCall(hipMalloc((void **) &deviceA, n * sizeof(float)));
if (deviceA == NULL) {
cout << "could not allocate memory!" << endl;
return;
}
float* deviceB = NULL;
checkCudaCall(hipMalloc((void **) &deviceB, n * sizeof(float)));
if (deviceB == NULL) {
checkCudaCall(hipFree(deviceA));
cout << "could not allocate memory!" << endl;
return;
}
float* deviceResult = NULL;
checkCudaCall(hipMalloc((void **) &deviceResult, n * sizeof(float)));
if (deviceResult == NULL) {
checkCudaCall(hipFree(deviceA));
checkCudaCall(hipFree(deviceB));
cout << "could not allocate memory!" << endl;
return;
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// copy the original vectors to the GPU
checkCudaCall(hipMemcpy(deviceA, a, n*sizeof(float), hipMemcpyHostToDevice));
checkCudaCall(hipMemcpy(deviceB, b, n*sizeof(float), hipMemcpyHostToDevice));
// execute kernel
hipEventRecord(start, 0);
hipLaunchKernelGGL(( vectorAddKernel), dim3(n/threadBlockSize), dim3(threadBlockSize), 0, 0, deviceA, deviceB, deviceResult);
hipEventRecord(stop, 0);
// check whether the kernel invocation was successful
checkCudaCall(hipGetLastError());
// copy result back
checkCudaCall(hipMemcpy(result, deviceResult, n * sizeof(float), hipMemcpyDeviceToHost));
checkCudaCall(hipFree(deviceA));
checkCudaCall(hipFree(deviceB));
checkCudaCall(hipFree(deviceResult));
// print the time the kernel invocation took, without the copies!
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
cout << "kernel invocation took \t\t" << elapsedTime << " milliseconds" << endl;
}
int main(int argc, char* argv[]) {
int n = 65536;
timer vectorAddTimer("vector add timer");
float* a = new float[n];
float* b = new float[n];
float* result = new float[n];
float* result_s = new float[n];
// initialize the vectors.
for(int i=0; i<n; i++) {
a[i] = i;
b[i] = i;
result_s[i]=a[i]+b[i];
}
vectorAddTimer.start();
vectorAddCuda(n, a, b, result);
vectorAddTimer.stop();
cout << "vector-add (CUDA, total): \t\t" << vectorAddTimer << endl;
// verify the results
for(int i=0; i<n; i++) {
if(result[i] != result_s[i]) {
cout << "error in results! Element " << i << " is " << result[i] << ", but should be " << (2*i) << endl;
exit(1);
}
}
cout << "results OK!" << endl;
delete[] a;
delete[] b;
delete[] result;
delete[] result_s;
return 0;
}
| 0da2c0c3d294e705179f22332bdbe5e527cf46ef.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "timer.h"
#include <iostream>
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(cudaGetLastError());
*/
static void checkCudaCall(cudaError_t result) {
if (result != cudaSuccess) {
cerr << "cuda error: " << cudaGetErrorString(result) << endl;
exit(1);
}
}
__global__ void vectorAddKernel(float* A, float* B, float* Result) {
// Get the global thread id, which we use as the index into the arrays.
int i = threadIdx.x + blockDim.x * blockIdx.x;
// Element-wise add; there is no bounds check, so the launch must cover exactly n elements.
Result[i] = A[i] + B[i];
}
void vectorAddCuda(int n, float* a, float* b, float* result) {
int threadBlockSize = 512;
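// The kernel has no bounds check, so n must be a multiple of threadBlockSize (it is for the default n = 65536).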
// allocate the vectors on the GPU
float* deviceA = NULL;
checkCudaCall(cudaMalloc((void **) &deviceA, n * sizeof(float)));
if (deviceA == NULL) {
cout << "could not allocate memory!" << endl;
return;
}
float* deviceB = NULL;
checkCudaCall(cudaMalloc((void **) &deviceB, n * sizeof(float)));
if (deviceB == NULL) {
checkCudaCall(cudaFree(deviceA));
cout << "could not allocate memory!" << endl;
return;
}
float* deviceResult = NULL;
checkCudaCall(cudaMalloc((void **) &deviceResult, n * sizeof(float)));
if (deviceResult == NULL) {
checkCudaCall(cudaFree(deviceA));
checkCudaCall(cudaFree(deviceB));
cout << "could not allocate memory!" << endl;
return;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// copy the original vectors to the GPU
checkCudaCall(cudaMemcpy(deviceA, a, n*sizeof(float), cudaMemcpyHostToDevice));
checkCudaCall(cudaMemcpy(deviceB, b, n*sizeof(float), cudaMemcpyHostToDevice));
// execute kernel
cudaEventRecord(start, 0);
vectorAddKernel<<<n/threadBlockSize, threadBlockSize>>>(deviceA, deviceB, deviceResult);
cudaEventRecord(stop, 0);
// check whether the kernel invocation was successful
checkCudaCall(cudaGetLastError());
// copy result back
checkCudaCall(cudaMemcpy(result, deviceResult, n * sizeof(float), cudaMemcpyDeviceToHost));
checkCudaCall(cudaFree(deviceA));
checkCudaCall(cudaFree(deviceB));
checkCudaCall(cudaFree(deviceResult));
// print the time the kernel invocation took, without the copies!
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
cout << "kernel invocation took \t\t" << elapsedTime << " milliseconds" << endl;
}
int main(int argc, char* argv[]) {
int n = 65536;
timer vectorAddTimer("vector add timer");
float* a = new float[n];
float* b = new float[n];
float* result = new float[n];
float* result_s = new float[n];
// initialize the vectors.
for(int i=0; i<n; i++) {
a[i] = i;
b[i] = i;
result_s[i]=a[i]+b[i];
}
vectorAddTimer.start();
vectorAddCuda(n, a, b, result);
vectorAddTimer.stop();
cout << "vector-add (CUDA, total): \t\t" << vectorAddTimer << endl;
// verify the results
for(int i=0; i<n; i++) {
if(result[i] != result_s[i]) {
cout << "error in results! Element " << i << " is " << result[i] << ", but should be " << (2*i) << endl;
exit(1);
}
}
cout << "results OK!" << endl;
delete[] a;
delete[] b;
delete[] result;
return 0;
}
|
e23a5293534c302801362a1bc7d10aca26197a42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// CUDA programming example
// A small Waring-type experiment: it marks which residues mod N can be written
// as a sum of two 11th-power residues. Adapted from a SAXPY (Y = A*X + Y) example.
// Inspired by the Dan Ernst, Brandon Holt CUDA Programming Model talk
#include "book.h"
#include <set>
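// Kernel: each thread i pairs v[i] with every v[j] and sets f[(v[i] + v[j]) % cap] = 1,
// flagging the residues mod cap reachable as a sum of two elements of v.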
__global__ void waring_cuda(int size, int cap, int *n, int *f, int *v) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int r;
if(i < size) {
for (int j=0; j<size; j++) {
// obtain new result
r = (v[i] + v[j]) % cap;
f[r] = 1;
}
}
}
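// Computes n^m mod p by repeated multiplication (assumes m >= 1).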
int gf_pow(int n, int m, int p) {
int res = n;
for (int i=1; i<m; i++) {
res = (res * n) % p;
}
return res;
}
int main(int argc, char* argv[]) {
if ( (argc < 3) || (atoi(argv[1])==1 && argc < 5) ) {
printf("Usage: %s <0=CPU or 1=GPU> <N> <Blocks> <Threads/Block>\n",argv[0]);
return 1;
}
std::set<int> S;
int list_size;
int *v; int *f; // Pointers to host arrays
int *v_dev, *f_dev; // Pointers to device arrays
int N = atoi(argv[2]);
size_t size = N * sizeof(int); // Compute size of arrays in bytes
v = (int *)malloc(size); // Allocate array on host
f = (int *)malloc(size); // Allocate array on host
HANDLE_ERROR(hipMalloc ((void**) &f_dev, size));
HANDLE_ERROR(hipMalloc ((void**) &v_dev, size));
// Initialize host array
for (int i=0; i<N; i++) {
f[i] = 0; v[i] = 0;
}
int t;
for (int i=0; i<N;i++) {
t = gf_pow(i,11,N);
f[t] = 1;
S.insert(t);
}
std::set<int>::iterator it = S.begin();
int k = 0;
for (;it!=S.end();it++) {
v[k] = *it;
k++;
}
list_size = S.size();
for (k=0;k<list_size;k++)
printf("%d ",v[k]);
printf("\n");
if (atoi(argv[1])==1) {
unsigned int n_blocks = atoi(argv[3]);
unsigned int block_size = atoi(argv[4]);
// Copy host array to the GPU
HANDLE_ERROR(hipMemcpy(f_dev, f, size, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(v_dev, v, size, hipMemcpyHostToDevice));
// Do calculation on device:
//printf("Deploying %d blocks with %d threads per block\n", n_blocks, block_size);
hipLaunchKernelGGL(( waring_cuda) , dim3(n_blocks), dim3(block_size) , 0, 0, list_size, N, &N, f_dev, v_dev);
// Retrieve result from device and store it in host array
hipMemcpy(f, f_dev, size, hipMemcpyDeviceToHost);
hipMemcpy(v, v_dev, size, hipMemcpyDeviceToHost);
}
/*
else {
saxpy(N, 10.0, x, y);
}
*/
// Print results if you have the patience :-)
for (int i=0; i<N; i++)
printf("%d + %d \n", f[i], v[i]);
for (int i=0; i<N; i++)
if(f[i]) printf("%d ", i);
// I don't so I'll just print the last result
//printf("%f : %f \n", x[N-1], y[N-1]);
// Cleanup
free(f); free(v);
hipFree(f_dev); hipFree(v_dev);
}
| e23a5293534c302801362a1bc7d10aca26197a42.cu | // CUDA programming example
// A small Waring-type experiment: it marks which residues mod N can be written
// as a sum of two 11th-power residues. Adapted from a SAXPY (Y = A*X + Y) example.
// Inspired by the Dan Ernst, Brandon Holt CUDA Programming Model talk
#include "book.h"
#include <set>
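// Kernel: each thread i pairs v[i] with every v[j] and sets f[(v[i] + v[j]) % cap] = 1,
// flagging the residues mod cap reachable as a sum of two elements of v.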
__global__ void waring_cuda(int size, int cap, int *n, int *f, int *v) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int r;
if(i < size) {
for (int j=0; j<size; j++) {
// obtain new result
r = (v[i] + v[j]) % cap;
f[r] = 1;
}
}
}
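// Computes n^m mod p by repeated multiplication (assumes m >= 1).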
int gf_pow(int n, int m, int p) {
int res = n;
for (int i=1; i<m; i++) {
res = (res * n) % p;
}
return res;
}
int main(int argc, char* argv[]) {
if ( (argc < 3) || (atoi(argv[1])==1 && argc < 5) ) {
printf("Usage: %s <0=CPU or 1=GPU> <N> <Blocks> <Threads/Block>\n",argv[0]);
return 1;
}
std::set<int> S;
int list_size;
int *v; int *f; // Pointers to host arrays
int *v_dev, *f_dev; // Pointers to device arrays
int N = atoi(argv[2]);
size_t size = N * sizeof(int); // Compute size of arrays in bytes
v = (int *)malloc(size); // Allocate array on host
f = (int *)malloc(size); // Allocate array on host
HANDLE_ERROR(cudaMalloc ((void**) &f_dev, size));
HANDLE_ERROR(cudaMalloc ((void**) &v_dev, size));
// Initialize host array
for (int i=0; i<N; i++) {
f[i] = 0; v[i] = 0;
}
int t;
for (int i=0; i<N;i++) {
t = gf_pow(i,11,N);
f[t] = 1;
S.insert(t);
}
std::set<int>::iterator it = S.begin();
int k = 0;
for (;it!=S.end();it++) {
v[k] = *it;
k++;
}
list_size = S.size();
for (k=0;k<list_size;k++)
printf("%d ",v[k]);
printf("\n");
if (atoi(argv[1])==1) {
unsigned int n_blocks = atoi(argv[3]);
unsigned int block_size = atoi(argv[4]);
// Copy host array to the GPU
HANDLE_ERROR(cudaMemcpy(f_dev, f, size, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(v_dev, v, size, cudaMemcpyHostToDevice));
// Do calculation on device:
//printf("Deploying %d blocks with %d threads per block\n", n_blocks, block_size);
waring_cuda <<< n_blocks, block_size >>> (list_size, N, &N, f_dev, v_dev);
// Retrieve result from device and store it in host array
cudaMemcpy(f, f_dev, size, cudaMemcpyDeviceToHost);
cudaMemcpy(v, v_dev, size, cudaMemcpyDeviceToHost);
}
/*
else {
saxpy(N, 10.0, x, y);
}
*/
// Print results if you have the patience :-)
for (int i=0; i<N; i++)
printf("%d + %d \n", f[i], v[i]);
for (int i=0; i<N; i++)
if(f[i]) printf("%d ", i);
// I don't so I'll just print the last result
//printf("%f : %f \n", x[N-1], y[N-1]);
// Cleanup
free(f); free(v);
cudaFree(f_dev); cudaFree(v_dev);
}
|
9b2740fdb6f180034370f4cdf20193cd7a2b402a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
int main(void){
hipDeviceProp_t prop;
int count;
hipGetDeviceCount(&count);
printf("\nNumber of Devices: %d\n", count);
for(int i=0; i<count; i++){
hipGetDeviceProperties(&prop, i);
printf("\n ---Device %d Information---\n", i);
printf("Name: %s\n", prop.name);
//Thread/Blocks
printf("Shared Mem Per Block: %lu\n", prop.sharedMemPerBlock);
printf("Registers Per Block: %d\n", prop.regsPerBlock);
printf("Registers Per MultiProcessor: %d\n", prop.regsPerMultiprocessor);
printf("Max Threads per Block: %d\n", prop.maxThreadsPerBlock);
printf("Max Threads per MultiProcessor: %d\n\n", prop.maxThreadsPerMultiProcessor);
printf("Warp Size: %d\n", prop.warpSize);
printf( "Max thread dimensions:(%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions:(%d, %d, %d)\n\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
//Memory
printf("Total Global Mem: %lu\n", prop.totalGlobalMem);
printf("Total Constant Memory: %lu\n", prop.totalConstMem);
printf("Total Managed Memory: %d\n", prop.managedMemory);
printf("Shared Memory Per Block: %lu\n", prop.sharedMemPerBlock);
printf("Shared Memory Per MultiProcessor: %lu\n", prop.sharedMemPerMultiprocessor);
printf("Device can Map Host Memory: %s\n", prop.canMapHostMemory?"Enabled":"Disabled");
printf("Error Correcting code Mem: %s\n", prop.ECCEnabled?"Enabled":"Disabled");
printf("Memory Bus Width: %d\n", prop.memoryBusWidth);
printf("Memory Pitch: %lu\n\n", prop.memPitch);
//Computational Info
printf("Major Compute Capability: %d\n", prop.major);
printf("Minor Compute Capability: %d\n", prop.minor);
printf("ClockRate: %d\n", prop.clockRate);
printf("MultiProcessor Count: %d\n", prop.multiProcessorCount);
printf("Device Overlap: %d\n", prop.deviceOverlap);
printf("Kernel Execution Timeout: %s\n", prop.kernelExecTimeoutEnabled?"Enabled":"Disabled");
printf("Concurrent Kernels: %d\n", prop.concurrentKernels);
}
return 0;
}
| 9b2740fdb6f180034370f4cdf20193cd7a2b402a.cu | #include <stdio.h>
int main(void){
cudaDeviceProp prop;
int count;
cudaGetDeviceCount(&count);
printf("\nNumber of Devices: %d\n", count);
for(int i=0; i<count; i++){
cudaGetDeviceProperties(&prop, i);
printf("\n ---Device %d Information---\n", i);
printf("Name: %s\n", prop.name);
//Thread/Blocks
printf("Shared Mem Per Block: %lu\n", prop.sharedMemPerBlock);
printf("Registers Per Block: %d\n", prop.regsPerBlock);
printf("Registers Per MultiProcessor: %d\n", prop.regsPerMultiprocessor);
printf("Max Threads per Block: %d\n", prop.maxThreadsPerBlock);
printf("Max Threads per MultiProcessor: %d\n\n", prop.maxThreadsPerMultiProcessor);
printf("Warp Size: %d\n", prop.warpSize);
printf( "Max thread dimensions:(%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions:(%d, %d, %d)\n\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
//Memory
printf("Total Global Mem: %lu\n", prop.totalGlobalMem);
printf("Total Constant Memory: %lu\n", prop.totalConstMem);
printf("Total Managed Memory: %d\n", prop.managedMemory);
printf("Shared Memory Per Block: %lu\n", prop.sharedMemPerBlock);
printf("Shared Memory Per MultiProcessor: %lu\n", prop.sharedMemPerMultiprocessor);
printf("Device can Map Host Memory: %s\n", prop.canMapHostMemory?"Enabled":"Disabled");
printf("Error Correcting code Mem: %s\n", prop.ECCEnabled?"Enabled":"Disabled");
printf("Memory Bus Width: %d\n", prop.memoryBusWidth);
printf("Memory Pitch: %lu\n\n", prop.memPitch);
//Computational Info
printf("Major Compute Capability: %d\n", prop.major);
printf("Minor Compute Capability: %d\n", prop.minor);
printf("ClockRate: %d\n", prop.clockRate);
printf("MultiProcessor Count: %d\n", prop.multiProcessorCount);
printf("Device Overlap: %d\n", prop.deviceOverlap);
printf("Kernel Execution Timeout: %s\n", prop.kernelExecTimeoutEnabled?"Enabled":"Disabled");
printf("Concurrent Kernels: %d\n", prop.concurrentKernels);
}
return 0;
}
|