hip_filename (string, length 5–84) | hip_content (string, length 79–9.69M) | cuda_filename (string, length 4–83) | cuda_content (string, length 19–9.69M) |
---|---|---|---|
52a35cc802105fc012cf818008f79d8e6e53b07f.hip | // !!! This is a file automatically generated by hipify!!!
#include "cp.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#define CHECK_CUDA_ERROR(call) do { \
hipError_t result_ = (call); \
if (result_ != hipSuccess) { \
fprintf(stderr, #call " failed: %s\n", \
hipGetErrorString(result_)); \
exit(1); \
} \
} while(0)
void normaliseInput(int ny, int nx, double* normalised, const float* data){
for (int rowj = 0; rowj < ny; rowj++)
{
double sumSqRow = 0.0;
double mean = 0.0;
// compute and subtract the row mean
for (int i = rowj*nx; i < rowj*nx + nx; i++)
{
mean += (double)data[i];
}
mean /= (double)nx;
for (int i = rowj*nx; i < rowj*nx + nx; i++)
{
double value = (double) data[i] - mean;
normalised[i] = value;
sumSqRow += pow(value,2);
}
double value2 = sqrt(sumSqRow);
for (int i = rowj*nx; i < rowj*nx + nx; i++)
{
normalised[i] /= value2;
}
}
}
//calculates correlation of two rows given a normalised matrix
__global__ void correlateCall(int ny, int nx, double* normalised, float * d_result, const int BLOCK_SIZE){
double res = 0.0;
int i = BLOCK_SIZE * blockIdx.x + threadIdx.x;
int j = BLOCK_SIZE * blockIdx.y + threadIdx.y;
if(j <= i && i < ny)
{
for(int k = 0; k < nx ; k++){
res += normalised[k + i*nx] * normalised[k + j*nx];
}
d_result[i + j*ny] = res;
}
}
void correlate(int ny, int nx, const float* data, float* result) {
const int DATA_SIZE = ny*nx;
const int RESULT_SIZE = ny*ny;
const int BLOCK_SIZE = 8;
const int ARRAY_BYTES_FLOAT = RESULT_SIZE * sizeof(float);
const int ARRAY_BYTES_DOUBLE = DATA_SIZE * sizeof(double);
//Create GPU pointers
double * d_data;
float * d_result;
double *normalised = new double[ny*nx];
normaliseInput(ny,nx,normalised,data);
//Allocate GPU memory
hipMalloc((void**) &d_data, ARRAY_BYTES_DOUBLE);
hipMalloc((void**) &d_result, ARRAY_BYTES_FLOAT);
//Copy from host to device
hipMemcpy(d_data,normalised, ARRAY_BYTES_DOUBLE, hipMemcpyHostToDevice);
const dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE, 1);
const dim3 gridSize(ceil(ny/ (double) BLOCK_SIZE), ceil(ny/(double) BLOCK_SIZE), 1);
//Kernel call
hipLaunchKernelGGL(( correlateCall), dim3(gridSize), dim3(blockSize), 0, 0, ny,nx,d_data,d_result,BLOCK_SIZE);
//Copy results from device to host
hipMemcpy(result, d_result, ARRAY_BYTES_FLOAT, hipMemcpyDeviceToHost);
//free Memory
delete [] normalised;
hipFree(d_data);
hipFree(d_result);
}
| 52a35cc802105fc012cf818008f79d8e6e53b07f.cu | #include "cp.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include <math.h>
#define CHECK_CUDA_ERROR(call) do { \
cudaError_t result_ = (call); \
if (result_ != cudaSuccess) { \
fprintf(stderr, #call " failed: %s\n", \
cudaGetErrorString(result_)); \
exit(1); \
} \
} while(0)
void normaliseInput(int ny, int nx, double* normalised, const float* data){
for (int rowj = 0; rowj < ny; rowj++)
{
double sumSqRow = 0.0;
double mean = 0.0;
// compute and subtract the row mean
for (int i = rowj*nx; i < rowj*nx + nx; i++)
{
mean += (double)data[i];
}
mean /= (double)nx;
for (int i = rowj*nx; i < rowj*nx + nx; i++)
{
double value = (double) data[i] - mean;
normalised[i] = value;
sumSqRow += pow(value,2);
}
double value2 = sqrt(sumSqRow);
for (int i = rowj*nx; i < rowj*nx + nx; i++)
{
normalised[i] /= value2;
}
}
}
//calculates correlation of two rows given a normalised matrix
__global__ void correlateCall(int ny, int nx, double* normalised, float * d_result, const int BLOCK_SIZE){
double res = 0.0;
int i = BLOCK_SIZE * blockIdx.x + threadIdx.x;
int j = BLOCK_SIZE * blockIdx.y + threadIdx.y;
if(j <= i && i < ny)
{
for(int k = 0; k < nx ; k++){
res += normalised[k + i*nx] * normalised[k + j*nx];
}
d_result[i + j*ny] = res;
}
}
void correlate(int ny, int nx, const float* data, float* result) {
const int DATA_SIZE = ny*nx;
const int RESULT_SIZE = ny*ny;
const int BLOCK_SIZE = 8;
const int ARRAY_BYTES_FLOAT = RESULT_SIZE * sizeof(float);
const int ARRAY_BYTES_DOUBLE = DATA_SIZE * sizeof(double);
//Create GPU pointers
double * d_data;
float * d_result;
double *normalised = new double[ny*nx];
normaliseInput(ny,nx,normalised,data);
//Allocate GPU memory
cudaMalloc((void**) &d_data, ARRAY_BYTES_DOUBLE);
cudaMalloc((void**) &d_result, ARRAY_BYTES_FLOAT);
//Copy from host to device
cudaMemcpy(d_data,normalised, ARRAY_BYTES_DOUBLE, cudaMemcpyHostToDevice);
const dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE, 1);
const dim3 gridSize(ceil(ny/ (double) BLOCK_SIZE), ceil(ny/(double) BLOCK_SIZE), 1);
//Kernel call
correlateCall<<<gridSize, blockSize>>>(ny,nx,d_data,d_result,BLOCK_SIZE);
//Copy results from device to host
cudaMemcpy(result, d_result, ARRAY_BYTES_FLOAT, cudaMemcpyDeviceToHost);
//free Memory
delete [] normalised;
cudaFree(d_data);
cudaFree(d_result);
}
|
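The only systematic difference between the two columns of the pair above is the runtime spelling that hipify rewrites: `cudaMalloc`/`cudaMemcpy` become `hipMalloc`/`hipMemcpy`, and the `<<<grid, block>>>` launch becomes the `hipLaunchKernelGGL` macro. A minimal sketch of that mapping, using a hypothetical kernel `scale` that is not part of the row above:

```cpp
#include <hip/hip_runtime.h>

// Hypothetical kernel, used only to illustrate the launch-syntax mapping.
__global__ void scale(float *p, float s) { p[threadIdx.x] *= s; }

void launchExample(float *d_p) {
    dim3 grid(1), block(64);
    // CUDA source (the .cu column) would write:
    //   scale<<<grid, block>>>(d_p, 2.0f);
    // hipify rewrites it for the .hip column as:
    hipLaunchKernelGGL(scale, grid, block, 0 /*sharedMemBytes*/, 0 /*stream*/, d_p, 2.0f);
}
```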
b68ba9e050b55de53d2717a5385eacf61299bb09.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2019 Cleuton Sampaio
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Matrix multiplication sample using CUDA
this sample works for squared matrices!
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
// CUDA Kernel function:
__global__ void matmul(float *A, float* B, float *C, int size)
{
// Row and Column indexes:
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
// Are they below the maximum?
if (col < size && row < size) {
float result = 0;
for(int ix=0;ix<size;ix++) {
result += A[row*size+ix]*B[ix*size+col];
}
C[row*size+col] = result;
}
}
int main()
{
// Matrices and constants
int size = 3;
int total = size*size;
float cpu_A[] = {-1,2,4,0,5,3,6,2,1};
float cpu_B[] = {3,0,2,3,4,5,4,7,2};
float cpu_C[total];
// Allocate device memory:
float* gpu_A;
float* gpu_B;
float* gpu_C;
int msize = total * sizeof(float);
hipMalloc((void**)&gpu_A, msize);
hipMemcpy(gpu_A,cpu_A,msize,hipMemcpyHostToDevice);
hipMalloc((void**)&gpu_B, msize);
hipMemcpy(gpu_B,cpu_B,msize,hipMemcpyHostToDevice);
hipMalloc((void**)&gpu_C,msize);
// Blocks & grids:
dim3 blocks(size,size);
dim3 grid(1,1);
// Call the kernel:
hipLaunchKernelGGL(( matmul), dim3(grid),dim3(blocks), 0, 0, gpu_A,gpu_B,gpu_C,size);
// Get the result Matrix:
hipMemcpy(cpu_C,gpu_C,msize,hipMemcpyDeviceToHost);
std::cout << cpu_C[0] << '\t' << cpu_C[1] << '\t' << cpu_C[2] << std::endl
<< cpu_C[3] << '\t' << cpu_C[4] << '\t' << cpu_C[5] << std::endl
<< cpu_C[6] << '\t' << cpu_C[7] << '\t' << cpu_C[8] << std::endl;
//Free device matrices
hipFree(gpu_A);
hipFree(gpu_B);
hipFree(gpu_C);
}
| b68ba9e050b55de53d2717a5385eacf61299bb09.cu | /*
Copyright 2019 Cleuton Sampaio
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Matrix multiplication sample using CUDA
this sample works for squared matrices!
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
// CUDA Kernel function:
__global__ void matmul(float *A, float* B, float *C, int size)
{
// Row and Column indexes:
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
// Are they below the maximum?
if (col < size && row < size) {
float result = 0;
for(int ix=0;ix<size;ix++) {
result += A[row*size+ix]*B[ix*size+col];
}
C[row*size+col] = result;
}
}
int main()
{
// Matrices and constants
int size = 3;
int total = size*size;
float cpu_A[] = {-1,2,4,0,5,3,6,2,1};
float cpu_B[] = {3,0,2,3,4,5,4,7,2};
float cpu_C[total];
// Allocate device memory:
float* gpu_A;
float* gpu_B;
float* gpu_C;
int msize = total * sizeof(float);
cudaMalloc((void**)&gpu_A, msize);
cudaMemcpy(gpu_A,cpu_A,msize,cudaMemcpyHostToDevice);
cudaMalloc((void**)&gpu_B, msize);
cudaMemcpy(gpu_B,cpu_B,msize,cudaMemcpyHostToDevice);
cudaMalloc((void**)&gpu_C,msize);
// Blocks & grids:
dim3 blocks(size,size);
dim3 grid(1,1);
// Call the kernel:
matmul<<<grid,blocks>>>(gpu_A,gpu_B,gpu_C,size);
// Get the result Matrix:
cudaMemcpy(cpu_C,gpu_C,msize,cudaMemcpyDeviceToHost);
std::cout << cpu_C[0] << '\t' << cpu_C[1] << '\t' << cpu_C[2] << std::endl
<< cpu_C[3] << '\t' << cpu_C[4] << '\t' << cpu_C[5] << std::endl
<< cpu_C[6] << '\t' << cpu_C[7] << '\t' << cpu_C[8] << std::endl;
//Free device matrices
cudaFree(gpu_A);
cudaFree(gpu_B);
cudaFree(gpu_C);
}
|
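As a quick sanity check on the matrix-multiplication sample above: with the hard-coded 3×3 inputs, the printed product should be 19 36 16 / 27 41 31 / 28 15 24. A host-side reference loop (a sketch, not part of the dataset row) that mirrors the kernel's row-major indexing:

```cpp
// Host-side reference for C = A * B on square n x n row-major matrices,
// mirroring the indexing used by the matmul kernel above.
void matmulCpu(const float *A, const float *B, float *C, int n) {
    for (int row = 0; row < n; ++row)
        for (int col = 0; col < n; ++col) {
            float result = 0.0f;
            for (int k = 0; k < n; ++k)
                result += A[row * n + k] * B[k * n + col];
            C[row * n + col] = result;
        }
}
```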
5055cf244a528b8e8b7fb6a07b6050b4f80fcd95.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// R-FCN
// Copyright (c) 2016 Microsoft
// Licensed under The MIT License [see r-fcn/LICENSE for details]
// Written by Yi Li
// ------------------------------------------------------------------
#include "./psroi_pooling_layer.hpp"
#include "../util/math_functions.hpp"
namespace caffe {
__global__ void PSROIPoolingForward(const int nthreads,
const real_t* bottom_data,
const real_t spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const real_t* bottom_rois,
const int output_dim,
const int group_size,
real_t* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
real_t roi_start_w = static_cast<real_t>(round(bottom_rois[1])) * spatial_scale;
real_t roi_start_h = static_cast<real_t>(round(bottom_rois[2])) * spatial_scale;
real_t roi_end_w = static_cast<real_t>(round(bottom_rois[3]) + 1.) * spatial_scale;
real_t roi_end_h = static_cast<real_t>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
real_t roi_width = max(roi_end_w - roi_start_w, static_cast<real_t>(0.1)); //avoid 0
real_t roi_height = max(roi_end_h - roi_start_h, static_cast<real_t>(0.1));
// Compute w and h at bottom
real_t bin_size_h = roi_height / static_cast<real_t>(pooled_height);
real_t bin_size_w = roi_width / static_cast<real_t>(pooled_width);
int hstart = floor(static_cast<real_t>(ph) * bin_size_h + roi_start_h);
int wstart = floor(static_cast<real_t>(pw)* bin_size_w + roi_start_w);
int hend = ceil(static_cast<real_t>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(static_cast<real_t>(pw + 1) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0),width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = pw;
int gh = ph;
int c = (ctop*group_size + gh)*group_size + gw;
bottom_data += (roi_batch_ind * channels + c) * height * width;
real_t out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
out_sum += bottom_data[bottom_index];
}
}
real_t bin_area = (hend - hstart)*(wend - wstart);
top_data[index] = is_empty ? 0. : out_sum/bin_area;
}
}
void PSROIPoolingLayer::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const real_t* bottom_data = bottom[0]->gpu_data();
const real_t* bottom_rois = bottom[1]->gpu_data();
real_t* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
caffe_gpu_set(count, static_cast<real_t>(0), top_data);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PSROIPoolingForward), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_,
pooled_width_, bottom_rois, output_dim_, group_size_, top_data);
CUDA_POST_KERNEL_CHECK;
}
} // namespace caffe
| 5055cf244a528b8e8b7fb6a07b6050b4f80fcd95.cu | // ------------------------------------------------------------------
// R-FCN
// Copyright (c) 2016 Microsoft
// Licensed under The MIT License [see r-fcn/LICENSE for details]
// Written by Yi Li
// ------------------------------------------------------------------
#include "./psroi_pooling_layer.hpp"
#include "../util/math_functions.hpp"
namespace caffe {
__global__ void PSROIPoolingForward(const int nthreads,
const real_t* bottom_data,
const real_t spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const real_t* bottom_rois,
const int output_dim,
const int group_size,
real_t* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
real_t roi_start_w = static_cast<real_t>(round(bottom_rois[1])) * spatial_scale;
real_t roi_start_h = static_cast<real_t>(round(bottom_rois[2])) * spatial_scale;
real_t roi_end_w = static_cast<real_t>(round(bottom_rois[3]) + 1.) * spatial_scale;
real_t roi_end_h = static_cast<real_t>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
real_t roi_width = max(roi_end_w - roi_start_w, static_cast<real_t>(0.1)); //avoid 0
real_t roi_height = max(roi_end_h - roi_start_h, static_cast<real_t>(0.1));
// Compute w and h at bottom
real_t bin_size_h = roi_height / static_cast<real_t>(pooled_height);
real_t bin_size_w = roi_width / static_cast<real_t>(pooled_width);
int hstart = floor(static_cast<real_t>(ph) * bin_size_h + roi_start_h);
int wstart = floor(static_cast<real_t>(pw)* bin_size_w + roi_start_w);
int hend = ceil(static_cast<real_t>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(static_cast<real_t>(pw + 1) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0),width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = pw;
int gh = ph;
int c = (ctop*group_size + gh)*group_size + gw;
bottom_data += (roi_batch_ind * channels + c) * height * width;
real_t out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
out_sum += bottom_data[bottom_index];
}
}
real_t bin_area = (hend - hstart)*(wend - wstart);
top_data[index] = is_empty ? 0. : out_sum/bin_area;
}
}
void PSROIPoolingLayer::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const real_t* bottom_data = bottom[0]->gpu_data();
const real_t* bottom_rois = bottom[1]->gpu_data();
real_t* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
caffe_gpu_set(count, static_cast<real_t>(0), top_data);
// NOLINT_NEXT_LINE(whitespace/operators)
PSROIPoolingForward<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_,
pooled_width_, bottom_rois, output_dim_, group_size_, top_data);
CUDA_POST_KERNEL_CHECK;
}
} // namespace caffe
|
79df1482838668195178de37034aeff9dd43f5df.hip | // !!! This is a file automatically generated by hipify!!!
extern "C"{
#include<stdio.h>
#include "qsort_cuda.h"
}
extern "C"{
const int N = 7;
void MyTestFunct()
{
printf("Hello");
char *ad;
const int csize = N*sizeof(char);
hipMalloc( (void**)&ad, csize );
}
}
| 79df1482838668195178de37034aeff9dd43f5df.cu | extern "C"{
#include<stdio.h>
#include "qsort_cuda.h"
}
extern "C"{
const int N = 7;
void MyTestFunct()
{
printf("Hello");
char *ad;
const int csize = N*sizeof(char);
cudaMalloc( (void**)&ad, csize );
}
}
|
abf068057fb3212b057fff4b29d484a80278401a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
void deviceQuery() {
hipDeviceProp_t prop;
int nDevices = 0, i;
hipError_t ierr;
ierr = hipGetDeviceCount(&nDevices);
if (ierr != hipSuccess) {
printf("Sync error: %s\n", hipGetErrorString(ierr));
}
for (i = 0; i < nDevices; ++i) {
ierr = hipGetDeviceProperties(&prop, i);
printf("Device number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Compute capability: %d.%d\n\n", prop.major, prop.minor);
printf(" Clock Rate: %d kHz\n", prop.clockRate);
printf(" Total SMs: %d \n", prop.multiProcessorCount);
printf(" Shared Memory Per SM: %lu bytes\n",
prop.sharedMemPerMultiprocessor);
printf(" Registers Per SM: %d 32-bit\n", prop.regsPerMultiprocessor);
printf(" Max threads per SM: %d\n", prop.maxThreadsPerMultiProcessor);
printf(" L2 Cache Size: %d bytes\n", prop.l2CacheSize);
printf(" Total Global Memory: %lu bytes\n", prop.totalGlobalMem);
printf(" Memory Clock Rate: %d kHz\n\n", prop.memoryClockRate);
printf(" Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf(" Max threads in X-dimension of block: %d\n",
prop.maxThreadsDim[0]);
printf(" Max threads in Y-dimension of block: %d\n",
prop.maxThreadsDim[1]);
printf(" Max threads in Z-dimension of block: %d\n\n",
prop.maxThreadsDim[2]);
printf(" Max blocks in X-dimension of grid: %d\n", prop.maxGridSize[0]);
printf(" Max blocks in Y-dimension of grid: %d\n", prop.maxGridSize[1]);
printf(" Max blocks in Z-dimension of grid: %d\n\n", prop.maxGridSize[2]);
printf(" Shared Memory Per Block: %lu bytes\n", prop.sharedMemPerBlock);
printf(" Registers Per Block: %d 32-bit\n", prop.regsPerBlock);
printf(" Warp size: %d\n\n", prop.warpSize);
}
}
int main() { deviceQuery(); }
| abf068057fb3212b057fff4b29d484a80278401a.cu | #include <stdio.h>
void deviceQuery() {
cudaDeviceProp prop;
int nDevices = 0, i;
cudaError_t ierr;
ierr = cudaGetDeviceCount(&nDevices);
if (ierr != cudaSuccess) {
printf("Sync error: %s\n", cudaGetErrorString(ierr));
}
for (i = 0; i < nDevices; ++i) {
ierr = cudaGetDeviceProperties(&prop, i);
printf("Device number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Compute capability: %d.%d\n\n", prop.major, prop.minor);
printf(" Clock Rate: %d kHz\n", prop.clockRate);
printf(" Total SMs: %d \n", prop.multiProcessorCount);
printf(" Shared Memory Per SM: %lu bytes\n",
prop.sharedMemPerMultiprocessor);
printf(" Registers Per SM: %d 32-bit\n", prop.regsPerMultiprocessor);
printf(" Max threads per SM: %d\n", prop.maxThreadsPerMultiProcessor);
printf(" L2 Cache Size: %d bytes\n", prop.l2CacheSize);
printf(" Total Global Memory: %lu bytes\n", prop.totalGlobalMem);
printf(" Memory Clock Rate: %d kHz\n\n", prop.memoryClockRate);
printf(" Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf(" Max threads in X-dimension of block: %d\n",
prop.maxThreadsDim[0]);
printf(" Max threads in Y-dimension of block: %d\n",
prop.maxThreadsDim[1]);
printf(" Max threads in Z-dimension of block: %d\n\n",
prop.maxThreadsDim[2]);
printf(" Max blocks in X-dimension of grid: %d\n", prop.maxGridSize[0]);
printf(" Max blocks in Y-dimension of grid: %d\n", prop.maxGridSize[1]);
printf(" Max blocks in Z-dimension of grid: %d\n\n", prop.maxGridSize[2]);
printf(" Shared Memory Per Block: %lu bytes\n", prop.sharedMemPerBlock);
printf(" Registers Per Block: %d 32-bit\n", prop.regsPerBlock);
printf(" Warp size: %d\n\n", prop.warpSize);
}
}
int main() { deviceQuery(); }
|
a1fd14b53f12952a3d02c22ea6a0467c1af8d823.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_minus_4_back;
int xdim0_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int ydim0_update_halo_kernel5_minus_4_back;
int ydim0_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int xdim1_update_halo_kernel5_minus_4_back;
int xdim1_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int ydim1_update_halo_kernel5_minus_4_back;
int ydim1_update_halo_kernel5_minus_4_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_minus_4_back * (y) + \
xdim0_update_halo_kernel5_minus_4_back * \
ydim0_update_halo_kernel5_minus_4_back * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_minus_4_back * (y) + \
xdim1_update_halo_kernel5_minus_4_back * \
ydim1_update_halo_kernel5_minus_4_back * (z))
// user function
__device__
inline void
update_halo_kernel5_minus_4_back_gpu(double *vol_flux_z,
double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = -vol_flux_z[OPS_ACC0(0, 0, 4)];
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = -mass_flux_z[OPS_ACC1(0, 0, 4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_minus_4_back(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back *
ydim0_update_halo_kernel5_minus_4_back;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back *
ydim1_update_halo_kernel5_minus_4_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_minus_4_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_minus_4_back(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_minus_4_back_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 91))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(91, "update_halo_kernel5_minus_4_back");
OPS_kernels[91].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_minus_4_back_h ||
ydim0 != ydim0_update_halo_kernel5_minus_4_back_h ||
xdim1 != xdim1_update_halo_kernel5_minus_4_back_h ||
ydim1 != ydim1_update_halo_kernel5_minus_4_back_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel5_minus_4_back, &xdim0,
sizeof(int));
xdim0_update_halo_kernel5_minus_4_back_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel5_minus_4_back, &ydim0,
sizeof(int));
ydim0_update_halo_kernel5_minus_4_back_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel5_minus_4_back, &xdim1,
sizeof(int));
xdim1_update_halo_kernel5_minus_4_back_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel5_minus_4_back, &ydim1,
sizeof(int));
ydim1_update_halo_kernel5_minus_4_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[91].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel5_minus_4_back), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[91].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[91].mpi_time += t2 - t1;
OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_minus_4_back(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 91;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 91;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_minus_4_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(91, "update_halo_kernel5_minus_4_back");
}
ops_enqueue_kernel(desc);
}
#endif
| a1fd14b53f12952a3d02c22ea6a0467c1af8d823.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_minus_4_back;
int xdim0_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int ydim0_update_halo_kernel5_minus_4_back;
int ydim0_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int xdim1_update_halo_kernel5_minus_4_back;
int xdim1_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int ydim1_update_halo_kernel5_minus_4_back;
int ydim1_update_halo_kernel5_minus_4_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_minus_4_back * (y) + \
xdim0_update_halo_kernel5_minus_4_back * \
ydim0_update_halo_kernel5_minus_4_back * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_minus_4_back * (y) + \
xdim1_update_halo_kernel5_minus_4_back * \
ydim1_update_halo_kernel5_minus_4_back * (z))
// user function
__device__
inline void
update_halo_kernel5_minus_4_back_gpu(double *vol_flux_z,
double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = -vol_flux_z[OPS_ACC0(0, 0, 4)];
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = -mass_flux_z[OPS_ACC1(0, 0, 4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_minus_4_back(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back *
ydim0_update_halo_kernel5_minus_4_back;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back *
ydim1_update_halo_kernel5_minus_4_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_minus_4_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_minus_4_back(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_minus_4_back_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 91))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(91, "update_halo_kernel5_minus_4_back");
OPS_kernels[91].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_minus_4_back_h ||
ydim0 != ydim0_update_halo_kernel5_minus_4_back_h ||
xdim1 != xdim1_update_halo_kernel5_minus_4_back_h ||
ydim1 != ydim1_update_halo_kernel5_minus_4_back_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel5_minus_4_back, &xdim0,
sizeof(int));
xdim0_update_halo_kernel5_minus_4_back_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel5_minus_4_back, &ydim0,
sizeof(int));
ydim0_update_halo_kernel5_minus_4_back_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel5_minus_4_back, &xdim1,
sizeof(int));
xdim1_update_halo_kernel5_minus_4_back_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel5_minus_4_back, &ydim1,
sizeof(int));
ydim1_update_halo_kernel5_minus_4_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[91].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel5_minus_4_back<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[91].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[91].mpi_time += t2 - t1;
OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_minus_4_back(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 91;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 91;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_minus_4_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(91, "update_halo_kernel5_minus_4_back");
}
ops_enqueue_kernel(desc);
}
#endif
|
e8d04e179e557cec34410f50446e62b657aed5db.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_t_name[] = "chebyshev_polynomial_t_forward";
void chebyshev_polynomial_t_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_t_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_t_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return chebyshev_polynomial_t_forward<scalar_t, true>(x, n);
});
});
#endif
} // chebyshev_polynomial_t_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(chebyshev_polynomial_t_stub, &chebyshev_polynomial_t_kernel_cuda);
} // namespace at::native
| e8d04e179e557cec34410f50446e62b657aed5db.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_t_name[] = "chebyshev_polynomial_t_forward";
void chebyshev_polynomial_t_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_t_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_t_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return chebyshev_polynomial_t_forward<scalar_t, true>(x, n);
});
});
#endif
} // chebyshev_polynomial_t_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(chebyshev_polynomial_t_stub, &chebyshev_polynomial_t_kernel_cuda);
} // namespace at::native
|
118c5d346f79cbe486b931d10c62b02fa20caf90.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "string"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "../highperformancetimertest/highperformancetimer.h"
using namespace std;
typedef int ArrayType_T;
int main(int argc, char * argv[])
{
srand((unsigned)time(NULL));
bool initialize(ArrayType_T ** a, ArrayType_T ** b, ArrayType_T ** c, int size);
void clearMem(ArrayType_T * a, ArrayType_T * b, ArrayType_T * c);
void assign(ArrayType_T * a, ArrayType_T * b, ArrayType_T * c, int size);
void addVector(ArrayType_T *a, ArrayType_T *b, ArrayType_T *c, int size);
int size = 1000;
ArrayType_T * a = nullptr;
ArrayType_T * b = nullptr;
ArrayType_T * c = nullptr;
double accumulatedTime = 0.0;
//HighPrecisionTime htp;
try
{
if (!initialize(&a, &b, &c, size))
throw("CPU memory allocation error ");
cout << "CPU memory has been allocated" << endl;
//accumulatedTime = 0.0;
//for (int i = 0; i < 100; i++)
//{
//htp.TimeSinceLastCall();
//addVector(a, b, c, size);
//accumulatedTime += htp.TimeSinceLastCall();
//}
}
catch (char * errMessage)
{
cout << "An exception occured " << endl;
cout << errMessage << endl;
}
cout << argc << endl;
if (argc > 1)
{
size = stoi(argv[1]);
}
cout << argv[0] << endl;
cout << endl;
cout << "Array size will be " << size << endl;
assign(a, b, c, size);
cout << *a << endl;
cout << *b << endl;
cout << *c << endl;
addVector(a, b, c, size);
cout << "Adding the vectors by taking the random int of a and b and adding them and placing them into c" << endl;
cout << *a << endl;
cout << *b << endl;
cout << *c << endl;
clearMem(a, b, c);
system("pause");
}
bool initialize(ArrayType_T ** a, ArrayType_T ** b, ArrayType_T ** c, int size)
{
bool retVal = true;
*a = (ArrayType_T*)malloc(size * sizeof(ArrayType_T));
*b = (ArrayType_T*)malloc(size * sizeof(ArrayType_T));
*c = (ArrayType_T*)malloc(size * sizeof(ArrayType_T));
if (*a == nullptr || *b == nullptr || *c == nullptr)
{
retVal = false;
}
return retVal;
}
void assign(ArrayType_T * a, ArrayType_T * b, ArrayType_T * c, int size)
{
cout << "Starting loop" << endl;
for (int i = 0; i < size; i++)
{
a[i] = rand() % size;
b[i] = rand() % size;
c[i] = 0;
}
}
void clearMem(ArrayType_T * a, ArrayType_T * b, ArrayType_T * c)
{
if (a != nullptr)
{
free(a);
}
if (b != nullptr)
{
free(b);
}
if (c != nullptr)
{
free(c);
}
}
void addVector(ArrayType_T *a, ArrayType_T *b, ArrayType_T *c, int size)
{
for (int i = 0; i < size; i++)
{
c[i] = a[i] + b[i];
}
} | 118c5d346f79cbe486b931d10c62b02fa20caf90.cu | #include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "string"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "../highperformancetimertest/highperformancetimer.h"
using namespace std;
typedef int ArrayType_T;
int main(int argc, char * argv[])
{
srand((unsigned)time(NULL));
bool initialize(ArrayType_T ** a, ArrayType_T ** b, ArrayType_T ** c, int size);
void clearMem(ArrayType_T * a, ArrayType_T * b, ArrayType_T * c);
void assign(ArrayType_T * a, ArrayType_T * b, ArrayType_T * c, int size);
void addVector(ArrayType_T *a, ArrayType_T *b, ArrayType_T *c, int size);
int size = 1000;
ArrayType_T * a = nullptr;
ArrayType_T * b = nullptr;
ArrayType_T * c = nullptr;
double accumulatedTime = 0.0;
//HighPrecisionTime htp;
try
{
if (!initialize(&a, &b, &c, size))
throw("CPU memory allocation error ");
cout << "CPU memory has been allocated" << endl;
//accumulatedTime = 0.0;
//for (int i = 0; i < 100; i++)
//{
//htp.TimeSinceLastCall();
//addVector(a, b, c, size);
//accumulatedTime += htp.TimeSinceLastCall();
//}
}
catch (char * errMessage)
{
cout << "An exception occured " << endl;
cout << errMessage << endl;
}
cout << argc << endl;
if (argc > 1)
{
size = stoi(argv[1]);
}
cout << argv[0] << endl;
cout << endl;
cout << "Array size will be " << size << endl;
assign(a, b, c, size);
cout << *a << endl;
cout << *b << endl;
cout << *c << endl;
addVector(a, b, c, size);
cout << "Adding the vectors by taking the random int of a and b and adding them and placing them into c" << endl;
cout << *a << endl;
cout << *b << endl;
cout << *c << endl;
clearMem(a, b, c);
system("pause");
}
bool initialize(ArrayType_T ** a, ArrayType_T ** b, ArrayType_T ** c, int size)
{
bool retVal = true;
*a = (ArrayType_T*)malloc(size * sizeof(ArrayType_T));
*b = (ArrayType_T*)malloc(size * sizeof(ArrayType_T));
*c = (ArrayType_T*)malloc(size * sizeof(ArrayType_T));
if (*a == nullptr || *b == nullptr || *c == nullptr)
{
retVal = false;
}
return retVal;
}
void assign(ArrayType_T * a, ArrayType_T * b, ArrayType_T * c, int size)
{
cout << "Starting loop" << endl;
for (int i = 0; i < size; i++)
{
a[i] = rand() % size;
b[i] = rand() % size;
c[i] = 0;
}
}
void clearMem(ArrayType_T * a, ArrayType_T * b, ArrayType_T * c)
{
if (a != nullptr)
{
free(a);
}
if (b != nullptr)
{
free(b);
}
if (c != nullptr)
{
free(c);
}
}
void addVector(ArrayType_T *a, ArrayType_T *b, ArrayType_T *c, int size)
{
for (int i = 0; i < size; i++)
{
c[i] = a[i] + b[i];
}
} |
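The pair above allocates and adds the vectors entirely on the host; despite including the CUDA/HIP runtime headers, no device code is launched. For contrast, a minimal device-side vector add is sketched below (an illustration only, not part of the dataset row; the kernel and helper names are hypothetical):

```cpp
#include <cuda_runtime.h>

// Hypothetical device kernel: one thread per element.
__global__ void addVectorKernel(const int *a, const int *b, int *c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

// Hypothetical host helper: copy inputs to the device, launch, copy the result back.
void addVectorGpu(const int *a, const int *b, int *c, int n) {
    int *d_a, *d_b, *d_c;
    size_t bytes = n * sizeof(int);
    cudaMalloc((void **)&d_a, bytes);
    cudaMalloc((void **)&d_b, bytes);
    cudaMalloc((void **)&d_c, bytes);
    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);
    int threads = 256;
    int blocks = (n + threads - 1) / threads;
    addVectorKernel<<<blocks, threads>>>(d_a, d_b, d_c, n);
    cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
```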
cfa51cc1b4de55f27250c1899a095e6836d0da1f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
__global__ void add(int *A, int *B, int *C, int wa, int wb) {
// Get the 1D Array index of the matrix
int id = threadIdx.x;
int sum;
for (int i = 0; i < wb; ++i) {
sum = 0;
for (int j = 0; j < wa; ++j){
sum += (A[id*wa + j] * B[j*wb + i]);
}
C[id*wb + i] = sum;
}
}
int main(){
int a[100], b[100], c[100], n1, m1, n2, m2;
printf("Enter m1: ");
scanf("%d",&m1);
printf("Enter n1: ");
scanf("%d",&n1);
printf("Enter Matrix 1:\n");
for(int i=0;i<n1*m1;i++)
scanf("%d",&a[i]);
printf("Enter m2: ");
scanf("%d",&m2);
if (m2 != n1){
printf("cannot be multiplied\n");
exit(0);
}
printf("Enter n2: ");
scanf("%d",&n2);
printf("Enter Matrix 2:\n");
for(int i=0;i<n2*m2;i++)
scanf("%d",&b[i]);
int *d_a,*d_b,*d_c;
hipMalloc((void**)&d_a,sizeof(int)*n1*m1);
hipMalloc((void**)&d_b,sizeof(int)*n2*m2);
hipMalloc((void**)&d_c,sizeof(int)*m1*n2);
hipMemcpy(d_a,&a,sizeof(int)*n1*m1,hipMemcpyHostToDevice);
hipMemcpy(d_b,&b,sizeof(int)*n2*m2,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(1), dim3(m1), 0, 0, d_a, d_b, d_c, n1, n2);
hipMemcpy(&c,d_c,sizeof(int)*n2*m1,hipMemcpyDeviceToHost);
for(int i=0;i<m1*n2;i++){
if (i % n2 == 0)
printf("\n");
printf("%d ",c[i]);
}
printf("\n");
hipFree(d_a);
hipFree(d_b);
}
| cfa51cc1b4de55f27250c1899a095e6836d0da1f.cu | #include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
__global__ void add(int *A, int *B, int *C, int wa, int wb) {
// Get the 1D Array index of the matrix
int id = threadIdx.x;
int sum;
for (int i = 0; i < wb; ++i) {
sum = 0;
for (int j = 0; j < wa; ++j){
sum += (A[id*wa + j] * B[j*wb + i]);
}
C[id*wb + i] = sum;
}
}
int main(){
int a[100], b[100], c[100], n1, m1, n2, m2;
printf("Enter m1: ");
scanf("%d",&m1);
printf("Enter n1: ");
scanf("%d",&n1);
printf("Enter Matrix 1:\n");
for(int i=0;i<n1*m1;i++)
scanf("%d",&a[i]);
printf("Enter m2: ");
scanf("%d",&m2);
if (m2 != n1){
printf("cannot be multiplied\n");
exit(0);
}
printf("Enter n2: ");
scanf("%d",&n2);
printf("Enter Matrix 2:\n");
for(int i=0;i<n2*m2;i++)
scanf("%d",&b[i]);
int *d_a,*d_b,*d_c;
cudaMalloc((void**)&d_a,sizeof(int)*n1*m1);
cudaMalloc((void**)&d_b,sizeof(int)*n2*m2);
cudaMalloc((void**)&d_c,sizeof(int)*m1*n2);
cudaMemcpy(d_a,&a,sizeof(int)*n1*m1,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,&b,sizeof(int)*n2*m2,cudaMemcpyHostToDevice);
add<<<1, m1>>>(d_a, d_b, d_c, n1, n2);
cudaMemcpy(&c,d_c,sizeof(int)*n2*m1,cudaMemcpyDeviceToHost);
for(int i=0;i<m1*n2;i++){
if (i % n2 == 0)
printf("\n");
printf("%d ",c[i]);
}
printf("\n");
cudaFree(d_a);
cudaFree(d_b);
}
|
1fcd8a5bc923cd77e88dc7b7f6fc2b00b1028a4c.hip | // !!! This is a file automatically generated by hipify!!!
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/zmath.cuh>
namespace at { namespace native {
// We manually overload log because std::log does not work with thrust::complex types.
template<typename scalar_t>
__host__ __device__ static inline scalar_t log_wrapper(scalar_t v) {
return ::log(v);
}
template<typename T>
__host__ __device__ static inline thrust::complex<T> log_wrapper(thrust::complex<T> v) {
return thrust::log(v);
}
void log_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "log_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "log_cuda", [&] {
using thrust_t = typename ztype_cuda<scalar_t>::thrust_t;
gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t {
return log_wrapper(a);
});
});
});
}
// We manually overload log10 because std::log10 does not work with thrust::complex types.
template<typename scalar_t>
__host__ __device__ static inline scalar_t log10_wrapper(scalar_t v) {
return ::log10(v);
}
template<typename T>
__host__ __device__ static inline thrust::complex<T> log10_wrapper(thrust::complex<T> v) {
return thrust::log10(v);
}
void log10_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "log10_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "log10_cuda", [&] {
using thrust_t = typename ztype_cuda<scalar_t>::thrust_t;
gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t {
return log10_wrapper(a);
});
});
});
}
void log1p_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "log1p_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "log1p_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log1p(a);
});
});
});
}
// We manually overload log2 because std::log2 does not work with thrust::complex types.
template<typename scalar_t>
__host__ __device__ static inline scalar_t log2_wrapper(scalar_t v) {
return ::log2(v);
}
template<typename T>
__host__ __device__ static inline thrust::complex<T> log2_wrapper(thrust::complex<T> v) {
const thrust::complex<T> log2 = thrust::complex<T>(::log(2.0), 0.0);
return thrust::log(v)/log2;
}
void log2_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "log2_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "log2_cuda", [&] {
using thrust_t = typename ztype_cuda<scalar_t>::thrust_t;
gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t {
return log2_wrapper(a);
});
});
});
}
REGISTER_DISPATCH(log_stub, &log_kernel_cuda);
REGISTER_DISPATCH(log10_stub, &log10_kernel_cuda);
REGISTER_DISPATCH(log2_stub, &log2_kernel_cuda);
REGISTER_DISPATCH(log1p_stub, &log1p_kernel_cuda);
}} // namespace at::native
| 1fcd8a5bc923cd77e88dc7b7f6fc2b00b1028a4c.cu | #include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/zmath.cuh>
namespace at { namespace native {
// We manually overload log because std::log does not work with thrust::complex types.
template<typename scalar_t>
__host__ __device__ static inline scalar_t log_wrapper(scalar_t v) {
return ::log(v);
}
template<typename T>
__host__ __device__ static inline thrust::complex<T> log_wrapper(thrust::complex<T> v) {
return thrust::log(v);
}
void log_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "log_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "log_cuda", [&] {
using thrust_t = typename ztype_cuda<scalar_t>::thrust_t;
gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t {
return log_wrapper(a);
});
});
});
}
// We manually overload log10 because std::log10 does not work with thrust::complex types.
template<typename scalar_t>
__host__ __device__ static inline scalar_t log10_wrapper(scalar_t v) {
return ::log10(v);
}
template<typename T>
__host__ __device__ static inline thrust::complex<T> log10_wrapper(thrust::complex<T> v) {
return thrust::log10(v);
}
void log10_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "log10_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "log10_cuda", [&] {
using thrust_t = typename ztype_cuda<scalar_t>::thrust_t;
gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t {
return log10_wrapper(a);
});
});
});
}
void log1p_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "log1p_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "log1p_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log1p(a);
});
});
});
}
// We manually overload log2 because std::log2 does not work with thrust::complex types.
template<typename scalar_t>
__host__ __device__ static inline scalar_t log2_wrapper(scalar_t v) {
return ::log2(v);
}
template<typename T>
__host__ __device__ static inline thrust::complex<T> log2_wrapper(thrust::complex<T> v) {
const thrust::complex<T> log2 = thrust::complex<T>(::log(2.0), 0.0);
return thrust::log(v)/log2;
}
void log2_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "log2_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "log2_cuda", [&] {
using thrust_t = typename ztype_cuda<scalar_t>::thrust_t;
gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t {
return log2_wrapper(a);
});
});
});
}
REGISTER_DISPATCH(log_stub, &log_kernel_cuda);
REGISTER_DISPATCH(log10_stub, &log10_kernel_cuda);
REGISTER_DISPATCH(log2_stub, &log2_kernel_cuda);
REGISTER_DISPATCH(log1p_stub, &log1p_kernel_cuda);
}} // namespace at::native
|
f4fea44f70baca3f1429a9a1dcb138c583db4f03.hip | // !!! This is a file automatically generated by hipify!!!
/*=========================================================================
*
* Copyright RTK Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
// rtk includes
#include "rtkCudaLastDimensionTVDenoisingImageFilter.hcu"
#include "rtkCudaUtilities.hcu"
#include <itkMacro.h>
// cuda includes
#include <hip/hip_runtime.h>
// TEXTURES AND CONSTANTS //
__constant__ int4 c_Size;
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
// K E R N E L S -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_( S T A R T )_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
__global__ void
denoise_oneD_TV_kernel(float * in, float * out, float beta, float gamma, int niter)
{
// Allocate a few shared buffers
extern __shared__ float shared[];
float * input = &shared[0];
float * interm = &shared[c_Size.w];
float * output = &shared[2 * c_Size.w];
float * gradient = &shared[3 * c_Size.w];
// Each thread reads one element into the shared buffer
long int gindex = ((threadIdx.x * c_Size.z + blockIdx.z) * c_Size.y + blockIdx.y) * c_Size.x + blockIdx.x;
int lindex = threadIdx.x;
input[lindex] = in[gindex];
__syncthreads();
///////////////////////////////////////////////////
// Perform complete 1D TV denoising on the buffer
// with circular padding border conditions
// Multiply by beta
interm[lindex] = beta * input[lindex];
// Compute gradient
__syncthreads();
if (lindex == (c_Size.w - 1))
gradient[lindex] = interm[0] - interm[lindex];
else
gradient[lindex] = interm[lindex + 1] - interm[lindex];
// Magnitude threshold (in 1D, hard threshold on absolute value)
if (gradient[lindex] >= 0)
gradient[lindex] = fminf(gradient[lindex], gamma);
else
gradient[lindex] = fmaxf(gradient[lindex], -gamma);
// Rest of the iterations
for (int iter = 0; iter < niter; iter++)
{
// Compute divergence
__syncthreads();
if (lindex == 0)
interm[lindex] = gradient[lindex] - gradient[c_Size.w - 1];
else
interm[lindex] = gradient[lindex] - gradient[lindex - 1];
// Subtract and store in output buffer
__syncthreads();
output[lindex] = input[lindex] - interm[lindex];
// Multiply by beta
__syncthreads();
interm[lindex] = output[lindex] * beta;
// Compute gradient
__syncthreads();
if (lindex == (c_Size.w - 1))
gradient[lindex] -= (interm[0] - interm[lindex]);
else
gradient[lindex] -= (interm[lindex + 1] - interm[lindex]);
// Magnitude threshold
__syncthreads();
if (gradient[lindex] >= 0)
gradient[lindex] = fminf(gradient[lindex], gamma);
else
gradient[lindex] = fmaxf(gradient[lindex], -gamma);
}
// Done computing 1D TV for this buffer
////////////////////////////////////////////////////////
// Each thread writes one element from the shared buffer into the global memory
out[gindex] = output[lindex];
// out[gindex] = gindex;
}
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
// K E R N E L S -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-( E N D )-_-_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
void
CUDA_total_variation_last_dimension(int size[4],
float * dev_in,
float * dev_out,
float gamma,
float beta,
int NumberOfIterations)
{
int4 dev_Size = make_int4(size[0], size[1], size[2], size[3]);
hipMemcpyToSymbol(c_Size, &dev_Size, sizeof(int4));
// Thread Block Dimensions
// Create one thread block per voxel of the 3D volume.
// Each thread block handles a single 1D temporal vector
dim3 dimBlock = dim3(size[3], 1, 1);
int blocksInX = size[0];
int blocksInY = size[1];
int blocksInZ = size[2];
dim3 dimGrid = dim3(blocksInX, blocksInY, blocksInZ);
// Dynamic allocation of shared memory
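// The dynamic shared buffer holds 4 arrays of size[3] floats per block (input, interm,
// output, gradient), matching the offsets computed at the top of the kernel.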
hipLaunchKernelGGL(( denoise_oneD_TV_kernel), dim3(dimGrid), dim3(dimBlock), 4 * sizeof(float) * size[3], 0,
dev_in, dev_out, beta, gamma, NumberOfIterations);
CUDA_CHECK_ERROR;
}
| f4fea44f70baca3f1429a9a1dcb138c583db4f03.cu | /*=========================================================================
*
* Copyright RTK Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
// rtk includes
#include "rtkCudaLastDimensionTVDenoisingImageFilter.hcu"
#include "rtkCudaUtilities.hcu"
#include <itkMacro.h>
// cuda includes
#include <cuda.h>
// TEXTURES AND CONSTANTS //
__constant__ int4 c_Size;
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
// K E R N E L S -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_( S T A R T )_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
__global__ void
denoise_oneD_TV_kernel(float * in, float * out, float beta, float gamma, int niter)
{
// Allocate a few shared buffers
extern __shared__ float shared[];
float * input = &shared[0];
float * interm = &shared[c_Size.w];
float * output = &shared[2 * c_Size.w];
float * gradient = &shared[3 * c_Size.w];
// Each thread reads one element into the shared buffer
long int gindex = ((threadIdx.x * c_Size.z + blockIdx.z) * c_Size.y + blockIdx.y) * c_Size.x + blockIdx.x;
int lindex = threadIdx.x;
input[lindex] = in[gindex];
__syncthreads();
///////////////////////////////////////////////////
// Perform complete 1D TV denoising on the buffer
// with circular padding border conditions
// Multiply by beta
interm[lindex] = beta * input[lindex];
// Compute gradient
__syncthreads();
if (lindex == (c_Size.w - 1))
gradient[lindex] = interm[0] - interm[lindex];
else
gradient[lindex] = interm[lindex + 1] - interm[lindex];
// Magnitude threshold (in 1D, hard threshold on absolute value)
if (gradient[lindex] >= 0)
gradient[lindex] = fminf(gradient[lindex], gamma);
else
gradient[lindex] = fmaxf(gradient[lindex], -gamma);
// Rest of the iterations
for (int iter = 0; iter < niter; iter++)
{
// Compute divergence
__syncthreads();
if (lindex == 0)
interm[lindex] = gradient[lindex] - gradient[c_Size.w - 1];
else
interm[lindex] = gradient[lindex] - gradient[lindex - 1];
// Subtract and store in output buffer
__syncthreads();
output[lindex] = input[lindex] - interm[lindex];
// Multiply by beta
__syncthreads();
interm[lindex] = output[lindex] * beta;
// Compute gradient
__syncthreads();
if (lindex == (c_Size.w - 1))
gradient[lindex] -= (interm[0] - interm[lindex]);
else
gradient[lindex] -= (interm[lindex + 1] - interm[lindex]);
// Magnitude threshold
__syncthreads();
if (gradient[lindex] >= 0)
gradient[lindex] = fminf(gradient[lindex], gamma);
else
gradient[lindex] = fmaxf(gradient[lindex], -gamma);
}
// Done computing 1D TV for this buffer
////////////////////////////////////////////////////////
// Each thread writes one element from the shared buffer into the global memory
out[gindex] = output[lindex];
// out[gindex] = gindex;
}
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
// K E R N E L S -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-( E N D )-_-_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
void
CUDA_total_variation_last_dimension(int size[4],
float * dev_in,
float * dev_out,
float gamma,
float beta,
int NumberOfIterations)
{
int4 dev_Size = make_int4(size[0], size[1], size[2], size[3]);
cudaMemcpyToSymbol(c_Size, &dev_Size, sizeof(int4));
// Thread Block Dimensions
// Create one thread block per voxel of the 3D volume.
// Each thread block handles a single 1D temporal vector
dim3 dimBlock = dim3(size[3], 1, 1);
int blocksInX = size[0];
int blocksInY = size[1];
int blocksInZ = size[2];
dim3 dimGrid = dim3(blocksInX, blocksInY, blocksInZ);
// Dynamic allocation of shared memory
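// The dynamic shared buffer holds 4 arrays of size[3] floats per block (input, interm,
// output, gradient), matching the offsets computed at the top of the kernel.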
denoise_oneD_TV_kernel<<<dimGrid, dimBlock, 4 * sizeof(float) * size[3]>>>(
dev_in, dev_out, beta, gamma, NumberOfIterations);
CUDA_CHECK_ERROR;
}
|
e68122d011de9305c450cacee00ca144a8f9853a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <pthread.h>
#include <thread>
#include <unistd.h>
#include <cmath>
#include <fcntl.h>
#include <fstream>
#include <string>
#include <vector>
#include <sys/stat.h>
#include <sys/time.h>
#include <map>
#include <queue>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <grpcpp/grpcpp.h>
#include "hash.h"
#include "new_file.h"
#include "recalcu.h"
#include "rsync.grpc.pb.h"
using grpc::Server;
using grpc::ServerBuilder;
using grpc::ServerContext;
using grpc::Status;
using rsync::FileInfo;
using rsync::FileHead;
using rsync::FileChunkInfo;
using rsync::MatchTokenReply;
using rsync::MatchToken;
using rsync::UnmatchChunks;
using rsync::UnmatchChunkInfo;
using rsync::RsyncReply;
using rsync::Rsync;
extern int n_stream;
extern int chunk_size;
extern int threads_num;
extern int cpu_threads_num;
extern int chunk_num;
extern int round_num;
extern int taskNum;
extern int recalcu_region_size;
extern std::queue<Task> cputaskq;
extern std::map<std::string, Task> taskMap;
extern std::map<std::string, buildTask> buildTaskMap;
extern hipStream_t *stream;
extern int time_use;
extern struct timeval start;
extern struct timeval end;
extern int handled_task_num;
extern int handled_calcutask_num;
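// gRPC service implementing the GPU-accelerated rsync receiver. A client drives three calls
// per file: PreCalcu stages the old file on the GPU and allocates the working buffers,
// CalcuDiff builds a hash table from the client's chunk checksums, matches it against the old
// file on the GPU and returns the matched chunk tokens, and BuildNewFile assembles the new
// file from matched old-file chunks plus the literal chunks sent by the client.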
class RsyncServiceImpl final : public Rsync::Service {
Status PreCalcu(ServerContext* context, const FileHead* request,
RsyncReply* reply) override {
int c_time_use;
struct timeval c_start;
struct timeval c_end;
gettimeofday(&c_start,NULL);
std::string rsyncFileName = request->filename();
int new_file_len = request->filesize();
chunk_size = request->chunksize();
struct stat statbuf;
int fd;
if((fd = open(rsyncFileName.c_str(), O_RDWR, 0777)) < 0){
fprintf(stderr, "Unable to open rsync file.\n");
exit(1);
}
if (0 > fstat(fd, &statbuf)) printf("file with path %s not found\n",rsyncFileName.c_str());
size_t fileLen = (size_t) statbuf.st_size;
char *d_file, *file;
hipHostMalloc((void**)&file, sizeof(char)*fileLen,hipHostMallocDefault);
hipMalloc((void**)&d_file, sizeof(char)*fileLen);
size_t read_size = 0;
while(read_size < fileLen){
read_size += read(fd, &(file[read_size]), chunk_size);
}
int local_round_num = round_num % n_stream;
// use multistream or not
round_num ++;
hipMemcpyAsync(d_file, file, sizeof(char)*(fileLen), hipMemcpyHostToDevice, stream[local_round_num]);
hipEvent_t event;
hipEventCreate(&event);
hipEventRecord(event, stream[local_round_num]);
char *new_file, *d_new_file;
hipHostMalloc((void**)&new_file, sizeof(char)*new_file_len, hipHostMallocDefault);
hipMalloc((void**)&d_new_file, sizeof(char)*new_file_len);
int total_chunk_num = ceil(float(fileLen) / chunk_size);
int total_threads = ceil(float(fileLen) / (chunk_num*chunk_size));
int *match_offset, *match_chunkid, *h_stat;
hipHostMalloc((void**)&match_offset, sizeof(int)*total_chunk_num, hipHostMallocDefault);
hipHostMalloc((void**)&match_chunkid, sizeof(int)*total_chunk_num, hipHostMallocDefault);
hipHostMalloc((void**)&h_stat, sizeof(int)*total_threads, hipHostMallocDefault);
int *d_match_offset, *d_match_chunkid, *d_stat;
hipMalloc((void**)&d_match_offset,sizeof(int)*total_chunk_num);
hipMalloc((void**)&d_match_chunkid,sizeof(int)*total_chunk_num);
hipMalloc((void**)&d_stat,sizeof(int)*total_threads);
hipMemset(d_match_offset, -1, sizeof(int)*total_chunk_num);
hipMemset(d_match_chunkid, -1, sizeof(int)*total_chunk_num);
hipMemset(d_stat, 0, sizeof(int)*total_threads);
Node *d_ht;
hipMalloc((void**)&d_ht,sizeof(Node)*HASHSIZE);
// only here do we check whether the async transfer of the file contents has completed
hipEventSynchronize(event);
hipEventDestroy(event);
Task t = {rsyncFileName, file, new_file, match_chunkid, match_offset, NULL, h_stat, \
d_file, d_new_file, d_match_chunkid, d_match_offset, d_ht, d_stat, \
(int)fileLen, new_file_len, chunk_size, total_chunk_num, total_threads, local_round_num, fd};
taskMap.insert(std::pair<std::string, Task> (rsyncFileName, t));
//printf("server precalcu finished, vector size %d, with content \n",(int)taskMap.size());
reply->set_success(true);
gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout<<"on server, spend " << c_time_use << " us on preparation" << std::endl;
return Status::OK;
}
Status CalcuDiff(ServerContext* context, const FileInfo* request,
MatchTokenReply* reply) override {
if(handled_task_num ==0) gettimeofday(&start,NULL);
int c_time_use;
struct timeval c_start;
struct timeval c_end;
gettimeofday(&c_start,NULL);
int size = request->chunkinfo_size();
int *matchIdArray = (int*)malloc(sizeof(int)*size); // one to one match
memset(matchIdArray, -1, sizeof(int)*size); // initialize every entry to -1 (no match yet)
std::map<int, std::set<int> > matchIdMap; // one to more match
// this ht is best allocated here, so the preparation work above gets a bit more time
// if the ht were built during preparation we would first have to while-loop to fetch task t, yet receiving the checklist would still take just as long
Node *ht = (Node*)malloc(sizeof(Node)*HASHSIZE);
for(int i = 0;i<HASHSIZE;++i){
ht[i].chunk_id = -1;
ht[i].next = NULL;
}
for (int i = 0; i < size; i++) {
const FileChunkInfo item = request->chunkinfo(i);
const int chunkId = item.chunkid();
const int checksum1 = item.checksum1();
char checksum2[16];
strncpy(checksum2, item.checksum2().c_str(), 16);
if((insert_hashtable(ht, chunkId, checksum1, checksum2, matchIdArray, matchIdMap))==1){}
else printf("insert failed\n");
}
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout<<"on server, spend " << c_time_use << " us on get checksum list from client" << std::endl;*/
//gettimeofday(&c_start,NULL);
Task t;
// make sure we finish the preparation
while(1){
auto it = taskMap.find(request->filename());
if(it != taskMap.end()){
t = it->second;
break;
}
else{
std::this_thread::yield(); // should CalcuDiff be multi-threaded? otherwise this yield is pointless
}
}
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout<<"on server, spend " << c_time_use << " us on waiting for preparation to finish" << std::endl;*/
//gettimeofday(&c_start,NULL);
t.ht = ht;
hipMemcpy(t.dHt, ht, sizeof(Node)*HASHSIZE, hipMemcpyHostToDevice); //80us
int nthreads = threads_num;
int nblocks = ceil(float(t.totalThreads) / nthreads);
//std::cout << "total threads " << t.totalThreads << ", threads in a block " << nthreads << ", nblocks " << nblocks << std::endl;
int i = t.roundNum;
hipLaunchKernelGGL(( multiwarp_match), dim3(nblocks), dim3(nthreads), 0, stream[i], t.dHt, t.dFileContent, t.fileLen, t.totalThreads, t.chunkSize, chunk_num,
t.dMatchOffset, t.dMatchChunkid, t.dStat);
// sync point, wait for memcpy asyn finish
hipEvent_t event, event1;
hipEventCreate(&event);
hipEventRecord(event, stream[i]);
while(hipEventQuery(event)==hipErrorNotReady){
std::this_thread::yield();
}
//hipEventSynchronize(event);
hipEventDestroy(event);
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout<<"on server, spend " << c_time_use << " us on copying ht into gpu and gpu computation" << std::endl;*/
//gettimeofday(&c_start,NULL);
int recalcu_threads = ceil(float(t.totalThreads)/recalcu_region_size)-1;
// the thread arrangement here still needs to be revised, wait to implement
hipLaunchKernelGGL(( gpu_recalcu), dim3(1), dim3(recalcu_threads), 0, stream[i], t.dHt, t.dFileContent, t.chunkSize, chunk_num, t.dMatchOffset, t.dMatchChunkid, t.dStat, recalcu_region_size);
hipMemcpyAsync(t.stat, t.dStat, sizeof(int)*t.totalThreads,hipMemcpyDeviceToHost,stream[i]);
hipMemcpyAsync(t.matchOffset, t.dMatchOffset, sizeof(int)*t.totalChunkNum,hipMemcpyDeviceToHost,stream[i]);
hipMemcpyAsync(t.matchChunkid, t.dMatchChunkid, sizeof(int)*t.totalChunkNum,hipMemcpyDeviceToHost,stream[i]);
hipEventCreate(&event1);
hipEventRecord(event1, stream[i]);
while(hipEventQuery(event1)==hipErrorNotReady){
std::this_thread::yield();
}
//hipEventSynchronize(event1);
//hipEventDestroy(event1);
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server , single rsync file with size " << t.fileLen << ", spend " << c_time_use << " us on gpu first recalcu" << std::endl;*/
//gettimeofday(&c_start,NULL);
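// CPU pass: patch up matches that straddle the boundary between two recalcu regions. When the
// last match of one region reaches past the first match of the next region, that region is
// re-processed on the host via recalcu(), with the final region using its actual (smaller) size.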
for(int i=recalcu_region_size-1; i<t.totalThreads; i += recalcu_region_size){
int t_match_num = t.stat[i];
int j = i+1;
int jump_pos = t.matchOffset[chunk_num*i+t_match_num-1]+chunk_size;
if(t_match_num > 0 && t.stat[j] > 0 && jump_pos > t.matchOffset[chunk_num*j]){
if(i+recalcu_region_size > t.totalThreads){
int last_region_size = t.totalThreads - i;
recalcu(chunk_size, chunk_num, t.stat, jump_pos, t.fileLen, t.totalThreads,
t.file, t.matchOffset, t.matchChunkid, t.ht, j, last_region_size);
}
else{
recalcu(chunk_size, chunk_num, t.stat, jump_pos, t.fileLen, t.totalThreads,
t.file, t.matchOffset, t.matchChunkid, t.ht, j, recalcu_region_size);
}
}
}
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server , single rsync file with size " << t.fileLen << ", spend " << c_time_use << " us on cpu final recalcu" << std::endl;*/
hipFree(t.dMatchChunkid);
hipFree(t.dMatchOffset);
hipFree(t.dHt);
hipFree(t.dStat);
free(t.ht);
buildTask bt = {t.newFileLen, t.chunkSize, t.totalThreads, t.roundNum, t.fd, t.dFileContent, t.dNewFile, t.file, t.newFile, t.stat, t.matchChunkid, t.matchOffset, matchIdArray, matchIdMap, t.totalChunkNum, size};
buildTaskMap[t.fileName] = bt;
//gettimeofday(&c_start,NULL);
std::set<int> matchTokens;
for(int i=0;i<t.totalChunkNum;++i){
if(t.matchChunkid[i] != -1){
int _chunkid = matchIdArray[t.matchChunkid[i]];
if(_chunkid != -1){
matchTokens.insert(_chunkid);
}
else{
std::set<int> idset = matchIdMap[t.matchChunkid[i]];
for(auto it = idset.begin(); it != idset.end(); ++it){
matchTokens.insert(*it);
}
}
}
}
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server , rsync file size " << t.fileLen << ", spend " << c_time_use << " us on pick up tokens need to be send" << std::endl;*/
//gettimeofday(&c_start,NULL);
std::cout << "match token size " << matchTokens.size() << std::endl;
reply->set_filename(t.fileName);
for (auto it = matchTokens.begin(); it != matchTokens.end(); ++it) {
MatchToken *item = reply->add_tokeninfo();
item->set_chunkid(*it);
}
taskMap.erase(request->filename());
gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server , rsync file size " << t.fileLen << ", spend " << c_time_use << " us on calcu diff" << std::endl;
handled_calcutask_num++;
gettimeofday(&end,NULL);
if(handled_calcutask_num == 3){
time_use =(end.tv_sec-start.tv_sec)*1000000+(end.tv_usec-start.tv_usec);
std::cout << "8 rsync file with size spend " << time_use << " us on gpu rsyn compute" << std::endl;
}
return Status::OK;
}
Status BuildNewFile(ServerContext* context, const UnmatchChunks* request,
RsyncReply* reply) override {
int c_time_use;
struct timeval c_start;
struct timeval c_end;
gettimeofday(&c_start,NULL);
std::string filename = request->filename();
buildTask t;
// make sure we finish the cpu revise
// the while loops here exist for asynchronously handling multiple files from the same client; with synchronous handling they would be pointless
while(1){
auto it = buildTaskMap.find(request->filename());
if(it != buildTaskMap.end()){
t = it->second;
buildTaskMap.erase(request->filename());
break;
}
else{
std::this_thread::yield(); // should CalcuDiff be multi-threaded? otherwise this yield is pointless
}
}
//gettimeofday(&c_start,NULL);
int *dmatchIdArray;
hipMalloc((void**)&dmatchIdArray,sizeof(int)*t.totalNewChunkNum);
hipMemcpy(dmatchIdArray, t.matchIdArray, sizeof(int)*t.totalNewChunkNum, hipMemcpyHostToDevice);
int total_multi_match_num = (t.totalNewChunkNum/20);
int *d_multi_match_array;
hipMalloc((void**)&d_multi_match_array,sizeof(int)*total_multi_match_num); // the divide-by-20 sizing above is an arbitrary choice of the author
int *multi_match_array = (int*)malloc(sizeof(int)*total_multi_match_num);
int nthreads = threads_num;
int nblocks = ceil(float(t.totalOldChunkNum) / nthreads);
int i = t.roundNum;
int *multi_match_num = (int*)malloc(sizeof(int)*1);
int *d_multi_match_num;
hipMalloc((void**)&d_multi_match_num,sizeof(int)*1);
hipMemset(d_multi_match_num, 0, sizeof(int)*1);
hipLaunchKernelGGL(( oldfile_match_build), dim3(nblocks), dim3(nthreads), 0, stream[i], t.dFileContent, dmatchIdArray, t.chunkSize, chunk_num, t.matchOffset, t.matchChunkid, t.dNewFile, t.totalOldChunkNum, d_multi_match_array, d_multi_match_num);
hipMemcpyAsync(t.newFile, t.dNewFile, sizeof(char)*t.newFileLen,hipMemcpyDeviceToHost,stream[i]);
hipMemcpyAsync(multi_match_array, d_multi_match_array, sizeof(int)*total_multi_match_num, hipMemcpyDeviceToHost,stream[i]);
hipMemcpyAsync(multi_match_num, d_multi_match_num, sizeof(int)*1, hipMemcpyDeviceToHost,stream[i]);
hipEvent_t event;
hipEventCreate(&event);
hipEventRecord(event, stream[i]);
hipEventSynchronize(event);
hipEventDestroy(event);
char chunk_buffer[t.chunkSize];
size_t read_size = 0;
for(int i=0;i<*multi_match_num;++i){
int match_pos = multi_match_array[i];
int old_file_pos = t.matchOffset[match_pos];
std::set<int> idset = t.matchIdMap[t.matchChunkid[match_pos]];
lseek(t.fd, old_file_pos, SEEK_SET);
read_size = read(t.fd, chunk_buffer, t.chunkSize);
for(auto id : idset){
int new_file_pos = id * t.chunkSize;
memcpy(&t.newFile[new_file_pos], chunk_buffer, read_size);
}
}
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server , using gpu old file content and build spend " << c_time_use << " us" << std::endl;*/
//gettimeofday(&c_start,NULL);
int size = request->chunkinfo_size();
for (int i = 0; i < size; i++) {
const UnmatchChunkInfo item = request->chunkinfo(i);
const int pos = item.pos();
memcpy(&t.newFile[pos], item.content().c_str(), item.length());
}
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server , new file content receive and build spend " << c_time_use << " us" << std::endl;*/
//gettimeofday(&c_start,NULL);
std::ofstream outf;
outf.open("construct.txt");
outf.write(t.newFile, t.newFileLen);
outf.close();
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server, construct new file spend " << c_time_use << " us" << std::endl;*/
//gettimeofday(&c_start,NULL);
close(t.fd);
free(t.matchIdArray);
hipFree(dmatchIdArray);
hipFree(t.dFileContent);
hipFree(t.dNewFile);
hipHostFree(t.newFile);
hipHostFree(t.matchChunkid);
hipHostFree(t.matchOffset);
hipHostFree(t.stat);
hipHostFree(t.file);
reply->set_filename(filename);
reply->set_success(true);
gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server, build new file spend " << c_time_use << " us" << std::endl;
handled_task_num++;
gettimeofday(&end,NULL);
if(handled_task_num == 3){
time_use =(end.tv_sec-start.tv_sec)*1000000+(end.tv_usec-start.tv_usec);
std::cout << "8 rsync file with size spend " << time_use << " us on gpu rsyn compute" << std::endl;
}
return Status::OK;
}
}; | e68122d011de9305c450cacee00ca144a8f9853a.cu | #include <pthread.h>
#include <thread>
#include <unistd.h>
#include <cmath>
#include <fcntl.h>
#include <fstream>
#include <string>
#include <vector>
#include <sys/stat.h>
#include <sys/time.h>
#include <map>
#include <queue>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <grpcpp/grpcpp.h>
#include "hash.h"
#include "new_file.h"
#include "recalcu.h"
#include "rsync.grpc.pb.h"
using grpc::Server;
using grpc::ServerBuilder;
using grpc::ServerContext;
using grpc::Status;
using rsync::FileInfo;
using rsync::FileHead;
using rsync::FileChunkInfo;
using rsync::MatchTokenReply;
using rsync::MatchToken;
using rsync::UnmatchChunks;
using rsync::UnmatchChunkInfo;
using rsync::RsyncReply;
using rsync::Rsync;
extern int n_stream;
extern int chunk_size;
extern int threads_num;
extern int cpu_threads_num;
extern int chunk_num;
extern int round_num;
extern int taskNum;
extern int recalcu_region_size;
extern std::queue<Task> cputaskq;
extern std::map<std::string, Task> taskMap;
extern std::map<std::string, buildTask> buildTaskMap;
extern cudaStream_t *stream;
extern int time_use;
extern struct timeval start;
extern struct timeval end;
extern int handled_task_num;
extern int handled_calcutask_num;
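// gRPC service implementing the GPU-accelerated rsync receiver. A client drives three calls
// per file: PreCalcu stages the old file on the GPU and allocates the working buffers,
// CalcuDiff builds a hash table from the client's chunk checksums, matches it against the old
// file on the GPU and returns the matched chunk tokens, and BuildNewFile assembles the new
// file from matched old-file chunks plus the literal chunks sent by the client.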
class RsyncServiceImpl final : public Rsync::Service {
Status PreCalcu(ServerContext* context, const FileHead* request,
RsyncReply* reply) override {
int c_time_use;
struct timeval c_start;
struct timeval c_end;
gettimeofday(&c_start,NULL);
std::string rsyncFileName = request->filename();
int new_file_len = request->filesize();
chunk_size = request->chunksize();
struct stat statbuf;
int fd;
if((fd = open(rsyncFileName.c_str(), O_RDWR, 0777)) < 0){
fprintf(stderr, "Unable to open rsync file.\n");
exit(1);
}
if (0 > fstat(fd, &statbuf)) printf("file with path %s not found\n",rsyncFileName.c_str());
size_t fileLen = (size_t) statbuf.st_size;
char *d_file, *file;
cudaHostAlloc((void**)&file, sizeof(char)*fileLen,cudaHostAllocDefault);
cudaMalloc((void**)&d_file, sizeof(char)*fileLen);
size_t read_size = 0;
while(read_size < fileLen){
read_size += read(fd, &(file[read_size]), chunk_size);
}
int local_round_num = round_num % n_stream;
// use multistream or not
round_num ++;
cudaMemcpyAsync(d_file, file, sizeof(char)*(fileLen), cudaMemcpyHostToDevice, stream[local_round_num]);
cudaEvent_t event;
cudaEventCreate(&event);
cudaEventRecord(event, stream[local_round_num]);
char *new_file, *d_new_file;
cudaHostAlloc((void**)&new_file, sizeof(char)*new_file_len, cudaHostAllocDefault);
cudaMalloc((void**)&d_new_file, sizeof(char)*new_file_len);
int total_chunk_num = ceil(float(fileLen) / chunk_size);
int total_threads = ceil(float(fileLen) / (chunk_num*chunk_size));
int *match_offset, *match_chunkid, *h_stat;
cudaHostAlloc((void**)&match_offset, sizeof(int)*total_chunk_num, cudaHostAllocDefault);
cudaHostAlloc((void**)&match_chunkid, sizeof(int)*total_chunk_num, cudaHostAllocDefault);
cudaHostAlloc((void**)&h_stat, sizeof(int)*total_threads, cudaHostAllocDefault);
int *d_match_offset, *d_match_chunkid, *d_stat;
cudaMalloc((void**)&d_match_offset,sizeof(int)*total_chunk_num);
cudaMalloc((void**)&d_match_chunkid,sizeof(int)*total_chunk_num);
cudaMalloc((void**)&d_stat,sizeof(int)*total_threads);
cudaMemset(d_match_offset, -1, sizeof(int)*total_chunk_num);
cudaMemset(d_match_chunkid, -1, sizeof(int)*total_chunk_num);
cudaMemset(d_stat, 0, sizeof(int)*total_threads);
Node *d_ht;
cudaMalloc((void**)&d_ht,sizeof(Node)*HASHSIZE);
// only here do we check whether the async transfer of the file contents has completed
cudaEventSynchronize(event);
cudaEventDestroy(event);
Task t = {rsyncFileName, file, new_file, match_chunkid, match_offset, NULL, h_stat, \
d_file, d_new_file, d_match_chunkid, d_match_offset, d_ht, d_stat, \
(int)fileLen, new_file_len, chunk_size, total_chunk_num, total_threads, local_round_num, fd};
taskMap.insert(std::pair<std::string, Task> (rsyncFileName, t));
//printf("server precalcu finished, vector size %d, with content \n",(int)taskMap.size());
reply->set_success(true);
gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout<<"on server, spend " << c_time_use << " us on preparation" << std::endl;
return Status::OK;
}
Status CalcuDiff(ServerContext* context, const FileInfo* request,
MatchTokenReply* reply) override {
if(handled_task_num ==0) gettimeofday(&start,NULL);
int c_time_use;
struct timeval c_start;
struct timeval c_end;
gettimeofday(&c_start,NULL);
int size = request->chunkinfo_size();
int *matchIdArray = (int*)malloc(sizeof(int)*size); // one to one match
memset(matchIdArray, -1, sizeof(int)*size); // initialize every entry to -1 (no match yet)
std::map<int, std::set<int> > matchIdMap; // one to more match
// this ht is best allocated here, so the preparation work above gets a bit more time
// if the ht were built during preparation we would first have to while-loop to fetch task t, yet receiving the checklist would still take just as long
Node *ht = (Node*)malloc(sizeof(Node)*HASHSIZE);
for(int i = 0;i<HASHSIZE;++i){
ht[i].chunk_id = -1;
ht[i].next = NULL;
}
for (int i = 0; i < size; i++) {
const FileChunkInfo item = request->chunkinfo(i);
const int chunkId = item.chunkid();
const int checksum1 = item.checksum1();
char checksum2[16];
strncpy(checksum2, item.checksum2().c_str(), 16);
if((insert_hashtable(ht, chunkId, checksum1, checksum2, matchIdArray, matchIdMap))==1){}
else printf("insert failed\n");
}
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout<<"on server, spend " << c_time_use << " us on get checksum list from client" << std::endl;*/
//gettimeofday(&c_start,NULL);
Task t;
// make sure we finish the preparation
while(1){
auto it = taskMap.find(request->filename());
if(it != taskMap.end()){
t = it->second;
break;
}
else{
std::this_thread::yield(); // should CalcuDiff be multi-threaded? otherwise this yield is pointless
}
}
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout<<"on server, spend " << c_time_use << " us on waiting for preparation to finish" << std::endl;*/
//gettimeofday(&c_start,NULL);
t.ht = ht;
cudaMemcpy(t.dHt, ht, sizeof(Node)*HASHSIZE, cudaMemcpyHostToDevice); //80us
int nthreads = threads_num;
int nblocks = ceil(float(t.totalThreads) / nthreads);
//std::cout << "total threads " << t.totalThreads << ", threads in a block " << nthreads << ", nblocks " << nblocks << std::endl;
int i = t.roundNum;
multiwarp_match<<<nblocks, nthreads, 0, stream[i]>>>(t.dHt, t.dFileContent, t.fileLen, t.totalThreads, t.chunkSize, chunk_num,
t.dMatchOffset, t.dMatchChunkid, t.dStat);
// sync point, wait for memcpy asyn finish
cudaEvent_t event, event1;
cudaEventCreate(&event);
cudaEventRecord(event, stream[i]);
while(cudaEventQuery(event)==cudaErrorNotReady){
std::this_thread::yield();
}
//cudaEventSynchronize(event);
cudaEventDestroy(event);
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout<<"on server, spend " << c_time_use << " us on copying ht into gpu and gpu computation" << std::endl;*/
//gettimeofday(&c_start,NULL);
int recalcu_threads = ceil(float(t.totalThreads)/recalcu_region_size)-1;
// the thread arrangement here still needs to be revised, wait to implement
gpu_recalcu<<<1, recalcu_threads, 0, stream[i]>>>(t.dHt, t.dFileContent, t.chunkSize, chunk_num, t.dMatchOffset, t.dMatchChunkid, t.dStat, recalcu_region_size);
cudaMemcpyAsync(t.stat, t.dStat, sizeof(int)*t.totalThreads,cudaMemcpyDeviceToHost,stream[i]);
cudaMemcpyAsync(t.matchOffset, t.dMatchOffset, sizeof(int)*t.totalChunkNum,cudaMemcpyDeviceToHost,stream[i]);
cudaMemcpyAsync(t.matchChunkid, t.dMatchChunkid, sizeof(int)*t.totalChunkNum,cudaMemcpyDeviceToHost,stream[i]);
cudaEventCreate(&event1);
cudaEventRecord(event1, stream[i]);
while(cudaEventQuery(event1)==cudaErrorNotReady){
std::this_thread::yield();
}
//cudaEventSynchronize(event1);
//cudaEventDestroy(event1);
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server , single rsync file with size " << t.fileLen << ", spend " << c_time_use << " us on gpu first recalcu" << std::endl;*/
//gettimeofday(&c_start,NULL);
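// CPU pass: patch up matches that straddle the boundary between two recalcu regions. When the
// last match of one region reaches past the first match of the next region, that region is
// re-processed on the host via recalcu(), with the final region using its actual (smaller) size.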
for(int i=recalcu_region_size-1; i<t.totalThreads; i += recalcu_region_size){
int t_match_num = t.stat[i];
int j = i+1;
int jump_pos = t.matchOffset[chunk_num*i+t_match_num-1]+chunk_size;
if(t_match_num > 0 && t.stat[j] > 0 && jump_pos > t.matchOffset[chunk_num*j]){
if(i+recalcu_region_size > t.totalThreads){
int last_region_size = t.totalThreads - i;
recalcu(chunk_size, chunk_num, t.stat, jump_pos, t.fileLen, t.totalThreads,
t.file, t.matchOffset, t.matchChunkid, t.ht, j, last_region_size);
}
else{
recalcu(chunk_size, chunk_num, t.stat, jump_pos, t.fileLen, t.totalThreads,
t.file, t.matchOffset, t.matchChunkid, t.ht, j, recalcu_region_size);
}
}
}
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server , single rsync file with size " << t.fileLen << ", spend " << c_time_use << " us on cpu final recalcu" << std::endl;*/
cudaFree(t.dMatchChunkid);
cudaFree(t.dMatchOffset);
cudaFree(t.dHt);
cudaFree(t.dStat);
free(t.ht);
buildTask bt = {t.newFileLen, t.chunkSize, t.totalThreads, t.roundNum, t.fd, t.dFileContent, t.dNewFile, t.file, t.newFile, t.stat, t.matchChunkid, t.matchOffset, matchIdArray, matchIdMap, t.totalChunkNum, size};
buildTaskMap[t.fileName] = bt;
//gettimeofday(&c_start,NULL);
std::set<int> matchTokens;
for(int i=0;i<t.totalChunkNum;++i){
if(t.matchChunkid[i] != -1){
int _chunkid = matchIdArray[t.matchChunkid[i]];
if(_chunkid != -1){
matchTokens.insert(_chunkid);
}
else{
std::set<int> idset = matchIdMap[t.matchChunkid[i]];
for(auto it = idset.begin(); it != idset.end(); ++it){
matchTokens.insert(*it);
}
}
}
}
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server , rsync file size " << t.fileLen << ", spend " << c_time_use << " us on pick up tokens need to be send" << std::endl;*/
//gettimeofday(&c_start,NULL);
std::cout << "match token size " << matchTokens.size() << std::endl;
reply->set_filename(t.fileName);
for (auto it = matchTokens.begin(); it != matchTokens.end(); ++it) {
MatchToken *item = reply->add_tokeninfo();
item->set_chunkid(*it);
}
taskMap.erase(request->filename());
gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server , rsync file size " << t.fileLen << ", spend " << c_time_use << " us on calcu diff" << std::endl;
handled_calcutask_num++;
gettimeofday(&end,NULL);
if(handled_calcutask_num == 3){
time_use =(end.tv_sec-start.tv_sec)*1000000+(end.tv_usec-start.tv_usec);
std::cout << "8 rsync file with size spend " << time_use << " us on gpu rsyn compute" << std::endl;
}
return Status::OK;
}
Status BuildNewFile(ServerContext* context, const UnmatchChunks* request,
RsyncReply* reply) override {
int c_time_use;
struct timeval c_start;
struct timeval c_end;
gettimeofday(&c_start,NULL);
std::string filename = request->filename();
buildTask t;
// make sure we finish the cpu revise
// the while loops here exist for asynchronously handling multiple files from the same client; with synchronous handling they would be pointless
while(1){
auto it = buildTaskMap.find(request->filename());
if(it != buildTaskMap.end()){
t = it->second;
buildTaskMap.erase(request->filename());
break;
}
else{
std::this_thread::yield(); // should CalcuDiff be multi-threaded? otherwise this yield is pointless
}
}
//gettimeofday(&c_start,NULL);
int *dmatchIdArray;
cudaMalloc((void**)&dmatchIdArray,sizeof(int)*t.totalNewChunkNum);
cudaMemcpy(dmatchIdArray, t.matchIdArray, sizeof(int)*t.totalNewChunkNum, cudaMemcpyHostToDevice);
int total_multi_match_num = (t.totalNewChunkNum/20);
int *d_multi_match_array;
cudaMalloc((void**)&d_multi_match_array,sizeof(int)*total_multi_match_num); // the divide-by-20 sizing above is an arbitrary choice of the author
int *multi_match_array = (int*)malloc(sizeof(int)*total_multi_match_num);
int nthreads = threads_num;
int nblocks = ceil(float(t.totalOldChunkNum) / nthreads);
int i = t.roundNum;
int *multi_match_num = (int*)malloc(sizeof(int)*1);
int *d_multi_match_num;
cudaMalloc((void**)&d_multi_match_num,sizeof(int)*1);
cudaMemset(d_multi_match_num, 0, sizeof(int)*1);
oldfile_match_build<<<nblocks, nthreads, 0, stream[i]>>>(t.dFileContent, dmatchIdArray, t.chunkSize, chunk_num, t.matchOffset, t.matchChunkid, t.dNewFile, t.totalOldChunkNum, d_multi_match_array, d_multi_match_num);
cudaMemcpyAsync(t.newFile, t.dNewFile, sizeof(char)*t.newFileLen,cudaMemcpyDeviceToHost,stream[i]);
cudaMemcpyAsync(multi_match_array, d_multi_match_array, sizeof(int)*total_multi_match_num, cudaMemcpyDeviceToHost,stream[i]);
cudaMemcpyAsync(multi_match_num, d_multi_match_num, sizeof(int)*1, cudaMemcpyDeviceToHost,stream[i]);
cudaEvent_t event;
cudaEventCreate(&event);
cudaEventRecord(event, stream[i]);
cudaEventSynchronize(event);
cudaEventDestroy(event);
char chunk_buffer[t.chunkSize];
size_t read_size = 0;
for(int i=0;i<*multi_match_num;++i){
int match_pos = multi_match_array[i];
int old_file_pos = t.matchOffset[match_pos];
std::set<int> idset = t.matchIdMap[t.matchChunkid[match_pos]];
lseek(t.fd, old_file_pos, SEEK_SET);
read_size = read(t.fd, chunk_buffer, t.chunkSize);
for(auto id : idset){
int new_file_pos = id * t.chunkSize;
memcpy(&t.newFile[new_file_pos], chunk_buffer, read_size);
}
}
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server , using gpu old file content and build spend " << c_time_use << " us" << std::endl;*/
//gettimeofday(&c_start,NULL);
int size = request->chunkinfo_size();
for (int i = 0; i < size; i++) {
const UnmatchChunkInfo item = request->chunkinfo(i);
const int pos = item.pos();
memcpy(&t.newFile[pos], item.content().c_str(), item.length());
}
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server , new file content receive and build spend " << c_time_use << " us" << std::endl;*/
//gettimeofday(&c_start,NULL);
std::ofstream outf;
outf.open("construct.txt");
outf.write(t.newFile, t.newFileLen);
outf.close();
/*gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server, construct new file spend " << c_time_use << " us" << std::endl;*/
//gettimeofday(&c_start,NULL);
close(t.fd);
free(t.matchIdArray);
cudaFree(dmatchIdArray);
cudaFree(t.dFileContent);
cudaFree(t.dNewFile);
cudaFreeHost(t.newFile);
cudaFreeHost(t.matchChunkid);
cudaFreeHost(t.matchOffset);
cudaFreeHost(t.stat);
cudaFreeHost(t.file);
reply->set_filename(filename);
reply->set_success(true);
gettimeofday(&c_end,NULL);
c_time_use=(c_end.tv_sec-c_start.tv_sec)*1000000+(c_end.tv_usec-c_start.tv_usec);
std::cout << "on server, build new file spend " << c_time_use << " us" << std::endl;
handled_task_num++;
gettimeofday(&end,NULL);
if(handled_task_num == 3){
time_use =(end.tv_sec-start.tv_sec)*1000000+(end.tv_usec-start.tv_usec);
std::cout << "8 rsync file with size spend " << time_use << " us on gpu rsyn compute" << std::endl;
}
return Status::OK;
}
}; |
d50a9610a881c0dd34ae557da77783fd28a96af6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/kld_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void KLD_fwd_gpu(const int N,
const Dtype* p, const Dtype* q, Dtype* loss) {
CUDA_KERNEL_LOOP(index, N) {
loss[index] = log(max(p[index],Dtype(FLT_MIN)));
loss[index] -= log(max(q[index],Dtype(FLT_MIN)));
}
}
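// Forward pass: the kernel above writes log(p_i) - log(q_i) into a temporary buffer; dotting
// that buffer with the target distribution q (bottom[1]) and negating yields
// sum_i q_i * (log q_i - log p_i), i.e. KL(q || p), which is then divided by the normalizer.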
template <typename Dtype>
void KLDLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* prob_data = bottom[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
Dtype* temp = bottom[0]->mutable_gpu_diff();
int N = bottom[0]->count();
Dtype loss = 0;
hipLaunchKernelGGL(( KLD_fwd_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(N)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N,prob_data,label,temp);
caffe_gpu_dot(N,temp,label,&loss);
top[0]->mutable_cpu_data()[0] = -loss /
get_normalizer(normalization_, Dtype(N));
}
template <typename Dtype>
void KLDLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to target distribution yet.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = bottom[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
int N = bottom[0]->count();
caffe_gpu_sub(N,prob_data,label,bottom_diff);
Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, Dtype(N));
caffe_gpu_scal(N, loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(KLDLossLayer);
} // namespace caffe | d50a9610a881c0dd34ae557da77783fd28a96af6.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/kld_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void KLD_fwd_gpu(const int N,
const Dtype* p, const Dtype* q, Dtype* loss) {
CUDA_KERNEL_LOOP(index, N) {
loss[index] = log(max(p[index],Dtype(FLT_MIN)));
loss[index] -= log(max(q[index],Dtype(FLT_MIN)));
}
}
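// Forward pass: the kernel above writes log(p_i) - log(q_i) into a temporary buffer; dotting
// that buffer with the target distribution q (bottom[1]) and negating yields
// sum_i q_i * (log q_i - log p_i), i.e. KL(q || p), which is then divided by the normalizer.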
template <typename Dtype>
void KLDLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* prob_data = bottom[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
Dtype* temp = bottom[0]->mutable_gpu_diff();
int N = bottom[0]->count();
Dtype loss = 0;
KLD_fwd_gpu<Dtype><<<CAFFE_GET_BLOCKS(N),CAFFE_CUDA_NUM_THREADS>>>(
N,prob_data,label,temp);
caffe_gpu_dot(N,temp,label,&loss);
top[0]->mutable_cpu_data()[0] = -loss /
get_normalizer(normalization_, Dtype(N));
}
template <typename Dtype>
void KLDLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to target distribution yet.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = bottom[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
int N = bottom[0]->count();
caffe_gpu_sub(N,prob_data,label,bottom_diff);
Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, Dtype(N));
caffe_gpu_scal(N, loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(KLDLossLayer);
} // namespace caffe |
401932673a8898024c5c57c9bacc36bc08d5745e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <strings/utilities.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/replace.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/replace.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/binary_search.h>
#include <thrust/count.h>
#include <thrust/distance.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scan.h>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Average string byte-length threshold for deciding character-level vs row-level parallel
* algorithm.
*
* This value was determined by running the replace string scalar benchmark against different
* power-of-2 string lengths and observing the point at which the performance only improved for
* all trials.
*/
constexpr size_type BYTES_PER_VALID_ROW_THRESHOLD = 64;
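// A minimal sketch of how this threshold is presumably consulted by the public replace() entry
// point (that dispatch code lies outside this excerpt, so the exact form below is an assumption):
//   auto const avg_bytes_per_valid_row =
//     strings.chars_size() / std::max(strings.size() - strings.null_count(), 1);
//   return (avg_bytes_per_valid_row < BYTES_PER_VALID_ROW_THRESHOLD)
//            ? replace_row_parallel(...)    // short rows: one thread per string
//            : replace_char_parallel(...);  // long rows: one thread per character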
/**
* @brief Function logic for the row-level parallelism replace API.
*
* This will perform a replace operation on each string.
*/
struct replace_row_parallel_fn {
column_device_view const d_strings;
string_view const d_target;
string_view const d_repl;
int32_t const max_repl;
int32_t* d_offsets{};
char* d_chars{};
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
auto const d_str = d_strings.element<string_view>(idx);
const char* in_ptr = d_str.data();
char* out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr;
auto max_n = (max_repl < 0) ? d_str.length() : max_repl;
auto bytes = d_str.size_bytes();
auto position = d_str.find(d_target);
size_type last_pos = 0;
while ((position >= 0) && (max_n > 0)) {
if (out_ptr) {
auto const curr_pos = d_str.byte_offset(position);
out_ptr = copy_and_increment(out_ptr, in_ptr + last_pos, curr_pos - last_pos); // copy left
out_ptr = copy_string(out_ptr, d_repl); // copy repl
last_pos = curr_pos + d_target.size_bytes();
} else {
bytes += d_repl.size_bytes() - d_target.size_bytes();
}
position = d_str.find(d_target, position + d_target.size_bytes());
--max_n;
}
if (out_ptr) // copy what's left (or right depending on your point of view)
memcpy(out_ptr, in_ptr + last_pos, d_str.size_bytes() - last_pos);
else
d_offsets[idx] = bytes;
}
};
/**
* @brief Functor for detecting falsely-overlapped target positions.
*
* This functor examines target positions that have been flagged as potentially overlapped by
* a previous target position and identifies the overlaps that are false. A false overlap can occur
* when a target position is overlapped by another target position that is itself overlapped.
*
* For example, a target string of "+++" and string to search of "++++++" will generate 4 potential
* target positions at char offsets 0 through 3. The targets at offsets 1, 2, and 3 will be flagged
* as potential overlaps since a prior target position is within range of the target string length.
* The targets at offset 1 and 2 are true overlaps, since the footprint of the valid target at
* offset 0 overlaps with them. The target at offset 3 is not truly overlapped because it is only
* overlapped by invalid targets, targets that were themselves overlapped by a valid target.
*/
struct target_false_overlap_filter_fn {
size_type const* const d_overlap_pos_indices{};
size_type const* const d_target_positions{};
size_type const target_size{};
__device__ bool operator()(size_type overlap_idx) const
{
if (overlap_idx == 0) {
// The first overlap has no prior overlap to chain, so it should be kept as an overlap.
return false;
}
size_type const this_pos_idx = d_overlap_pos_indices[overlap_idx];
// Searching backwards for the first target position index of an overlap that is not adjacent
// to its overlap predecessor. The result will be the first overlap in this chain of overlaps.
size_type first_overlap_idx = overlap_idx;
size_type first_pos_idx = this_pos_idx;
while (first_overlap_idx > 0) {
size_type prev_pos_idx = d_overlap_pos_indices[--first_overlap_idx];
if (prev_pos_idx + 1 != first_pos_idx) { break; }
first_pos_idx = prev_pos_idx;
}
// The prior target position to the first overlapped position in the chain is a valid target.
size_type valid_pos_idx = first_pos_idx - 1;
size_type valid_pos = d_target_positions[valid_pos_idx];
// Walk forward from this valid target. Any targets within the range of this valid one are true
// overlaps. The first overlap beyond the range of this valid target is another valid target,
// as it was falsely overlapped by a target that was itself overlapped. Repeat until we get to
// the overlapped position being queried by this call.
while (valid_pos_idx < this_pos_idx) {
size_type next_pos_idx = valid_pos_idx + 1;
size_type next_pos = d_target_positions[next_pos_idx];
// Every target position within the range of a valid target position is a true overlap.
while (next_pos < valid_pos + target_size) {
if (next_pos_idx == this_pos_idx) { return false; }
next_pos = d_target_positions[++next_pos_idx];
}
valid_pos_idx = next_pos_idx;
valid_pos = next_pos;
}
// This was overlapped only by false overlaps and therefore is a valid target.
return true;
}
};
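// Worked trace of the "+++" in "++++++" example from the comment above: candidate positions are
// {0,1,2,3} and positions {1,2,3} are flagged as potential overlaps. Positions 1 and 2 fall
// inside the footprint [0,3) of the valid target at 0, so the functor returns false for them and
// they stay marked as true overlaps. Position 3 is covered only by those already-invalidated
// targets, so the functor returns true and position 3 survives as a valid replacement site.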
/**
* @brief Functor for replacing each target string with the replacement string.
*
* This will perform a replace operation at each target position.
*/
struct target_replacer_fn {
device_span<size_type const> const d_target_positions;
char const* const d_in_chars{};
char* const d_out_chars{};
size_type const target_size{};
string_view const d_repl;
int32_t const in_char_offset = 0;
__device__ void operator()(size_type input_idx) const
{
// Calculate the adjustment from input index to output index for each prior target position.
auto const repl_size = d_repl.size_bytes();
auto const idx_delta_per_pos = repl_size - target_size;
// determine the number of target positions at or before this character position
size_type const* next_target_pos_ptr = thrust::upper_bound(
thrust::seq, d_target_positions.begin(), d_target_positions.end(), input_idx);
size_type const num_prev_targets = next_target_pos_ptr - d_target_positions.data();
size_type output_idx = input_idx - in_char_offset + idx_delta_per_pos * num_prev_targets;
if (num_prev_targets == 0) {
// not within a target string
d_out_chars[output_idx] = d_in_chars[input_idx];
} else {
// check if this input position is within a target string
size_type const prev_target_pos = *(next_target_pos_ptr - 1);
size_type target_idx = input_idx - prev_target_pos;
if (target_idx < target_size) {
// within the target string, so the original calculation was off by one target string
output_idx -= idx_delta_per_pos;
// Copy the corresponding byte from the replacement string. If the replacement string is
// larger than the target string then the thread reading the last target byte is
// responsible for copying the remainder of the replacement string.
if (target_idx < repl_size) {
d_out_chars[output_idx++] = d_repl.data()[target_idx++];
if (target_idx == target_size) {
memcpy(d_out_chars + output_idx, d_repl.data() + target_idx, repl_size - target_idx);
}
}
} else {
// not within a target string
d_out_chars[output_idx] = d_in_chars[input_idx];
}
}
}
};
/**
* @brief Filter target positions that are overlapped by other, valid target positions.
*
* This performs an in-place modification of the target positions to remove any target positions
* that are overlapped by other, valid target positions. For example, if the target string is "++"
* and the string to search is "+++" then there will be two potential targets at character offsets
* 0 and 1. The target at offset 0 is valid and overlaps the target at offset 1, invalidating the
* target at offset 1.
*
* @param[in,out] d_target_positions Potential target positions to filter in-place.
* @param[in] target_count Number of potential target positions.
* @param[in] target_size Size of the target string in bytes.
* @param[in] stream CUDA stream to use for device operations.
* @return Number of target positions after filtering.
*/
size_type filter_overlap_target_positions(size_type* d_target_positions,
size_type target_count,
size_type target_size,
rmm::cuda_stream_view stream)
{
auto overlap_detector = [d_target_positions, target_size] __device__(size_type pos_idx) -> bool {
return (pos_idx > 0)
? d_target_positions[pos_idx] - d_target_positions[pos_idx - 1] < target_size
: false;
};
// count the potential number of overlapped target positions
size_type overlap_count =
thrust::count_if(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(target_count),
overlap_detector);
if (overlap_count == 0) { return target_count; }
// create a vector indexing the potential overlapped target positions
rmm::device_uvector<size_type> potential_overlapped_pos_indices(overlap_count, stream);
auto d_potential_overlapped_pos_indices = potential_overlapped_pos_indices.data();
thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(target_count),
d_potential_overlapped_pos_indices,
overlap_detector);
// filter out the false overlaps that are actually valid
rmm::device_uvector<size_type> overlapped_pos_indices(overlap_count, stream);
auto d_overlapped_pos_indices = overlapped_pos_indices.data();
auto overlap_end =
thrust::remove_copy_if(rmm::exec_policy(stream),
d_potential_overlapped_pos_indices,
d_potential_overlapped_pos_indices + overlap_count,
thrust::make_counting_iterator<size_type>(0),
d_overlapped_pos_indices,
target_false_overlap_filter_fn{
d_potential_overlapped_pos_indices, d_target_positions, target_size});
overlap_count = cudf::distance(d_overlapped_pos_indices, overlap_end);
// In-place remove any target positions that are overlapped by valid target positions
auto target_pos_end = thrust::remove_if(
rmm::exec_policy(stream),
d_target_positions,
d_target_positions + target_count,
thrust::make_counting_iterator<size_type>(0),
[d_overlapped_pos_indices, overlap_count] __device__(size_type target_position_idx) -> bool {
return thrust::binary_search(thrust::seq,
d_overlapped_pos_indices,
d_overlapped_pos_indices + overlap_count,
target_position_idx);
});
return cudf::distance(d_target_positions, target_pos_end);
}
/**
* @brief Filter target positions to remove any invalid target positions.
*
* This performs an in-place modification of the target positions to remove any target positions
* that are invalid, either by the target string overlapping a row boundary or being overlapped by
* another valid target string.
*
* @param[in,out] target_positions Potential target positions to filter in-place.
* @param[in] d_offsets_span Memory range encompassing the string column offsets.
* @param[in] target_size Size of the target string in bytes.
* @param[in] stream CUDA stream to use for device operations.
* @return Number of target positions after filtering.
*/
size_type filter_false_target_positions(rmm::device_uvector<size_type>& target_positions,
device_span<int32_t const> d_offsets_span,
size_type target_size,
rmm::cuda_stream_view stream)
{
// In-place remove any positions for target strings that crossed string boundaries.
auto d_target_positions = target_positions.data();
auto target_pos_end =
thrust::remove_if(rmm::exec_policy(stream),
d_target_positions,
d_target_positions + target_positions.size(),
[d_offsets_span, target_size] __device__(size_type target_pos) -> bool {
// find the end of the string containing the start of this target
size_type const* offset_ptr = thrust::upper_bound(
thrust::seq, d_offsets_span.begin(), d_offsets_span.end(), target_pos);
return target_pos + target_size > *offset_ptr;
});
auto const target_count = cudf::distance(d_target_positions, target_pos_end);
if (target_count == 0) { return 0; }
// Filter out target positions that are the result of overlapping target matches.
return (target_count > 1)
? filter_overlap_target_positions(d_target_positions, target_count, target_size, stream)
: target_count;
}
/**
* @brief Filter target positions beyond the maximum target replacements per row limit.
*
* This performs an in-place modification of the target positions to remove any target positions
* corresponding to targets that should not be replaced due to the maximum target replacement per
* row limit.
*
* @param[in,out] target_positions Target positions to filter in-place.
* @param[in] target_count Number of target positions.
* @param[in] d_offsets_span Memory range encompassing the string column offsets.
* @param[in] max_repl_per_row Maximum target replacements per row limit.
* @param[in] stream CUDA stream to use for device operations.
* @return Number of target positions after filtering.
*/
size_type filter_maxrepl_target_positions(size_type* d_target_positions,
size_type target_count,
device_span<int32_t const> d_offsets_span,
size_type max_repl_per_row,
rmm::cuda_stream_view stream)
{
auto pos_to_row_fn = [d_offsets_span] __device__(size_type target_pos) -> size_type {
auto upper_bound =
thrust::upper_bound(thrust::seq, d_offsets_span.begin(), d_offsets_span.end(), target_pos);
return thrust::distance(d_offsets_span.begin(), upper_bound);
};
// compute the match count per row for each target position
rmm::device_uvector<size_type> match_counts(target_count, stream);
auto d_match_counts = match_counts.data();
thrust::inclusive_scan_by_key(
rmm::exec_policy(stream),
thrust::make_transform_iterator(d_target_positions, pos_to_row_fn),
thrust::make_transform_iterator(d_target_positions + target_count, pos_to_row_fn),
thrust::make_constant_iterator<size_type>(1),
d_match_counts);
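// After the scan, d_match_counts[i] is the 1-based rank of target position i within its row, so
// any rank greater than max_repl_per_row marks a surplus match that is dropped below.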
// In-place remove any positions that exceed the per-row match limit
auto target_pos_end =
thrust::remove_if(rmm::exec_policy(stream),
d_target_positions,
d_target_positions + target_count,
d_match_counts,
[max_repl_per_row] __device__(size_type match_count) -> bool {
return match_count > max_repl_per_row;
});
return cudf::distance(d_target_positions, target_pos_end);
}
/**
* @brief Scalar string replacement using a character-level parallel algorithm.
*
* Replaces occurrences of the target string with the replacement string using an algorithm with
* character-level parallelism. This algorithm will perform well when the strings in the string
* column are relatively long.
* @see BYTES_PER_VALID_ROW_THRESHOLD
*
* @param strings String column to search for target strings.
* @param chars_start Offset of the first character in the string column.
* @param chars_end Offset beyond the last character in the string column to search.
* @param d_target String to search for within the string column.
* @param d_repl Replacement string if target string is found.
* @param maxrepl Maximum times to replace if target appears multiple times in a string.
* @param stream CUDA stream to use for device operations
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New strings column.
*/
std::unique_ptr<column> replace_char_parallel(strings_column_view const& strings,
size_type chars_start,
size_type chars_end,
string_view const& d_target,
string_view const& d_repl,
int32_t maxrepl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const strings_count = strings.size();
auto const offset_count = strings_count + 1;
auto const d_offsets = strings.offsets().data<int32_t>() + strings.offset();
auto const d_in_chars = strings.chars().data<char>();
auto const chars_bytes = chars_end - chars_start;
auto const target_size = d_target.size_bytes();
// detect a target match at the specified byte position
device_span<char const> const d_chars_span(d_in_chars, chars_end);
auto target_detector = [d_chars_span, d_target] __device__(size_type char_idx) {
auto target_size = d_target.size_bytes();
auto target_ptr = d_chars_span.begin() + char_idx;
return target_ptr + target_size <= d_chars_span.end() &&
d_target.compare(target_ptr, target_size) == 0;
};
// Count target string matches across all character positions, ignoring string boundaries and
// overlapping target strings. This may produce false-positives.
size_type target_count = thrust::count_if(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(chars_start),
thrust::make_counting_iterator<size_type>(chars_end),
target_detector);
if (target_count == 0) {
// nothing to replace, copy the input column
return std::make_unique<cudf::column>(strings.parent(), stream, mr);
}
// create a vector of the potential target match positions
rmm::device_uvector<size_type> target_positions(target_count, stream);
auto d_target_positions = target_positions.data();
thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(chars_start),
thrust::make_counting_iterator<size_type>(chars_end),
d_target_positions,
target_detector);
device_span<int32_t const> d_offsets_span(d_offsets, offset_count);
if (target_size > 1) {
target_count =
filter_false_target_positions(target_positions, d_offsets_span, target_size, stream);
if (target_count == 0) {
// nothing to replace, copy the input column
return std::make_unique<cudf::column>(strings.parent(), stream, mr);
}
}
// filter out any target positions that exceed the per-row match limit
if (maxrepl > 0 && target_count > maxrepl) {
target_count = filter_maxrepl_target_positions(
d_target_positions, target_count, d_offsets_span, maxrepl, stream);
}
// build the offsets column
auto offsets_column = make_numeric_column(
data_type{type_id::INT32}, offset_count, mask_state::UNALLOCATED, stream, mr);
auto offsets_view = offsets_column->mutable_view();
auto delta_per_target = d_repl.size_bytes() - target_size;
device_span<size_type const> d_target_positions_span(d_target_positions, target_count);
auto offsets_update_fn =
[d_target_positions_span, delta_per_target, chars_start] __device__(int32_t offset) -> int32_t {
// determine the number of target positions occurring before this offset
size_type const* next_target_pos_ptr = thrust::lower_bound(
thrust::seq, d_target_positions_span.begin(), d_target_positions_span.end(), offset);
size_type num_prev_targets =
thrust::distance(d_target_positions_span.data(), next_target_pos_ptr);
return offset - chars_start + delta_per_target * num_prev_targets;
};
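  // Hypothetical worked example (editorial addition): with chars_start = 0, a 2-byte target,
  // a 3-byte replacement (delta_per_target = +1) and target positions {2, 7}, input offsets
  // {0, 5, 10} become {0, 6, 12} -- offset 5 has one prior target and offset 10 has two.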
thrust::transform(rmm::exec_policy(stream),
d_offsets_span.begin(),
d_offsets_span.end(),
offsets_view.begin<int32_t>(),
offsets_update_fn);
// build the characters column
auto chars_column = create_chars_child_column(
strings_count, chars_bytes + (delta_per_target * target_count), stream, mr);
auto d_out_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(chars_start),
chars_bytes,
target_replacer_fn{
d_target_positions_span, d_in_chars, d_out_chars, target_size, d_repl, chars_start});
// free the target positions buffer as it is no longer needed
(void)target_positions.release();
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
stream,
mr);
}
/**
* @brief Scalar string replacement using a row-level parallel algorithm.
*
* Replaces occurrences of the target string with the replacement string using an algorithm with
* row-level parallelism. This algorithm will perform well when the strings in the string
* column are relatively short.
* @see BYTES_PER_VALID_ROW_THRESHOLD
*
* @param strings String column to search for target strings.
* @param d_target String to search for within the string column.
* @param d_repl Replacement string if target string is found.
* @param maxrepl Maximum times to replace if target appears multiple times in a string.
* @param stream CUDA stream to use for device operations
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New strings column.
*/
std::unique_ptr<column> replace_row_parallel(strings_column_view const& strings,
string_view const& d_target,
string_view const& d_repl,
int32_t maxrepl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto d_strings = column_device_view::create(strings.parent(), stream);
// this utility calls the given functor to build the offsets and chars columns
auto children = cudf::strings::detail::make_strings_children(
replace_row_parallel_fn{*d_strings, d_target, d_repl, maxrepl}, strings.size(), stream, mr);
return make_strings_column(strings.size(),
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
stream,
mr);
}
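// Illustrative behaviour sketch (hypothetical inputs, editorial addition): for the row "banana"
// with target "an", repl "AN" and maxrepl = 1 only the first occurrence is replaced ("bANana");
// with a negative maxrepl every occurrence is replaced ("bANANa").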
} // namespace
/**
* @copydoc cudf::strings::detail::replace(strings_column_view const&, string_scalar const&,
* string_scalar const&, int32_t, rmm::cuda_stream_view, rmm::mr::device_memory_resource*)
*/
template <>
std::unique_ptr<column> replace<replace_algorithm::AUTO>(strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return make_empty_strings_column(stream, mr);
if (maxrepl == 0) return std::make_unique<cudf::column>(strings.parent(), stream, mr);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
CUDF_EXPECTS(target.is_valid(), "Parameter target must be valid.");
CUDF_EXPECTS(target.size() > 0, "Parameter target must not be empty string.");
string_view d_target(target.data(), target.size());
string_view d_repl(repl.data(), repl.size());
// determine range of characters in the base column
auto const strings_count = strings.size();
auto const offset_count = strings_count + 1;
auto const d_offsets = strings.offsets().data<int32_t>() + strings.offset();
size_type const chars_start =
(strings.offset() == 0)
? 0
: cudf::detail::get_value<int32_t>(strings.offsets(), strings.offset(), stream);
size_type const chars_end = (offset_count == strings.offsets().size())
? strings.chars_size()
: cudf::detail::get_value<int32_t>(
strings.offsets(), strings.offset() + strings_count, stream);
size_type const chars_bytes = chars_end - chars_start;
auto const avg_bytes_per_row = chars_bytes / ::max(strings_count - strings.null_count(), 1);
return (avg_bytes_per_row < BYTES_PER_VALID_ROW_THRESHOLD)
? replace_row_parallel(strings, d_target, d_repl, maxrepl, stream, mr)
: replace_char_parallel(
strings, chars_start, chars_end, d_target, d_repl, maxrepl, stream, mr);
}
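// Hypothetical sizing example (editorial addition): 512 valid rows spanning 4096 chars average
// 8 bytes per row (< BYTES_PER_VALID_ROW_THRESHOLD = 64), so the row-parallel path is chosen;
// 16 valid rows over the same 4096 chars average 256 bytes per row and take the
// character-parallel path.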
template <>
std::unique_ptr<column> replace<replace_algorithm::CHAR_PARALLEL>(
strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return make_empty_strings_column(stream, mr);
if (maxrepl == 0) return std::make_unique<cudf::column>(strings.parent(), stream, mr);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
CUDF_EXPECTS(target.is_valid(), "Parameter target must be valid.");
CUDF_EXPECTS(target.size() > 0, "Parameter target must not be empty string.");
string_view d_target(target.data(), target.size());
string_view d_repl(repl.data(), repl.size());
// determine range of characters in the base column
auto const strings_count = strings.size();
auto const offset_count = strings_count + 1;
auto const d_offsets = strings.offsets().data<int32_t>() + strings.offset();
size_type chars_start = (strings.offset() == 0) ? 0
: cudf::detail::get_value<int32_t>(
strings.offsets(), strings.offset(), stream);
size_type chars_end = (offset_count == strings.offsets().size())
? strings.chars_size()
: cudf::detail::get_value<int32_t>(
strings.offsets(), strings.offset() + strings_count, stream);
return replace_char_parallel(
strings, chars_start, chars_end, d_target, d_repl, maxrepl, stream, mr);
}
template <>
std::unique_ptr<column> replace<replace_algorithm::ROW_PARALLEL>(
strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return make_empty_strings_column(stream, mr);
if (maxrepl == 0) return std::make_unique<cudf::column>(strings.parent(), stream, mr);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
CUDF_EXPECTS(target.is_valid(), "Parameter target must be valid.");
CUDF_EXPECTS(target.size() > 0, "Parameter target must not be empty string.");
string_view d_target(target.data(), target.size());
string_view d_repl(repl.data(), repl.size());
return replace_row_parallel(strings, d_target, d_repl, maxrepl, stream, mr);
}
namespace {
/**
* @brief Function logic for the replace_slice API.
*
* This will perform a replace_slice operation on each string.
*/
struct replace_slice_fn {
column_device_view const d_strings;
string_view const d_repl;
size_type const start;
size_type const stop;
int32_t* d_offsets{};
char* d_chars{};
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
auto const d_str = d_strings.element<string_view>(idx);
auto const length = d_str.length();
char const* in_ptr = d_str.data();
auto const begin = d_str.byte_offset(((start < 0) || (start > length) ? length : start));
auto const end = d_str.byte_offset(((stop < 0) || (stop > length) ? length : stop));
if (d_chars) {
char* out_ptr = d_chars + d_offsets[idx];
out_ptr = copy_and_increment(out_ptr, in_ptr, begin); // copy beginning
out_ptr = copy_string(out_ptr, d_repl); // insert replacement
out_ptr = copy_and_increment(out_ptr, // copy end
in_ptr + end,
d_str.size_bytes() - end);
} else {
d_offsets[idx] = d_str.size_bytes() + d_repl.size_bytes() - (end - begin);
}
}
};
} // namespace
std::unique_ptr<column> replace_slice(strings_column_view const& strings,
string_scalar const& repl,
size_type start,
size_type stop,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return make_empty_strings_column(stream, mr);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
if (stop > 0) CUDF_EXPECTS(start <= stop, "Parameter start must be less than or equal to stop.");
string_view d_repl(repl.data(), repl.size());
auto d_strings = column_device_view::create(strings.parent(), stream);
// this utility calls the given functor to build the offsets and chars columns
auto children = cudf::strings::detail::make_strings_children(
replace_slice_fn{*d_strings, d_repl, start, stop}, strings.size(), stream, mr);
return make_strings_column(strings.size(),
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
stream,
mr);
}
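// Illustrative usage sketch (hypothetical inputs, editorial addition): calling replace_slice on
// ["hello", "goodbye"] with repl = "__", start = 1 and stop = 3 keeps bytes [0, 1), inserts the
// replacement, then appends bytes [3, end), producing ["h__lo", "g__dbye"].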
namespace {
/**
* @brief Function logic for the replace_multi API.
*
* This will perform the multi-replace operation on each string.
*/
struct replace_multi_fn {
column_device_view const d_strings;
column_device_view const d_targets;
column_device_view const d_repls;
int32_t* d_offsets{};
char* d_chars{};
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
auto const d_str = d_strings.element<string_view>(idx);
char const* in_ptr = d_str.data();
size_type bytes = d_str.size_bytes();
size_type spos = 0;
size_type lpos = 0;
char* out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr;
// check each character against each target
while (spos < d_str.size_bytes()) {
for (int tgt_idx = 0; tgt_idx < d_targets.size(); ++tgt_idx) {
auto const d_tgt = d_targets.element<string_view>(tgt_idx);
if ((d_tgt.size_bytes() <= (d_str.size_bytes() - spos)) && // check fit
(d_tgt.compare(in_ptr + spos, d_tgt.size_bytes()) == 0)) // and match
{
auto const d_repl = (d_repls.size() == 1) ? d_repls.element<string_view>(0)
: d_repls.element<string_view>(tgt_idx);
bytes += d_repl.size_bytes() - d_tgt.size_bytes();
if (out_ptr) {
out_ptr = copy_and_increment(out_ptr, in_ptr + lpos, spos - lpos);
out_ptr = copy_string(out_ptr, d_repl);
lpos = spos + d_tgt.size_bytes();
}
spos += d_tgt.size_bytes() - 1;
break;
}
}
++spos;
}
if (out_ptr) // copy remainder
memcpy(out_ptr, in_ptr + lpos, d_str.size_bytes() - lpos);
else
d_offsets[idx] = bytes;
}
};
} // namespace
std::unique_ptr<column> replace(strings_column_view const& strings,
strings_column_view const& targets,
strings_column_view const& repls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return make_empty_strings_column(stream, mr);
CUDF_EXPECTS(((targets.size() > 0) && (targets.null_count() == 0)),
"Parameters targets must not be empty and must not have nulls");
CUDF_EXPECTS(((repls.size() > 0) && (repls.null_count() == 0)),
"Parameters repls must not be empty and must not have nulls");
if (repls.size() > 1)
CUDF_EXPECTS(repls.size() == targets.size(), "Sizes for targets and repls must match");
auto d_strings = column_device_view::create(strings.parent(), stream);
auto d_targets = column_device_view::create(targets.parent(), stream);
auto d_repls = column_device_view::create(repls.parent(), stream);
// this utility calls the given functor to build the offsets and chars columns
auto children = cudf::strings::detail::make_strings_children(
replace_multi_fn{*d_strings, *d_targets, *d_repls}, strings.size(), stream, mr);
return make_strings_column(strings.size(),
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
stream,
mr);
}
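// Illustrative usage sketch (hypothetical inputs, editorial addition): with strings
// ["hello", "goodbye"], targets ["e", "o"] and repls ["E", "O"], each row is scanned left to
// right and every match is substituted, producing ["hEllO", "gOOdbyE"].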
std::unique_ptr<column> replace_nulls(strings_column_view const& strings,
string_scalar const& repl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(stream, mr);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
string_view d_repl(repl.data(), repl.size());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<int32_t>(0), [d_strings, d_repl] __device__(size_type idx) {
return d_strings.is_null(idx) ? d_repl.size_bytes()
: d_strings.element<string_view>(idx).size_bytes();
});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, stream, mr);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
auto const bytes =
cudf::detail::get_value<int32_t>(offsets_column->view(), strings_count, stream);
auto chars_column = strings::detail::create_chars_child_column(strings_count, bytes, stream, mr);
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_strings, d_repl, d_offsets, d_chars] __device__(size_type idx) {
string_view d_str = d_repl;
if (!d_strings.is_null(idx)) d_str = d_strings.element<string_view>(idx);
memcpy(d_chars + d_offsets[idx], d_str.data(), d_str.size_bytes());
});
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
0,
rmm::device_buffer{0, stream, mr},
stream,
mr);
}
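// Illustrative usage sketch (hypothetical inputs, editorial addition): with strings
// ["ab", null, "cd"] and repl = "--", the result is ["ab", "--", "cd"] with no null mask.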
} // namespace detail
// external API
std::unique_ptr<column> replace(strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace(strings, target, repl, maxrepl, rmm::cuda_stream_default, mr);
}
std::unique_ptr<column> replace_slice(strings_column_view const& strings,
string_scalar const& repl,
size_type start,
size_type stop,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace_slice(strings, repl, start, stop, rmm::cuda_stream_default, mr);
}
std::unique_ptr<column> replace(strings_column_view const& strings,
strings_column_view const& targets,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace(strings, targets, repls, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
| 401932673a8898024c5c57c9bacc36bc08d5745e.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <strings/utilities.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/replace.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/replace.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/binary_search.h>
#include <thrust/count.h>
#include <thrust/distance.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scan.h>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Average string byte-length threshold for deciding character-level vs row-level parallel
* algorithm.
*
* This value was determined by running the replace string scalar benchmark against different
* power-of-2 string lengths and observing the point at which the performance only improved for
* all trials.
*/
constexpr size_type BYTES_PER_VALID_ROW_THRESHOLD = 64;
/**
* @brief Function logic for the row-level parallelism replace API.
*
* This will perform a replace operation on each string.
*/
struct replace_row_parallel_fn {
column_device_view const d_strings;
string_view const d_target;
string_view const d_repl;
int32_t const max_repl;
int32_t* d_offsets{};
char* d_chars{};
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
auto const d_str = d_strings.element<string_view>(idx);
const char* in_ptr = d_str.data();
char* out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr;
auto max_n = (max_repl < 0) ? d_str.length() : max_repl;
auto bytes = d_str.size_bytes();
auto position = d_str.find(d_target);
size_type last_pos = 0;
while ((position >= 0) && (max_n > 0)) {
if (out_ptr) {
auto const curr_pos = d_str.byte_offset(position);
out_ptr = copy_and_increment(out_ptr, in_ptr + last_pos, curr_pos - last_pos); // copy left
out_ptr = copy_string(out_ptr, d_repl); // copy repl
last_pos = curr_pos + d_target.size_bytes();
} else {
bytes += d_repl.size_bytes() - d_target.size_bytes();
}
position = d_str.find(d_target, position + d_target.size_bytes());
--max_n;
}
if (out_ptr) // copy what's left (or right depending on your point of view)
memcpy(out_ptr, in_ptr + last_pos, d_str.size_bytes() - last_pos);
else
d_offsets[idx] = bytes;
}
};
/**
* @brief Functor for detecting falsely-overlapped target positions.
*
* This functor examines target positions that have been flagged as potentially overlapped by
* a previous target position and identifies the overlaps that are false. A false overlap can occur
* when a target position is overlapped by another target position that is itself overlapped.
*
* For example, a target string of "+++" and string to search of "++++++" will generate 4 potential
* target positions at char offsets 0 through 3. The targets at offsets 1, 2, and 3 will be flagged
* as potential overlaps since a prior target position is within range of the target string length.
* The targets at offset 1 and 2 are true overlaps, since the footprint of the valid target at
* offset 0 overlaps with them. The target at offset 3 is not truly overlapped because it is only
* overlapped by invalid targets, targets that were themselves overlapped by a valid target.
*/
struct target_false_overlap_filter_fn {
size_type const* const d_overlap_pos_indices{};
size_type const* const d_target_positions{};
size_type const target_size{};
__device__ bool operator()(size_type overlap_idx) const
{
if (overlap_idx == 0) {
// The first overlap has no prior overlap to chain, so it should be kept as an overlap.
return false;
}
size_type const this_pos_idx = d_overlap_pos_indices[overlap_idx];
// Searching backwards for the first target position index of an overlap that is not adjacent
// to its overlap predecessor. The result will be the first overlap in this chain of overlaps.
size_type first_overlap_idx = overlap_idx;
size_type first_pos_idx = this_pos_idx;
while (first_overlap_idx > 0) {
size_type prev_pos_idx = d_overlap_pos_indices[--first_overlap_idx];
if (prev_pos_idx + 1 != first_pos_idx) { break; }
first_pos_idx = prev_pos_idx;
}
// The prior target position to the first overlapped position in the chain is a valid target.
size_type valid_pos_idx = first_pos_idx - 1;
size_type valid_pos = d_target_positions[valid_pos_idx];
// Walk forward from this valid target. Any targets within the range of this valid one are true
// overlaps. The first overlap beyond the range of this valid target is another valid target,
// as it was falsely overlapped by a target that was itself overlapped. Repeat until we get to
// the overlapped position being queried by this call.
while (valid_pos_idx < this_pos_idx) {
size_type next_pos_idx = valid_pos_idx + 1;
size_type next_pos = d_target_positions[next_pos_idx];
// Every target position within the range of a valid target position is a true overlap.
while (next_pos < valid_pos + target_size) {
if (next_pos_idx == this_pos_idx) { return false; }
next_pos = d_target_positions[++next_pos_idx];
}
valid_pos_idx = next_pos_idx;
valid_pos = next_pos;
}
// This was overlapped only by false overlaps and therefore is a valid target.
return true;
}
};
/**
* @brief Functor for replacing each target string with the replacement string.
*
* This will perform a replace operation at each target position.
*/
struct target_replacer_fn {
device_span<size_type const> const d_target_positions;
char const* const d_in_chars{};
char* const d_out_chars{};
size_type const target_size{};
string_view const d_repl;
int32_t const in_char_offset = 0;
__device__ void operator()(size_type input_idx) const
{
// Calculate the adjustment from input index to output index for each prior target position.
auto const repl_size = d_repl.size_bytes();
auto const idx_delta_per_pos = repl_size - target_size;
// determine the number of target positions at or before this character position
size_type const* next_target_pos_ptr = thrust::upper_bound(
thrust::seq, d_target_positions.begin(), d_target_positions.end(), input_idx);
size_type const num_prev_targets = next_target_pos_ptr - d_target_positions.data();
size_type output_idx = input_idx - in_char_offset + idx_delta_per_pos * num_prev_targets;
if (num_prev_targets == 0) {
// not within a target string
d_out_chars[output_idx] = d_in_chars[input_idx];
} else {
// check if this input position is within a target string
size_type const prev_target_pos = *(next_target_pos_ptr - 1);
size_type target_idx = input_idx - prev_target_pos;
if (target_idx < target_size) {
// within the target string, so the original calculation was off by one target string
output_idx -= idx_delta_per_pos;
// Copy the corresponding byte from the replacement string. If the replacement string is
// larger than the target string then the thread reading the last target byte is
// responsible for copying the remainder of the replacement string.
if (target_idx < repl_size) {
d_out_chars[output_idx++] = d_repl.data()[target_idx++];
if (target_idx == target_size) {
memcpy(d_out_chars + output_idx, d_repl.data() + target_idx, repl_size - target_idx);
}
}
} else {
// not within a target string
d_out_chars[output_idx] = d_in_chars[input_idx];
}
}
}
};
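// Hypothetical worked example (editorial addition): replacing the 2-byte target "ab" at
// position 2 of "XXabYY" with the 3-byte "xyz" shifts every later output byte by +1; the thread
// that reads the last target byte also copies the remaining replacement byte, giving "XXxyzYY".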
/**
* @brief Filter target positions that are overlapped by other, valid target positions.
*
* This performs an in-place modification of the target positions to remove any target positions
* that are overlapped by other, valid target positions. For example, if the target string is "++"
* and the string to search is "+++" then there will be two potential targets at character offsets
* 0 and 1. The target at offset 0 is valid and overlaps the target at offset 1, invalidating the
* target at offset 1.
*
* @param[in,out] d_target_positions Potential target positions to filter in-place.
* @param[in] target_count Number of potential target positions.
* @param[in] target_size Size of the target string in bytes.
* @param[in] stream CUDA stream to use for device operations.
* @return Number of target positions after filtering.
*/
size_type filter_overlap_target_positions(size_type* d_target_positions,
size_type target_count,
size_type target_size,
rmm::cuda_stream_view stream)
{
auto overlap_detector = [d_target_positions, target_size] __device__(size_type pos_idx) -> bool {
return (pos_idx > 0)
? d_target_positions[pos_idx] - d_target_positions[pos_idx - 1] < target_size
: false;
};
// count the potential number of overlapped target positions
size_type overlap_count =
thrust::count_if(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(target_count),
overlap_detector);
if (overlap_count == 0) { return target_count; }
// create a vector indexing the potential overlapped target positions
rmm::device_uvector<size_type> potential_overlapped_pos_indices(overlap_count, stream);
auto d_potential_overlapped_pos_indices = potential_overlapped_pos_indices.data();
thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(target_count),
d_potential_overlapped_pos_indices,
overlap_detector);
// filter out the false overlaps that are actually valid
rmm::device_uvector<size_type> overlapped_pos_indices(overlap_count, stream);
auto d_overlapped_pos_indices = overlapped_pos_indices.data();
auto overlap_end =
thrust::remove_copy_if(rmm::exec_policy(stream),
d_potential_overlapped_pos_indices,
d_potential_overlapped_pos_indices + overlap_count,
thrust::make_counting_iterator<size_type>(0),
d_overlapped_pos_indices,
target_false_overlap_filter_fn{
d_potential_overlapped_pos_indices, d_target_positions, target_size});
overlap_count = cudf::distance(d_overlapped_pos_indices, overlap_end);
// In-place remove any target positions that are overlapped by valid target positions
auto target_pos_end = thrust::remove_if(
rmm::exec_policy(stream),
d_target_positions,
d_target_positions + target_count,
thrust::make_counting_iterator<size_type>(0),
[d_overlapped_pos_indices, overlap_count] __device__(size_type target_position_idx) -> bool {
return thrust::binary_search(thrust::seq,
d_overlapped_pos_indices,
d_overlapped_pos_indices + overlap_count,
target_position_idx);
});
return cudf::distance(d_target_positions, target_pos_end);
}
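// Hypothetical worked example (editorial addition): for the 2-byte target "++" within a single
// row "+++++", the candidate positions are {0, 1, 2, 3}; positions 1, 2 and 3 are flagged as
// potential overlaps, the false-overlap filter re-validates position 2 (it is overlapped only
// by the invalid position 1), and the surviving valid positions are {0, 2}.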
/**
* @brief Filter target positions to remove any invalid target positions.
*
* This performs an in-place modification of the target positions to remove any target positions
* that are invalid, either by the target string overlapping a row boundary or being overlapped by
* another valid target string.
*
* @param[in,out] target_positions Potential target positions to filter in-place.
* @param[in] d_offsets_span Memory range encompassing the string column offsets.
* @param[in] target_size Size of the target string in bytes.
* @param[in] stream CUDA stream to use for device operations.
* @return Number of target positions after filtering.
*/
size_type filter_false_target_positions(rmm::device_uvector<size_type>& target_positions,
device_span<int32_t const> d_offsets_span,
size_type target_size,
rmm::cuda_stream_view stream)
{
// In-place remove any positions for target strings that crossed string boundaries.
auto d_target_positions = target_positions.data();
auto target_pos_end =
thrust::remove_if(rmm::exec_policy(stream),
d_target_positions,
d_target_positions + target_positions.size(),
[d_offsets_span, target_size] __device__(size_type target_pos) -> bool {
// find the end of the string containing the start of this target
size_type const* offset_ptr = thrust::upper_bound(
thrust::seq, d_offsets_span.begin(), d_offsets_span.end(), target_pos);
return target_pos + target_size > *offset_ptr;
});
auto const target_count = cudf::distance(d_target_positions, target_pos_end);
if (target_count == 0) { return 0; }
// Filter out target positions that are the result of overlapping target matches.
return (target_count > 1)
? filter_overlap_target_positions(d_target_positions, target_count, target_size, stream)
: target_count;
}
/**
* @brief Filter target positions beyond the maximum target replacements per row limit.
*
* This performs an in-place modification of the target positions to remove any target positions
* corresponding to targets that should not be replaced due to the maximum target replacement per
* row limit.
*
* @param[in,out] target_positions Target positions to filter in-place.
* @param[in] target_count Number of target positions.
* @param[in] d_offsets_span Memory range encompassing the string column offsets.
* @param[in] max_repl_per_row Maximum target replacements per row limit.
* @param[in] stream CUDA stream to use for device operations.
* @return Number of target positions after filtering.
*/
size_type filter_maxrepl_target_positions(size_type* d_target_positions,
size_type target_count,
device_span<int32_t const> d_offsets_span,
size_type max_repl_per_row,
rmm::cuda_stream_view stream)
{
auto pos_to_row_fn = [d_offsets_span] __device__(size_type target_pos) -> size_type {
auto upper_bound =
thrust::upper_bound(thrust::seq, d_offsets_span.begin(), d_offsets_span.end(), target_pos);
return thrust::distance(d_offsets_span.begin(), upper_bound);
};
// compute the match count per row for each target position
rmm::device_uvector<size_type> match_counts(target_count, stream);
auto d_match_counts = match_counts.data();
thrust::inclusive_scan_by_key(
rmm::exec_policy(stream),
thrust::make_transform_iterator(d_target_positions, pos_to_row_fn),
thrust::make_transform_iterator(d_target_positions + target_count, pos_to_row_fn),
thrust::make_constant_iterator<size_type>(1),
d_match_counts);
// In-place remove any positions that exceed the per-row match limit
auto target_pos_end =
thrust::remove_if(rmm::exec_policy(stream),
d_target_positions,
d_target_positions + target_count,
d_match_counts,
[max_repl_per_row] __device__(size_type match_count) -> bool {
return match_count > max_repl_per_row;
});
return cudf::distance(d_target_positions, target_pos_end);
}
/**
* @brief Scalar string replacement using a character-level parallel algorithm.
*
* Replaces occurrences of the target string with the replacement string using an algorithm with
* character-level parallelism. This algorithm will perform well when the strings in the string
* column are relatively long.
* @see BYTES_PER_VALID_ROW_THRESHOLD
*
* @param strings String column to search for target strings.
* @param chars_start Offset of the first character in the string column.
* @param chars_end Offset beyond the last character in the string column to search.
* @param d_target String to search for within the string column.
* @param d_repl Replacement string if target string is found.
* @param maxrepl Maximum times to replace if target appears multiple times in a string.
* @param stream CUDA stream to use for device operations
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New strings column.
*/
std::unique_ptr<column> replace_char_parallel(strings_column_view const& strings,
size_type chars_start,
size_type chars_end,
string_view const& d_target,
string_view const& d_repl,
int32_t maxrepl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const strings_count = strings.size();
auto const offset_count = strings_count + 1;
auto const d_offsets = strings.offsets().data<int32_t>() + strings.offset();
auto const d_in_chars = strings.chars().data<char>();
auto const chars_bytes = chars_end - chars_start;
auto const target_size = d_target.size_bytes();
// detect a target match at the specified byte position
device_span<char const> const d_chars_span(d_in_chars, chars_end);
auto target_detector = [d_chars_span, d_target] __device__(size_type char_idx) {
auto target_size = d_target.size_bytes();
auto target_ptr = d_chars_span.begin() + char_idx;
return target_ptr + target_size <= d_chars_span.end() &&
d_target.compare(target_ptr, target_size) == 0;
};
// Count target string matches across all character positions, ignoring string boundaries and
// overlapping target strings. This may produce false-positives.
size_type target_count = thrust::count_if(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(chars_start),
thrust::make_counting_iterator<size_type>(chars_end),
target_detector);
if (target_count == 0) {
// nothing to replace, copy the input column
return std::make_unique<cudf::column>(strings.parent(), stream, mr);
}
// create a vector of the potential target match positions
rmm::device_uvector<size_type> target_positions(target_count, stream);
auto d_target_positions = target_positions.data();
thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(chars_start),
thrust::make_counting_iterator<size_type>(chars_end),
d_target_positions,
target_detector);
device_span<int32_t const> d_offsets_span(d_offsets, offset_count);
if (target_size > 1) {
target_count =
filter_false_target_positions(target_positions, d_offsets_span, target_size, stream);
if (target_count == 0) {
// nothing to replace, copy the input column
return std::make_unique<cudf::column>(strings.parent(), stream, mr);
}
}
// filter out any target positions that exceed the per-row match limit
if (maxrepl > 0 && target_count > maxrepl) {
target_count = filter_maxrepl_target_positions(
d_target_positions, target_count, d_offsets_span, maxrepl, stream);
}
// build the offsets column
auto offsets_column = make_numeric_column(
data_type{type_id::INT32}, offset_count, mask_state::UNALLOCATED, stream, mr);
auto offsets_view = offsets_column->mutable_view();
auto delta_per_target = d_repl.size_bytes() - target_size;
device_span<size_type const> d_target_positions_span(d_target_positions, target_count);
auto offsets_update_fn =
[d_target_positions_span, delta_per_target, chars_start] __device__(int32_t offset) -> int32_t {
// determine the number of target positions occurring before this offset
size_type const* next_target_pos_ptr = thrust::lower_bound(
thrust::seq, d_target_positions_span.begin(), d_target_positions_span.end(), offset);
size_type num_prev_targets =
thrust::distance(d_target_positions_span.data(), next_target_pos_ptr);
return offset - chars_start + delta_per_target * num_prev_targets;
};
thrust::transform(rmm::exec_policy(stream),
d_offsets_span.begin(),
d_offsets_span.end(),
offsets_view.begin<int32_t>(),
offsets_update_fn);
// build the characters column
auto chars_column = create_chars_child_column(
strings_count, chars_bytes + (delta_per_target * target_count), stream, mr);
auto d_out_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(chars_start),
chars_bytes,
target_replacer_fn{
d_target_positions_span, d_in_chars, d_out_chars, target_size, d_repl, chars_start});
// free the target positions buffer as it is no longer needed
(void)target_positions.release();
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
stream,
mr);
}
/**
* @brief Scalar string replacement using a row-level parallel algorithm.
*
* Replaces occurrences of the target string with the replacement string using an algorithm with
* row-level parallelism. This algorithm will perform well when the strings in the string
* column are relatively short.
* @see BYTES_PER_VALID_ROW_THRESHOLD
*
* @param strings String column to search for target strings.
* @param d_target String to search for within the string column.
* @param d_repl Replacement string if target string is found.
* @param maxrepl Maximum times to replace if target appears multiple times in a string.
* @param stream CUDA stream to use for device operations
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New strings column.
*/
std::unique_ptr<column> replace_row_parallel(strings_column_view const& strings,
string_view const& d_target,
string_view const& d_repl,
int32_t maxrepl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto d_strings = column_device_view::create(strings.parent(), stream);
// this utility calls the given functor to build the offsets and chars columns
auto children = cudf::strings::detail::make_strings_children(
replace_row_parallel_fn{*d_strings, d_target, d_repl, maxrepl}, strings.size(), stream, mr);
return make_strings_column(strings.size(),
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
stream,
mr);
}
} // namespace
/**
* @copydoc cudf::strings::detail::replace(strings_column_view const&, string_scalar const&,
* string_scalar const&, int32_t, rmm::cuda_stream_view, rmm::mr::device_memory_resource*)
*/
template <>
std::unique_ptr<column> replace<replace_algorithm::AUTO>(strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return make_empty_strings_column(stream, mr);
if (maxrepl == 0) return std::make_unique<cudf::column>(strings.parent(), stream, mr);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
CUDF_EXPECTS(target.is_valid(), "Parameter target must be valid.");
CUDF_EXPECTS(target.size() > 0, "Parameter target must not be empty string.");
string_view d_target(target.data(), target.size());
string_view d_repl(repl.data(), repl.size());
// determine range of characters in the base column
auto const strings_count = strings.size();
auto const offset_count = strings_count + 1;
auto const d_offsets = strings.offsets().data<int32_t>() + strings.offset();
size_type const chars_start =
(strings.offset() == 0)
? 0
: cudf::detail::get_value<int32_t>(strings.offsets(), strings.offset(), stream);
size_type const chars_end = (offset_count == strings.offsets().size())
? strings.chars_size()
: cudf::detail::get_value<int32_t>(
strings.offsets(), strings.offset() + strings_count, stream);
size_type const chars_bytes = chars_end - chars_start;
auto const avg_bytes_per_row = chars_bytes / std::max(strings_count - strings.null_count(), 1);
return (avg_bytes_per_row < BYTES_PER_VALID_ROW_THRESHOLD)
? replace_row_parallel(strings, d_target, d_repl, maxrepl, stream, mr)
: replace_char_parallel(
strings, chars_start, chars_end, d_target, d_repl, maxrepl, stream, mr);
}
template <>
std::unique_ptr<column> replace<replace_algorithm::CHAR_PARALLEL>(
strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return make_empty_strings_column(stream, mr);
if (maxrepl == 0) return std::make_unique<cudf::column>(strings.parent(), stream, mr);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
CUDF_EXPECTS(target.is_valid(), "Parameter target must be valid.");
CUDF_EXPECTS(target.size() > 0, "Parameter target must not be empty string.");
string_view d_target(target.data(), target.size());
string_view d_repl(repl.data(), repl.size());
// determine range of characters in the base column
auto const strings_count = strings.size();
auto const offset_count = strings_count + 1;
auto const d_offsets = strings.offsets().data<int32_t>() + strings.offset();
size_type chars_start = (strings.offset() == 0) ? 0
: cudf::detail::get_value<int32_t>(
strings.offsets(), strings.offset(), stream);
size_type chars_end = (offset_count == strings.offsets().size())
? strings.chars_size()
: cudf::detail::get_value<int32_t>(
strings.offsets(), strings.offset() + strings_count, stream);
return replace_char_parallel(
strings, chars_start, chars_end, d_target, d_repl, maxrepl, stream, mr);
}
template <>
std::unique_ptr<column> replace<replace_algorithm::ROW_PARALLEL>(
strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return make_empty_strings_column(stream, mr);
if (maxrepl == 0) return std::make_unique<cudf::column>(strings.parent(), stream, mr);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
CUDF_EXPECTS(target.is_valid(), "Parameter target must be valid.");
CUDF_EXPECTS(target.size() > 0, "Parameter target must not be empty string.");
string_view d_target(target.data(), target.size());
string_view d_repl(repl.data(), repl.size());
return replace_row_parallel(strings, d_target, d_repl, maxrepl, stream, mr);
}
namespace {
/**
* @brief Function logic for the replace_slice API.
*
* This will perform a replace_slice operation on each string.
*/
struct replace_slice_fn {
column_device_view const d_strings;
string_view const d_repl;
size_type const start;
size_type const stop;
int32_t* d_offsets{};
char* d_chars{};
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
auto const d_str = d_strings.element<string_view>(idx);
auto const length = d_str.length();
char const* in_ptr = d_str.data();
auto const begin = d_str.byte_offset(((start < 0) || (start > length) ? length : start));
auto const end = d_str.byte_offset(((stop < 0) || (stop > length) ? length : stop));
if (d_chars) {
char* out_ptr = d_chars + d_offsets[idx];
out_ptr = copy_and_increment(out_ptr, in_ptr, begin); // copy beginning
out_ptr = copy_string(out_ptr, d_repl); // insert replacement
out_ptr = copy_and_increment(out_ptr, // copy end
in_ptr + end,
d_str.size_bytes() - end);
} else {
d_offsets[idx] = d_str.size_bytes() + d_repl.size_bytes() - (end - begin);
}
}
};
} // namespace
std::unique_ptr<column> replace_slice(strings_column_view const& strings,
string_scalar const& repl,
size_type start,
size_type stop,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return make_empty_strings_column(stream, mr);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
if (stop > 0) CUDF_EXPECTS(start <= stop, "Parameter start must be less than or equal to stop.");
string_view d_repl(repl.data(), repl.size());
auto d_strings = column_device_view::create(strings.parent(), stream);
// this utility calls the given functor to build the offsets and chars columns
auto children = cudf::strings::detail::make_strings_children(
replace_slice_fn{*d_strings, d_repl, start, stop}, strings.size(), stream, mr);
return make_strings_column(strings.size(),
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
stream,
mr);
}
namespace {
/**
* @brief Function logic for the replace_multi API.
*
* This will perform the multi-replace operation on each string.
*/
struct replace_multi_fn {
column_device_view const d_strings;
column_device_view const d_targets;
column_device_view const d_repls;
int32_t* d_offsets{};
char* d_chars{};
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
auto const d_str = d_strings.element<string_view>(idx);
char const* in_ptr = d_str.data();
size_type bytes = d_str.size_bytes();
size_type spos = 0;
size_type lpos = 0;
char* out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr;
// check each character against each target
while (spos < d_str.size_bytes()) {
for (int tgt_idx = 0; tgt_idx < d_targets.size(); ++tgt_idx) {
auto const d_tgt = d_targets.element<string_view>(tgt_idx);
if ((d_tgt.size_bytes() <= (d_str.size_bytes() - spos)) && // check fit
(d_tgt.compare(in_ptr + spos, d_tgt.size_bytes()) == 0)) // and match
{
auto const d_repl = (d_repls.size() == 1) ? d_repls.element<string_view>(0)
: d_repls.element<string_view>(tgt_idx);
bytes += d_repl.size_bytes() - d_tgt.size_bytes();
if (out_ptr) {
out_ptr = copy_and_increment(out_ptr, in_ptr + lpos, spos - lpos);
out_ptr = copy_string(out_ptr, d_repl);
lpos = spos + d_tgt.size_bytes();
}
spos += d_tgt.size_bytes() - 1;
break;
}
}
++spos;
}
if (out_ptr) // copy remainder
memcpy(out_ptr, in_ptr + lpos, d_str.size_bytes() - lpos);
else
d_offsets[idx] = bytes;
}
};
} // namespace
std::unique_ptr<column> replace(strings_column_view const& strings,
strings_column_view const& targets,
strings_column_view const& repls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return make_empty_strings_column(stream, mr);
CUDF_EXPECTS(((targets.size() > 0) && (targets.null_count() == 0)),
"Parameters targets must not be empty and must not have nulls");
CUDF_EXPECTS(((repls.size() > 0) && (repls.null_count() == 0)),
"Parameters repls must not be empty and must not have nulls");
if (repls.size() > 1)
CUDF_EXPECTS(repls.size() == targets.size(), "Sizes for targets and repls must match");
auto d_strings = column_device_view::create(strings.parent(), stream);
auto d_targets = column_device_view::create(targets.parent(), stream);
auto d_repls = column_device_view::create(repls.parent(), stream);
// this utility calls the given functor to build the offsets and chars columns
auto children = cudf::strings::detail::make_strings_children(
replace_multi_fn{*d_strings, *d_targets, *d_repls}, strings.size(), stream, mr);
return make_strings_column(strings.size(),
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
stream,
mr);
}
std::unique_ptr<column> replace_nulls(strings_column_view const& strings,
string_scalar const& repl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(stream, mr);
CUDF_EXPECTS(repl.is_valid(), "Parameter repl must be valid.");
string_view d_repl(repl.data(), repl.size());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<int32_t>(0), [d_strings, d_repl] __device__(size_type idx) {
return d_strings.is_null(idx) ? d_repl.size_bytes()
: d_strings.element<string_view>(idx).size_bytes();
});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, stream, mr);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
auto const bytes =
cudf::detail::get_value<int32_t>(offsets_column->view(), strings_count, stream);
auto chars_column = strings::detail::create_chars_child_column(strings_count, bytes, stream, mr);
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_strings, d_repl, d_offsets, d_chars] __device__(size_type idx) {
string_view d_str = d_repl;
if (!d_strings.is_null(idx)) d_str = d_strings.element<string_view>(idx);
memcpy(d_chars + d_offsets[idx], d_str.data(), d_str.size_bytes());
});
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
0,
rmm::device_buffer{0, stream, mr},
stream,
mr);
}
} // namespace detail
// external API
std::unique_ptr<column> replace(strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace(strings, target, repl, maxrepl, rmm::cuda_stream_default, mr);
}
std::unique_ptr<column> replace_slice(strings_column_view const& strings,
string_scalar const& repl,
size_type start,
size_type stop,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace_slice(strings, repl, start, stop, rmm::cuda_stream_default, mr);
}
std::unique_ptr<column> replace(strings_column_view const& strings,
strings_column_view const& targets,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace(strings, targets, repls, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
|
369a974bed9be9ceefd428bbd84273e21a259265.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB SP code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to [email protected] //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: [email protected] //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include <assert.h>
#include "header.h"
//---------------------------------------------------------------------
// block-diagonal matrix-vector multiplication
//---------------------------------------------------------------------
__global__ void tzetar_kernel(
dim3 gridOffset,
int nx2, int ny2, int nz2,
double (*u )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
double (*us )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*vs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*ws )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*qs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*speed )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*rhs )/*[KMAX]*/[5][JMAXP+1][IMAXP+1]
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y;
int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
double t1, t2, t3, ac, xvel, yvel, zvel, r1, r2, r3, r4, r5;
double btuz, ac2u, uzik1;
if (k >= 1 && k <= nz2) {
if (j >= 1 && j <= ny2) {
if (i >= 1 && i <= nx2) {
xvel = us[k][j][i];
yvel = vs[k][j][i];
zvel = ws[k][j][i];
ac = speed[k][j][i];
ac2u = ac*ac;
r1 = rhs[k][0][j][i];
r2 = rhs[k][1][j][i];
r3 = rhs[k][2][j][i];
r4 = rhs[k][3][j][i];
r5 = rhs[k][4][j][i];
uzik1 = u[k][0][j][i];
btuz = bt * uzik1;
t1 = btuz/ac * (r4 + r5);
t2 = r3 + t1;
t3 = btuz * (r4 - r5);
rhs[k][0][j][i] = t2;
rhs[k][1][j][i] = -uzik1*r2 + xvel*t2;
rhs[k][2][j][i] = uzik1*r1 + yvel*t2;
rhs[k][3][j][i] = zvel*t2 + t3;
rhs[k][4][j][i] = uzik1*(-xvel*r2 + yvel*r1) +
qs[k][j][i]*t2 + c2iv*ac2u*t1 + zvel*t3;
}
}
}
}
void tzetar()
{
if (timeron) timer_start(t_tzetar);
hipLaunchKernelGGL(( tzetar_kernel) , dim3(gridDim_), dim3(blockDim_) , 0, 0,
gridOffset, nx2, ny2, nz2, dev_u[device], dev_us[device], dev_vs[device], dev_ws[device], dev_qs[device], dev_speed[device], dev_rhs[device]
);
if (timeron) timer_stop(t_tzetar);
}
| 369a974bed9be9ceefd428bbd84273e21a259265.cu | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB SP code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to [email protected] //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: [email protected] //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include <assert.h>
#include "header.h"
//---------------------------------------------------------------------
// block-diagonal matrix-vector multiplication
//---------------------------------------------------------------------
__global__ void tzetar_kernel(
dim3 gridOffset,
int nx2, int ny2, int nz2,
double (*u )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
double (*us )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*vs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*ws )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*qs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*speed )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*rhs )/*[KMAX]*/[5][JMAXP+1][IMAXP+1]
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y;
int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
double t1, t2, t3, ac, xvel, yvel, zvel, r1, r2, r3, r4, r5;
double btuz, ac2u, uzik1;
if (k >= 1 && k <= nz2) {
if (j >= 1 && j <= ny2) {
if (i >= 1 && i <= nx2) {
xvel = us[k][j][i];
yvel = vs[k][j][i];
zvel = ws[k][j][i];
ac = speed[k][j][i];
ac2u = ac*ac;
r1 = rhs[k][0][j][i];
r2 = rhs[k][1][j][i];
r3 = rhs[k][2][j][i];
r4 = rhs[k][3][j][i];
r5 = rhs[k][4][j][i];
uzik1 = u[k][0][j][i];
btuz = bt * uzik1;
t1 = btuz/ac * (r4 + r5);
t2 = r3 + t1;
t3 = btuz * (r4 - r5);
rhs[k][0][j][i] = t2;
rhs[k][1][j][i] = -uzik1*r2 + xvel*t2;
rhs[k][2][j][i] = uzik1*r1 + yvel*t2;
rhs[k][3][j][i] = zvel*t2 + t3;
rhs[k][4][j][i] = uzik1*(-xvel*r2 + yvel*r1) +
qs[k][j][i]*t2 + c2iv*ac2u*t1 + zvel*t3;
}
}
}
}
void tzetar()
{
if (timeron) timer_start(t_tzetar);
tzetar_kernel <<< gridDim_, blockDim_ >>> (
gridOffset, nx2, ny2, nz2, dev_u[device], dev_us[device], dev_vs[device], dev_ws[device], dev_qs[device], dev_speed[device], dev_rhs[device]
);
if (timeron) timer_stop(t_tzetar);
}
|
aaea288e44feab78a8b9f83a1443d7ab9cf9b7d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudaBubbleSort.cuh"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <ctime>
#include <stdio.h>
#include <vector>
#include <limits>
#include <algorithm>
#include <Windows.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <iostream>
__host__ int64 GetTimeMs64()
{
/* Windows */
FILETIME ft;
LARGE_INTEGER li;
/* Get the number of 100-nanosecond intervals elapsed
* since January 1, 1601 (UTC) and copy it
* to a LARGE_INTEGER structure. */
GetSystemTimeAsFileTime(&ft);
li.LowPart = ft.dwLowDateTime;
li.HighPart = ft.dwHighDateTime;
uint64 ret = li.QuadPart;
ret -= 116444736000000000LL;
/* Convert from file time to UNIX epoch time. */
ret /= 10000; /* From 100 nano seconds (10^-7)
to 1 millisecond (10^-3) intervals */
return ret;
}
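// Minimal usage sketch (editorial addition; assumes the int64 typedef from cudaBubbleSort.cuh):
// int64 start = GetTimeMs64();
// /* ... launch kernels, hipDeviceSynchronize() ... */
// int64 elapsedMs = GetTimeMs64() - start;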
/*
__global__ void sumAndGetThreshold(float *a, float *aCopy,
int* ind, float* negSum,
int* lock, int size,
int* cuttingIdx, float *posSum)
{
int elem = threadIdx.x + blockDim.x * blockIdx.x;
while(elem < size)
{
aCopy[elem] = a[elem];
ind[elem] = elem;
elem = elem + blockDim.x * gridDim.x;
}
__syncthreads();
float cacheFirst = 0;
float cacheSecond = 0;
float cacheThird = 0;
int cacheFirstInd;
int cacheSecondInd;
int cacheThirdInd ;
for (int j = 0; j < size/2+1; j++)
{
int i = (threadIdx.x + blockDim.x*blockIdx.x)*2;
while(i+1 < size)
{
cacheFirst = aCopy[i];
cacheSecond = aCopy[i+1];
cacheFirstInd = ind[i];
cacheSecondInd = ind[i+1];
if(cacheFirst < cacheSecond)
{
// Weight
float temp = cacheFirst;
aCopy[i] = cacheSecond;
aCopy[i+1] = temp;
// Indirection
int tempInd = cacheFirstInd;
ind[i] = cacheSecondInd;
ind[i+1] = tempInd;
}
i += (blockDim.x*gridDim.x)*2;
}
__syncthreads();
i = (threadIdx.x + blockDim.x*blockIdx.x)*2 +1;
while(i+1 < size)
{
cacheFirst = aCopy[i];
cacheSecond = aCopy[i+1];
cacheFirstInd = ind[i];
cacheSecondInd = ind[i+1];
if(cacheFirst < cacheSecond)
{
// Weight
float temp = cacheFirst;
aCopy[i] = cacheSecond;
aCopy[i+1] = temp;
// Indirection
int tempInd = cacheFirstInd;
ind[i] = cacheSecondInd;
ind[i+1] = tempInd;
}
i += (blockDim.x*gridDim.x)*2;
}
}
__syncthreads();
__shared__ int thatPoint;
__shared__ float cachePos[1024];
__shared__ float cacheNeg[1024];
__shared__ float sumNegShared;
sumNegShared = 0.0;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int cacheIndex = threadIdx.x;
float tempNeg = 0.0;
float tempPos = 0.0;
while(tid < size)
{
float aCopyTemp = aCopy[tid];
if(aCopyTemp < 0.0)
tempNeg += aCopyTemp;
//else
// tempPos += aCopyTemp;
tid += blockDim.x*gridDim.x;
}
//cachePos[cacheIndex] = tempPos;
cacheNeg[cacheIndex] = tempNeg;
__syncthreads();
int NewI = blockDim.x/2;
while(NewI!=0)
{
if(cacheIndex < NewI)
{
//cachePos[cacheIndex] += cachePos[cacheIndex+NewI];
cacheNeg[cacheIndex] += cacheNeg[cacheIndex+NewI];
}
__syncthreads();
NewI/=2;
}
if(cacheIndex == 0)
{
//*posSum = (float)cachePos[0];
*negSum = cacheNeg[0];
sumNegShared = cacheNeg[0];
}
__syncthreads();
// Find the first index at which the negative sum is cancelled out.
tid = blockDim.x * blockIdx.x + threadIdx.x;
thatPoint = size;
while(tid < size-1)
{
float firstFloat = aCopy[tid];
float secondFloat = aCopy[tid+1];
if(firstFloat > 0 && secondFloat < 0)
{
thatPoint = tid; // Only one thread can enter here
*cuttingIdx = tid;
}
tid += blockDim.x*gridDim.x;
}
__syncthreads();
//__shared__ float cachePos[1024];
//__shared__ float sumNegShared;
__shared__ bool breakFlux;
breakFlux = false;
sumNegShared = *negSum;
// Obtain the cutting index through an iterative reduction process.
int valuesToSum = *cuttingIdx;
int miniBlockSize = (valuesToSum+1)/blockDim.x+1;
int finBlock = valuesToSum - miniBlockSize*threadIdx.x;
int iniBlock = valuesToSum - miniBlockSize*(threadIdx.x+1);
float parSum = 0.0;
for(int indice = finBlock; indice > iniBlock && ! breakFlux; indice--)
{
if(indice < 0) continue;
parSum += a[ind[indice]];
// Perhaps this check should not be here
if(parSum > -sumNegShared && threadIdx.x == 0)
{
*cuttingIdx = indice;
breakFlux = true;
}
}
// If we are already done, return. This is not the usual case; it may be worth
// removing this to run faster in the majority of cases.
__syncthreads();
if(breakFlux) return;
// This may be redundant
if(threadIdx.x == 0) *posSum = parSum;
// Store the partial sums in the cache array.
// The array is ordered in reverse... that way the whole
// algorithm we already had working keeps functioning.
//int cacheIndex = threadIdx.x;
cachePos[cacheIndex] = parSum;
__shared__ float sumAccumulated;
__shared__ int idxAccumulated;
__shared__ bool refined;
__shared__ bool cutIdx;
idxAccumulated = 0;
sumAccumulated = 0.0;
refined = false;
cutIdx = false;
float tempSum = 0.0;
int restValuesToSum = valuesToSum;
while(!refined)
{
// cache initialization.
int threadReposition = threadIdx.x-idxAccumulated;
cachePos[threadIdx.x] = 0.0;
if(threadReposition >= 0)
{
cachePos[threadReposition] = parSum;
}
__syncthreads();
cutIdx = false;
int offset = 1;
int particion = offset*2;
while(!cutIdx && offset <= restValuesToSum)
{
int secondTempIdx = threadReposition+offset;
if(threadReposition >= 0 && threadReposition%particion == 0)
{
tempSum = cachePos[threadReposition] + cachePos[secondTempIdx];
if(threadReposition == 0 && tempSum + sumAccumulated > -sumNegShared)
{
cutIdx = true;
sumAccumulated += cachePos[threadReposition];
idxAccumulated += offset;
}
else
{
cachePos[threadReposition] = tempSum;
}
}
__syncthreads();
offset = particion;
particion = offset*2;
}
__syncthreads();
restValuesToSum = valuesToSum - idxAccumulated;
if(threadReposition == 0 && (sumAccumulated > -sumNegShared || restValuesToSum <= 0))
{
refined = true;
}
}
__syncthreads();
if(cacheIndex == 0)
{
*posSum = sumAccumulated;
*cuttingIdx = *cuttingIdx - idxAccumulated*miniBlockSize;
}
}
*/
__global__ void sort(float *a, float *aCopy, int* ind, float* negSum, int* lock, int size, int* cuttingIdx, float *posSum)
{
int elem = threadIdx.x + blockDim.x * blockIdx.x;
while(elem < size)
{
aCopy[elem] = a[elem];
ind[elem] = elem;
elem = elem + blockDim.x * gridDim.x;
}
__syncthreads();
float cacheFirst = 0;
float cacheSecond = 0;
float cacheThird = 0;
int cacheFirstInd;
int cacheSecondInd;
int cacheThirdInd ;
for (int j = 0; j < size/2+1 ; j++)
{
int i = (threadIdx.x + blockDim.x * blockIdx.x)*2;
while(i+1 < size)
{
cacheFirst = aCopy[i];
cacheSecond = aCopy[i+1];
cacheFirstInd = ind[i];
cacheSecondInd = ind[i+1];
if(cacheFirst < cacheSecond)
{
// Weight
float temp = cacheFirst;
aCopy[i] = cacheSecond;
aCopy[i+1] = temp;
// Indirection
int tempInd = cacheFirstInd;
ind[i] = cacheSecondInd;
ind[i+1] = tempInd;
}
i += (blockDim.x*gridDim.x)*2;
}
__syncthreads();
i = (threadIdx.x + blockDim.x*blockIdx.x)*2 +1;
while(i+1 < size)
{
cacheFirst = aCopy[i];
cacheSecond = aCopy[i+1];
cacheFirstInd = ind[i];
cacheSecondInd = ind[i+1];
if(cacheFirst < cacheSecond)
{
// Weight
float temp = cacheFirst;
aCopy[i] = cacheSecond;
aCopy[i+1] = temp;
// Indirection
int tempInd = cacheFirstInd;
ind[i] = cacheSecondInd;
ind[i+1] = tempInd;
}
i += (blockDim.x*gridDim.x)*2;
}
}
__syncthreads();
__shared__ int thatPoint;
__shared__ float cacheNeg[threadsPerOnlyOneBlock];
__shared__ float sumNegShared;
sumNegShared = 0.0;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int cacheIndex = threadIdx.x;
float tempNeg = 0.0;
float tempPos = 0.0;
while(tid < size)
{
float aCopyTemp = aCopy[tid];
if(aCopyTemp < 0.0)
tempNeg += aCopyTemp;
tid += blockDim.x*gridDim.x;
}
cacheNeg[cacheIndex] = tempNeg;
__syncthreads();
int NewI = blockDim.x/2;
while(NewI!=0)
{
if(cacheIndex < NewI)
{
cacheNeg[cacheIndex] += cacheNeg[cacheIndex+NewI];
}
__syncthreads();
NewI/=2;
}
if(cacheIndex == 0)
{
*negSum = cacheNeg[0];
sumNegShared = cacheNeg[0];
}
__syncthreads();
// Find the first index at which the negative sum is cancelled out.
tid = blockDim.x * blockIdx.x + threadIdx.x;
thatPoint = size;
while(tid < size-1)
{
float firstFloat = aCopy[tid];
float secondFloat = aCopy[tid+1];
if(firstFloat > 0 && secondFloat < 0)
{
thatPoint = tid; // Only one thread can enter here
*cuttingIdx = tid;
}
tid += blockDim.x*gridDim.x;
}
}
__global__ void getBalancedThreshold(float *a, int* ind, float* negSum, int size, int* cuttingIdx, float *posSum)
{
__shared__ float cachePos[threadsPerOnlyOneBlock];
__shared__ float sumNegShared;
__shared__ bool breakFlux;
breakFlux = false;
sumNegShared = *negSum;
// Obtain the cutting index through an iterative reduction process.
int valuesToSum = *cuttingIdx;
int miniBlockSize = (valuesToSum+1)/blockDim.x+1;
int finBlock = valuesToSum - miniBlockSize*threadIdx.x;
int iniBlock = valuesToSum - miniBlockSize*(threadIdx.x+1);
float parSum = 0.0;
for(int indice = finBlock; indice > iniBlock && ! breakFlux; indice--)
{
if(indice < 0) continue;
parSum += a[ind[indice]];
// Perhaps this check should not be here
if(parSum > -sumNegShared && threadIdx.x == 0)
{
*cuttingIdx = indice;
breakFlux = true;
}
}
// If we are already done, return. This is not the usual case; it may be worth
// removing this to run faster in the majority of cases.
__syncthreads();
if(breakFlux) return;
// This may be redundant
if(threadIdx.x == 0) *posSum = parSum;
// Store the partial sums in the cache array.
// The array is ordered in reverse... that way the whole
// algorithm we already had working keeps functioning.
int cacheIndex = threadIdx.x;
cachePos[cacheIndex] = parSum;
__shared__ float sumAccumulated;
__shared__ int idxAccumulated;
__shared__ bool refined;
__shared__ bool cutIdx;
idxAccumulated = 0;
sumAccumulated = 0.0;
refined = false;
cutIdx = false;
float tempSum = 0.0;
int restValuesToSum = valuesToSum;
while(!refined)
{
// cache initialization.
int threadReposition = threadIdx.x-idxAccumulated;
cachePos[threadIdx.x] = 0.0;
if(threadReposition >= 0)
{
cachePos[threadReposition] = parSum;
}
__syncthreads();
cutIdx = false;
int offset = 1;
int particion = offset*2;
while(!cutIdx && offset <= restValuesToSum)
{
int secondTempIdx = threadReposition+offset;
if(threadReposition >= 0 && threadReposition%particion == 0)
{
tempSum = cachePos[threadReposition] + cachePos[secondTempIdx];
if(threadReposition == 0 && tempSum + sumAccumulated > -sumNegShared)
{
cutIdx = true;
sumAccumulated += cachePos[threadReposition];
idxAccumulated += offset;
}
else
{
cachePos[threadReposition] = tempSum;
}
}
__syncthreads();
offset = particion;
particion = offset*2;
}
__syncthreads();
restValuesToSum = valuesToSum - idxAccumulated;
if(threadReposition == 0 && (sumAccumulated > -sumNegShared || restValuesToSum <= 0))
{
refined = true;
}
}
__syncthreads();
if(cacheIndex == 0)
{
*posSum = sumAccumulated;
*cuttingIdx = *cuttingIdx - idxAccumulated*miniBlockSize;
}
}
__global__ void swapOnKernel(int *a, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x * 2;
int cacheFirst;
int cacheSecond;
int cacheThird;
for (int j = 0; j < size/2 + 1; j++)
{
i = blockDim.x * blockIdx.x + threadIdx.x * 2;
while(i < size)
{
if(i+1 < size) {
cacheFirst = a[i];
cacheSecond = a[i+1];
if(cacheFirst > cacheSecond) {
int temp = cacheFirst;
a[i] = cacheSecond;
cacheSecond = a[i+1] = temp;
}
}
__syncthreads();
if(i+2 < size) {
cacheThird = a[i+2];
if(cacheSecond > cacheThird) {
int temp = cacheSecond;
a[i+1] = cacheThird;
a[i+2] = temp;
}
}
i += (blockDim.x * gridDim.x)*2;
}
__syncthreads();
}
}
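// ---------------------------------------------------------------------------
// A minimal usage sketch for swapOnKernel, assuming a single-block launch; the
// helper name exampleSwapOnKernelLaunch and the thread cap are illustrative
// assumptions, not part of the original interface. The kernel runs its
// size/2+1 odd/even passes internally, so one launch sorts the array ascending.
static void exampleSwapOnKernelLaunch(int* hostValues, int size)
{
    int* devValues = 0;
    hipMalloc((void**)&devValues, size * sizeof(int));
    hipMemcpy(devValues, hostValues, size * sizeof(int), hipMemcpyHostToDevice);
    // One block, one thread per pair of elements (capped like the other launches).
    int threads = size / 2;
    if (threads < 1) threads = 1;
    if (threads > threadsPerOnlyOneBlock) threads = threadsPerOnlyOneBlock;
    hipLaunchKernelGGL(swapOnKernel, dim3(1), dim3(threads), 0, 0, devValues, size);
    hipMemcpy(hostValues, devValues, size * sizeof(int), hipMemcpyDeviceToHost);
    hipFree(devValues);
}
// ---------------------------------------------------------------------------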
__host__ void bubbleSort(float arr[], int n) {
bool swapped = true;
int j = 0;
float tmp;
while (swapped) {
swapped = false;
j++;
for (int i = 0; i < n - j; i++) {
if (arr[i] > arr[i + 1]) {
tmp = arr[i];
arr[i] = arr[i + 1];
arr[i + 1] = tmp;
swapped = true;
}
}
}
}
int sortAndGetThreashold(float* weights, int* indirection, int* threshold, int size, bool verbose)
{
// Create timer
hipEvent_t start, stop;
float time = 0.0;
if(verbose)
{
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
}
float *dev_aCopy = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for one vectors.
cudaStatus = hipMalloc((void**)&dev_aCopy, size * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
int maxThreads = threadsPerBlock;
int computingThreads = size/2;
if(computingThreads > maxThreads)
computingThreads = maxThreads;
float* negSum;
float* posSum;
int* lock;
cudaStatus = hipMalloc((void**)&negSum, sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc negSum failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&posSum, sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc posSum failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&lock, sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc lock failed!");
goto Error;
}
int hostLock = 0;
cudaStatus = hipMemcpy(lock, &hostLock, sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy hostLock failed!");
goto Error;
}
// Sort the weight array and encode the resulting order in the indirection array
hipLaunchKernelGGL(( sort), dim3(1), dim3(computingThreads), 0, 0, weights, dev_aCopy, indirection, negSum, lock, size, threshold , posSum);
if(verbose)
{
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "sort fallo: %s\n", hipGetErrorString(cudaStatus));
fflush(0);
return cudaStatus;
}
}
// Obtain the cutting index
hipLaunchKernelGGL(( getBalancedThreshold), dim3(1), dim3(computingThreads), 0, 0, weights, indirection, negSum, size, threshold, posSum);
if(verbose)
{
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
printf("hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
printf("getBalancedThreshold failed: %s\n", hipGetErrorString(cudaStatus));fflush(0);
goto Error;
}
else
printf("ordenacion correcta!\n");
float *a = (float*)malloc(size * sizeof(float));
float *a2 = (float*)malloc(size * sizeof(float));
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(a, weights, size * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy aCopy failed!");
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(a2, dev_aCopy, size * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy aCopy failed!");
goto Error;
}
int *ind = (int*)malloc(size * sizeof(int));
cudaStatus = hipMemcpy(ind, indirection, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy ind failed!");
goto Error;
}
int cuttingIdxHost2 = 0;
cudaStatus = hipMemcpy(&cuttingIdxHost2, threshold, sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy cuttingIdxHost failed!");
goto Error;
}
printf("\n\nCUDA RESULTS...\n");
printf("Hemos cortado en :%d\n", cuttingIdxHost2);
float sumNegHost = 0;
float sumPosHost = 0;
cudaStatus = hipMemcpy(&sumNegHost, negSum, sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy ind failed!");
goto Error;
}
cudaStatus = hipMemcpy(&sumPosHost, posSum, sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy ind failed!");
goto Error;
}
printf("Sumatorio total negativo: %f\n sumatorio de corte: %f\n", sumNegHost, sumPosHost);
double sumaTotal = 0.0;
double sumaPos = 0.0;
double sumaNeg = 0.0;
double sumaTh = 0.0;
double sumaPreTh = 0.0;
int threshold = -1;
double sumaAcumuladaTh = 0.0;
double ordenado = 99999999;
double restoPos = 0.0;
double relacionSumatorioTh = 0.0;
int change = 0;
printf("\n\nCPU RESULTS...\n");
for(int i = 0; i < size; i++)
{
float value = a[ind[i]];
if(a2[i] != value)
{
printf("PROBLEMA!!! -> La indireccion no esta bien,\n");
printf(" o hay una incongruencia con los vectores ordenados: %d -> [%f][%f]\n", i, a2[i], value);
}
if(value < 0 && ordenado >= 0)
printf("pos->neg: %d\n", i);
if(value > ordenado)
printf("No esta ordenado!: %d -> [%f][%f]\n", i, a2[i], value);
ordenado = value;
sumaTotal += value;
if(value >= 0)
{
sumaPos += value;
}
else sumaNeg += value;
if(i >= cuttingIdxHost2 && value >= 0)
relacionSumatorioTh += value;
else if(value >= 0)
sumaTh += value;
}
for(int i = size-1; i>=0; i--)
{
float value = a[ind[i]];
if(value >= 0)
{
if(threshold < 0)
{
sumaAcumuladaTh += value;
if(sumaAcumuladaTh > -sumaNeg)
threshold = i;
}
else
{
restoPos += value;
}
}
}
printf("Suma total: %f\n", sumaTotal);
printf("sumaNeg total: %f\n", sumaNeg);
printf("resto de posSum: %f\n", restoPos);
printf("sumaAcumuladaTh: %f\n", sumaAcumuladaTh);
printf("relacion sumaAcumuladaTh con cuda: %f\n", relacionSumatorioTh);
printf("threshold: %d\n", threshold);
}
Error:
// Only temporary buffers are freed here...
hipFree(dev_aCopy);
hipFree(negSum);
hipFree(posSum);
return cudaStatus;
}
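// ---------------------------------------------------------------------------
// A minimal caller sketch for sortAndGetThreashold (illustrative only; the
// helper name exampleThresholdCall and the buffer names are assumptions). The
// function expects device pointers: the weights are sorted indirectly on the
// GPU and the balanced cutting index is written into the threshold buffer.
static int exampleThresholdCall(const float* hostWeights, int size, bool verbose)
{
    float* devWeights = 0;
    int* devIndirection = 0;
    int* devThreshold = 0;
    hipMalloc((void**)&devWeights, size * sizeof(float));
    hipMalloc((void**)&devIndirection, size * sizeof(int));
    hipMalloc((void**)&devThreshold, sizeof(int));
    hipMemcpy(devWeights, hostWeights, size * sizeof(float), hipMemcpyHostToDevice);
    sortAndGetThreashold(devWeights, devIndirection, devThreshold, size, verbose);
    int hostThreshold = -1;
    hipMemcpy(&hostThreshold, devThreshold, sizeof(int), hipMemcpyDeviceToHost);
    hipFree(devWeights);
    hipFree(devIndirection);
    hipFree(devThreshold);
    return hostThreshold;
}
// ---------------------------------------------------------------------------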
int thrust_sort(void)
{
// H has storage for 4 integers
thrust::host_vector<int> H(4);
// initialize individual elements
H[0] = 14;
H[1] = 20;
H[2] = 38;
H[3] = 46;
// H.size() returns the size of vector H
std::cout << "H has size " << H.size() << std::endl;
// print contents of H
for(int i = 0; i < H.size(); i++)
std::cout << "H[" << i << "] = " << H[i] << std::endl;
// resize H
H.resize(2);
std::cout << "H now has size " << H.size() << std::endl;
// Copy host_vector H to device_vector D
thrust::device_vector<int> D = H;
// elements of D can be modified
D[0] = 99;
D[1] = 88;
// print contents of D
for(int i = 0; i < D.size(); i++)
std::cout << "D[" << i << "] = " << D[i] << std::endl;
// H and D are automatically deleted when the function returns
return 0;
}
int doSorting(int number)
{
srand((unsigned)time(0));
int arraySize;
if(number > 10) arraySize = number;
else arraySize = 10;
// Create vector and fill it with values
thrust::host_vector<float> Ahost(arraySize);
thrust::host_vector<int> indhost(arraySize);
std::vector<float> a(arraySize);
for (int i = 0; i < arraySize; i++)
{
a[i] = 0.6-((float)rand()/RAND_MAX );
Ahost[i] = a[i];
indhost[i] = i;
}
std::vector<float> b(a);
thrust::device_vector<float> A = Ahost;
thrust::device_vector<int> ind = indhost;
int64 stlSortStart1 = GetTimeMs64();
thrust::sort_by_key(A.begin(), A.end(), ind.begin(), thrust::greater<float>());
int64 stlSortFinish1 = GetTimeMs64();
thrust::copy(ind.begin(), ind.end(), indhost.begin());
float time = 0.0;
// Swap elements in parallel.
//hipError_t cudaStatus = sortWithCuda(&a[0], &ind[0], a.size(), &time);
//if (cudaStatus != hipSuccess) {
// fprintf(stderr, "sortWithCuda failed!");
// return 1;
//}
int64 stlSortStart = GetTimeMs64();
bubbleSort(&b[0], b.size());
int64 stlSortFinish = GetTimeMs64();
FILE* fout;
fout = fopen("C:\\Users\\chus\\Documents\\dev\\Data\\models\\vel.txt", "a");
fprintf (fout, " %d, %d, ", arraySize, stlSortFinish1 - stlSortStart1);
fprintf (fout, "%d, ", (stlSortFinish - stlSortStart));
bool sortingSuccessful = true;
for (int i = 0; i < Ahost.size()-1 /*&& sortingSuccessful*/; i++)
{
//printf("Valores: posicion %d---> %d %f %f\n ", i, indhost[i], Ahost[indhost[i]], b[Ahost.size()-1-i]);
if (Ahost[indhost[i]] < Ahost[indhost[i+1]])
{
sortingSuccessful = false;
printf("esta desordenado: posicion %d---> %f %f\n ", i, Ahost[indhost[i]], Ahost[indhost[i+1]]);
}
if(Ahost[indhost[i]] != b[Ahost.size()-1-i])
{
sortingSuccessful = false;
printf("No es igual: posicion %d---> %f %f\n ", i, Ahost[indhost[i]], b[Ahost.size()-1-i]);
}
}
if(!sortingSuccessful) {
printf("Sorting failed.\n");
}
fprintf(fout, " %f\n", ((double)(stlSortFinish - stlSortStart))/(double)(stlSortFinish1 - stlSortStart1));
fclose(fout);
Ahost.clear();
A.clear();
Ahost.shrink_to_fit();
A.shrink_to_fit();
ind.clear();
ind.shrink_to_fit();
indhost.clear();
indhost.shrink_to_fit();
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
hipError_t cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
//getchar();
return 0;
}
// Helper function for using CUDA to sort vectors in parallel.
__host__ hipError_t sortWithCuda(float *a, int *ind, size_t size, float* time)
{
float *dev_a = 0;
float *dev_aCopy = 0;
int *dev_ind = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for one vectors.
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Allocate GPU buffers for one vectors.
cudaStatus = hipMalloc((void**)&dev_aCopy, size * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_ind, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
int maxThreads = threadsPerOnlyOneBlock;
int computingThreads = size/2;
if(computingThreads > maxThreads)
computingThreads = maxThreads;
int* cutting_idx;
cudaStatus = hipMalloc((void**)&cutting_idx, sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc cutting_idx failed!");
goto Error;
}
float* negSum;
cudaStatus = hipMalloc((void**)&negSum, sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc negSum failed!");
goto Error;
}
float* posSum;
cudaStatus = hipMalloc((void**)&posSum, sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc posSum failed!");
goto Error;
}
int* lock;
cudaStatus = hipMalloc((void**)&lock, sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc lock failed!");
goto Error;
}
int hostLock = 0;
cudaStatus = hipMemcpy(lock, &hostLock, sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy hostLock failed!");
goto Error;
}
// Create timer
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( sort), dim3(1), dim3(computingThreads), 0, 0, dev_a, dev_aCopy, dev_ind, negSum, lock, size, cutting_idx , posSum);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "sort fallo: %s\n", hipGetErrorString(cudaStatus));
fflush(0);
return cudaStatus;
}
int cuttingIdxHost = 0;
cudaStatus = hipMemcpy(&cuttingIdxHost, cutting_idx, sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy cuttingIdxHost failed!");
goto Error;
}
hipLaunchKernelGGL(( getBalancedThreshold), dim3(1), dim3(computingThreads), 0, 0, dev_a, dev_ind, negSum, size, cutting_idx, posSum);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(time, start, stop);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
printf("hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
printf("getBalancedThreshold failed: %s\n", hipGetErrorString(cudaStatus));fflush(0);
goto Error;
}
else
printf("getBalancedThreshold correcta!\n");
float *a2 = (float*)malloc(size * sizeof(float));
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(a, dev_a, size * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy aCopy failed!");
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(a2, dev_aCopy, size * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy aCopy failed!");
goto Error;
}
cudaStatus = hipMemcpy(ind, dev_ind, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy ind failed!");
goto Error;
}
int cuttingIdxHost2 = 0;
cudaStatus = hipMemcpy(&cuttingIdxHost2, cutting_idx, sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy cuttingIdxHost failed!");
goto Error;
}
printf("\n\nCUDA RESULTS...\n");
printf("pos->neg :%d\n", cuttingIdxHost);
printf("Hemos cortado en :%d\n", cuttingIdxHost2);
float sumNegHost = 0;
float sumPosHost = 0;
cudaStatus = hipMemcpy(&sumNegHost, negSum, sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy ind failed!");
goto Error;
}
cudaStatus = hipMemcpy(&sumPosHost, posSum, sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy ind failed!");
goto Error;
}
printf("Sumatorio total negativo: %f\n sumatorio de corte: %f\n", sumNegHost, sumPosHost);
double sumaTotal = 0.0;
double sumaPos = 0.0;
double sumaNeg = 0.0;
double sumaTh = 0.0;
double sumaPreTh = 0.0;
int threshold = -1;
double sumaAcumuladaTh = 0.0;
double ordenado = 99999999;
double restoPos = 0.0;
double relacionSumatorioTh = 0.0;
int change = 0;
printf("\n\nCPU RESULTS...\n");
for(int i = 0; i < size; i++)
{
float value = a[ind[i]];
if(a2[i] != value)
printf("PROBLEMA!!! -> La indireccion no esta bien o algo: %d -> [%f][%f]\n", i, a2[i], value);
if(value < 0 && ordenado >= 0)
printf("pos->neg: %d\n", i);
if(value > ordenado)
printf("No esta ordenado!: %d -> [%f][%f]\n", i, a2[i], value);
ordenado = value;
sumaTotal += value;
if(value >= 0)
{
sumaPos += value;
}
else sumaNeg += value;
if(i >= cuttingIdxHost2 && value >= 0)
relacionSumatorioTh += value;
else if(value >= 0)
sumaTh += value;
}
for(int i = size-1; i>=0; i--)
{
float value = a[ind[i]];
if(value >= 0)
{
if(threshold < 0)
{
sumaAcumuladaTh += value;
if(sumaAcumuladaTh > -sumaNeg)
threshold = i;
}
else
{
restoPos += value;
}
}
}
printf("Suma total: %f\n", sumaTotal);
printf("sumaNeg total: %f\n", sumaNeg);
printf("resto de posSum: %f\n", restoPos);
printf("sumaAcumuladaTh: %f\n", sumaAcumuladaTh);
printf("relacion sumaAcumuladaTh con cuda: %f\n", relacionSumatorioTh);
printf("threshold: %d\n", threshold);
Error:
hipFree(dev_a);
hipFree(dev_aCopy);
hipFree(dev_ind);
hipFree(negSum);
hipFree(posSum);
hipFree(cutting_idx);
return cudaStatus;
}
| aaea288e44feab78a8b9f83a1443d7ab9cf9b7d9.cu | #include "cudaBubbleSort.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <ctime>
#include <stdio.h>
#include <vector>
#include <limits>
#include <algorithm>
#include <Windows.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <iostream>
__host__ int64 GetTimeMs64()
{
/* Windows */
FILETIME ft;
LARGE_INTEGER li;
/* Get the amount of 100 nano seconds intervals elapsed
* since January 1, 1601 (UTC) and copy it
* to a LARGE_INTEGER structure. */
GetSystemTimeAsFileTime(&ft);
li.LowPart = ft.dwLowDateTime;
li.HighPart = ft.dwHighDateTime;
uint64 ret = li.QuadPart;
ret -= 116444736000000000LL;
/* Convert from file time to UNIX epoch time. */
ret /= 10000; /* From 100 nano seconds (10^-7)
to 1 millisecond (10^-3) intervals */
return ret;
}
/*
__global__ void sumAndGetThreshold(float *a, float *aCopy,
int* ind, float* negSum,
int* lock, int size,
int* cuttingIdx, float *posSum)
{
int elem = threadIdx.x + blockDim.x * blockIdx.x;
while(elem < size)
{
aCopy[elem] = a[elem];
ind[elem] = elem;
elem = elem + blockDim.x * gridDim.x;
}
__syncthreads();
float cacheFirst = 0;
float cacheSecond = 0;
float cacheThird = 0;
int cacheFirstInd;
int cacheSecondInd;
int cacheThirdInd ;
for (int j = 0; j < size/2+1; j++)
{
int i = (threadIdx.x + blockDim.x*blockIdx.x)*2;
while(i+1 < size)
{
cacheFirst = aCopy[i];
cacheSecond = aCopy[i+1];
cacheFirstInd = ind[i];
cacheSecondInd = ind[i+1];
if(cacheFirst < cacheSecond)
{
// Weight
float temp = cacheFirst;
aCopy[i] = cacheSecond;
aCopy[i+1] = temp;
// Indirection
int tempInd = cacheFirstInd;
ind[i] = cacheSecondInd;
ind[i+1] = tempInd;
}
i += (blockDim.x*gridDim.x)*2;
}
__syncthreads();
i = (threadIdx.x + blockDim.x*blockIdx.x)*2 +1;
while(i+1 < size)
{
cacheFirst = aCopy[i];
cacheSecond = aCopy[i+1];
cacheFirstInd = ind[i];
cacheSecondInd = ind[i+1];
if(cacheFirst < cacheSecond)
{
// Weight
float temp = cacheFirst;
aCopy[i] = cacheSecond;
aCopy[i+1] = temp;
// Indirection
int tempInd = cacheFirstInd;
ind[i] = cacheSecondInd;
ind[i+1] = tempInd;
}
i += (blockDim.x*gridDim.x)*2;
}
}
__syncthreads();
__shared__ int thatPoint;
__shared__ float cachePos[1024];
__shared__ float cacheNeg[1024];
__shared__ float sumNegShared;
sumNegShared = 0.0;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int cacheIndex = threadIdx.x;
float tempNeg = 0.0;
float tempPos = 0.0;
while(tid < size)
{
float aCopyTemp = aCopy[tid];
if(aCopyTemp < 0.0)
tempNeg += aCopyTemp;
//else
// tempPos += aCopyTemp;
tid += blockDim.x*gridDim.x;
}
//cachePos[cacheIndex] = tempPos;
cacheNeg[cacheIndex] = tempNeg;
__syncthreads();
int NewI = blockDim.x/2;
while(NewI!=0)
{
if(cacheIndex < NewI)
{
//cachePos[cacheIndex] += cachePos[cacheIndex+NewI];
cacheNeg[cacheIndex] += cacheNeg[cacheIndex+NewI];
}
__syncthreads();
NewI/=2;
}
if(cacheIndex == 0)
{
//*posSum = (float)cachePos[0];
*negSum = cacheNeg[0];
sumNegShared = cacheNeg[0];
}
__syncthreads();
// Find the first index at which the negative sum is cancelled out.
tid = blockDim.x * blockIdx.x + threadIdx.x;
thatPoint = size;
while(tid < size-1)
{
float firstFloat = aCopy[tid];
float secondFloat = aCopy[tid+1];
if(firstFloat > 0 && secondFloat < 0)
{
thatPoint = tid; // Only one thread can enter here
*cuttingIdx = tid;
}
tid += blockDim.x*gridDim.x;
}
__syncthreads();
//__shared__ float cachePos[1024];
//__shared__ float sumNegShared;
__shared__ bool breakFlux;
breakFlux = false;
sumNegShared = *negSum;
// Obtain the cutting index through an iterative reduction process.
int valuesToSum = *cuttingIdx;
int miniBlockSize = (valuesToSum+1)/blockDim.x+1;
int finBlock = valuesToSum - miniBlockSize*threadIdx.x;
int iniBlock = valuesToSum - miniBlockSize*(threadIdx.x+1);
float parSum = 0.0;
for(int indice = finBlock; indice > iniBlock && ! breakFlux; indice--)
{
if(indice < 0) continue;
parSum += a[ind[indice]];
// Perhaps this check should not be here
if(parSum > -sumNegShared && threadIdx.x == 0)
{
*cuttingIdx = indice;
breakFlux = true;
}
}
// If we are already done, return. This is not the usual case; it may be worth
// removing this to run faster in the majority of cases.
__syncthreads();
if(breakFlux) return;
// This may be redundant
if(threadIdx.x == 0) *posSum = parSum;
// Store the partial sums in the cache array.
// The array is ordered in reverse... that way the whole
// algorithm we already had working keeps functioning.
//int cacheIndex = threadIdx.x;
cachePos[cacheIndex] = parSum;
__shared__ float sumAccumulated;
__shared__ int idxAccumulated;
__shared__ bool refined;
__shared__ bool cutIdx;
idxAccumulated = 0;
sumAccumulated = 0.0;
refined = false;
cutIdx = false;
float tempSum = 0.0;
int restValuesToSum = valuesToSum;
while(!refined)
{
// cache initialization.
int threadReposition = threadIdx.x-idxAccumulated;
cachePos[threadIdx.x] = 0.0;
if(threadReposition >= 0)
{
cachePos[threadReposition] = parSum;
}
__syncthreads();
cutIdx = false;
int offset = 1;
int particion = offset*2;
while(!cutIdx && offset <= restValuesToSum)
{
int secondTempIdx = threadReposition+offset;
if(threadReposition >= 0 && threadReposition%particion == 0)
{
tempSum = cachePos[threadReposition] + cachePos[secondTempIdx];
if(threadReposition == 0 && tempSum + sumAccumulated > -sumNegShared)
{
cutIdx = true;
sumAccumulated += cachePos[threadReposition];
idxAccumulated += offset;
}
else
{
cachePos[threadReposition] = tempSum;
}
}
__syncthreads();
offset = particion;
particion = offset*2;
}
__syncthreads();
restValuesToSum = valuesToSum - idxAccumulated;
if(threadReposition == 0 && (sumAccumulated > -sumNegShared || restValuesToSum <= 0))
{
refined = true;
}
}
__syncthreads();
if(cacheIndex == 0)
{
*posSum = sumAccumulated;
*cuttingIdx = *cuttingIdx - idxAccumulated*miniBlockSize;
}
}
*/
__global__ void sort(float *a, float *aCopy, int* ind, float* negSum, int* lock, int size, int* cuttingIdx, float *posSum)
{
int elem = threadIdx.x + blockDim.x * blockIdx.x;
while(elem < size)
{
aCopy[elem] = a[elem];
ind[elem] = elem;
elem = elem + blockDim.x * gridDim.x;
}
__syncthreads();
float cacheFirst = 0;
float cacheSecond = 0;
float cacheThird = 0;
int cacheFirstInd;
int cacheSecondInd;
int cacheThirdInd ;
for (int j = 0; j < size/2+1 ; j++)
{
int i = (threadIdx.x + blockDim.x * blockIdx.x)*2;
while(i+1 < size)
{
cacheFirst = aCopy[i];
cacheSecond = aCopy[i+1];
cacheFirstInd = ind[i];
cacheSecondInd = ind[i+1];
if(cacheFirst < cacheSecond)
{
// Weight
float temp = cacheFirst;
aCopy[i] = cacheSecond;
aCopy[i+1] = temp;
// Indirection
int tempInd = cacheFirstInd;
ind[i] = cacheSecondInd;
ind[i+1] = tempInd;
}
i += (blockDim.x*gridDim.x)*2;
}
__syncthreads();
i = (threadIdx.x + blockDim.x*blockIdx.x)*2 +1;
while(i+1 < size)
{
cacheFirst = aCopy[i];
cacheSecond = aCopy[i+1];
cacheFirstInd = ind[i];
cacheSecondInd = ind[i+1];
if(cacheFirst < cacheSecond)
{
// Weight
float temp = cacheFirst;
aCopy[i] = cacheSecond;
aCopy[i+1] = temp;
// Indirection
int tempInd = cacheFirstInd;
ind[i] = cacheSecondInd;
ind[i+1] = tempInd;
}
i += (blockDim.x*gridDim.x)*2;
}
}
__syncthreads();
__shared__ int thatPoint;
__shared__ float cacheNeg[threadsPerOnlyOneBlock];
__shared__ float sumNegShared;
sumNegShared = 0.0;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int cacheIndex = threadIdx.x;
float tempNeg = 0.0;
float tempPos = 0.0;
while(tid < size)
{
float aCopyTemp = aCopy[tid];
if(aCopyTemp < 0.0)
tempNeg += aCopyTemp;
tid += blockDim.x*gridDim.x;
}
cacheNeg[cacheIndex] = tempNeg;
__syncthreads();
int NewI = blockDim.x/2;
while(NewI!=0)
{
if(cacheIndex < NewI)
{
cacheNeg[cacheIndex] += cacheNeg[cacheIndex+NewI];
}
__syncthreads();
NewI/=2;
}
if(cacheIndex == 0)
{
*negSum = cacheNeg[0];
sumNegShared = cacheNeg[0];
}
__syncthreads();
// Find the first index at which the negative sum is cancelled out.
tid = blockDim.x * blockIdx.x + threadIdx.x;
thatPoint = size;
while(tid < size-1)
{
float firstFloat = aCopy[tid];
float secondFloat = aCopy[tid+1];
if(firstFloat > 0 && secondFloat < 0)
{
thatPoint = tid; // Only one thread can enter here
*cuttingIdx = tid;
}
tid += blockDim.x*gridDim.x;
}
}
__global__ void getBalancedThreshold(float *a, int* ind, float* negSum, int size, int* cuttingIdx, float *posSum)
{
__shared__ float cachePos[threadsPerOnlyOneBlock];
__shared__ float sumNegShared;
__shared__ bool breakFlux;
breakFlux = false;
sumNegShared = *negSum;
// Obtain the cutting index through an iterative reduction process.
int valuesToSum = *cuttingIdx;
int miniBlockSize = (valuesToSum+1)/blockDim.x+1;
int finBlock = valuesToSum - miniBlockSize*threadIdx.x;
int iniBlock = valuesToSum - miniBlockSize*(threadIdx.x+1);
float parSum = 0.0;
for(int indice = finBlock; indice > iniBlock && ! breakFlux; indice--)
{
if(indice < 0) continue;
parSum += a[ind[indice]];
// Perhaps this check should not be here
if(parSum > -sumNegShared && threadIdx.x == 0)
{
*cuttingIdx = indice;
breakFlux = true;
}
}
// If we are already done, return. This is not the usual case; it may be worth
// removing this to run faster in the majority of cases.
__syncthreads();
if(breakFlux) return;
// This may be redundant
if(threadIdx.x == 0) *posSum = parSum;
// Store the partial sums in the cache array.
// The array is ordered in reverse... that way the whole
// algorithm we already had working keeps functioning.
int cacheIndex = threadIdx.x;
cachePos[cacheIndex] = parSum;
__shared__ float sumAccumulated;
__shared__ int idxAccumulated;
__shared__ bool refined;
__shared__ bool cutIdx;
idxAccumulated = 0;
sumAccumulated = 0.0;
refined = false;
cutIdx = false;
float tempSum = 0.0;
int restValuesToSum = valuesToSum;
while(!refined)
{
// cache initialization.
int threadReposition = threadIdx.x-idxAccumulated;
cachePos[threadIdx.x] = 0.0;
if(threadReposition >= 0)
{
cachePos[threadReposition] = parSum;
}
__syncthreads();
cutIdx = false;
int offset = 1;
int particion = offset*2;
while(!cutIdx && offset <= restValuesToSum)
{
int secondTempIdx = threadReposition+offset;
if(threadReposition >= 0 && threadReposition%particion == 0)
{
tempSum = cachePos[threadReposition] + cachePos[secondTempIdx];
if(threadReposition == 0 && tempSum + sumAccumulated > -sumNegShared)
{
cutIdx = true;
sumAccumulated += cachePos[threadReposition];
idxAccumulated += offset;
}
else
{
cachePos[threadReposition] = tempSum;
}
}
__syncthreads();
offset = particion;
particion = offset*2;
}
__syncthreads();
restValuesToSum = valuesToSum - idxAccumulated;
if(threadReposition == 0 && (sumAccumulated > -sumNegShared || restValuesToSum <= 0))
{
refined = true;
}
}
__syncthreads();
if(cacheIndex == 0)
{
*posSum = sumAccumulated;
*cuttingIdx = *cuttingIdx - idxAccumulated*miniBlockSize;
}
}
__global__ void swapOnKernel(int *a, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x * 2;
int cacheFirst;
int cacheSecond;
int cacheThird;
for (int j = 0; j < size/2 + 1; j++)
{
i = blockDim.x * blockIdx.x + threadIdx.x * 2;
while(i < size)
{
if(i+1 < size) {
cacheFirst = a[i];
cacheSecond = a[i+1];
if(cacheFirst > cacheSecond) {
int temp = cacheFirst;
a[i] = cacheSecond;
cacheSecond = a[i+1] = temp;
}
}
__syncthreads();
if(i+2 < size) {
cacheThird = a[i+2];
if(cacheSecond > cacheThird) {
int temp = cacheSecond;
a[i+1] = cacheThird;
a[i+2] = temp;
}
}
i += (blockDim.x * gridDim.x)*2;
}
__syncthreads();
}
}
__host__ void bubbleSort(float arr[], int n) {
bool swapped = true;
int j = 0;
float tmp;
while (swapped) {
swapped = false;
j++;
for (int i = 0; i < n - j; i++) {
if (arr[i] > arr[i + 1]) {
tmp = arr[i];
arr[i] = arr[i + 1];
arr[i + 1] = tmp;
swapped = true;
}
}
}
}
int sortAndGetThreashold(float* weights, int* indirection, int* threshold, int size, bool verbose)
{
// Create timer
cudaEvent_t start, stop;
float time = 0.0;
if(verbose)
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
}
float *dev_aCopy = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for one vectors.
cudaStatus = cudaMalloc((void**)&dev_aCopy, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
int maxThreads = threadsPerBlock;
int computingThreads = size/2;
if(computingThreads > maxThreads)
computingThreads = maxThreads;
float* negSum;
float* posSum;
int* lock;
cudaStatus = cudaMalloc((void**)&negSum, sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc negSum failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&posSum, sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc posSum failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&lock, sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc lock failed!");
goto Error;
}
int hostLock = 0;
cudaStatus = cudaMemcpy(lock, &hostLock, sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy hostLock failed!");
goto Error;
}
// Sort the weight array and encode the resulting order in the indirection array
sort<<<1, computingThreads>>>(weights, dev_aCopy, indirection, negSum, lock, size, threshold , posSum);
if(verbose)
{
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "sort fallo: %s\n", cudaGetErrorString(cudaStatus));
fflush(0);
return cudaStatus;
}
}
// Obtain the cutting index
getBalancedThreshold<<<1, computingThreads>>>(weights, indirection, negSum, size, threshold, posSum);
if(verbose)
{
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
printf("cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
printf("getBalancedThreshold failed: %s\n", cudaGetErrorString(cudaStatus));fflush(0);
goto Error;
}
else
printf("ordenacion correcta!\n");
float *a = (float*)malloc(size * sizeof(float));
float *a2 = (float*)malloc(size * sizeof(float));
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(a, weights, size * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy aCopy failed!");
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(a2, dev_aCopy, size * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy aCopy failed!");
goto Error;
}
int *ind = (int*)malloc(size * sizeof(int));
cudaStatus = cudaMemcpy(ind, indirection, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy ind failed!");
goto Error;
}
int cuttingIdxHost2 = 0;
cudaStatus = cudaMemcpy(&cuttingIdxHost2, threshold, sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy cuttingIdxHost failed!");
goto Error;
}
printf("\n\nCUDA RESULTS...\n");
printf("Hemos cortado en :%d\n", cuttingIdxHost2);
float sumNegHost = 0;
float sumPosHost = 0;
cudaStatus = cudaMemcpy(&sumNegHost, negSum, sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy ind failed!");
goto Error;
}
cudaStatus = cudaMemcpy(&sumPosHost, posSum, sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy ind failed!");
goto Error;
}
printf("Sumatorio total negativo: %f\n sumatorio de corte: %f\n", sumNegHost, sumPosHost);
double sumaTotal = 0.0;
double sumaPos = 0.0;
double sumaNeg = 0.0;
double sumaTh = 0.0;
double sumaPreTh = 0.0;
int threshold = -1;
double sumaAcumuladaTh = 0.0;
double ordenado = 99999999;
double restoPos = 0.0;
double relacionSumatorioTh = 0.0;
int change = 0;
printf("\n\nCPU RESULTS...\n");
for(int i = 0; i < size; i++)
{
float value = a[ind[i]];
if(a2[i] != value)
{
printf("PROBLEMA!!! -> La indireccion no esta bien,\n");
printf(" o hay una incongruencia con los vectores ordenados: %d -> [%f][%f]\n", i, a2[i], value);
}
if(value < 0 && ordenado >= 0)
printf("pos->neg: %d\n", i);
if(value > ordenado)
printf("No esta ordenado!: %d -> [%f][%f]\n", i, a2[i], value);
ordenado = value;
sumaTotal += value;
if(value >= 0)
{
sumaPos += value;
}
else sumaNeg += value;
if(i >= cuttingIdxHost2 && value >= 0)
relacionSumatorioTh += value;
else if(value >= 0)
sumaTh += value;
}
for(int i = size-1; i>=0; i--)
{
float value = a[ind[i]];
if(value >= 0)
{
if(threshold < 0)
{
sumaAcumuladaTh += value;
if(sumaAcumuladaTh > -sumaNeg)
threshold = i;
}
else
{
restoPos += value;
}
}
}
printf("Suma total: %f\n", sumaTotal);
printf("sumaNeg total: %f\n", sumaNeg);
printf("resto de posSum: %f\n", restoPos);
printf("sumaAcumuladaTh: %f\n", sumaAcumuladaTh);
printf("relacion sumaAcumuladaTh con cuda: %f\n", relacionSumatorioTh);
printf("threshold: %d\n", threshold);
}
Error:
// Only temporary buffers are freed here...
cudaFree(dev_aCopy);
cudaFree(negSum);
cudaFree(posSum);
return cudaStatus;
}
int thrust_sort(void)
{
// H has storage for 4 integers
thrust::host_vector<int> H(4);
// initialize individual elements
H[0] = 14;
H[1] = 20;
H[2] = 38;
H[3] = 46;
// H.size() returns the size of vector H
std::cout << "H has size " << H.size() << std::endl;
// print contents of H
for(int i = 0; i < H.size(); i++)
std::cout << "H[" << i << "] = " << H[i] << std::endl;
// resize H
H.resize(2);
std::cout << "H now has size " << H.size() << std::endl;
// Copy host_vector H to device_vector D
thrust::device_vector<int> D = H;
// elements of D can be modified
D[0] = 99;
D[1] = 88;
// print contents of D
for(int i = 0; i < D.size(); i++)
std::cout << "D[" << i << "] = " << D[i] << std::endl;
// H and D are automatically deleted when the function returns
return 0;
}
int doSorting(int number)
{
srand((unsigned)time(0));
int arraySize;
if(number > 10) arraySize = number;
else arraySize = 10;
// Create vector and fill it with values
thrust::host_vector<float> Ahost(arraySize);
thrust::host_vector<int> indhost(arraySize);
std::vector<float> a(arraySize);
for (int i = 0; i < arraySize; i++)
{
a[i] = 0.6-((float)rand()/RAND_MAX );
Ahost[i] = a[i];
indhost[i] = i;
}
std::vector<float> b(a);
thrust::device_vector<float> A = Ahost;
thrust::device_vector<int> ind = indhost;
int64 stlSortStart1 = GetTimeMs64();
thrust::sort_by_key(A.begin(), A.end(), ind.begin(), thrust::greater<float>());
int64 stlSortFinish1 = GetTimeMs64();
thrust::copy(ind.begin(), ind.end(), indhost.begin());
float time = 0.0;
// Swap elements in parallel.
//cudaError_t cudaStatus = sortWithCuda(&a[0], &ind[0], a.size(), &time);
//if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "sortWithCuda failed!");
// return 1;
//}
int64 stlSortStart = GetTimeMs64();
bubbleSort(&b[0], b.size());
int64 stlSortFinish = GetTimeMs64();
FILE* fout;
fout = fopen("C:\\Users\\chus\\Documents\\dev\\Data\\models\\vel.txt", "a");
fprintf (fout, " %d, %d, ", arraySize, stlSortFinish1 - stlSortStart1);
fprintf (fout, "%d, ", (stlSortFinish - stlSortStart));
bool sortingSuccessful = true;
for (int i = 0; i < Ahost.size()-1 /*&& sortingSuccessful*/; i++)
{
//printf("Valores: posicion %d---> %d %f %f\n ", i, indhost[i], Ahost[indhost[i]], b[Ahost.size()-1-i]);
if (Ahost[indhost[i]] < Ahost[indhost[i+1]])
{
sortingSuccessful = false;
printf("esta desordenado: posicion %d---> %f %f\n ", i, Ahost[indhost[i]], Ahost[indhost[i+1]]);
}
if(Ahost[indhost[i]] != b[Ahost.size()-1-i])
{
sortingSuccessful = false;
printf("No es igual: posicion %d---> %f %f\n ", i, Ahost[indhost[i]], b[Ahost.size()-1-i]);
}
}
if(!sortingSuccessful) {
printf("Sorting failed.\n");
}
fprintf(fout, " %f\n", ((double)(stlSortFinish - stlSortStart))/(double)(stlSortFinish1 - stlSortStart1));
fclose(fout);
Ahost.clear();
A.clear();
Ahost.shrink_to_fit();
A.shrink_to_fit();
ind.clear();
ind.shrink_to_fit();
indhost.clear();
indhost.shrink_to_fit();
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
cudaError_t cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
//getchar();
return 0;
}
// Helper function for using CUDA to sort vectors in parallel.
__host__ cudaError_t sortWithCuda(float *a, int *ind, size_t size, float* time)
{
float *dev_a = 0;
float *dev_aCopy = 0;
int *dev_ind = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for one vectors.
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Allocate GPU buffers for one vectors.
cudaStatus = cudaMalloc((void**)&dev_aCopy, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_ind, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
int maxThreads = threadsPerOnlyOneBlock;
int computingThreads = size/2;
if(computingThreads > maxThreads)
computingThreads = maxThreads;
int* cutting_idx;
cudaStatus = cudaMalloc((void**)&cutting_idx, sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc cutting_idx failed!");
goto Error;
}
float* negSum;
cudaStatus = cudaMalloc((void**)&negSum, sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc negSum failed!");
goto Error;
}
float* posSum;
cudaStatus = cudaMalloc((void**)&posSum, sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc posSum failed!");
goto Error;
}
int* lock;
cudaStatus = cudaMalloc((void**)&lock, sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc lock failed!");
goto Error;
}
int hostLock = 0;
cudaStatus = cudaMemcpy(lock, &hostLock, sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy hostLock failed!");
goto Error;
}
// Create timer
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
sort<<<1, computingThreads>>>(dev_a, dev_aCopy, dev_ind, negSum, lock, size, cutting_idx , posSum);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "sort fallo: %s\n", cudaGetErrorString(cudaStatus));
fflush(0);
return cudaStatus;
}
int cuttingIdxHost = 0;
cudaStatus = cudaMemcpy(&cuttingIdxHost, cutting_idx, sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy cuttingIdxHost failed!");
goto Error;
}
getBalancedThreshold<<<1, computingThreads>>>(dev_a, dev_ind, negSum, size, cutting_idx, posSum);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(time, start, stop);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
printf("cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
printf("getBalancedThreshold failed: %s\n", cudaGetErrorString(cudaStatus));fflush(0);
goto Error;
}
else
printf("getBalancedThreshold correcta!\n");
float *a2 = (float*)malloc(size * sizeof(float));
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(a, dev_a, size * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy aCopy failed!");
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(a2, dev_aCopy, size * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy aCopy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(ind, dev_ind, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy ind failed!");
goto Error;
}
int cuttingIdxHost2 = 0;
cudaStatus = cudaMemcpy(&cuttingIdxHost2, cutting_idx, sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy cuttingIdxHost failed!");
goto Error;
}
printf("\n\nCUDA RESULTS...\n");
printf("pos->neg :%d\n", cuttingIdxHost);
printf("Hemos cortado en :%d\n", cuttingIdxHost2);
float sumNegHost = 0;
float sumPosHost = 0;
cudaStatus = cudaMemcpy(&sumNegHost, negSum, sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy ind failed!");
goto Error;
}
cudaStatus = cudaMemcpy(&sumPosHost, posSum, sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy ind failed!");
goto Error;
}
printf("Sumatorio total negativo: %f\n sumatorio de corte: %f\n", sumNegHost, sumPosHost);
double sumaTotal = 0.0;
double sumaPos = 0.0;
double sumaNeg = 0.0;
double sumaTh = 0.0;
double sumaPreTh = 0.0;
int threshold = -1;
double sumaAcumuladaTh = 0.0;
double ordenado = 99999999;
double restoPos = 0.0;
double relacionSumatorioTh = 0.0;
int change = 0;
printf("\n\nCPU RESULTS...\n");
for(int i = 0; i < size; i++)
{
float value = a[ind[i]];
if(a2[i] != value)
printf("PROBLEMA!!! -> La indireccion no esta bien o algo: %d -> [%f][%f]\n", i, a2[i], value);
if(value < 0 && ordenado >= 0)
printf("pos->neg: %d\n", i);
if(value > ordenado)
printf("No esta ordenado!: %d -> [%f][%f]\n", i, a2[i], value);
ordenado = value;
sumaTotal += value;
if(value >= 0)
{
sumaPos += value;
}
else sumaNeg += value;
if(i >= cuttingIdxHost2 && value >= 0)
relacionSumatorioTh += value;
else if(value >= 0)
sumaTh += value;
}
for(int i = size-1; i>=0; i--)
{
float value = a[ind[i]];
if(value >= 0)
{
if(threshold < 0)
{
sumaAcumuladaTh += value;
if(sumaAcumuladaTh > -sumaNeg)
threshold = i;
}
else
{
restoPos += value;
}
}
}
printf("Suma total: %f\n", sumaTotal);
printf("sumaNeg total: %f\n", sumaNeg);
printf("resto de posSum: %f\n", restoPos);
printf("sumaAcumuladaTh: %f\n", sumaAcumuladaTh);
printf("relacion sumaAcumuladaTh con cuda: %f\n", relacionSumatorioTh);
printf("threshold: %d\n", threshold);
Error:
cudaFree(dev_a);
cudaFree(dev_aCopy);
cudaFree(dev_ind);
cudaFree(negSum);
cudaFree(posSum);
cudaFree(cutting_idx);
return cudaStatus;
}
|
cd4c90c973f07fee9eb53b33ff40014e21f7ce9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "CudaKernels.h"
#include <iostream>
#include <hip/hip_vector_types.h>
#include <vector_functions.h>
#include <math_functions.h>
//#include <cutil_math.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#define N 10000
typedef struct {
double3 p;
double r;
} SSphere;
typedef struct {
double3 o;
double3 d;
} SRay;
typedef struct {
double tn;
double tf;
double l;
} SInInfo;
__device__ double3 minus(const double3& u, const double3& v) {
return make_double3(u.x - v.x, u.y - v.y, u.z - v.z);
}
__device__ double dot(const double3& u, const double3& v) {
return u.x*v.x + u.y*v.y + u.z*v.z;
}
__device__ int RaySphereIntersection(const SRay &ray, const SSphere &sphere, SInInfo &t)
{
double b, c, d;
double3 sr = minus(ray.o, sphere.p);
b = dot(sr,ray.d);
c = dot(sr,sr) - (sphere.r*sphere.r);
d = b*b - c;
if (d > 0)
{
double e = sqrt(d);
t.tn = -b-e;
t.tf = -b+e;
return 1;
}
return 0;
}
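// Note: the quadratic above omits the usual a = dot(d, d) coefficient, so this
// test implicitly assumes ray.d is normalized. On a hit, t.tn and t.tf hold the
// near/far parametric distances along the ray; t.tn can be negative when the
// ray origin lies inside the sphere (the field t.l is left untouched here).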
SSphere* dev_spheres;
__global__ void add(int *a, int *b, int *c) {
int tID = blockIdx.x;
if (tID < N)
c[tID] = a[tID] + b[tID];
}
__global__ void hit(SSphere *s, int* r) {
int tID = blockIdx.x;
if(tID < 2) {
if(s[tID].r == 2.0)
r[tID] = 1;
else
r[tID] = 0;
}
}
void loadUpScene() {
SSphere spheres[2];
spheres[0].p = make_double3(0.0, 0.0, 0.0);
spheres[0].r = 2.0;
spheres[1].p = make_double3(0.0, 0.0, 0.0);
spheres[1].r = 2.0;
int r[2];
int* dev_r;
int num = 2;
hipMalloc((void **) &dev_spheres, num * sizeof(SSphere));
hipMemcpy(dev_spheres, spheres, 2*sizeof(SSphere), hipMemcpyHostToDevice);
hipMalloc((void **) &dev_r, 2 * sizeof(int));
hipLaunchKernelGGL(( hit), dim3(N), dim3(1), 0, 0, dev_spheres, dev_r);
hipMemcpy(r, dev_r, 2*sizeof(int), hipMemcpyDeviceToHost);
std::cout<<r[0]<<" -- "<<r[1]<<std::endl;
}
void someOperation() {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
hipMalloc((void **) &dev_a, N*sizeof(int));
hipMalloc((void **) &dev_b, N*sizeof(int));
hipMalloc((void **) &dev_c, N*sizeof(int));
// Fill Arrays
for (int i = 0; i < N; i++) {
a[i] = i,
b[i] = 1;
}
hipMemcpy(dev_a, a, N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, N*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, N*sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
std::cout<<a[i]<<"+"<< b[i]<<"="<<c[i]<<std::endl;
// initialize all ten integers of a device_vector to 1
thrust::device_vector<int> D(10, 1);
// set the first seven elements of a vector to 9
thrust::fill(D.begin(), D.begin() + 7, 9);
// initialize a host_vector with the first five elements of D
thrust::host_vector<int> H(D.begin(), D.begin() + 5);
// set the elements of H to 0, 1, 2, 3, ...
thrust::sequence(H.begin(), H.end());
// copy all of H back to the beginning of
thrust::copy(H.begin(), H.end(), D.begin());
// print D
for(int i = 0; i < D.size(); i++)
std::cout << "D[" << i << "] = " << D[i] << std::endl;
}
/*
int main() {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
hipMalloc((void **) &dev_a, N*sizeof(int));
hipMalloc((void **) &dev_b, N*sizeof(int));
hipMalloc((void **) &dev_c, N*sizeof(int));
// Fill Arrays
for (int i = 0; i < N; i++) {
a[i] = i,
b[i] = 1;
}
hipMemcpy(dev_a, a, N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, N*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, N*sizeof(int), hipMemcpyDeviceToHost);
return 0;
}*/
| cd4c90c973f07fee9eb53b33ff40014e21f7ce9b.cu | //#include "CudaKernels.h"
#include <iostream>
#include <vector_types.h>
#include <vector_functions.h>
#include <math_functions.h>
//#include <cutil_math.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#define N 10000
typedef struct {
double3 p;
double r;
} SSphere;
typedef struct {
double3 o;
double3 d;
} SRay;
typedef struct {
double tn;
double tf;
double l;
} SInInfo;
__device__ double3 minus(const double3& u, const double3& v) {
return make_double3(u.x - v.x, u.y - v.y, u.z - v.z);
}
__device__ double dot(const double3& u, const double3& v) {
return u.x*v.x + u.y*v.y + u.z*v.z;
}
__device__ int RaySphereIntersection(const SRay &ray, const SSphere &sphere, SInInfo &t)
{
double b, c, d;
double3 sr = minus(ray.o, sphere.p);
b = dot(sr,ray.d);
c = dot(sr,sr) - (sphere.r*sphere.r);
d = b*b - c;
if (d > 0)
{
double e = sqrt(d);
t.tn = -b-e;
t.tf = -b+e;
return 1;
}
return 0;
}
SSphere* dev_spheres;
__global__ void add(int *a, int *b, int *c) {
int tID = blockIdx.x;
if (tID < N)
c[tID] = a[tID] + b[tID];
}
__global__ void hit(SSphere *s, int* r) {
int tID = blockIdx.x;
if(tID < 2) {
if(s[tID].r == 2.0)
r[tID] = 1;
else
r[tID] = 0;
}
}
void loadUpScene() {
SSphere spheres[2];
spheres[0].p = make_double3(0.0, 0.0, 0.0);
spheres[0].r = 2.0;
spheres[1].p = make_double3(0.0, 0.0, 0.0);
spheres[1].r = 2.0;
int r[2];
int* dev_r;
int num = 2;
cudaMalloc((void **) &dev_spheres, num * sizeof(SSphere));
cudaMemcpy(dev_spheres, spheres, 2*sizeof(SSphere), cudaMemcpyHostToDevice);
cudaMalloc((void **) &dev_r, 2 * sizeof(int));
hit<<<N, 1>>>(dev_spheres, dev_r);
cudaMemcpy(r, dev_r, 2*sizeof(int), cudaMemcpyDeviceToHost);
std::cout<<r[0]<<" -- "<<r[1]<<std::endl;
}
void someOperation() {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
cudaMalloc((void **) &dev_a, N*sizeof(int));
cudaMalloc((void **) &dev_b, N*sizeof(int));
cudaMalloc((void **) &dev_c, N*sizeof(int));
// Fill Arrays
for (int i = 0; i < N; i++) {
a[i] = i,
b[i] = 1;
}
cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
add<<<N,1>>>(dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
std::cout<<a[i]<<"+"<< b[i]<<"="<<c[i]<<std::endl;
// initialize all ten integers of a device_vector to 1
thrust::device_vector<int> D(10, 1);
// set the first seven elements of a vector to 9
thrust::fill(D.begin(), D.begin() + 7, 9);
// initialize a host_vector with the first five elements of D
thrust::host_vector<int> H(D.begin(), D.begin() + 5);
// set the elements of H to 0, 1, 2, 3, ...
thrust::sequence(H.begin(), H.end());
// copy all of H back to the beginning of
thrust::copy(H.begin(), H.end(), D.begin());
// print D
for(int i = 0; i < D.size(); i++)
std::cout << "D[" << i << "] = " << D[i] << std::endl;
}
/*
int main() {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
cudaMalloc((void **) &dev_a, N*sizeof(int));
cudaMalloc((void **) &dev_b, N*sizeof(int));
cudaMalloc((void **) &dev_c, N*sizeof(int));
// Fill Arrays
for (int i = 0; i < N; i++) {
a[i] = i,
b[i] = 1;
}
cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
add<<<N,1>>>(dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
return 0;
}*/
|
258b4f527d99dc43849b65f1c86c0f096f4f096c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "jacobi_iteration.h"
// Write the GPU kernel to solve the Jacobi iterations
__global__ void jacobi_iteration_kernel ()
{
}
| 258b4f527d99dc43849b65f1c86c0f096f4f096c.cu | #include "jacobi_iteration.h"
// Write the GPU kernel to solve the Jacobi iterations
__global__ void jacobi_iteration_kernel ()
{
}
|
3388f74e731aef03d248720eccfce7582a5092eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "core/common.cuh"
#include <hip/hip_fp16.h>
#include <thrust/fill.h>
#include <hip/hip_runtime.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
__global__
void gen_mask_offset(const int64_t *every_seq_len, const int current_batch_num, int64_t *dest) {
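// One thread per sequence in the batch: each thread sums the lengths of the preceding sequences and writes that offset into dest for every position of its own sequence.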
const int tid = threadIdx.x;
extern __shared__ __align__(sizeof(int)) int s_every_len[];
s_every_len[tid] = every_seq_len[tid];
int sum_of_previous_seq = 0;
int current_len = s_every_len[tid];
for (int i = 0; i < tid; i++) {
sum_of_previous_seq += s_every_len[i];
}
for (int i = 0; i < current_len; i++) {
dest[sum_of_previous_seq + i] = sum_of_previous_seq;
}
}
void fill_kernel(int64_t* data_ptr, int64_t size, int64_t val) {
thrust::device_ptr<int64_t> data_dev_ptr = thrust::device_pointer_cast(data_ptr);
thrust::fill(thrust::device, data_dev_ptr, data_dev_ptr + size, val);
}
int reduce_kernel(int64_t* data_ptr, int64_t size) {
thrust::device_ptr<int64_t> data_dev_ptr = thrust::device_pointer_cast(data_ptr);
return thrust::reduce(thrust::device, data_dev_ptr, data_dev_ptr + size, 0);
}
void gen_mask_offset_kernel(const int64_t* every_seq_len_inbatch, const int current_batch_num, int64_t* dest, const hipStream_t stream){
dim3 grid(1);
assert(current_batch_num <= 1024);
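// single block, one thread per sequence, hence the 1024-thread cap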
dim3 block(current_batch_num);
int shared_mem_size = current_batch_num * sizeof(int);
hipLaunchKernelGGL(( gen_mask_offset), dim3(grid), dim3(block), shared_mem_size, stream, every_seq_len_inbatch, current_batch_num, dest);
}
__global__
void compute_len_inbatch(int64_t* data_ptr, int batch_size, int seq_len, int64_t* dest){
int64_t *data = data_ptr + blockIdx.x * seq_len;
thrust::device_ptr<int64_t> data_dev_ptr = thrust::device_pointer_cast(data);
dest[blockIdx.x] = thrust::reduce(thrust::device, data, data + seq_len, 0);
}
void compute_len_inbatch_kernel(int64_t *data_ptr, int batch_size,int seq_len, int64_t *dest, const hipStream_t stream){
dim3 grid(batch_size);
dim3 block(1);
hipLaunchKernelGGL(( compute_len_inbatch), dim3(grid), dim3(block),0, stream, data_ptr, batch_size,seq_len,dest);
}
void gen_mask_offset_kernel(const int64_t *every_seq_len_inbatch,
const int current_batch_num, int64_t *dest, const hipStream_t stream);
void fill_kernel(int64_t *data_ptr, int64_t size, int64_t val);
int reduce_kernel(int64_t *data_ptr, int64_t size);
void compute_len_inbatch_kernel(int64_t *data_ptr, int batch_size, int seq_len, int64_t *dest, const hipStream_t stream);
| 3388f74e731aef03d248720eccfce7582a5092eb.cu | #include "core/common.cuh"
#include <cuda_fp16.h>
#include <thrust/fill.h>
#include <cuda_runtime.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
__global__
void gen_mask_offset(const int64_t *every_seq_len, const int current_batch_num, int64_t *dest) {
const int tid = threadIdx.x;
extern __shared__ __align__(sizeof(int)) int s_every_len[];
s_every_len[tid] = every_seq_len[tid];
int sum_of_previous_seq = 0;
int current_len = s_every_len[tid];
for (int i = 0; i < tid; i++) {
sum_of_previous_seq += s_every_len[i];
}
for (int i = 0; i < current_len; i++) {
dest[sum_of_previous_seq + i] = sum_of_previous_seq;
}
}
void fill_kernel(int64_t* data_ptr, int64_t size, int64_t val) {
thrust::device_ptr<int64_t> data_dev_ptr = thrust::device_pointer_cast(data_ptr);
thrust::fill(thrust::device, data_dev_ptr, data_dev_ptr + size, val);
}
int reduce_kernel(int64_t* data_ptr, int64_t size) {
thrust::device_ptr<int64_t> data_dev_ptr = thrust::device_pointer_cast(data_ptr);
return thrust::reduce(thrust::device, data_dev_ptr, data_dev_ptr + size, 0);
}
void gen_mask_offset_kernel(const int64_t* every_seq_len_inbatch, const int current_batch_num, int64_t* dest, const cudaStream_t stream){
dim3 grid(1);
assert(current_batch_num <= 1024);
dim3 block(current_batch_num);
int shared_mem_size = current_batch_num * sizeof(int);
gen_mask_offset<<<grid, block, shared_mem_size, stream>>>(every_seq_len_inbatch, current_batch_num, dest);
}
__global__
void compute_len_inbatch(int64_t* data_ptr, int batch_size, int seq_len, int64_t* dest){
int64_t *data = data_ptr + blockIdx.x * seq_len;
thrust::device_ptr<int64_t> data_dev_ptr = thrust::device_pointer_cast(data);
dest[blockIdx.x] = thrust::reduce(thrust::device, data, data + seq_len, 0);
}
void compute_len_inbatch_kernel(int64_t *data_ptr, int batch_size,int seq_len, int64_t *dest, const cudaStream_t stream){
dim3 grid(batch_size);
dim3 block(1);
compute_len_inbatch<<<grid, block,0, stream>>>(data_ptr, batch_size,seq_len,dest);
}
void gen_mask_offset_kernel(const int64_t *every_seq_len_inbatch,
const int current_batch_num, int64_t *dest, const cudaStream_t stream);
void fill_kernel(int64_t *data_ptr, int64_t size, int64_t val);
int reduce_kernel(int64_t *data_ptr, int64_t size);
void compute_len_inbatch_kernel(int64_t *data_ptr, int batch_size, int seq_len, int64_t *dest, const cudaStream_t stream);
|
03d2462b75dec98217f84ad7536c9e4fe543c631.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHGeneral.h"
#include "THHTensorMath.h"
#include "THHTensorCopy.h"
#include <algorithm>
#ifdef USE_MAGMA
#include <magma.h>
#else
#include "THHBlas.h"
#endif
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
#define NoMagma(name) "No CUDA implementation of '" #name "'. Install MAGMA and rebuild cutorch (http://icl.cs.utk.edu/magma/)"
void THCMagma_init(THCState *state)
{
#ifdef USE_MAGMA
magma_init();
#endif
}
#ifdef USE_MAGMA
static inline float* th_magma_smalloc_pinned(size_t n)
{
float* ptr;
if (MAGMA_SUCCESS != magma_smalloc_pinned(&ptr, n))
THError("$ Torch: not enough memory: you tried to allocate %dGB. Buy new RAM!", n/268435456);
return ptr;
}
static inline int* th_magma_imalloc_pinned(size_t n)
{
int* ptr;
if (MAGMA_SUCCESS != magma_imalloc_pinned(&ptr, n))
THError("$ Torch: not enough memory: you tried to allocate %dGB. Buy new RAM!", n/268435456);
return ptr;
}
static void THCudaTensor_copyArray1d(THCState *state, THCudaTensor *self, float *src, int k)
{
long size[1] = { k };
long stride[1] = { 1 };
THCudaTensor_rawResize(state, self, 1, size, stride);
size_t len = k * sizeof(float);
THCudaCheck(hipMemcpy(self->storage->data + self->storageOffset, src, len, hipMemcpyHostToDevice));
}
static void THCudaTensor_copyArray2d(THCState *state, THCudaTensor *self, float *src, int m, int n)
{
long size[2] = { m, n };
long stride[2] = { 1, m };
THCudaTensor_rawResize(state, self, 2, size, stride);
size_t len = m * n * sizeof(float);
THCudaCheck(hipMemcpy(self->storage->data + self->storageOffset, src, len, hipMemcpyHostToDevice));
}
static void THCudaTensor_copyTensor2d(THCState *state, float *dst, THCudaTensor *self)
{
THAssert(self->nDimension == 2);
size_t len = THCudaTensor_nElement(state, self)*sizeof(float);
THCudaTensor *temp = THCudaTensor_newTranspose(state, self, 0, 1);
THCudaTensor *selfc = THCudaTensor_newContiguous(state, temp);
THCudaCheck(hipMemcpy(dst, selfc->storage->data + selfc->storageOffset, len, hipMemcpyDeviceToHost));
THCudaTensor_free(state, temp);
THCudaTensor_free(state, selfc);
}
#endif
static THCudaTensor* THCudaTensor_newColumnMajor(THCState *state, THCudaTensor *self, THCudaTensor *src)
{
THAssert(src->nDimension == 2);
if (self == src && self->stride[0] == 1 && self->stride[1] == self->size[0])
{
THCudaTensor_retain(state, self);
return self;
}
if (self == src)
self = THCudaTensor_new(state);
else
THCudaTensor_retain(state, self);
long size[2] = { src->size[0], src->size[1] };
long stride[2] = { 1, src->size[0] };
THCudaTensor_rawResize(state, self, 2, size, stride);
THCudaTensor_copy(state, self, src);
return self;
}
void THCudaTensor_gesv(THCState *state, THCudaTensor *rb_, THCudaTensor *ra_, THCudaTensor *b_, THCudaTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional");
THArgCheck(b_->nDimension == 2, 2, "b should be 2 dimensional");
THArgCheck(a_->size[0] == a_->size[1], 1, "A should be square");
THArgCheck(b_->size[0] == a_->size[0], 2, "A,b size incompatible");
int n = a_->size[0];
int nrhs = b_->size[1];
THCudaTensor *a = THCudaTensor_newColumnMajor(state, ra_, a_);
THCudaTensor *b = THCudaTensor_newColumnMajor(state, rb_, b_);
float *a_data = THCudaTensor_data(state, a);
float *b_data = THCudaTensor_data(state, b);
int *ipiv = th_magma_imalloc_pinned(n);
int info;
magma_sgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info);
if (info < 0)
THError("MAGMA gesv : Argument %d : illegal value", -info);
else if (info > 0)
THError("MAGMA gesv : U(%d,%d) is zero, singular U.", info, info);
magma_free_pinned(ipiv);
THCudaTensor_freeCopyTo(state, a, ra_);
THCudaTensor_freeCopyTo(state, b, rb_);
#else
THError(NoMagma(gesv));
#endif
}
void THCudaTensor_gels(THCState *state, THCudaTensor *rb_, THCudaTensor *ra_, THCudaTensor *b_, THCudaTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional");
THArgCheck(b_->nDimension == 2, 1, "b should be 2 dimensional");
THArgCheck(a_->size[0] == b_->size[0], 2, "size incompatible A,b");
THArgCheck(a_->size[0] >= a_->size[1], 2, "A should have m >= n");
THCudaTensor *a = THCudaTensor_newColumnMajor(state, ra_, a_);
THCudaTensor *b = THCudaTensor_newColumnMajor(state, rb_, b_);
float *a_data = THCudaTensor_data(state, a);
float *b_data = THCudaTensor_data(state, b);
int m = a->size[0];
int n = a->size[1];
int nrhs = b->size[1];
float wkopt;
int info;
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
float *hwork = th_magma_smalloc_pinned((size_t)wkopt);
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
magma_free_pinned(hwork);
if (info != 0)
THError("MAGMA gels : Argument %d : illegal value", -info);
THCudaTensor_freeCopyTo(state, a, ra_);
THCudaTensor_freeCopyTo(state, b, rb_);
#else
THError(NoMagma(gels));
#endif
}
void THCudaTensor_syev(THCState *state, THCudaTensor *re_, THCudaTensor *rv_, THCudaTensor *a, const char *jobzs, const char *uplos)
{
#ifdef USE_MAGMA
int n = a->size[0];
int lda = n;
magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower;
magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec;
THCudaTensor *input = THCudaTensor_newColumnMajor(state, rv_, a);
float *input_data = THCudaTensor_data(state, input);
// eigen values and workspace
float *w = th_magma_smalloc_pinned(n);
float *wA = th_magma_smalloc_pinned(lda);
// compute optimal size of work array
int info;
float lwork;
int liwork;
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
float *work = th_magma_smalloc_pinned((size_t)lwork);
int *iwork = th_magma_imalloc_pinned(liwork);
// compute eigenvalues and, optionally, eigenvectors
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
// copy eigen values from w to re_
if (info == 0)
THCudaTensor_copyArray1d(state, re_, w, n);
magma_free_pinned(iwork);
magma_free_pinned(work);
magma_free_pinned(wA);
magma_free_pinned(w);
// check error value
if (info > 0)
THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA syev : Argument %d : illegal value", -info);
THCudaTensor_freeCopyTo(state, input, rv_);
#else
THError(NoMagma(syev));
#endif
}
void THCudaTensor_geev(THCState *state, THCudaTensor *re_, THCudaTensor *rv_, THCudaTensor *a_, const char *jobvrs)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 3, "A should be 2 dimensional");
THArgCheck(a_->size[0] == a_->size[1], 3, "A should be square");
magma_vec_t jobvr = jobvrs[0] == 'N' ? MagmaNoVec : MagmaVec;
int n = a_->size[0];
float *a_data = th_magma_smalloc_pinned(n * n);
THCudaTensor_copyTensor2d(state, a_data, a_);
float *wr = th_magma_smalloc_pinned(n);
float *wi = th_magma_smalloc_pinned(n);
float *vr_data = NULL;
int ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = th_magma_smalloc_pinned(n * n);
ldvr = n;
}
float wkopt;
int info;
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
int lwork = (int) wkopt;
float *work_data = th_magma_smalloc_pinned(lwork);
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
if (info > 0)
THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA geev : Argument %d : illegal value", -info);
{
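// pack eigenvalues as an n x 2 tensor: real parts in column 0, imaginary parts in column 1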
THCudaTensor_resize2d(state, re_, 2, n);
THCudaTensor *re = THCudaTensor_newContiguous(state, re_);
THCudaCheck(hipMemcpy(re->storage->data + re->storageOffset, wr, n*sizeof(float), hipMemcpyHostToDevice));
THCudaCheck(hipMemcpy(re->storage->data + re->storageOffset + n, wi, n*sizeof(float), hipMemcpyHostToDevice));
THCudaTensor_freeCopyTo(state, re, re_);
THCudaTensor_transpose(state, re_, NULL, 0, 1);
}
if (jobvr == MagmaVec)
THCudaTensor_copyArray2d(state, rv_, vr_data, n, n);
magma_free_pinned(work_data);
magma_free_pinned(vr_data);
magma_free_pinned(wi);
magma_free_pinned(wr);
magma_free_pinned(a_data);
#else
THError(NoMagma(geev));
#endif
}
void THCudaTensor_gesvd(THCState *state, THCudaTensor *ru_, THCudaTensor *rs_, THCudaTensor *rv_, THCudaTensor *a, const char *jobu)
{
#ifdef USE_MAGMA
THCudaTensor *ra_ = THCudaTensor_new(state);
THCudaTensor_gesvd2(state, ru_, rs_, rv_, ra_, a, jobu);
THCudaTensor_free(state, ra_);
#else
THError(NoMagma(gesvd));
#endif
}
void THCudaTensor_gesvd2(THCState *state, THCudaTensor *ru_, THCudaTensor *rs_, THCudaTensor *rv_, THCudaTensor *ra_, THCudaTensor *a, const char *jobus)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
magma_vec_t jobu = jobus[0] == 'A' ? MagmaAllVec : jobus[0] == 'S' ? MagmaSomeVec : jobus[0] == 'O' ? MagmaOverwriteVec : MagmaNoVec;
magma_vec_t jobvt = jobu;
int m = a->size[0];
int n = a->size[1];
int k = m < n ? m : n;
int j = (jobu == MagmaAllVec) ? m : k;
float *a_data = th_magma_smalloc_pinned(m * n);
THCudaTensor_copyTensor2d(state, a_data, a);
float *rs_data = th_magma_smalloc_pinned(k);
float *ru_data = th_magma_smalloc_pinned(m * j);
float *rv_data = th_magma_smalloc_pinned(n * n);
float wkopt;
int info;
magma_sgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, &info);
int lwork = (int) wkopt;
float *work_data = th_magma_smalloc_pinned(lwork);
magma_sgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, &info);
if (info > 0)
THError("MAGMA gesvd : %d superdiagonals failed to converge", info);
else if (info < 0)
THError("MAGMA gesvd : Argument %d : illegal value", -info);
THCudaTensor_copyArray2d(state, rv_, rv_data, n, n);
THCudaTensor_transpose(state, rv_, NULL, 0, 1);
THCudaTensor_copyArray2d(state, ru_, ru_data, m, j);
THCudaTensor_copyArray1d(state, rs_, rs_data, k);
THCudaTensor_copyArray2d(state, ra_, a_data, m, n);
magma_free_pinned(work_data);
magma_free_pinned(rv_data);
magma_free_pinned(ru_data);
magma_free_pinned(rs_data);
magma_free_pinned(a_data);
#else
THError(NoMagma(gesvd2));
#endif
}
void THCudaTensor_getri(THCState *state, THCudaTensor *ra_, THCudaTensor *a)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int info;
int n = a->size[0];
int lwork = n * magma_get_sgetri_nb(n);
THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a);
float *input_data = THCudaTensor_data(state, input);
int *ipiv = th_magma_imalloc_pinned(n);
THCudaTensor *work = THCudaTensor_newWithSize1d(state, lwork);
float *work_data = THCudaTensor_data(state, work);
// Run LU
magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info);
if (info > 0)
THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("MAGMA getrf : Argument %d : illegal value", -info);
// Inverse
magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info);
if (info > 0)
THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("MAGMA getri : Argument %d : illegal value", -info);
THCudaTensor_free(state, work);
magma_free_pinned(ipiv);
THCudaTensor_freeCopyTo(state, input, ra_);
#else
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int n = a->size[0];
// input
THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a);
// output
THCudaTensor *output = THCudaTensor_newColumnMajor(state, ra_, a);
size_t matrices_size = sizeof(float*);
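// cuBLAS only exposes batched getrf/getri here, so the single matrix is wrapped in a batch of one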
float **matrices1 = (float **)THAlloc(matrices_size);
const float **matrices1_const = (const float **)THAlloc(matrices_size);
float **matrices2 = (float **)THAlloc(matrices_size);
matrices1[0] = THCudaTensor_data(state, input);
matrices1_const[0] = THCudaTensor_data(state, input);
matrices2[0] = THCudaTensor_data(state, output);
// Copy pointers to device.
float **d_matrices1, **d_matrices2;
const float **d_matrices1_const;
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, matrices_size));
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1_const, matrices_size));
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, matrices_size));
THCudaCheck(hipMemcpyAsync(d_matrices1, matrices1, matrices_size,
hipMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaCheck(hipMemcpyAsync(d_matrices1_const, matrices1_const, matrices_size,
hipMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaCheck(hipMemcpyAsync(d_matrices2, matrices2, matrices_size,
hipMemcpyHostToDevice, THCState_getCurrentStream(state)));
int info;
int *info_gpu;
THCudaCheck(THCudaMalloc(state, (void**)&info_gpu, sizeof(int)));
int *ipiv_gpu;
THCudaCheck(THCudaMalloc(state, (void**)&ipiv_gpu, n * sizeof(int)));
// Run LU
THCudaBlas_getrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1);
THCudaCheck(hipMemcpy(&info, info_gpu, sizeof(int), hipMemcpyDeviceToHost));
if (info > 0)
THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("CUBLAS getrf : Argument %d : illegal value", -info);
// Inverse
THCudaBlas_getri(state, n, d_matrices1_const, n, ipiv_gpu, d_matrices2, n, info_gpu, 1);
if (info > 0)
THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("CUBLAS getri : Argument %d : illegal value", -info);
THCudaCheck(THCudaFree(state, ipiv_gpu));
THCudaCheck(THCudaFree(state, info_gpu));
THCudaTensor_freeCopyTo(state, output, input);
#endif
}
__global__ void THCudaTensor_copyUpperSymmetric(float *input, int n, int len)
{
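// potri fills only one triangle of the inverse; mirror it across the diagonal so the caller gets a full symmetric matrix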
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r > c) {
input[idx] = input[r*n + c];
}
}
}
void THCudaTensor_potri(THCState *state, THCudaTensor *ra_, THCudaTensor *a)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int n = a->size[0];
THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a);
float *input_data = THCudaTensor_data(state, input);
int info;
magma_spotrf_gpu(MagmaUpper, n, input_data, n, &info);
if (info > 0)
THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potrf : Argument %d : illegal value", -info);
magma_spotri_gpu(MagmaUpper, n, input_data, n, &info);
if (info > 0)
THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potri : Argument %d : illegal value", -info);
hipStream_t stream = THCState_getCurrentStream(state);
const int len = n*n;
dim3 blocks(::min(DIVUP(len, 128), 65535));
dim3 threads(128);
hipLaunchKernelGGL(( THCudaTensor_copyUpperSymmetric), dim3(blocks), dim3(threads), 0, stream, input_data, n, len);
THCudaTensor_freeCopyTo(state, input, ra_);
#else
THError(NoMagma(potri));
#endif
}
void THCudaTensor_potrf(THCState *state, THCudaTensor *ra_, THCudaTensor *a)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int n = a->size[0];
THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a);
float *input_data = THCudaTensor_data(state, input);
int info;
magma_spotrf_gpu(MagmaUpper, n, input_data, n, &info);
// check error value
if (info > 0)
THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potrf : Argument %d : illegal value", -info);
THCudaTensor_triu(state, ra_, input, 0);
THCudaTensor_free(state, input);
#else
THError(NoMagma(potrf));
#endif
}
void THCudaTensor_qr(THCState *state, THCudaTensor *rq_, THCudaTensor *rr_, THCudaTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 2, "A should be 2 dimensional");
THCudaTensor *a = THCudaTensor_newColumnMajor(state, rr_, a_);
int m = a->size[0];
int n = a->size[1];
int k = (m < n ? m : n);
int nb = magma_get_sgeqrf_nb(m);
float *a_data = THCudaTensor_data(state, a);
float *tau_data = th_magma_smalloc_pinned(n*n);
THCudaTensor *work = THCudaTensor_newWithSize1d(state, (2*k + ((n+31)/32)*32)*nb);
float *work_data = THCudaTensor_data(state, work);
int info;
magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info);
if (info != 0)
THError("MAGMA geqrf : Argument %d : illegal value.", -info);
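// geqrf leaves R in the upper triangle and the Householder reflectors below it; triu extracts R, then orgqr expands the reflectors into Q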
THCudaTensor *q = THCudaTensor_newColumnMajor(state, rq_, a);
float *q_data = THCudaTensor_data(state, q);
THCudaTensor_narrow(state, a, a, 0, 0, k);
THCudaTensor_triu(state, rr_, a, 0);
THCudaTensor_free(state, a);
magma_sorgqr_gpu(m, n, k, q_data, m, tau_data, work_data, nb, &info);
if (info != 0)
THError("MAGMA orgqr : Argument %d : illegal value.", -info);
THCudaTensor_free(state, work);
magma_free_pinned(tau_data);
THCudaTensor_narrow(state, q, q, 1, 0, k);
THCudaTensor_freeCopyTo(state, q, rq_);
#else
THError(NoMagma(qr));
#endif
}
| 03d2462b75dec98217f84ad7536c9e4fe543c631.cu | #include "THCGeneral.h"
#include "THCTensorMath.h"
#include "THCTensorCopy.h"
#include <algorithm>
#ifdef USE_MAGMA
#include <magma.h>
#else
#include "THCBlas.h"
#endif
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
#define NoMagma(name) "No CUDA implementation of '" #name "'. Install MAGMA and rebuild cutorch (http://icl.cs.utk.edu/magma/)"
void THCMagma_init(THCState *state)
{
#ifdef USE_MAGMA
magma_init();
#endif
}
#ifdef USE_MAGMA
static inline float* th_magma_smalloc_pinned(size_t n)
{
float* ptr;
if (MAGMA_SUCCESS != magma_smalloc_pinned(&ptr, n))
THError("$ Torch: not enough memory: you tried to allocate %dGB. Buy new RAM!", n/268435456);
return ptr;
}
static inline int* th_magma_imalloc_pinned(size_t n)
{
int* ptr;
if (MAGMA_SUCCESS != magma_imalloc_pinned(&ptr, n))
THError("$ Torch: not enough memory: you tried to allocate %dGB. Buy new RAM!", n/268435456);
return ptr;
}
static void THCudaTensor_copyArray1d(THCState *state, THCudaTensor *self, float *src, int k)
{
long size[1] = { k };
long stride[1] = { 1 };
THCudaTensor_rawResize(state, self, 1, size, stride);
size_t len = k * sizeof(float);
THCudaCheck(cudaMemcpy(self->storage->data + self->storageOffset, src, len, cudaMemcpyHostToDevice));
}
static void THCudaTensor_copyArray2d(THCState *state, THCudaTensor *self, float *src, int m, int n)
{
long size[2] = { m, n };
long stride[2] = { 1, m };
THCudaTensor_rawResize(state, self, 2, size, stride);
size_t len = m * n * sizeof(float);
THCudaCheck(cudaMemcpy(self->storage->data + self->storageOffset, src, len, cudaMemcpyHostToDevice));
}
static void THCudaTensor_copyTensor2d(THCState *state, float *dst, THCudaTensor *self)
{
THAssert(self->nDimension == 2);
size_t len = THCudaTensor_nElement(state, self)*sizeof(float);
THCudaTensor *temp = THCudaTensor_newTranspose(state, self, 0, 1);
THCudaTensor *selfc = THCudaTensor_newContiguous(state, temp);
THCudaCheck(cudaMemcpy(dst, selfc->storage->data + selfc->storageOffset, len, cudaMemcpyDeviceToHost));
THCudaTensor_free(state, temp);
THCudaTensor_free(state, selfc);
}
#endif
static THCudaTensor* THCudaTensor_newColumnMajor(THCState *state, THCudaTensor *self, THCudaTensor *src)
{
THAssert(src->nDimension == 2);
if (self == src && self->stride[0] == 1 && self->stride[1] == self->size[0])
{
THCudaTensor_retain(state, self);
return self;
}
if (self == src)
self = THCudaTensor_new(state);
else
THCudaTensor_retain(state, self);
long size[2] = { src->size[0], src->size[1] };
long stride[2] = { 1, src->size[0] };
THCudaTensor_rawResize(state, self, 2, size, stride);
THCudaTensor_copy(state, self, src);
return self;
}
void THCudaTensor_gesv(THCState *state, THCudaTensor *rb_, THCudaTensor *ra_, THCudaTensor *b_, THCudaTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional");
THArgCheck(b_->nDimension == 2, 2, "b should be 2 dimensional");
THArgCheck(a_->size[0] == a_->size[1], 1, "A should be square");
THArgCheck(b_->size[0] == a_->size[0], 2, "A,b size incompatible");
int n = a_->size[0];
int nrhs = b_->size[1];
THCudaTensor *a = THCudaTensor_newColumnMajor(state, ra_, a_);
THCudaTensor *b = THCudaTensor_newColumnMajor(state, rb_, b_);
float *a_data = THCudaTensor_data(state, a);
float *b_data = THCudaTensor_data(state, b);
int *ipiv = th_magma_imalloc_pinned(n);
int info;
magma_sgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info);
if (info < 0)
THError("MAGMA gesv : Argument %d : illegal value", -info);
else if (info > 0)
THError("MAGMA gesv : U(%d,%d) is zero, singular U.", info, info);
magma_free_pinned(ipiv);
THCudaTensor_freeCopyTo(state, a, ra_);
THCudaTensor_freeCopyTo(state, b, rb_);
#else
THError(NoMagma(gesv));
#endif
}
void THCudaTensor_gels(THCState *state, THCudaTensor *rb_, THCudaTensor *ra_, THCudaTensor *b_, THCudaTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional");
THArgCheck(b_->nDimension == 2, 1, "b should be 2 dimensional");
THArgCheck(a_->size[0] == b_->size[0], 2, "size incompatible A,b");
THArgCheck(a_->size[0] >= a_->size[1], 2, "A should have m >= n");
THCudaTensor *a = THCudaTensor_newColumnMajor(state, ra_, a_);
THCudaTensor *b = THCudaTensor_newColumnMajor(state, rb_, b_);
float *a_data = THCudaTensor_data(state, a);
float *b_data = THCudaTensor_data(state, b);
int m = a->size[0];
int n = a->size[1];
int nrhs = b->size[1];
float wkopt;
int info;
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
float *hwork = th_magma_smalloc_pinned((size_t)wkopt);
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
magma_free_pinned(hwork);
if (info != 0)
THError("MAGMA gels : Argument %d : illegal value", -info);
THCudaTensor_freeCopyTo(state, a, ra_);
THCudaTensor_freeCopyTo(state, b, rb_);
#else
THError(NoMagma(gels));
#endif
}
void THCudaTensor_syev(THCState *state, THCudaTensor *re_, THCudaTensor *rv_, THCudaTensor *a, const char *jobzs, const char *uplos)
{
#ifdef USE_MAGMA
int n = a->size[0];
int lda = n;
magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower;
magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec;
THCudaTensor *input = THCudaTensor_newColumnMajor(state, rv_, a);
float *input_data = THCudaTensor_data(state, input);
// eigen values and workspace
float *w = th_magma_smalloc_pinned(n);
float *wA = th_magma_smalloc_pinned(lda);
// compute optimal size of work array
int info;
float lwork;
int liwork;
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
float *work = th_magma_smalloc_pinned((size_t)lwork);
int *iwork = th_magma_imalloc_pinned(liwork);
// compute eigenvalues and, optionally, eigenvectors
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
// copy eigen values from w to re_
if (info == 0)
THCudaTensor_copyArray1d(state, re_, w, n);
magma_free_pinned(iwork);
magma_free_pinned(work);
magma_free_pinned(wA);
magma_free_pinned(w);
// check error value
if (info > 0)
THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA syev : Argument %d : illegal value", -info);
THCudaTensor_freeCopyTo(state, input, rv_);
#else
THError(NoMagma(syev));
#endif
}
void THCudaTensor_geev(THCState *state, THCudaTensor *re_, THCudaTensor *rv_, THCudaTensor *a_, const char *jobvrs)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 3, "A should be 2 dimensional");
THArgCheck(a_->size[0] == a_->size[1], 3, "A should be square");
magma_vec_t jobvr = jobvrs[0] == 'N' ? MagmaNoVec : MagmaVec;
int n = a_->size[0];
float *a_data = th_magma_smalloc_pinned(n * n);
THCudaTensor_copyTensor2d(state, a_data, a_);
float *wr = th_magma_smalloc_pinned(n);
float *wi = th_magma_smalloc_pinned(n);
float *vr_data = NULL;
int ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = th_magma_smalloc_pinned(n * n);
ldvr = n;
}
float wkopt;
int info;
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
int lwork = (int) wkopt;
float *work_data = th_magma_smalloc_pinned(lwork);
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
if (info > 0)
THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA geev : Argument %d : illegal value", -info);
{
THCudaTensor_resize2d(state, re_, 2, n);
THCudaTensor *re = THCudaTensor_newContiguous(state, re_);
THCudaCheck(cudaMemcpy(re->storage->data + re->storageOffset, wr, n*sizeof(float), cudaMemcpyHostToDevice));
THCudaCheck(cudaMemcpy(re->storage->data + re->storageOffset + n, wi, n*sizeof(float), cudaMemcpyHostToDevice));
THCudaTensor_freeCopyTo(state, re, re_);
THCudaTensor_transpose(state, re_, NULL, 0, 1);
}
if (jobvr == MagmaVec)
THCudaTensor_copyArray2d(state, rv_, vr_data, n, n);
magma_free_pinned(work_data);
magma_free_pinned(vr_data);
magma_free_pinned(wi);
magma_free_pinned(wr);
magma_free_pinned(a_data);
#else
THError(NoMagma(geev));
#endif
}
void THCudaTensor_gesvd(THCState *state, THCudaTensor *ru_, THCudaTensor *rs_, THCudaTensor *rv_, THCudaTensor *a, const char *jobu)
{
#ifdef USE_MAGMA
THCudaTensor *ra_ = THCudaTensor_new(state);
THCudaTensor_gesvd2(state, ru_, rs_, rv_, ra_, a, jobu);
THCudaTensor_free(state, ra_);
#else
THError(NoMagma(gesvd));
#endif
}
void THCudaTensor_gesvd2(THCState *state, THCudaTensor *ru_, THCudaTensor *rs_, THCudaTensor *rv_, THCudaTensor *ra_, THCudaTensor *a, const char *jobus)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
magma_vec_t jobu = jobus[0] == 'A' ? MagmaAllVec : jobus[0] == 'S' ? MagmaSomeVec : jobus[0] == 'O' ? MagmaOverwriteVec : MagmaNoVec;
magma_vec_t jobvt = jobu;
int m = a->size[0];
int n = a->size[1];
int k = m < n ? m : n;
int j = (jobu == MagmaAllVec) ? m : k;
float *a_data = th_magma_smalloc_pinned(m * n);
THCudaTensor_copyTensor2d(state, a_data, a);
float *rs_data = th_magma_smalloc_pinned(k);
float *ru_data = th_magma_smalloc_pinned(m * j);
float *rv_data = th_magma_smalloc_pinned(n * n);
float wkopt;
int info;
magma_sgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, &info);
int lwork = (int) wkopt;
float *work_data = th_magma_smalloc_pinned(lwork);
magma_sgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, &info);
if (info > 0)
THError("MAGMA gesvd : %d superdiagonals failed to converge", info);
else if (info < 0)
THError("MAGMA gesvd : Argument %d : illegal value", -info);
THCudaTensor_copyArray2d(state, rv_, rv_data, n, n);
THCudaTensor_transpose(state, rv_, NULL, 0, 1);
THCudaTensor_copyArray2d(state, ru_, ru_data, m, j);
THCudaTensor_copyArray1d(state, rs_, rs_data, k);
THCudaTensor_copyArray2d(state, ra_, a_data, m, n);
magma_free_pinned(work_data);
magma_free_pinned(rv_data);
magma_free_pinned(ru_data);
magma_free_pinned(rs_data);
magma_free_pinned(a_data);
#else
THError(NoMagma(gesvd2));
#endif
}
void THCudaTensor_getri(THCState *state, THCudaTensor *ra_, THCudaTensor *a)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int info;
int n = a->size[0];
int lwork = n * magma_get_sgetri_nb(n);
THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a);
float *input_data = THCudaTensor_data(state, input);
int *ipiv = th_magma_imalloc_pinned(n);
THCudaTensor *work = THCudaTensor_newWithSize1d(state, lwork);
float *work_data = THCudaTensor_data(state, work);
// Run LU
magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info);
if (info > 0)
THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("MAGMA getrf : Argument %d : illegal value", -info);
// Inverse
magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info);
if (info > 0)
THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("MAGMA getri : Argument %d : illegal value", -info);
THCudaTensor_free(state, work);
magma_free_pinned(ipiv);
THCudaTensor_freeCopyTo(state, input, ra_);
#else
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int n = a->size[0];
// input
THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a);
// output
THCudaTensor *output = THCudaTensor_newColumnMajor(state, ra_, a);
size_t matrices_size = sizeof(float*);
float **matrices1 = (float **)THAlloc(matrices_size);
const float **matrices1_const = (const float **)THAlloc(matrices_size);
float **matrices2 = (float **)THAlloc(matrices_size);
matrices1[0] = THCudaTensor_data(state, input);
matrices1_const[0] = THCudaTensor_data(state, input);
matrices2[0] = THCudaTensor_data(state, output);
// Copy pointers to device.
float **d_matrices1, **d_matrices2;
const float **d_matrices1_const;
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, matrices_size));
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1_const, matrices_size));
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, matrices_size));
THCudaCheck(cudaMemcpyAsync(d_matrices1, matrices1, matrices_size,
cudaMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaCheck(cudaMemcpyAsync(d_matrices1_const, matrices1_const, matrices_size,
cudaMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaCheck(cudaMemcpyAsync(d_matrices2, matrices2, matrices_size,
cudaMemcpyHostToDevice, THCState_getCurrentStream(state)));
int info;
int *info_gpu;
THCudaCheck(THCudaMalloc(state, (void**)&info_gpu, sizeof(int)));
int *ipiv_gpu;
THCudaCheck(THCudaMalloc(state, (void**)&ipiv_gpu, n * sizeof(int)));
// Run LU
THCudaBlas_getrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1);
THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost));
if (info > 0)
THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("CUBLAS getrf : Argument %d : illegal value", -info);
// Inverse
THCudaBlas_getri(state, n, d_matrices1_const, n, ipiv_gpu, d_matrices2, n, info_gpu, 1);
if (info > 0)
THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("CUBLAS getri : Argument %d : illegal value", -info);
THCudaCheck(THCudaFree(state, ipiv_gpu));
THCudaCheck(THCudaFree(state, info_gpu));
THCudaTensor_freeCopyTo(state, output, input);
#endif
}
__global__ void THCudaTensor_copyUpperSymmetric(float *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r > c) {
input[idx] = input[r*n + c];
}
}
}
void THCudaTensor_potri(THCState *state, THCudaTensor *ra_, THCudaTensor *a)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int n = a->size[0];
THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a);
float *input_data = THCudaTensor_data(state, input);
int info;
magma_spotrf_gpu(MagmaUpper, n, input_data, n, &info);
if (info > 0)
THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potrf : Argument %d : illegal value", -info);
magma_spotri_gpu(MagmaUpper, n, input_data, n, &info);
if (info > 0)
THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potri : Argument %d : illegal value", -info);
cudaStream_t stream = THCState_getCurrentStream(state);
const int len = n*n;
dim3 blocks(std::min(DIVUP(len, 128), 65535));
dim3 threads(128);
THCudaTensor_copyUpperSymmetric<<<blocks, threads, 0, stream>>>(input_data, n, len);
THCudaTensor_freeCopyTo(state, input, ra_);
#else
THError(NoMagma(potri));
#endif
}
void THCudaTensor_potrf(THCState *state, THCudaTensor *ra_, THCudaTensor *a)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int n = a->size[0];
THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a);
float *input_data = THCudaTensor_data(state, input);
int info;
magma_spotrf_gpu(MagmaUpper, n, input_data, n, &info);
// check error value
if (info > 0)
THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potrf : Argument %d : illegal value", -info);
THCudaTensor_triu(state, ra_, input, 0);
THCudaTensor_free(state, input);
#else
THError(NoMagma(potrf));
#endif
}
void THCudaTensor_qr(THCState *state, THCudaTensor *rq_, THCudaTensor *rr_, THCudaTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 2, "A should be 2 dimensional");
THCudaTensor *a = THCudaTensor_newColumnMajor(state, rr_, a_);
int m = a->size[0];
int n = a->size[1];
int k = (m < n ? m : n);
int nb = magma_get_sgeqrf_nb(m);
float *a_data = THCudaTensor_data(state, a);
float *tau_data = th_magma_smalloc_pinned(n*n);
THCudaTensor *work = THCudaTensor_newWithSize1d(state, (2*k + ((n+31)/32)*32)*nb);
float *work_data = THCudaTensor_data(state, work);
int info;
magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info);
if (info != 0)
THError("MAGMA geqrf : Argument %d : illegal value.", -info);
THCudaTensor *q = THCudaTensor_newColumnMajor(state, rq_, a);
float *q_data = THCudaTensor_data(state, q);
THCudaTensor_narrow(state, a, a, 0, 0, k);
THCudaTensor_triu(state, rr_, a, 0);
THCudaTensor_free(state, a);
magma_sorgqr_gpu(m, n, k, q_data, m, tau_data, work_data, nb, &info);
if (info != 0)
THError("MAGMA orgqr : Argument %d : illegal value.", -info);
THCudaTensor_free(state, work);
magma_free_pinned(tau_data);
THCudaTensor_narrow(state, q, q, 1, 0, k);
THCudaTensor_freeCopyTo(state, q, rq_);
#else
THError(NoMagma(qr));
#endif
}
|
e51a78102cd571640480f7e4b14b6da12632e222.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include <algorithm>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(1); \
} \
}
struct GpuTimer
{
hipEvent_t start;
hipEvent_t stop;
GpuTimer()
{
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer()
{
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start()
{
hipEventRecord(start, 0);
hipEventSynchronize(start);
}
void Stop()
{
hipEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// Sequential radix sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
void sortByThrust(const uint32_t * in, int n,
uint32_t * out)
{
thrust::device_vector<uint32_t> dv_out(in, in + n);
thrust::sort(dv_out.begin(), dv_out.end());
thrust::copy(dv_out.begin(), dv_out.end(), out);
}
void sortByDevice(const uint32_t * in, int n,
uint32_t * out,
int nBits, int blockSize)
{
int nBins = 1 << nBits; // number of bins
int m=(n - 1) / blockSize + 1;// gridSize
// allocate buffers
// local hist
int **localHist = (int **)malloc(m * sizeof(int *));
for (int i=0; i<m; i++)
{
localHist[i] = (int *)malloc(nBins * sizeof(int));
}
// scan
int **scan = (int **)malloc(m * sizeof(int *));
for (int i=0; i<m; i++)
{
scan[i] = (int *)malloc(nBins * sizeof(int));
}
// start indices
int **start = (int **)malloc(m * sizeof(int *));
for (int i=0; i<m; i++)
{
start[i] = (int *)malloc(nBins * sizeof(int));
}
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
uint32_t * dst = out;
GpuTimer timerTmp1,timerTmp2,timerTmp3,timerTmp4,timerTmp5;
float time1,time2,time3,time4,time5;
time1=time2=time3=time4=time5=0;
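// Each pass handles one nBits-wide digit: per-block histograms, an exclusive scan over (digit, block) pairs, an in-block sort, per-block start indices, then a global scatter.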
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
timerTmp1.Start();
// set localHist to 0
for (int i=0; i<m; i++)
{
memset(localHist[i], 0, nBins * sizeof(int));
}
// compute localHist
for(int blockIdx=0;blockIdx<m;blockIdx++)
{
for(int threadIdx=0;threadIdx<blockSize;threadIdx++)
{
int i=blockSize*blockIdx+threadIdx;
if(i<n)
{
int bin = (src[i] >> bit) & (nBins - 1);
localHist[blockIdx][bin]++;
}
}
}
timerTmp1.Stop();
time1 = time1 + timerTmp1.Elapsed();
timerTmp2.Start();
// initialize scan to 0
for (int i=0; i<m; i++)
{
memset(scan[i], 0, nBins * sizeof(int));
}
// compute exclusive scan
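// scan[block][bin] becomes the count of elements with a smaller digit anywhere plus same-digit elements in earlier blocks, i.e. the first output slot for that (block, digit) pair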
for(int bin=0; bin<nBins;bin++)
{
for (int blockIdx=0;blockIdx<m;blockIdx++)
{
if(blockIdx==0&&bin==0)
{
scan[blockIdx][bin]=0;
}
else
{
if (blockIdx==0)
{
scan[blockIdx][bin]=scan[m-1][bin-1]+localHist[m-1][bin-1];
}
else
{
scan[blockIdx][bin]=scan[blockIdx-1][bin]+localHist[blockIdx-1][bin];
}
}
}
}
timerTmp2.Stop();
time2 = time2 + timerTmp2.Elapsed();
timerTmp3.Start();
// sort locally within each block
for(int blockIdx=0;blockIdx<m;blockIdx++)
{
for(int index=0;index<blockSize-1;index++)
{
for(int threadIdx=0;threadIdx<blockSize-1-index;threadIdx++)
{
int i=blockIdx*blockSize+threadIdx;
if(i+1<n)
{
if(((src[i] >> bit) & (nBins - 1))>((src[i+1] >> bit) & (nBins - 1)))
{
uint32_t temp=src[i];
src[i]=src[i+1];
src[i+1]=temp;
}
}
}
}
}
timerTmp3.Stop();
time3 = time3 + timerTmp3.Elapsed();
timerTmp4.Start();
// initialize start to -1
for (int i=0; i<m; i++)
{
memset(start[i], -1, nBins * sizeof(int));
}
// compute start indices
for(int blockIdx=0;blockIdx<m;blockIdx++)
{
for(int threadIdx=0;threadIdx<blockSize;threadIdx++)
{
int i=blockIdx*blockSize+threadIdx;
if (i<n)
{
if(threadIdx==0)
{
start[blockIdx][((src[i] >> bit) & (nBins - 1))]=threadIdx;
}
else
{
if(((src[i] >> bit) & (nBins - 1))!=((src[i-1] >> bit) & (nBins - 1)))
{
start[blockIdx][((src[i] >> bit) & (nBins - 1))]=threadIdx;
}
}
}
}
}
timerTmp4.Stop();
time4 = time4 + timerTmp4.Elapsed();
timerTmp5.Start();
//scatter
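// destination index = first output slot for this digit in this block + this element's offset from the first same-digit element inside the block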
for(int blockIdx=0;blockIdx<m;blockIdx++)
{
for(int threadIdx=0;threadIdx<blockSize;threadIdx++)
{
int i=blockIdx*blockSize+threadIdx;
if(i<n)
{
int bin = (src[i] >> bit) & (nBins - 1);
int rank=scan[blockIdx][bin]+threadIdx-start[blockIdx][bin];
dst[rank]=src[i];
}
}
}
timerTmp5.Stop();
time5 = time5 + timerTmp5.Elapsed();
uint32_t * temp = src;
src = dst;
dst = temp;
}
printf("Time (local hist): %.3f ms\n", time1);
printf("Time (exclusive scan): %.3f ms\n", time2);
printf("Time (local sort): %.3f ms\n", time3);
printf("Time (start value): %.3f ms\n", time4);
printf("Time (scatter): %.3f ms\n", time5);
memcpy(out, src, n * sizeof(uint32_t));
// Free memories
for (int i=0; i<m; i++)
{
free(localHist[i]);
}
free(localHist);
for (int i=0; i<m; i++)
{
free(scan[i]);
}
free(scan);
for (int i=0; i<m; i++)
{
free(start[i]);
}
free(start);
free(originalSrc);
}
// Radix sort
void sort(const uint32_t * in, int n,
uint32_t * out,
int nBits,
bool useDevice=false, int blockSizes=512)
{
GpuTimer timer;
timer.Start();
if (useDevice == false)
{
printf("\nRadix sort by Thrust\n");
sortByThrust(in, n, out);
}
else // use device
{
printf("\nRadix sort by device\n");
sortByDevice(in, n, out, nBits, blockSizes);
}
timer.Stop();
printf("Time: %.3f ms\n", timer.Elapsed());
}
void printDeviceInfo()
{
hipDeviceProp_t devProv;
CHECK(hipGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
for (int i = 0; i < n; i++)
{
if (out[i] != correctOut[i])
{
printf("INCORRECT :(\n");
return;
}
}
printf("CORRECT :)\n");
}
void printArray(uint32_t * a, int n)
{
for (int i = 0; i < n; i++)
printf("%i ", a[i]);
printf("\n");
}
int main(int argc, char ** argv)
{
// PRINT OUT DEVICE INFO
printDeviceInfo();
// SET UP INPUT SIZE
int n = (1 << 24) + 1;
n = 1000000;
printf("\nInput size: %d\n", n);
// ALLOCATE MEMORIES
size_t bytes = n * sizeof(uint32_t);
uint32_t * in = (uint32_t *)malloc(bytes);
uint32_t * out = (uint32_t *)malloc(bytes); // Device result
uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result
// SET UP INPUT DATA
for (int i = 0; i < n; i++)
in[i] = rand();
//printArray(in, n);
// SET UP NBITS
int nBits = 4; // Default
if (argc > 1)
nBits = atoi(argv[1]);
printf("\nNum bits per digit: %d\n", nBits);
// DETERMINE BLOCK SIZES
int blockSizes=512; // single block size used for all phases
if (argc == 3)
{
blockSizes = atoi(argv[2]);
}
printf("\nBlock size: %d", blockSizes);
// SORT BY HOST
sort(in, n, correctOut, nBits);
// printArray(correctOut, n);
// SORT BY DEVICE
sort(in, n, out, nBits, true, blockSizes);
// printArray(out,n);
checkCorrectness(out, correctOut, n);
// FREE MEMORIES
free(in);
free(out);
free(correctOut);
return EXIT_SUCCESS;
}
| e51a78102cd571640480f7e4b14b6da12632e222.cu | #include <stdio.h>
#include <stdint.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include <algorithm>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start()
{
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
}
void Stop()
{
cudaEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// Sequential radix sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
void sortByThrust(const uint32_t * in, int n,
uint32_t * out)
{
thrust::device_vector<uint32_t> dv_out(in, in + n);
thrust::sort(dv_out.begin(), dv_out.end());
thrust::copy(dv_out.begin(), dv_out.end(), out);
}
void sortByDevice(const uint32_t * in, int n,
uint32_t * out,
int nBits, int blockSize)
{
int nBins = 1 << nBits; // number of bins
int m=(n - 1) / blockSize + 1;// gridSize
// allocate buffers
// local hist
int **localHist = (int **)malloc(m * sizeof(int *));
for (int i=0; i<m; i++)
{
localHist[i] = (int *)malloc(nBins * sizeof(int));
}
// scan
int **scan = (int **)malloc(m * sizeof(int *));
for (int i=0; i<m; i++)
{
scan[i] = (int *)malloc(nBins * sizeof(int));
}
// start indices
int **start = (int **)malloc(m * sizeof(int *));
for (int i=0; i<m; i++)
{
start[i] = (int *)malloc(nBins * sizeof(int));
}
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
uint32_t * dst = out;
GpuTimer timerTmp1,timerTmp2,timerTmp3,timerTmp4,timerTmp5;
float time1,time2,time3,time4,time5;
time1=time2=time3=time4=time5=0;
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
timerTmp1.Start();
// set localHist to 0
for (int i=0; i<m; i++)
{
memset(localHist[i], 0, nBins * sizeof(int));
}
// compute localHist
for(int blockIdx=0;blockIdx<m;blockIdx++)
{
for(int threadIdx=0;threadIdx<blockSize;threadIdx++)
{
int i=blockSize*blockIdx+threadIdx;
if(i<n)
{
int bin = (src[i] >> bit) & (nBins - 1);
localHist[blockIdx][bin]++;
}
}
}
timerTmp1.Stop();
time1 = time1 + timerTmp1.Elapsed();
timerTmp2.Start();
// initialize scan to 0
for (int i=0; i<m; i++)
{
memset(scan[i], 0, nBins * sizeof(int));
}
// compute exclusive scan
for(int bin=0; bin<nBins;bin++)
{
for (int blockIdx=0;blockIdx<m;blockIdx++)
{
if(blockIdx==0&&bin==0)
{
scan[blockIdx][bin]=0;
}
else
{
if (blockIdx==0)
{
scan[blockIdx][bin]=scan[m-1][bin-1]+localHist[m-1][bin-1];
}
else
{
scan[blockIdx][bin]=scan[blockIdx-1][bin]+localHist[blockIdx-1][bin];
}
}
}
}
timerTmp2.Stop();
time2 = time2 + timerTmp2.Elapsed();
timerTmp3.Start();
// sort locally within each block
for(int blockIdx=0;blockIdx<m;blockIdx++)
{
for(int index=0;index<blockSize-1;index++)
{
for(int threadIdx=0;threadIdx<blockSize-1-index;threadIdx++)
{
int i=blockIdx*blockSize+threadIdx;
if(i+1<n)
{
if(((src[i] >> bit) & (nBins - 1))>((src[i+1] >> bit) & (nBins - 1)))
{
uint32_t temp=src[i];
src[i]=src[i+1];
src[i+1]=temp;
}
}
}
}
}
timerTmp3.Stop();
time3 = time3 + timerTmp3.Elapsed();
timerTmp4.Start();
// initialize start to -1
for (int i=0; i<m; i++)
{
memset(start[i], -1, nBins * sizeof(int));
}
// compute start indices
for(int blockIdx=0;blockIdx<m;blockIdx++)
{
for(int threadIdx=0;threadIdx<blockSize;threadIdx++)
{
int i=blockIdx*blockSize+threadIdx;
if (i<n)
{
if(threadIdx==0)
{
start[blockIdx][((src[i] >> bit) & (nBins - 1))]=threadIdx;
}
else
{
if(((src[i] >> bit) & (nBins - 1))!=((src[i-1] >> bit) & (nBins - 1)))
{
start[blockIdx][((src[i] >> bit) & (nBins - 1))]=threadIdx;
}
}
}
}
}
timerTmp4.Stop();
time4 = time4 + timerTmp4.Elapsed();
timerTmp5.Start();
//scatter
for(int blockIdx=0;blockIdx<m;blockIdx++)
{
for(int threadIdx=0;threadIdx<blockSize;threadIdx++)
{
int i=blockIdx*blockSize+threadIdx;
if(i<n)
{
int bin = (src[i] >> bit) & (nBins - 1);
int rank=scan[blockIdx][bin]+threadIdx-start[blockIdx][bin];
dst[rank]=src[i];
}
}
}
timerTmp5.Stop();
time5 = time5 + timerTmp5.Elapsed();
uint32_t * temp = src;
src = dst;
dst = temp;
}
printf("Time (local hist): %.3f ms\n", time1);
printf("Time (exclusive scan): %.3f ms\n", time2);
printf("Time (local sort): %.3f ms\n", time3);
printf("Time (start value): %.3f ms\n", time4);
printf("Time (scatter): %.3f ms\n", time5);
memcpy(out, src, n * sizeof(uint32_t));
// Free memories
for (int i=0; i<m; i++)
{
free(localHist[i]);
}
free(localHist);
for (int i=0; i<m; i++)
{
free(scan[i]);
}
free(scan);
for (int i=0; i<m; i++)
{
free(start[i]);
}
free(start);
free(originalSrc);
}
// Radix sort
void sort(const uint32_t * in, int n,
uint32_t * out,
int nBits,
bool useDevice=false, int blockSizes=512)
{
GpuTimer timer;
timer.Start();
if (useDevice == false)
{
printf("\nRadix sort by Thrust\n");
sortByThrust(in, n, out);
}
else // use device
{
printf("\nRadix sort by device\n");
sortByDevice(in, n, out, nBits, blockSizes);
}
timer.Stop();
printf("Time: %.3f ms\n", timer.Elapsed());
}
void printDeviceInfo()
{
cudaDeviceProp devProv;
CHECK(cudaGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
for (int i = 0; i < n; i++)
{
if (out[i] != correctOut[i])
{
printf("INCORRECT :(\n");
return;
}
}
printf("CORRECT :)\n");
}
void printArray(uint32_t * a, int n)
{
for (int i = 0; i < n; i++)
printf("%i ", a[i]);
printf("\n");
}
int main(int argc, char ** argv)
{
// PRINT OUT DEVICE INFO
printDeviceInfo();
// SET UP INPUT SIZE
int n = (1 << 24) + 1;
n = 1000000;
printf("\nInput size: %d\n", n);
// ALLOCATE MEMORIES
size_t bytes = n * sizeof(uint32_t);
uint32_t * in = (uint32_t *)malloc(bytes);
uint32_t * out = (uint32_t *)malloc(bytes); // Device result
uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result
// SET UP INPUT DATA
for (int i = 0; i < n; i++)
in[i] = rand();
//printArray(in, n);
// SET UP NBITS
int nBits = 4; // Default
if (argc > 1)
nBits = atoi(argv[1]);
printf("\nNum bits per digit: %d\n", nBits);
// DETERMINE BLOCK SIZES
int blockSizes=512; // single block size used for all phases
if (argc == 3)
{
blockSizes = atoi(argv[2]);
}
printf("\nBlock size: %d", blockSizes);
// SORT BY HOST
sort(in, n, correctOut, nBits);
// printArray(correctOut, n);
// SORT BY DEVICE
sort(in, n, out, nBits, true, blockSizes);
// printArray(out,n);
checkCorrectness(out, correctOut, n);
// FREE MEMORIES
free(in);
free(out);
free(correctOut);
return EXIT_SUCCESS;
}
|
b481e21150b15df86a1e8fa7d6346a297bdf1e28.hip | // !!! This is a file automatically generated by hipify!!!
/*
* A program that compare performance of gfft and cuFFT library
* Test the speed and accuracy of FP16 and FP32 calculation
*/
// C library, CUDA runtime, helpers, and utilities
#include "../util/my_include.h"
#include <vector>
// gfft
#include "../util/gfft_using_fft4.h"
// CUFFT
#include <hipfft.h>
#include <hipfftXt.h>
typedef half2 Chalf;
typedef float2 Csingle;
const float NORM = 1.0f;
const int BATCH = 16;
const int SIZE = 256;
const int DISPLAY_DATA = 0;
int cuFFT16(int N, int B, Chalf* X, Chalf* FX){
// Allocate unified memory for input and output
int mem_size = N * B *sizeof(Chalf);
Chalf *d_idata, *d_odata;
checkCudaErrors(hipMallocManaged((void **) &d_idata, mem_size));
checkCudaErrors(hipMallocManaged((void **) &d_odata, mem_size));
// Copy input data to memory
checkCudaErrors(hipMemcpy(d_idata, X, mem_size, hipMemcpyHostToDevice));
// cuFFT plan
hipfftResult result;
hipfftHandle plan;
size_t workSize;
long long int input_size_long = N;
result = hipfftCreate(&plan);
if (result != HIPFFT_SUCCESS)
{
printf("hipfftCreate (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
result = cufftXtMakePlanMany(plan, 1, &input_size_long, NULL, 1, 1, \
HIP_C_16F, NULL, 1, 1, HIP_C_16F, B, \
&workSize, HIP_C_16F);
if (result != HIPFFT_SUCCESS)
{
printf("cufftXtMakePlanMany (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// cuFFT execution
result = cufftXtExec(plan, reinterpret_cast<hipfftComplex *>(d_idata), \
reinterpret_cast<hipfftComplex *>(d_odata), \
HIPFFT_FORWARD);
if (result != HIPFFT_SUCCESS)
{
printf("hipfftExecC2C (execution) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// Copy Device memory to output
checkCudaErrors(hipMemcpy(FX, d_odata, mem_size, hipMemcpyDeviceToHost));
// Clean up content and memory
hipfftDestroy(plan);
checkCudaErrors(hipFree(d_idata));
checkCudaErrors(hipFree(d_odata));
return 0;
}
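/* Minimal usage sketch (illustrative values only, not part of the original file):
Chalf in[256 * 16], out[256 * 16]; // 16 batches of 256-point signals, filled elsewhere
cuFFT16(256, 16, in, out); // forward half-precision FFT of every batch
Note: half-precision cuFFT plans are generally restricted to power-of-two transform
sizes, which the default SIZE = 256 used in main() below satisfies. */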
int main(int argc, char **argv)
{
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?") ||
checkCmdLineFlag(argc, (const char **)argv, "h")) {
printf("Usage: -norm=upper_bound (Max norm of input elements)\n"
" -n=size (Input vector size)\n"
" -batch=batch_size (Number of input vectors)\n"
" -display=show_result (0 or 1) \n");
exit(EXIT_SUCCESS);
}
// Get and set parameter
//// Norm
float norm = NORM;
if (checkCmdLineFlag(argc, (const char **)argv, "norm")) {
norm = getCmdLineArgumentInt(argc, (const char **)argv, "norm");
}
//// Input size
int n = SIZE;
if (checkCmdLineFlag(argc, (const char **)argv, "n")) {
n = getCmdLineArgumentInt(argc, (const char **)argv, "n");
}
//// Batch size
int batch = BATCH;
if (checkCmdLineFlag(argc, (const char **)argv, "batch")) {
batch = getCmdLineArgumentInt(argc, (const char **)argv, "batch");
}
//// Result display mode
int display = DISPLAY_DATA;
if (checkCmdLineFlag(argc, (const char **)argv, "display")) {
display = getCmdLineArgumentInt(argc, (const char **)argv, "display");
}
// Start program
printf("Problem size = %d, batch size = %d, norm = %f\n", n, batch, norm);
printf("[Testing of cuFFT FP16] - Starting...\n");
// Define input and output
Chalf X_16[n * batch], FX_16[n * batch];
// Run experiment
for (int i = 0; i < 1; i++){
// Initialize input
srand(time(NULL));
for (int j = 0; j < n * batch; j++){
X_16[j].x = (float)rand() / (float)(RAND_MAX) * 2 * norm - norm;
X_16[j].y = (float)rand() / (float)(RAND_MAX) * 2 * norm - norm;
if (display == 1){
printf("X[%d] = (%.10f, %.10f) \n", j, (float)X_16[j].x, (float)X_16[j].y);
}
}
// Call cuFFT16
cuFFT16(n, batch, X_16, FX_16);
}
exit(0);
}
| b481e21150b15df86a1e8fa7d6346a297bdf1e28.cu | /*
* A program that compare performance of gfft and cuFFT library
* Test the speed and accuracy of FP16 and FP32 calculation
*/
// C library, CUDA runtime, helpers, and utilities
#include "../util/my_include.h"
#include <vector>
// gfft
#include "../util/gfft_using_fft4.h"
// CUFFT
#include <cufft.h>
#include <cufftXt.h>
typedef half2 Chalf;
typedef float2 Csingle;
const float NORM = 1.0f;
const int BATCH = 16;
const int SIZE = 256;
const int DISPLAY_DATA = 0;
int cuFFT16(int N, int B, Chalf* X, Chalf* FX){
// Allocate unified memory for input and output
int mem_size = N * B *sizeof(Chalf);
Chalf *d_idata, *d_odata;
checkCudaErrors(cudaMallocManaged((void **) &d_idata, mem_size));
checkCudaErrors(cudaMallocManaged((void **) &d_odata, mem_size));
// Copy input data to memory
checkCudaErrors(cudaMemcpy(d_idata, X, mem_size, cudaMemcpyHostToDevice));
// cuFFT plan
cufftResult result;
cufftHandle plan;
size_t workSize;
long long int input_size_long = N;
result = cufftCreate(&plan);
if (result != CUFFT_SUCCESS)
{
printf("cufftCreate (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
result = cufftXtMakePlanMany(plan, 1, &input_size_long, NULL, 1, 1, \
CUDA_C_16F, NULL, 1, 1, CUDA_C_16F, B, \
&workSize, CUDA_C_16F);
if (result != CUFFT_SUCCESS)
{
printf("cufftXtMakePlanMany (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// cuFFT execution
result = cufftXtExec(plan, reinterpret_cast<cufftComplex *>(d_idata), \
reinterpret_cast<cufftComplex *>(d_odata), \
CUFFT_FORWARD);
if (result != CUFFT_SUCCESS)
{
printf("cufftExecC2C (execution) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// Copy Device memory to output
checkCudaErrors(cudaMemcpy(FX, d_odata, mem_size, cudaMemcpyDeviceToHost));
// Clean up content and memory
cufftDestroy(plan);
checkCudaErrors(cudaFree(d_idata));
checkCudaErrors(cudaFree(d_odata));
return 0;
}
int main(int argc, char **argv)
{
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?") ||
checkCmdLineFlag(argc, (const char **)argv, "h")) {
printf("Usage: -norm=upper_bound (Max norm of input elements)\n"
" -n=size (Input vector size)\n"
" -batch=batch_size (Number of input vectors)\n"
" -display=show_result (0 or 1) \n");
exit(EXIT_SUCCESS);
}
// Get and set parameter
//// Norm
float norm = NORM;
if (checkCmdLineFlag(argc, (const char **)argv, "norm")) {
norm = getCmdLineArgumentInt(argc, (const char **)argv, "norm");
}
//// Input size
int n = SIZE;
if (checkCmdLineFlag(argc, (const char **)argv, "n")) {
n = getCmdLineArgumentInt(argc, (const char **)argv, "n");
}
//// Batch size
int batch = BATCH;
if (checkCmdLineFlag(argc, (const char **)argv, "batch")) {
batch = getCmdLineArgumentInt(argc, (const char **)argv, "batch");
}
//// Result display mode
int display = DISPLAY_DATA;
if (checkCmdLineFlag(argc, (const char **)argv, "display")) {
display = getCmdLineArgumentInt(argc, (const char **)argv, "display");
}
// Start program
printf("Problem size = %d, batch size = %d, norm = %f\n", n, batch, norm);
printf("[Testing of cuFFT FP16] - Starting...\n");
// Define input and output
Chalf X_16[n * batch], FX_16[n * batch];
// Run experiment
for (int i = 0; i < 1; i++){
// Initialize input
srand(time(NULL));
for (int j = 0; j < n * batch; j++){
X_16[j].x = (float)rand() / (float)(RAND_MAX) * 2 * norm - norm;
X_16[j].y = (float)rand() / (float)(RAND_MAX) * 2 * norm - norm;
if (display == 1){
printf("X[%d] = (%.10f, %.10f) \n", j, (float)X_16[j].x, (float)X_16[j].y);
}
}
// Call cuFFT16
cuFFT16(n, batch, X_16, FX_16);
}
exit(0);
}
|
68ecaf7882d52a82692e1a554e9a0fb46bcc8c9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "CTrame.h"
#include "../../custom/custom_cuda.h"
CTrame::CTrame(int width, int height){
_width = width;
_height = height;
_frame = 1;
t_in_bits = new int [ nb_info() ];
t_coded_bits = new int [ nb_data() ];
CUDA_MALLOC_HOST(&t_noise_data, nb_data() + 1);
// t_noise_data = new float[ nb_data() + 1 ];
t_fpoint_data = new int [ nb_data() ];
t_decode_data = new int [ nb_data() ];
t_decode_bits = new int [ nb_info() ];
}
CTrame::CTrame(int width, int height, int frame){
_width = width;
_height = height;
_frame = frame;
t_in_bits = new int [ nb_info() * frame ];
t_coded_bits = new int [ nb_data() * frame ];
CUDA_MALLOC_HOST(&t_noise_data, nb_data() * frame + 4);
// t_noise_data = new float[ nb_data() ];
t_fpoint_data = new int [ nb_data() * frame ];
t_decode_data = new int [ nb_data() * frame ];
t_decode_bits = new int [ nb_info() * frame ];
}
CTrame::~CTrame(){
// arrays were allocated with new[], so release them with delete[]
delete [] t_in_bits;
delete [] t_coded_bits;
// delete t_noise_data;
hipHostFree(t_noise_data);
delete [] t_fpoint_data;
delete [] t_decode_data;
delete [] t_decode_bits;
}
int CTrame::nb_info(){
return /*nb_frames() * */(nb_data()-nb_checks());
}
int CTrame::nb_frames(){
return _frame;
}
int CTrame::nb_checks(){
return _height;
}
int CTrame::nb_data(){
return _width;
}
int* CTrame::get_t_in_bits(){
return t_in_bits;
}
int* CTrame::get_t_coded_bits(){
return t_coded_bits;
}
double* CTrame::get_t_noise_data(){
return t_noise_data;
}
int* CTrame::get_t_fpoint_data(){
return t_fpoint_data;
}
int* CTrame::get_t_decode_data(){
return t_decode_data;
}
int* CTrame::get_t_decode_bits(){
return t_decode_bits;
}
| 68ecaf7882d52a82692e1a554e9a0fb46bcc8c9d.cu | #include "CTrame.h"
#include "../../custom/custom_cuda.h"
CTrame::CTrame(int width, int height){
_width = width;
_height = height;
_frame = 1;
t_in_bits = new int [ nb_info() ];
t_coded_bits = new int [ nb_data() ];
CUDA_MALLOC_HOST(&t_noise_data, nb_data() + 1);
// t_noise_data = new float[ nb_data() + 1 ];
t_fpoint_data = new int [ nb_data() ];
t_decode_data = new int [ nb_data() ];
t_decode_bits = new int [ nb_info() ];
}
CTrame::CTrame(int width, int height, int frame){
_width = width;
_height = height;
_frame = frame;
t_in_bits = new int [ nb_info() * frame ];
t_coded_bits = new int [ nb_data() * frame ];
CUDA_MALLOC_HOST(&t_noise_data, nb_data() * frame + 4);
// t_noise_data = new float[ nb_data() ];
t_fpoint_data = new int [ nb_data() * frame ];
t_decode_data = new int [ nb_data() * frame ];
t_decode_bits = new int [ nb_info() * frame ];
}
CTrame::~CTrame(){
// arrays were allocated with new[], so release them with delete[]
delete [] t_in_bits;
delete [] t_coded_bits;
// delete t_noise_data;
cudaFreeHost(t_noise_data);
delete [] t_fpoint_data;
delete [] t_decode_data;
delete [] t_decode_bits;
}
int CTrame::nb_info(){
return /*nb_frames() * */(nb_data()-nb_checks());
}
int CTrame::nb_frames(){
return _frame;
}
int CTrame::nb_checks(){
return _height;
}
int CTrame::nb_data(){
return _width;
}
int* CTrame::get_t_in_bits(){
return t_in_bits;
}
int* CTrame::get_t_coded_bits(){
return t_coded_bits;
}
double* CTrame::get_t_noise_data(){
return t_noise_data;
}
int* CTrame::get_t_fpoint_data(){
return t_fpoint_data;
}
int* CTrame::get_t_decode_data(){
return t_decode_data;
}
int* CTrame::get_t_decode_bits(){
return t_decode_bits;
}
|
39ffcd4957e24260324df80e41233a93ffaf95b3.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#include <stdio.h>
#define N 512
#define NELEMS (N * N)
#define TRANSP 1
#define BLOCK 32
#define CUDA_CHECK_RETURN(value) \
{ \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) \
{ \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} \
}
double wtime()
{
struct timeval t;
gettimeofday(&t, NULL);
return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}
__global__ void tr1(float *a, float *b, int n)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if ((i < n) && (j < n))
b[j * n + i] = a[i * n + j];
}
__global__ void tr2(float *a, float *b, int n)
{
__shared__ float smem[BLOCK][BLOCK];
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
smem[threadIdx.y][threadIdx.x] = a[i + j * n];
__syncthreads();
i = threadIdx.x + blockIdx.y * blockDim.x;
j = threadIdx.y + blockIdx.x * blockDim.y;
b[i + j * n] = smem[threadIdx.x][threadIdx.y];
}
__global__ void tr3(float *a, float *b, int n)
{
__shared__ float smem[BLOCK][BLOCK + 1];
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
smem[threadIdx.y][threadIdx.x] = a[i + j * n];
__syncthreads();
i = threadIdx.x + blockIdx.y * blockDim.x;
j = threadIdx.y + blockIdx.x * blockDim.y;
b[i + j * n] = smem[threadIdx.x][threadIdx.y];
}
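// tr2 and tr3 differ only in the shared-memory declaration: padding each row with one
// extra element (BLOCK+1) makes the strided reads of the transposed tile hit different
// banks, avoiding the shared-memory bank conflicts that tr2 incurs.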
int main()
{
size_t size = sizeof(float) * NELEMS;
double tgpu = 0, tmem = 0;
float elapsedTime = 0;
hipEvent_t start, stop;
/* Allocate vectors on host */
float *h_A = (float *)malloc(size);
float *h_B = (float *)malloc(size);
if (h_A == NULL || h_B == NULL)
{
fprintf(stderr, "Allocation error.\n");
exit(EXIT_FAILURE);
}
for (int i = 0; i < NELEMS; ++i)
{
h_A[i] = rand() / (float)RAND_MAX;
}
/* Allocate vectors on device */
float *d_A = NULL, *d_B = NULL;
tmem = -wtime();
CUDA_CHECK_RETURN(hipMalloc((void **)&d_A, size));
CUDA_CHECK_RETURN(hipMalloc((void **)&d_B, size));
/* Copy the host vectors to device */
CUDA_CHECK_RETURN(hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice))
tmem += wtime();
/* Launch the kernel */
hipEventCreate(&start);
hipEventCreate(&stop);
tgpu = -wtime();
int threadsPerBlockDim = BLOCK;
dim3 blockDim(threadsPerBlockDim, threadsPerBlockDim, 1);
int blocksPerGridDimX = ceilf(N / (float)threadsPerBlockDim);
int blocksPerGridDimY = ceilf(N / (float)threadsPerBlockDim);
dim3 gridDim(blocksPerGridDimX, blocksPerGridDimY, 1);
hipEventRecord(start, 0);
// #if TRANSP == 1
hipLaunchKernelGGL(( tr1), dim3(gridDim), dim3(blockDim), 0, 0, d_A, d_B, N);
// #elif TRANSP == 2
hipLaunchKernelGGL(( tr2), dim3(gridDim), dim3(blockDim), 0, 0, d_A, d_B, N);
// #else
hipLaunchKernelGGL(( tr3), dim3(gridDim), dim3(blockDim), 0, 0, d_A, d_B, N);
// #endif
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
tgpu += wtime();
CUDA_CHECK_RETURN(hipGetLastError());
hipEventElapsedTime(&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
/* Copy the device vectors to host */
tmem -= wtime();
CUDA_CHECK_RETURN(hipMemcpy(h_B, d_B, size, hipMemcpyDeviceToHost));
tmem += wtime();
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < N; ++j)
if (fabs(h_A[i * N + j] - h_B[j * N + i]) > 1e-5)
{
fprintf(stderr,
"Result verification failed at element %d , %d! Ex: %f, Real: %f\n",
i, j, h_A[i * N + j], h_B[j * N + i]);
exit(EXIT_FAILURE);
}
}
printf("Transponse\n");
printf("GPU version (sec.): %.6lf\n", tgpu);
printf("Memory ops. (sec.): %.6lf\n", tmem);
printf("Total time (sec.): %.6lf\n", tgpu + tmem);
printf("Events Time %.6f\n", elapsedTime);
hipFree(d_A);
hipFree(d_B);
free(h_A);
free(h_B);
hipDeviceReset();
return 0;
} | 39ffcd4957e24260324df80e41233a93ffaf95b3.cu | #include <cuda.h>
#include <stdio.h>
#include <sys/time.h>
#include <stdio.h>
#define N 512
#define NELEMS (N * N)
#define TRANSP 1
#define BLOCK 32
#define CUDA_CHECK_RETURN(value) \
{ \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) \
{ \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} \
}
double wtime()
{
struct timeval t;
gettimeofday(&t, NULL);
return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}
__global__ void tr1(float *a, float *b, int n)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if ((i < n) && (j < n))
b[j * n + i] = a[i * n + j];
}
__global__ void tr2(float *a, float *b, int n)
{
__shared__ float smem[BLOCK][BLOCK];
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
smem[threadIdx.y][threadIdx.x] = a[i + j * n];
__syncthreads();
i = threadIdx.x + blockIdx.y * blockDim.x;
j = threadIdx.y + blockIdx.x * blockDim.y;
b[i + j * n] = smem[threadIdx.x][threadIdx.y];
}
__global__ void tr3(float *a, float *b, int n)
{
__shared__ float smem[BLOCK][BLOCK + 1];
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
smem[threadIdx.y][threadIdx.x] = a[i + j * n];
__syncthreads();
i = threadIdx.x + blockIdx.y * blockDim.x;
j = threadIdx.y + blockIdx.x * blockDim.y;
b[i + j * n] = smem[threadIdx.x][threadIdx.y];
}
int main()
{
size_t size = sizeof(float) * NELEMS;
double tgpu = 0, tmem = 0;
float elapsedTime = 0;
cudaEvent_t start, stop;
/* Allocate vectors on host */
float *h_A = (float *)malloc(size);
float *h_B = (float *)malloc(size);
if (h_A == NULL || h_B == NULL)
{
fprintf(stderr, "Allocation error.\n");
exit(EXIT_FAILURE);
}
for (int i = 0; i < NELEMS; ++i)
{
h_A[i] = rand() / (float)RAND_MAX;
}
/* Allocate vectors on device */
float *d_A = NULL, *d_B = NULL;
tmem = -wtime();
CUDA_CHECK_RETURN(cudaMalloc((void **)&d_A, size));
CUDA_CHECK_RETURN(cudaMalloc((void **)&d_B, size));
/* Copy the host vectors to device */
CUDA_CHECK_RETURN(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice))
tmem += wtime();
/* Launch the kernel */
cudaEventCreate(&start);
cudaEventCreate(&stop);
tgpu = -wtime();
int threadsPerBlockDim = BLOCK;
dim3 blockDim(threadsPerBlockDim, threadsPerBlockDim, 1);
int blocksPerGridDimX = ceilf(N / (float)threadsPerBlockDim);
int blocksPerGridDimY = ceilf(N / (float)threadsPerBlockDim);
dim3 gridDim(blocksPerGridDimX, blocksPerGridDimY, 1);
cudaEventRecord(start, 0);
// #if TRANSP == 1
tr1<<<gridDim, blockDim>>>(d_A, d_B, N);
// #elif TRANSP == 2
tr2<<<gridDim, blockDim>>>(d_A, d_B, N);
// #else
tr3<<<gridDim, blockDim>>>(d_A, d_B, N);
// #endif
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
tgpu += wtime();
CUDA_CHECK_RETURN(cudaGetLastError());
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
/* Copy the device vectors to host */
tmem -= wtime();
CUDA_CHECK_RETURN(cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost));
tmem += wtime();
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < N; ++j)
if (fabs(h_A[i * N + j] - h_B[j * N + i]) > 1e-5)
{
fprintf(stderr,
"Result verification failed at element %d , %d! Ex: %f, Real: %f\n",
i, j, h_A[i * N + j], h_B[j * N + i]);
exit(EXIT_FAILURE);
}
}
printf("Transponse\n");
printf("GPU version (sec.): %.6lf\n", tgpu);
printf("Memory ops. (sec.): %.6lf\n", tmem);
printf("Total time (sec.): %.6lf\n", tgpu + tmem);
printf("Events Time %.6f\n", elapsedTime);
cudaFree(d_A);
cudaFree(d_B);
free(h_A);
free(h_B);
cudaDeviceReset();
return 0;
} |
05e1fc8df5f274b3352ac6e3a2dcec96cfd9b55d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void printNumber(int number)
{
printf("%d\n", number);
}
int main()
{
hipStream_t stream[5]; // CUDA streams are of type `hipStream_t`.
for(int i = 0; i < 5; i++)
hipStreamCreate(&stream[i]);
for (int i = 0; i < 5; ++i)
{
hipLaunchKernelGGL(( printNumber), dim3(1), dim3(1), 0, stream[i], i);
}
hipDeviceSynchronize();
for (int i = 0; i < 5; ++i)
hipStreamDestroy(stream[i]);
}
| 05e1fc8df5f274b3352ac6e3a2dcec96cfd9b55d.cu | #include <stdio.h>
__global__ void printNumber(int number)
{
printf("%d\n", number);
}
int main()
{
cudaStream_t stream[5]; // CUDA streams are of type `cudaStream_t`.
for(int i = 0; i < 5; i++)
cudaStreamCreate(&stream[i]);
for (int i = 0; i < 5; ++i)
{
printNumber<<<1, 1, 0, stream[i]>>>(i);
}
cudaDeviceSynchronize();
for (int i = 0; i < 5; ++i)
cudaStreamDestroy(stream[i]);
}
|
f6f8b95c41d63a4097efd7cd607c2661e91cdc81.hip | // !!! This is a file automatically generated by hipify!!!
/****************************/
/* THIS IS OPEN SOURCE CODE */
/****************************/
/**
* @file nvml_power_limiting_test.cu
* CVS: $Id$
* @author Tony Castaldo ([email protected]) removed extraneous code and fixed a bug on multiple GPU setups. (Sept 2018).
* @author Asim YarKhan ([email protected]) HelloWorld altered to test power capping (October 2017)
* @author Heike Jagode ([email protected])
* Mods: <your name here> <your email address>
*
* @brief
* This file tests the ability to do power control using NVML.
* The papi configure and papi Makefile will take care of the
* compilation of the component tests (if all tests are added to a
* directory named 'tests' in the specific component dir). See
* components/README for more details.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "papi.h"
#include "papi_test.h"
// Host function
int main( int argc, char** argv )
{
#define NUM_EVENTS 32 /* Max number of GPUs on a node this code can handle. */
int retval, i, j, device_count;
int EventSet = PAPI_NULL;
long long values[NUM_EVENTS];
int device_id[NUM_EVENTS];
char *EventName[NUM_EVENTS];
int events[NUM_EVENTS];
int eventCount = 0;
const PAPI_component_info_t *cmpinfo;
char event_name[PAPI_MAX_STR_LEN];
/* PAPI Initialization */
retval = PAPI_library_init( PAPI_VER_CURRENT );
if( retval != PAPI_VER_CURRENT ) {
fprintf( stderr, "PAPI_library_init failed.\n" );
test_fail(__FILE__, __LINE__, "PAPI_library_init() failed.\n", retval);
}
printf( "PAPI_VERSION : %4d %6d %7d\n",
PAPI_VERSION_MAJOR( PAPI_VERSION ),
PAPI_VERSION_MINOR( PAPI_VERSION ),
PAPI_VERSION_REVISION( PAPI_VERSION ) );
int numcmp = PAPI_num_components();
// Search for the NVML component.
int cid = 0;
for (cid=0; cid<numcmp; cid++) {
cmpinfo = PAPI_get_component_info(cid);
if (cmpinfo == NULL) { // NULL?
fprintf(stderr, "PAPI error: PAPI reports %d components, but PAPI_get_component_info(%d) returns NULL pointer.\n", numcmp, cid);
test_fail( __FILE__, __LINE__,"PAPI_get_component_info failed\n",-1 );
} else {
if ( strstr( cmpinfo->name, "nvml" ) ) break; // If we found it,
}
}
if ( cid==numcmp ) { // If true we looped through all without finding nvml.
fprintf(stderr, "NVML PAPI Component was not found.\n");
test_skip( __FILE__, __LINE__,"Component nvml is not present\n",-1 );
}
printf( "NVML found as Component %d of %d: %s: %d events\n", (1+cmpinfo->CmpIdx), numcmp, cmpinfo->name, cmpinfo->num_native_events );
if (cmpinfo->disabled) { // If disabled,
fprintf(stderr, "NVML PAPI Component is disabled.\n");
test_skip( __FILE__,__LINE__,"Component nvml is disabled", 0 );
}
hipGetDeviceCount( &device_count );
printf("Found %d cuda devices\n", device_count);
int code = PAPI_NATIVE_MASK;
int ii=0;
int event_modifier = PAPI_ENUM_FIRST;
for ( ii=0; ii<cmpinfo->num_native_events; ii++ ) {
retval = PAPI_enum_cmp_event( &code, event_modifier, cid );
event_modifier = PAPI_ENUM_EVENTS;
if ( retval != PAPI_OK ) test_fail( __FILE__, __LINE__, "PAPI_event_code_to_name", retval );
retval = PAPI_event_code_to_name( code, event_name );
char *ss;
// We need events that END in power_management_limit; and must
// exclude those that end in power_management_limit_min or _max,
// and correspond to an existing cuda device.
ss = strstr(event_name, "power_management_limit"); // get position of this string.
if (ss == NULL) continue; // skip if not present.
if (ss[22] != 0) continue; // skip if there is anything after it.
ss = strstr(event_name, "device_"); // Look for the device id.
if (ss == NULL) continue; // Not a valid name.
int did = atoi(ss+7); // convert it.
if (did >= device_count) continue; // Invalid device count.
EventName[eventCount] = strdup(event_name); // Valid! Remember the name.
device_id[eventCount] = did; // Remember the device id.
printf("Found event '%s' for device %i.\n", event_name, did); // Report what we found.
eventCount++; // Add to the number of events found.
}
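// Example of a name that would pass this filter (schematic, device name hypothetical):
// "nvml:::GeForce_GTX_1080:device_0:power_management_limit" -- it ends in
// "power_management_limit" (not _min/_max) and contains "device_0" with 0 < device_count.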
if (eventCount == 0) { // If we found nothing,
fprintf(stderr, "No NVML events found. Skipping Test.\n");
test_skip( __FILE__,__LINE__,"Component nvml does not have a power_management_limit event.", 0 );
}
/* convert PAPI native events to PAPI code */
for(i=0; i < eventCount; i++) {
retval = PAPI_event_name_to_code( ( char * )EventName[i], &events[i] );
if( retval != PAPI_OK ) {
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
fprintf(stderr, "PAPI_event_name_to_code failure for event [%s] returned %i [%s].\n", EventName[i], retval, PAPI_strerror(retval));
test_fail( __FILE__,__LINE__,"PAPI_event_name_to_code failed.", retval );
}
}
retval = PAPI_create_eventset( &EventSet );
if( retval != PAPI_OK ) {
fprintf(stderr, "PAPI_create_eventset failure returned %i [%s].\n", retval, PAPI_strerror(retval));
test_fail( __FILE__,__LINE__,"PAPI_create_eventset failed.", retval );
}
for (i=0; i< eventCount; i++) {
printf( "cuda_device %d is being used\n", device_id[i]);
hipSetDevice(device_id[i]);
retval = PAPI_add_events( EventSet, &events[i], 1);
if( retval != PAPI_OK ) {
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_cleanup_eventset(EventSet); // Empty it.
PAPI_destroy_eventset(&EventSet); // Release memory.
fprintf(stderr, "PAPI_add_events failure returned %i [%s].\n", retval, PAPI_strerror(retval));
test_fail( __FILE__,__LINE__,"PAPI_add_events failed.", retval );
}
retval = PAPI_start( EventSet );
if( retval != PAPI_OK ) {
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_cleanup_eventset(EventSet); // Empty it.
PAPI_destroy_eventset(&EventSet); // Release memory.
fprintf(stderr, "PAPI_startfailure returned %i [%s].\n", retval, PAPI_strerror(retval));
test_fail( __FILE__,__LINE__,"PAPI_start failed.", retval );
}
retval = PAPI_read( EventSet, values+i ); // Get initial value for this event.
if( retval != PAPI_OK ) {
fprintf(stderr, "PAPI_read failure returned %i [%s].\n", retval, PAPI_strerror(retval));
test_fail( __FILE__, __LINE__, "PAPI_read failed.", retval );
}
printf( "%s = %lld (read initial power management limit)\n", EventName[i], values[i]);
long long int initial_power_management_limit = values[i];
printf("On device %d the power_management_limit is going to be reduced by 30\n", device_id[i]);
long long int newPower=initial_power_management_limit-30;
retval = PAPI_write( EventSet, &newPower);
if ( retval!=PAPI_OK ) {
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_stop(EventSet, values); // Must be stopped.
PAPI_cleanup_eventset(EventSet); // Empty it.
PAPI_destroy_eventset(&EventSet); // Release memory.
fprintf(stderr, "PAPI_write failure returned %i, = %s.\n", retval, PAPI_strerror(retval));
test_fail( __FILE__,__LINE__,"Attempted PAPI_write of power_management_limit failed: Possible reasons: Insufficient permissions; Power management unavailable;. Outside min/max limits; failed to run with sudo.", retval );
} else {
printf("Call succeeded to set power_management_limit to %llu milliWatts\n", newPower);
}
retval = PAPI_read(EventSet, values+i);
if( retval != PAPI_OK ) {
fprintf(stderr, "PAPI_read failure returned %i [%s].\n", retval, PAPI_strerror(retval));
test_fail( __FILE__, __LINE__, "PAPI_read failed.", retval );
}
if ( values[i] != newPower) {
fprintf(stderr, "Mismatch: power_management_limit on device %d set to %llu but read as %llu\n", device_id[i], newPower, values[i]);
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_stop(EventSet, values); // Must be stopped.
PAPI_cleanup_eventset(EventSet); // Empty it.
PAPI_destroy_eventset(&EventSet); // Release memory.
test_fail( __FILE__,__LINE__,"Mismatch: power_management_limit on device set to one value but read as a different value", -1 );
} else {
printf("Verified: Power management limit was successfully reduced.\n");
}
retval = PAPI_write( EventSet, &initial_power_management_limit); // Try to write the original value.
if ( retval!=PAPI_OK ) {
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_stop(EventSet, values); // Must be stopped.
PAPI_cleanup_eventset(EventSet); // Empty it.
PAPI_destroy_eventset(&EventSet); // Release memory.
fprintf(stderr, "Restoring value, PAPI_write failure returned %i, = %s.\n", retval, PAPI_strerror(retval));
test_fail( __FILE__,__LINE__,"Attempted PAPI_write to restore power_management_limit failed: Possible reasons: Insufficient permissions; Power management unavailable;. Outside min/max limits; failed to run with sudo.", retval );
}
retval = PAPI_read( EventSet, values+i ); // Now read it back.
if( retval != PAPI_OK ) {
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_stop(EventSet, values); // Must be stopped.
PAPI_cleanup_eventset(EventSet); // Empty it.
PAPI_destroy_eventset(&EventSet); // Release memory.
fprintf(stderr, "PAPI_read failure returned %i [%s].\n", retval, PAPI_strerror(retval));
test_fail( __FILE__, __LINE__, "PAPI_read failed.", retval );
}
if ( values[i] != initial_power_management_limit) {
fprintf(stderr, "Mismatch on reset: power_management_limit on device %d set to %llu but read as %llu\n", device_id[i], initial_power_management_limit, values[i] );
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_stop(EventSet, values); // Must be stopped.
PAPI_cleanup_eventset(EventSet); // Empty it.
PAPI_destroy_eventset(&EventSet); // Release memory.
test_fail( __FILE__,__LINE__,"Mismatch on reset: power_management_limit on device set to one value but read as a different value", -1 );
} else {
printf("Reset to initial power level of %lld was successful.\n", values[i]);
}
PAPI_stop(EventSet, values); // Stop it so we can clear it.
PAPI_cleanup_eventset(EventSet); // Empty it for the next one.
} // end loop for all found events.
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_destroy_eventset(&EventSet); // All done, don't leak memory.
test_pass( __FILE__);
return 0;
} // end main.
| f6f8b95c41d63a4097efd7cd607c2661e91cdc81.cu | /****************************/
/* THIS IS OPEN SOURCE CODE */
/****************************/
/**
* @file nvml_power_limiting_test.cu
* CVS: $Id$
* @author Tony Castaldo ([email protected]) removed extraneous code and fixed a bug on multiple GPU setups. (Sept 2018).
* @author Asim YarKhan ([email protected]) HelloWorld altered to test power capping (October 2017)
* @author Heike Jagode ([email protected])
* Mods: <your name here> <your email address>
*
* @brief
* This file tests the ability to do power control using NVML.
* The papi configure and papi Makefile will take care of the
* compilation of the component tests (if all tests are added to a
* directory named 'tests' in the specific component dir). See
* components/README for more details.
*/
#include <cuda.h>
#include <stdio.h>
#include "papi.h"
#include "papi_test.h"
// Host function
int main( int argc, char** argv )
{
#define NUM_EVENTS 32 /* Max number of GPUs on a node this code can handle. */
int retval, i, j, device_count;
int EventSet = PAPI_NULL;
long long values[NUM_EVENTS];
int device_id[NUM_EVENTS];
char *EventName[NUM_EVENTS];
int events[NUM_EVENTS];
int eventCount = 0;
const PAPI_component_info_t *cmpinfo;
char event_name[PAPI_MAX_STR_LEN];
/* PAPI Initialization */
retval = PAPI_library_init( PAPI_VER_CURRENT );
if( retval != PAPI_VER_CURRENT ) {
fprintf( stderr, "PAPI_library_init failed.\n" );
test_fail(__FILE__, __LINE__, "PAPI_library_init() failed.\n", retval);
}
printf( "PAPI_VERSION : %4d %6d %7d\n",
PAPI_VERSION_MAJOR( PAPI_VERSION ),
PAPI_VERSION_MINOR( PAPI_VERSION ),
PAPI_VERSION_REVISION( PAPI_VERSION ) );
int numcmp = PAPI_num_components();
// Search for the NVML component.
int cid = 0;
for (cid=0; cid<numcmp; cid++) {
cmpinfo = PAPI_get_component_info(cid);
if (cmpinfo == NULL) { // NULL?
fprintf(stderr, "PAPI error: PAPI reports %d components, but PAPI_get_component_info(%d) returns NULL pointer.\n", numcmp, cid);
test_fail( __FILE__, __LINE__,"PAPI_get_component_info failed\n",-1 );
} else {
if ( strstr( cmpinfo->name, "nvml" ) ) break; // If we found it,
}
}
if ( cid==numcmp ) { // If true we looped through all without finding nvml.
fprintf(stderr, "NVML PAPI Component was not found.\n");
test_skip( __FILE__, __LINE__,"Component nvml is not present\n",-1 );
}
printf( "NVML found as Component %d of %d: %s: %d events\n", (1+cmpinfo->CmpIdx), numcmp, cmpinfo->name, cmpinfo->num_native_events );
if (cmpinfo->disabled) { // If disabled,
fprintf(stderr, "NVML PAPI Component is disabled.\n");
test_skip( __FILE__,__LINE__,"Component nvml is disabled", 0 );
}
cudaGetDeviceCount( &device_count );
printf("Found %d cuda devices\n", device_count);
int code = PAPI_NATIVE_MASK;
int ii=0;
int event_modifier = PAPI_ENUM_FIRST;
for ( ii=0; ii<cmpinfo->num_native_events; ii++ ) {
retval = PAPI_enum_cmp_event( &code, event_modifier, cid );
event_modifier = PAPI_ENUM_EVENTS;
if ( retval != PAPI_OK ) test_fail( __FILE__, __LINE__, "PAPI_event_code_to_name", retval );
retval = PAPI_event_code_to_name( code, event_name );
char *ss;
// We need events that END in power_management_limit; and must
// exclude those that end in power_management_limit_min or _max,
// and correspond to an existing cuda device.
ss = strstr(event_name, "power_management_limit"); // get position of this string.
if (ss == NULL) continue; // skip if not present.
if (ss[22] != 0) continue; // skip if there is anything after it.
ss = strstr(event_name, "device_"); // Look for the device id.
if (ss == NULL) continue; // Not a valid name.
int did = atoi(ss+7); // convert it.
if (did >= device_count) continue; // Invalid device count.
EventName[eventCount] = strdup(event_name); // Valid! Remember the name.
device_id[eventCount] = did; // Remember the device id.
printf("Found event '%s' for device %i.\n", event_name, did); // Report what we found.
eventCount++; // Add to the number of events found.
}
if (eventCount == 0) { // If we found nothing,
fprintf(stderr, "No NVML events found. Skipping Test.\n");
test_skip( __FILE__,__LINE__,"Component nvml does not have a power_management_limit event.", 0 );
}
/* convert PAPI native events to PAPI code */
for(i=0; i < eventCount; i++) {
retval = PAPI_event_name_to_code( ( char * )EventName[i], &events[i] );
if( retval != PAPI_OK ) {
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
fprintf(stderr, "PAPI_event_name_to_code failure for event [%s] returned %i [%s].\n", EventName[i], retval, PAPI_strerror(retval));
test_fail( __FILE__,__LINE__,"PAPI_event_name_to_code failed.", retval );
}
}
retval = PAPI_create_eventset( &EventSet );
if( retval != PAPI_OK ) {
fprintf(stderr, "PAPI_create_eventset failure returned %i [%s].\n", retval, PAPI_strerror(retval));
test_fail( __FILE__,__LINE__,"PAPI_create_eventset failed.", retval );
}
for (i=0; i< eventCount; i++) {
printf( "cuda_device %d is being used\n", device_id[i]);
cudaSetDevice(device_id[i]);
retval = PAPI_add_events( EventSet, &events[i], 1);
if( retval != PAPI_OK ) {
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_cleanup_eventset(EventSet); // Empty it.
PAPI_destroy_eventset(&EventSet); // Release memory.
fprintf(stderr, "PAPI_add_events failure returned %i [%s].\n", retval, PAPI_strerror(retval));
test_fail( __FILE__,__LINE__,"PAPI_add_events failed.", retval );
}
retval = PAPI_start( EventSet );
if( retval != PAPI_OK ) {
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_cleanup_eventset(EventSet); // Empty it.
PAPI_destroy_eventset(&EventSet); // Release memory.
fprintf(stderr, "PAPI_startfailure returned %i [%s].\n", retval, PAPI_strerror(retval));
test_fail( __FILE__,__LINE__,"PAPI_start failed.", retval );
}
retval = PAPI_read( EventSet, values+i ); // Get initial value for this event.
if( retval != PAPI_OK ) {
fprintf(stderr, "PAPI_read failure returned %i [%s].\n", retval, PAPI_strerror(retval));
test_fail( __FILE__, __LINE__, "PAPI_read failed.", retval );
}
printf( "%s = %lld (read initial power management limit)\n", EventName[i], values[i]);
long long int initial_power_management_limit = values[i];
printf("On device %d the power_management_limit is going to be reduced by 30\n", device_id[i]);
long long int newPower=initial_power_management_limit-30;
retval = PAPI_write( EventSet, &newPower);
if ( retval!=PAPI_OK ) {
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_stop(EventSet, values); // Must be stopped.
PAPI_cleanup_eventset(EventSet); // Empty it.
PAPI_destroy_eventset(&EventSet); // Release memory.
fprintf(stderr, "PAPI_write failure returned %i, = %s.\n", retval, PAPI_strerror(retval));
test_fail( __FILE__,__LINE__,"Attempted PAPI_write of power_management_limit failed: Possible reasons: Insufficient permissions; Power management unavailable;. Outside min/max limits; failed to run with sudo.", retval );
} else {
printf("Call succeeded to set power_management_limit to %llu milliWatts\n", newPower);
}
retval = PAPI_read(EventSet, values+i);
if( retval != PAPI_OK ) {
fprintf(stderr, "PAPI_read failure returned %i [%s].\n", retval, PAPI_strerror(retval));
test_fail( __FILE__, __LINE__, "PAPI_read failed.", retval );
}
if ( values[i] != newPower) {
fprintf(stderr, "Mismatch: power_management_limit on device %d set to %llu but read as %llu\n", device_id[i], newPower, values[i]);
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_stop(EventSet, values); // Must be stopped.
PAPI_cleanup_eventset(EventSet); // Empty it.
PAPI_destroy_eventset(&EventSet); // Release memory.
test_fail( __FILE__,__LINE__,"Mismatch: power_management_limit on device set to one value but read as a different value", -1 );
} else {
printf("Verified: Power management limit was successfully reduced.\n");
}
retval = PAPI_write( EventSet, &initial_power_management_limit); // Try to write the original value.
if ( retval!=PAPI_OK ) {
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_stop(EventSet, values); // Must be stopped.
PAPI_cleanup_eventset(EventSet); // Empty it.
PAPI_destroy_eventset(&EventSet); // Release memory.
fprintf(stderr, "Restoring value, PAPI_write failure returned %i, = %s.\n", retval, PAPI_strerror(retval));
test_fail( __FILE__,__LINE__,"Attempted PAPI_write to restore power_management_limit failed: Possible reasons: Insufficient permissions; Power management unavailable;. Outside min/max limits; failed to run with sudo.", retval );
}
retval = PAPI_read( EventSet, values+i ); // Now read it back.
if( retval != PAPI_OK ) {
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_stop(EventSet, values); // Must be stopped.
PAPI_cleanup_eventset(EventSet); // Empty it.
PAPI_destroy_eventset(&EventSet); // Release memory.
fprintf(stderr, "PAPI_read failure returned %i [%s].\n", retval, PAPI_strerror(retval));
test_fail( __FILE__, __LINE__, "PAPI_read failed.", retval );
}
if ( values[i] != initial_power_management_limit) {
fprintf(stderr, "Mismatch on reset: power_management_limit on device %d set to %llu but read as %llu\n", device_id[i], initial_power_management_limit, values[i] );
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_stop(EventSet, values); // Must be stopped.
PAPI_cleanup_eventset(EventSet); // Empty it.
PAPI_destroy_eventset(&EventSet); // Release memory.
test_fail( __FILE__,__LINE__,"Mismatch on reset: power_management_limit on device set to one value but read as a different value", -1 );
} else {
printf("Reset to initial power level of %lld was successful.\n", values[i]);
}
PAPI_stop(EventSet, values); // Stop it so we can clear it.
PAPI_cleanup_eventset(EventSet); // Empty it for the next one.
} // end loop for all found events.
for (j=0; j<eventCount; j++) free(EventName[j]); // clean up memory.
PAPI_destroy_eventset(&EventSet); // All done, don't leak memory.
test_pass( __FILE__);
return 0;
} // end main.
|
11b98af57220615c2685961a7fcec376a38d6136.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef RESAMPLE_BILINEAR
#define RESAMPLE_BILINEAR
#if __CUDACC_VER_MAJOR__ >= 9
#include <hip/hip_fp16.h>
#endif
#include "PrGPU/KernelSupport/KernelCore.h" //includes KernelWrapper.h
#include "PrGPU/KernelSupport/KernelMemory.h"
#if GF_DEVICE_TARGET_DEVICE
GF_TEXTURE_GLOBAL(float4, inSrcTexture, GF_DOMAIN_NATURAL, GF_RANGE_NATURAL_CUDA, GF_EDGE_CLAMP, GF_FILTER_LINEAR)
GF_KERNEL_FUNCTION(kConvolve3x3,
((GF_TEXTURE_TYPE(float4))(GF_TEXTURE_NAME(inSrcTexture)))
((GF_PTR(float4))(destImg))
((GF_PTR(float))(kernelBuf)),
((int)(kernelRadius))
((int)(destPitch))
((int)(in16f))
((unsigned int)(outWidth))
((unsigned int)(outHeight)),
((uint2)(outXY)(KERNEL_XY)))
{
float4 dest;
if (outXY.x >= outWidth || outXY.y >= outHeight) return;
float4 color;
color.x = 0;
color.y = 0;
color.z = 0;
color.w = 1.0;
int i = 0;
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
float kernelVal = kernelBuf[i++];
float4 texVal = GF_READTEXTURE(GF_TEXTURE_NAME(inSrcTexture), (outXY.x + x*kernelRadius + 0.5) / outWidth, (outXY.y + y*kernelRadius + 0.5) / outHeight);
color.x += texVal.x*kernelVal;
color.y += texVal.y*kernelVal;
color.z += texVal.z*kernelVal;
}
}
dest = color;
WriteFloat4(dest, destImg, outXY.y * destPitch + outXY.x, !!in16f);
}
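/* kernelBuf is read as 9 consecutive weights in row-major order (y = -1..1 outer,
x = -1..1 inner); the taps are sampled kernelRadius texels apart through the
linear-filtered texture, and the +0.5 offsets address texel centers in the
normalized [0,1] texture coordinate space. */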
#endif
#if __NVCC__
void Convolve3x3_CUDA (
hipTextureObject_t inSrcTexture,
float *destBuf,
float *kernelBuf,
int kernelRadius,
int destPitch,
int is16f,
unsigned int width,
unsigned int height)
{
dim3 blockDim (16, 16, 1);
dim3 gridDim ( (width + blockDim.x - 1)/ blockDim.x, (height + blockDim.y - 1) / blockDim.y, 1 );
hipLaunchKernelGGL(( kConvolve3x3) , dim3(gridDim), dim3(blockDim), 0 , 0, inSrcTexture, (float4*) destBuf, kernelBuf, kernelRadius, destPitch, is16f, width, height );
hipDeviceSynchronize();
}
#endif //GF_DEVICE_TARGET_HOST
#endif //SDK_CROSS_DISSOLVE
| 11b98af57220615c2685961a7fcec376a38d6136.cu |
#ifndef RESAMPLE_BILINEAR
#define RESAMPLE_BILINEAR
#if __CUDACC_VER_MAJOR__ >= 9
#include <cuda_fp16.h>
#endif
#include "PrGPU/KernelSupport/KernelCore.h" //includes KernelWrapper.h
#include "PrGPU/KernelSupport/KernelMemory.h"
#if GF_DEVICE_TARGET_DEVICE
GF_TEXTURE_GLOBAL(float4, inSrcTexture, GF_DOMAIN_NATURAL, GF_RANGE_NATURAL_CUDA, GF_EDGE_CLAMP, GF_FILTER_LINEAR)
GF_KERNEL_FUNCTION(kConvolve3x3,
((GF_TEXTURE_TYPE(float4))(GF_TEXTURE_NAME(inSrcTexture)))
((GF_PTR(float4))(destImg))
((GF_PTR(float))(kernelBuf)),
((int)(kernelRadius))
((int)(destPitch))
((int)(in16f))
((unsigned int)(outWidth))
((unsigned int)(outHeight)),
((uint2)(outXY)(KERNEL_XY)))
{
float4 dest;
if (outXY.x >= outWidth || outXY.y >= outHeight) return;
float4 color;
color.x = 0;
color.y = 0;
color.z = 0;
color.w = 1.0;
int i = 0;
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
float kernelVal = kernelBuf[i++];
float4 texVal = GF_READTEXTURE(GF_TEXTURE_NAME(inSrcTexture), (outXY.x + x*kernelRadius + 0.5) / outWidth, (outXY.y + y*kernelRadius + 0.5) / outHeight);
color.x += texVal.x*kernelVal;
color.y += texVal.y*kernelVal;
color.z += texVal.z*kernelVal;
}
}
dest = color;
WriteFloat4(dest, destImg, outXY.y * destPitch + outXY.x, !!in16f);
}
#endif
#if __NVCC__
void Convolve3x3_CUDA (
cudaTextureObject_t inSrcTexture,
float *destBuf,
float *kernelBuf,
int kernelRadius,
int destPitch,
int is16f,
unsigned int width,
unsigned int height)
{
dim3 blockDim (16, 16, 1);
dim3 gridDim ( (width + blockDim.x - 1)/ blockDim.x, (height + blockDim.y - 1) / blockDim.y, 1 );
kConvolve3x3 <<< gridDim, blockDim, 0 >>> ( inSrcTexture, (float4*) destBuf, kernelBuf, kernelRadius, destPitch, is16f, width, height );
cudaDeviceSynchronize();
}
#endif //GF_DEVICE_TARGET_HOST
#endif //SDK_CROSS_DISSOLVE
|
43764f7c50ca98736079fb8b5a91b71c7e1e8547.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MyCU.h"
void swap(int* v1, int* v2) {
int tmp=(*v1);
(*v1)=(*v2);
(*v2)=tmp;
}
static const char *cudaGetErrorEnum(hipblasStatus_t error)
{
switch (error)
{
case HIPBLAS_STATUS_SUCCESS:
return "HIPBLAS_STATUS_SUCCESS";
case HIPBLAS_STATUS_NOT_INITIALIZED:
return "HIPBLAS_STATUS_NOT_INITIALIZED";
case HIPBLAS_STATUS_ALLOC_FAILED:
return "HIPBLAS_STATUS_ALLOC_FAILED";
case HIPBLAS_STATUS_INVALID_VALUE:
return "HIPBLAS_STATUS_INVALID_VALUE";
case HIPBLAS_STATUS_ARCH_MISMATCH:
return "HIPBLAS_STATUS_ARCH_MISMATCH";
case HIPBLAS_STATUS_MAPPING_ERROR:
return "HIPBLAS_STATUS_MAPPING_ERROR";
case HIPBLAS_STATUS_EXECUTION_FAILED:
return "HIPBLAS_STATUS_EXECUTION_FAILED";
case HIPBLAS_STATUS_INTERNAL_ERROR:
return "HIPBLAS_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
EXPORT bool initCUDA() {
// init CUDA GPU
if (hipSetDevice(0)!=hipSuccess) {
printf("hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
return false;
}
return true;
}
EXPORT bool initCUBLAS(void* cublasH) {
// init CUBLAS
if (hipblasCreate((hipblasHandle_t*)cublasH)!=HIPBLAS_STATUS_SUCCESS) {
printf("CUBLAS initialization error!\n");
return false;
}
return true;
}
EXPORT bool initCURand(void* cuRandH) {
if (hiprandCreateGenerator((hiprandGenerator_t*)cuRandH, HIPRAND_RNG_PSEUDO_DEFAULT)!=HIPRAND_STATUS_SUCCESS) {
//if (hiprandCreateGenerator((hiprandGenerator_t*)cuRandH, HIPRAND_RNG_PSEUDO_DEFAULT)!=HIPRAND_STATUS_SUCCESS) {
printf("CURAND initialization error!\n");
return false;
}
/* Set seed */
if (hiprandSetPseudoRandomGeneratorSeed((*(hiprandGenerator_t*)cuRandH), timeGetTime())!=HIPRAND_STATUS_SUCCESS) return false;
return true;
}
EXPORT bool initCUstreams(void* cuStream[]) {
for (int s=0; s<MAX_STREAMS; s++) {
if (hipStreamCreate((hipStream_t*)cuStream[s])!=hipSuccess) return false;
}
return true;
}
EXPORT bool Malloc_cu(numtype** var, int size) {
return ((hipMalloc(var, size*sizeof(numtype))==hipSuccess));
}
EXPORT bool Free_cu(numtype* var) {
return (hipFree(var)==hipSuccess);
}
//-- CPU<->GPU transfer functions
EXPORT bool h2d_cu(numtype* destAddr, numtype* srcAddr, int size, void* cuStream[]) {
if(cuStream==nullptr) {
return ((hipMemcpy(destAddr, srcAddr, size, hipMemcpyHostToDevice)==hipSuccess));
} else {
int streamSize=size/sizeof(numtype)/MAX_STREAMS;
size_t streamBytes=streamSize*sizeof(numtype);
for (int s=0; s<MAX_STREAMS; s++) {
int offset=s*streamSize;
if (hipMemcpyAsync(&destAddr[offset], &srcAddr[offset], streamBytes, hipMemcpyHostToDevice, (*(hipStream_t*)cuStream[s]))!=hipSuccess) {
printf("s=%d ; CUDA error %d\n", s, hipGetLastError());
return false;
}
}
return true;
}
}
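// Note on the streamed path above (and the matching d2h_cu below): the copy is split
// into MAX_STREAMS equal chunks via integer division, so it assumes size is a multiple
// of MAX_STREAMS * sizeof(numtype); any remainder bytes would not be transferred.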
EXPORT bool d2h_cu(numtype* destAddr, numtype* srcAddr, int size, void* cuStream[]) {
if (cuStream==nullptr) {
return ((hipMemcpy(destAddr, srcAddr, size, hipMemcpyDeviceToHost)==hipSuccess));
} else {
int streamSize=size/sizeof(numtype)/MAX_STREAMS;
size_t streamBytes=streamSize*sizeof(numtype);
for (int s=0; s<MAX_STREAMS; s++) {
int offset=s*streamSize;
if (hipMemcpyAsync(&destAddr[offset], &srcAddr[offset], streamBytes, hipMemcpyDeviceToHost, (*(hipStream_t*)cuStream[s]))!=hipSuccess) {
printf("s=%d ; CUDA error %d\n", s, hipGetLastError());
return false;
}
}
return true;
}
}
__global__ void initGPUData_ker(float *data, int numElements, float value) {
int tid = blockIdx.x * blockDim.x+threadIdx.x;
if (tid < numElements) {
data[tid] = value;
}
}
EXPORT void initGPUData(float *data, int numElements, float value) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = 1024;
gridDim.x = (numElements+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( initGPUData_ker), dim3(gridDim), dim3(blockDim), 0, 0, data, numElements, value);
}
EXPORT bool loadBatchData_cu(numtype* destAddr, numtype* srcAddr, int size, void* cuStream[]) {
int streamSize=size/sizeof(numtype)/MAX_STREAMS;
size_t streamBytes=streamSize*sizeof(numtype);
for (int s=0; s<MAX_STREAMS; s++) {
int offset=s*streamSize;
if (hipMemcpyAsync(&destAddr[offset], &srcAddr[offset], streamBytes, hipMemcpyHostToDevice, (*(hipStream_t*)cuStream[s]))!=hipSuccess) {
printf("s=%d ; CUDA error %d\n", s, hipGetLastError());
return false;
}
}
return true;
}
EXPORT bool dumpArray_cu(int vlen, numtype* v, const char* fname) {
numtype* hw=(numtype*)malloc(vlen*sizeof(numtype));
if (hipMemcpy(hw, v, vlen*sizeof(numtype), hipMemcpyDeviceToHost)!=hipSuccess) return false;
FILE* f=fopen(fname, "w");
if (f==nullptr) return false;
for (int i=0; i<vlen; i++) fprintf(f, "%f\n", hw[i]);
free(hw);
fclose(f);
return true;
}
EXPORT bool loadArray_cu(int vlen, numtype* v, const char* fname){
numtype fh;
numtype* vh=(numtype*)malloc(vlen*sizeof(numtype));
FILE* f=fopen(fname, "r");
if (f==nullptr) return false;
for (int i=0; i<vlen; i++) {
if(fscanf(f, "%f\n", &fh)==0) return false;
vh[i]=fh;
}
if (hipMemcpy(v, vh, vlen*sizeof(numtype), hipMemcpyHostToDevice)!=hipSuccess) return false;
fclose(f);
free(vh);
return true;
}
//-- matrix functions
EXPORT bool cuMtr_cublas(void* cublasH, int my, int mx, numtype* m, numtype* otm) {
float alpha=1;
float beta=0;
if (hipblasSgeam((*(hipblasHandle_t*)cublasH), HIPBLAS_OP_T, HIPBLAS_OP_T, my, mx, &alpha, m, mx, &beta, m, mx, otm, my)!=HIPBLAS_STATUS_SUCCESS) return false;
return true;
}
EXPORT bool MbyM_cu(void* cublasH, int Ay, int Ax, numtype Ascale, bool Atr, numtype* A, int By, int Bx, numtype Bscale, bool Btr, numtype* B, numtype* C) {
float *alpha = &Ascale;
float *beta = &Bscale;
hipblasOperation_t Aop=HIPBLAS_OP_N;
hipblasOperation_t Bop=HIPBLAS_OP_N;
int m=Bx;
int n=Ay;
int k=Ax;
int ldA=Ax;
int ldB=Bx;
int ldC=Bx;
numtype* vA = A;
numtype* vB = B;
if (Atr) {
Aop=HIPBLAS_OP_T;
n=Ax; k=Ay;
}
if (Btr) {
Bop=HIPBLAS_OP_T;
m=By;
ldC=By;
}
if (!Vinit_cu(m*n, C, 0, 0)) return false;
if (hipblasSgemm((*(hipblasHandle_t*)cublasH), Bop, Aop, m, n, k, alpha, vB, ldB, vA, ldA, beta, C, ldC)!=HIPBLAS_STATUS_SUCCESS) return false;
return true;
}
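/* Row-major trick used above: for the default (non-transposed) path the row-major
matrices are handed to column-major cuBLAS in swapped order (B first, A second), so
the library effectively computes C^T = B^T * A^T; stored with leading dimension Bx
that is exactly the row-major product A*B. Also note that because C is zeroed first,
the beta term (Bscale) does not contribute to the result. */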
__global__ void cuSadd(const numtype* s1, const numtype* s2, numtype* ssum) {
ssum[0]=s1[0]+s2[0];
}
__global__ void cuVscale_ker(const int vlen, numtype *v, const numtype s) {
int tid = blockIdx.x * blockDim.x+threadIdx.x;
if (tid < vlen) v[tid] *= s;
}
__global__ void cuVcopy_ker(const int vlen, const numtype *v1, numtype *v2) {
int tid = blockIdx.x * blockDim.x+threadIdx.x;
if (tid < vlen) v2[tid] = v1[tid];
}
__global__ void cuVminusV_ker(const int vlen, const numtype *a, const numtype sa, const numtype *b, const numtype sb, numtype* c) {
int tid = blockIdx.x * blockDim.x+threadIdx.x;
if (tid < vlen) c[tid] = a[tid]*sa-b[tid]*sb;
}
__global__ void cuVplusV_ker(const int vlen, const numtype *a, const numtype sa, const numtype *b, const numtype sb, numtype* c) {
int tid = blockIdx.x * blockDim.x+threadIdx.x;
if (tid < vlen) c[tid] = a[tid]*sa+b[tid]*sb;
}
__global__ void cuVsum_ker(const int vlen, const numtype *v, numtype* osum) {
//@@ Load a segment of the input vector into shared memory
__shared__ float partialSum[2*CUDA_BLOCK_SIZE];
unsigned int t = threadIdx.x, start = 2*blockIdx.x * CUDA_BLOCK_SIZE;
if (start+t < vlen)
partialSum[t] = v[start+t];
else
partialSum[t] = 0;
if (start+CUDA_BLOCK_SIZE+t < vlen)
partialSum[CUDA_BLOCK_SIZE+t] = v[start+CUDA_BLOCK_SIZE+t];
else
partialSum[CUDA_BLOCK_SIZE+t] = 0;
//@@ Traverse the reduction tree
for (unsigned int stride = CUDA_BLOCK_SIZE; stride>=1; stride >>= 1) {
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t+stride];
}
//@@ Write the computed sum of the block to the output vector at the
//@@ correct index
if (t==0)
osum[blockIdx.x] = partialSum[0];
}
__global__ void cuVssum_ker(const int vlen, const numtype *v, numtype* ossum) {
//@@ Load a segment of the input vector into shared memory
__shared__ float partialSum[2*CUDA_BLOCK_SIZE];
unsigned int t = threadIdx.x, start = 2*blockIdx.x * CUDA_BLOCK_SIZE;
if (start+t < vlen)
partialSum[t] = v[start+t]*v[start+t];
else
partialSum[t] = 0;
if (start+CUDA_BLOCK_SIZE+t < vlen)
partialSum[CUDA_BLOCK_SIZE+t] = v[start+CUDA_BLOCK_SIZE+t]*v[start+CUDA_BLOCK_SIZE+t];
else
partialSum[CUDA_BLOCK_SIZE+t] = 0;
//@@ Traverse the reduction tree
for (unsigned int stride = CUDA_BLOCK_SIZE; stride>=1; stride >>= 1) {
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t+stride];
}
//@@ Write the computed sum of the block to the output vector at the
//@@ correct index
if (t==0)
ossum[blockIdx.x] = partialSum[0];
}
__global__ void Vscale(int vlen, numtype* v, numtype scaleM, numtype scaleP) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i<vlen) v[i] = scaleM*v[i]+scaleP;
}
__global__ void Vinit_ker(int vlen, numtype* v, numtype start, numtype inc) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i<vlen) v[i] = start+i*inc;
}
__global__ void VbyV2V_ker(int vlen, numtype* v1, numtype* v2, numtype* ov) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i<vlen) ov[i]=v1[i]*v2[i];
}
//-- scalar functions
EXPORT bool Sadd_cu(numtype* s1, numtype* s2, numtype* ssum) {
hipLaunchKernelGGL(( cuSadd), dim3(1), dim3(1), 0, 0, s1, s2, ssum);
return ((hipGetLastError()==hipSuccess));
}
//-- vector functions;
EXPORT bool getMcol_cu(void* cublasH, int Ay, int Ax, numtype* A, int col, numtype* oCol) {
// column <col> of a row-major (Ay x Ax) matrix: Ay elements starting at A+col with stride Ax
hipblasStatus_t err=hipblasScopy((*((hipblasHandle_t*)cublasH)), Ay, A+col, Ax, oCol, 1);
if (err!=HIPBLAS_STATUS_SUCCESS) {
printf("getMcol_cu() CUBLAS error %d: %s\n", err, cudaGetErrorEnum(err));
return false;
}
return true;
}
EXPORT bool Vscale_cu(int vlen, numtype* v, numtype s){
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( cuVscale_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, v, s);
return((hipGetLastError()==hipSuccess));
}
EXPORT bool Vcopy_cu(int vlen, numtype* v1, numtype* v2) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( cuVcopy_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, v1, v2);
return((hipGetLastError()==hipSuccess));
}
EXPORT bool Vadd_cu(int vlen, numtype* v1, numtype scale1, numtype* v2, numtype scale2, numtype* ov) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( cuVplusV_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, v1, scale1, v2, scale2, ov);
return((hipGetLastError()==hipSuccess));
}
EXPORT bool Vdiff_cu(int vlen, numtype* v1, numtype scale1, numtype* v2, numtype scale2, numtype* ov) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( cuVminusV_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, v1, scale1, v2, scale2, ov);
return((hipGetLastError()==hipSuccess));
}
EXPORT bool Vsum_cu(int vlen, numtype* v, numtype* ovsum, numtype* ss_d) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( cuVsum_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, v, ss_d );
if (hipMemcpy(ovsum, ss_d, sizeof(numtype), hipMemcpyDeviceToHost)!=hipSuccess) return false;
return ((hipGetLastError()==hipSuccess));
}
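// Note: cuVsum_ker leaves one partial sum per block in ss_d, but only ss_d[0] is copied
// back here, so *ovsum covers just the first 2*CUDA_BLOCK_SIZE elements; a second
// reduction pass over the gridDim.x partials would be needed for longer vectors.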
EXPORT bool Vssum_cu(int vlen, numtype* v, numtype* ovssum) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( cuVssum_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, v, ovssum);
return ((hipGetLastError()==hipSuccess));
}
EXPORT bool Vssum_cu_cublas(void* cublasH, int Vlen, numtype* V, numtype* oVssum, numtype* ss_d) {
if (hipblasSnrm2((*(hipblasHandle_t*)cublasH), Vlen, V, 1, oVssum)!=HIPBLAS_STATUS_SUCCESS) return false;
(*oVssum)=(*oVssum)*(*oVssum);
return true;
}
EXPORT bool Vnorm_cu(void* cublasH, int Vlen, numtype* V, numtype* oVnorm, numtype* ss_d) {
// with the default (host) pointer mode, Snrm2 already writes the result into oVnorm
if (hipblasSnrm2((*(hipblasHandle_t*)cublasH), Vlen, V, 1, oVnorm)!=HIPBLAS_STATUS_SUCCESS) return false;
return true;
}
EXPORT bool Vinit_cu(int vlen, numtype* v, numtype start, numtype inc) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( Vinit_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, v, start, inc);
return((hipGetLastError()==hipSuccess));
}
EXPORT bool VbyV2V_cu(int vlen, numtype* v1, numtype* v2, numtype* ov) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( VbyV2V_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, v1, v2, ov);
return((hipGetLastError()==hipSuccess));
}
EXPORT bool VinitRnd_cu(int vlen, numtype* v, numtype rndmin, numtype rndmax, void* cuRandH) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
//-- Generate n floats on device, with values between 0.0 and 1.0, where 0.0 is excluded and 1.0 is included
if(hiprandGenerateUniform((*(hiprandGenerator_t*)cuRandH), v, vlen) !=HIPRAND_STATUS_SUCCESS) return false;
//-- need to scale to rndmin<->rndmax
hipLaunchKernelGGL(( Vscale), dim3(gridDim), dim3(blockDim), 0, 0, vlen, v, (rndmax-rndmin), rndmax-(rndmax-rndmin)*1);
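// The affine map above reduces to v*(rndmax-rndmin) + rndmin, i.e. the (0,1] uniform
// draws are stretched into the (rndmin, rndmax] interval.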
/*/-- !!!!!!!!!!!!! REMOVE !!!!!!!!!!
numtype* hw=(numtype*)malloc(vlen*sizeof(numtype));
if (hipMemcpy(hw, v, vlen*sizeof(numtype), hipMemcpyDeviceToHost)!=hipSuccess) return false;
char* fname = "C:/temp/rndw.txt";
FILE* f=fopen(fname, "w");
for (int i=0; i<vlen; i++) fprintf(f, "%f\n", hw[i]);
free(hw);
fclose(f);
//--
*/
return((hipGetLastError()==hipSuccess));
}
__global__ void cuTanh_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = tanhf(in[i]);
}
__global__ void cudTanh_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = 1-tanhf(in[i])*tanhf(in[i]);
}
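// Unlike the ORIG_ variants below, cuTanh_ker/cudTanh_ker (and the other activation
// kernels in this file) omit the i < vlen guard, so they implicitly assume vlen is a
// multiple of the launch block size; otherwise the trailing threads access out of bounds.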
__global__ void ORIG_cuTanh_ker(int vlen, numtype* in, numtype* out) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i<vlen) out[i] = tanhf(in[i]);
}
__global__ void ORIG_cudTanh_ker(int vlen, numtype* in, numtype* out) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i<vlen) out[i] = 1-tanhf(in[i])*tanhf(in[i]);
}
__global__ void cuExp4_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = 1/(1+exp(-4*in[i]));
}
__global__ void cudExp4_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = 4*exp(4*in[i])/(pow(exp(4*in[i])+1, 2));
}
__global__ void cuRelu_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = ((in[i] > 0) ? in[i] : 0); // ReLU forward: max(0, x)
}
__global__ void cudRelu_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = ((in[i] > 0) ? 1 : 0); // ReLU derivative: 1 for x > 0, else 0
}
__global__ void cuSoftPlus_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = log(1+exp(in[i]));
}
__global__ void cudSoftPlus_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = 1/(1+exp(-in[i]));
}
EXPORT bool Tanh_cu(int vlen, numtype* in, numtype* out) {
/* int blockSize=64; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the // maximum occupancy for a full device
int gridSize; // The actual grid size needed, based on input // size
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)cudTanh_ker, 0, vlen);
// Round up according to array size
gridSize = (vlen+blockSize-1)/blockSize;
cudTanh_ker<<< gridSize, blockSize>>> (vlen, in, out);
*/
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( cuTanh_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, in, out);
return((hipGetLastError()==hipSuccess));
}
EXPORT bool dTanh_cu(int vlen, numtype* in, numtype* out) {
/* int blockSize=64; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the // maximum occupancy for a full device
int gridSize; // The actual grid size needed, based on input // size
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)cudTanh_ker, 0, vlen);
// Round up according to array size
gridSize = (vlen+blockSize-1)/blockSize;
cudTanh_ker<<< gridSize, blockSize>>> (vlen, in, out);
*/
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( cudTanh_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, in, out);
return((hipGetLastError()==hipSuccess));
}
EXPORT bool Exp4_cu(int vlen, numtype* in, numtype* out) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( cuExp4_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, in, out);
return((hipGetLastError()==hipSuccess));
}
EXPORT bool dExp4_cu(int vlen, numtype* in, numtype* out) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( cudExp4_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, in, out);
return((hipGetLastError()==hipSuccess));
}
EXPORT bool Relu_cu(int vlen, numtype* in, numtype* out) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( cuRelu_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, in, out);
return((hipGetLastError()==hipSuccess));
}
EXPORT bool dRelu_cu(int vlen, numtype* in, numtype* out) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( cudRelu_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, in, out);
return((hipGetLastError()==hipSuccess));
}
EXPORT bool SoftPlus_cu(int vlen, numtype* in, numtype* out) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( cuSoftPlus_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, in, out);
return((hipGetLastError()==hipSuccess));
}
EXPORT bool dSoftPlus_cu(int vlen, numtype* in, numtype* out) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
hipLaunchKernelGGL(( cudSoftPlus_ker), dim3(gridDim), dim3(blockDim), 0, 0, vlen, in, out);
return((hipGetLastError()==hipSuccess));
}
| 43764f7c50ca98736079fb8b5a91b71c7e1e8547.cu | #include "MyCU.h"
void swap(int* v1, int* v2) {
int tmp=(*v1);
(*v1)=(*v2);
(*v2)=tmp;
}
static const char *cudaGetErrorEnum(cublasStatus_t error)
{
switch (error)
{
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
EXPORT bool initCUDA() {
// init CUDA GPU
if (cudaSetDevice(0)!=cudaSuccess) {
printf("cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
return false;
}
return true;
}
EXPORT bool initCUBLAS(void* cublasH) {
// init CUBLAS
if (cublasCreate((cublasHandle_t*)cublasH)!=CUBLAS_STATUS_SUCCESS) {
printf("CUBLAS initialization error!\n");
return false;
}
return true;
}
EXPORT bool initCURand(void* cuRandH) {
if (curandCreateGenerator((curandGenerator_t*)cuRandH, CURAND_RNG_PSEUDO_DEFAULT)!=CURAND_STATUS_SUCCESS) {
//if (curandCreateGenerator((curandGenerator_t*)cuRandH, CURAND_RNG_PSEUDO_DEFAULT)!=CURAND_STATUS_SUCCESS) {
printf("CURAND initialization error!\n");
return false;
}
/* Set seed */
if (curandSetPseudoRandomGeneratorSeed((*(curandGenerator_t*)cuRandH), timeGetTime())!=CURAND_STATUS_SUCCESS) return false;
return true;
}
EXPORT bool initCUstreams(void* cuStream[]) {
for (int s=0; s<MAX_STREAMS; s++) {
if (cudaStreamCreate((cudaStream_t*)cuStream[s])!=cudaSuccess) return false;
}
return true;
}
EXPORT bool Malloc_cu(numtype** var, int size) {
return ((cudaMalloc(var, size*sizeof(numtype))==cudaSuccess));
}
EXPORT bool Free_cu(numtype* var) {
return (cudaFree(var)==cudaSuccess);
}
//-- CPU<->GPU transfer functions
EXPORT bool h2d_cu(numtype* destAddr, numtype* srcAddr, int size, void* cuStream[]) {
if(cuStream==nullptr) {
return ((cudaMemcpy(destAddr, srcAddr, size, cudaMemcpyHostToDevice)==cudaSuccess));
} else {
int streamSize=size/sizeof(numtype)/MAX_STREAMS;
size_t streamBytes=streamSize*sizeof(numtype);
for (int s=0; s<MAX_STREAMS; s++) {
int offset=s*streamSize;
if (cudaMemcpyAsync(&destAddr[offset], &srcAddr[offset], streamBytes, cudaMemcpyHostToDevice, (*(cudaStream_t*)cuStream[s]))!=cudaSuccess) {
printf("s=%d ; CUDA error %d\n", s, cudaGetLastError());
return false;
}
}
return true;
}
}
EXPORT bool d2h_cu(numtype* destAddr, numtype* srcAddr, int size, void* cuStream[]) {
if (cuStream==nullptr) {
return ((cudaMemcpy(destAddr, srcAddr, size, cudaMemcpyDeviceToHost)==cudaSuccess));
} else {
int streamSize=size/sizeof(numtype)/MAX_STREAMS;
size_t streamBytes=streamSize*sizeof(numtype);
for (int s=0; s<MAX_STREAMS; s++) {
int offset=s*streamSize;
if (cudaMemcpyAsync(&destAddr[offset], &srcAddr[offset], streamBytes, cudaMemcpyDeviceToHost, (*(cudaStream_t*)cuStream[s]))!=cudaSuccess) {
printf("s=%d ; CUDA error %d\n", s, cudaGetLastError());
return false;
}
}
return true;
}
}
__global__ void initGPUData_ker(float *data, int numElements, float value) {
int tid = blockIdx.x * blockDim.x+threadIdx.x;
if (tid < numElements) {
data[tid] = value;
}
}
EXPORT void initGPUData(float *data, int numElements, float value) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = 1024;
gridDim.x = (numElements+blockDim.x-1)/blockDim.x;
initGPUData_ker<<< gridDim, blockDim>>> (data, numElements, value);
}
EXPORT bool loadBatchData_cu(numtype* destAddr, numtype* srcAddr, int size, void* cuStream[]) {
int streamSize=size/sizeof(numtype)/MAX_STREAMS;
size_t streamBytes=streamSize*sizeof(numtype);
for (int s=0; s<MAX_STREAMS; s++) {
int offset=s*streamSize;
if (cudaMemcpyAsync(&destAddr[offset], &srcAddr[offset], streamBytes, cudaMemcpyHostToDevice, (*(cudaStream_t*)cuStream[s]))!=cudaSuccess) {
printf("s=%d ; CUDA error %d\n", s, cudaGetLastError());
return false;
}
}
return true;
}
EXPORT bool dumpArray_cu(int vlen, numtype* v, const char* fname) {
numtype* hw=(numtype*)malloc(vlen*sizeof(numtype));
if (cudaMemcpy(hw, v, vlen*sizeof(numtype), cudaMemcpyDeviceToHost)!=cudaSuccess) return false;
FILE* f=fopen(fname, "w");
if (f==nullptr) return false;
for (int i=0; i<vlen; i++) fprintf(f, "%f\n", hw[i]);
free(hw);
fclose(f);
return true;
}
EXPORT bool loadArray_cu(int vlen, numtype* v, const char* fname){
numtype fh;
numtype* vh=(numtype*)malloc(vlen*sizeof(numtype));
FILE* f=fopen(fname, "r");
if (f==nullptr) return false;
for (int i=0; i<vlen; i++) {
if(fscanf(f, "%f\n", &fh)==0) return false;
vh[i]=fh;
}
if (cudaMemcpy(v, vh, vlen*sizeof(numtype), cudaMemcpyHostToDevice)!=cudaSuccess) return false;
fclose(f);
free(vh);
return true;
}
//-- matrix functions
EXPORT bool cuMtr_cublas(void* cublasH, int my, int mx, numtype* m, numtype* otm) {
float alpha=1;
float beta=0;
if (cublasSgeam((*(cublasHandle_t*)cublasH), CUBLAS_OP_T, CUBLAS_OP_T, my, mx, &alpha, m, mx, &beta, m, mx, otm, my)!=CUBLAS_STATUS_SUCCESS) return false;
return true;
}
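//-- Row-major C = A*B on top of column-major cuBLAS: the gemm is called with B and A swapped so no explicit transpose of the result is needed; Atr/Btr select transposed operands.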
EXPORT bool MbyM_cu(void* cublasH, int Ay, int Ax, numtype Ascale, bool Atr, numtype* A, int By, int Bx, numtype Bscale, bool Btr, numtype* B, numtype* C) {
float *alpha = &Ascale;
float *beta = &Bscale;
cublasOperation_t Aop=CUBLAS_OP_N;
cublasOperation_t Bop=CUBLAS_OP_N;
int m=Bx;
int n=Ay;
int k=Ax;
int ldA=Ax;
int ldB=Bx;
int ldC=Bx;
numtype* vA = A;
numtype* vB = B;
if (Atr) {
Aop=CUBLAS_OP_T;
n=Ax; k=Ay;
}
if (Btr) {
Bop=CUBLAS_OP_T;
m=By;
ldC=By;
}
if (!Vinit_cu(m*n, C, 0, 0)) return false;
if (cublasSgemm((*(cublasHandle_t*)cublasH), Bop, Aop, m, n, k, alpha, vB, ldB, vA, ldA, beta, C, ldC)!=CUBLAS_STATUS_SUCCESS) return false;
return true;
}
__global__ void cuSadd(const numtype* s1, const numtype* s2, numtype* ssum) {
ssum[0]=s1[0]+s2[0];
}
__global__ void cuVscale_ker(const int vlen, numtype *v, const numtype s) {
int tid = blockIdx.x * blockDim.x+threadIdx.x;
if (tid < vlen) v[tid] *= s;
}
__global__ void cuVcopy_ker(const int vlen, const numtype *v1, numtype *v2) {
int tid = blockIdx.x * blockDim.x+threadIdx.x;
if (tid < vlen) v2[tid] = v1[tid];
}
__global__ void cuVminusV_ker(const int vlen, const numtype *a, const numtype sa, const numtype *b, const numtype sb, numtype* c) {
int tid = blockIdx.x * blockDim.x+threadIdx.x;
if (tid < vlen) c[tid] = a[tid]*sa-b[tid]*sb;
}
__global__ void cuVplusV_ker(const int vlen, const numtype *a, const numtype sa, const numtype *b, const numtype sb, numtype* c) {
int tid = blockIdx.x * blockDim.x+threadIdx.x;
if (tid < vlen) c[tid] = a[tid]*sa+b[tid]*sb;
}
__global__ void cuVsum_ker(const int vlen, const numtype *v, numtype* osum) {
//@@ Load a segment of the input vector into shared memory
__shared__ float partialSum[2*CUDA_BLOCK_SIZE];
unsigned int t = threadIdx.x, start = 2*blockIdx.x * CUDA_BLOCK_SIZE;
if (start+t < vlen)
partialSum[t] = v[start+t];
else
partialSum[t] = 0;
if (start+CUDA_BLOCK_SIZE+t < vlen)
partialSum[CUDA_BLOCK_SIZE+t] = v[start+CUDA_BLOCK_SIZE+t];
else
partialSum[CUDA_BLOCK_SIZE+t] = 0;
//@@ Traverse the reduction tree
for (unsigned int stride = CUDA_BLOCK_SIZE; stride>=1; stride >>= 1) {
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t+stride];
}
//@@ Write the computed sum of the block to the output vector at the
//@@ correct index
if (t==0)
osum[blockIdx.x] = partialSum[0];
}
__global__ void cuVssum_ker(const int vlen, const numtype *v, numtype* ossum) {
//@@ Load a segment of the input vector into shared memory
__shared__ float partialSum[2*CUDA_BLOCK_SIZE];
unsigned int t = threadIdx.x, start = 2*blockIdx.x * CUDA_BLOCK_SIZE;
if (start+t < vlen)
partialSum[t] = v[start+t]*v[start+t];
else
partialSum[t] = 0;
if (start+CUDA_BLOCK_SIZE+t < vlen)
partialSum[CUDA_BLOCK_SIZE+t] = v[start+CUDA_BLOCK_SIZE+t]*v[start+CUDA_BLOCK_SIZE+t];
else
partialSum[CUDA_BLOCK_SIZE+t] = 0;
//@@ Traverse the reduction tree
for (unsigned int stride = CUDA_BLOCK_SIZE; stride>=1; stride >>= 1) {
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t+stride];
}
//@@ Write the computed sum of the block to the output vector at the
//@@ correct index
if (t==0)
ossum[blockIdx.x] = partialSum[0];
}
__global__ void Vscale(int vlen, numtype* v, numtype scaleM, numtype scaleP) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i<vlen) v[i] = scaleM*v[i]+scaleP;
}
__global__ void Vinit_ker(int vlen, numtype* v, numtype start, numtype inc) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i<vlen) v[i] = start+i*inc;
}
__global__ void VbyV2V_ker(int vlen, numtype* v1, numtype* v2, numtype* ov) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i<vlen) ov[i]=v1[i]*v2[i];
}
//-- scalar functions
EXPORT bool Sadd_cu(numtype* s1, numtype* s2, numtype* ssum) {
cuSadd<<< 1, 1>>>(s1, s2, ssum);
return ((cudaGetLastError()==cudaSuccess));
}
//-- vector functions;
EXPORT bool getMcol_cu(void* cublasH, int Ay, int Ax, numtype* A, int col, numtype* oCol) {
cublasStatus_t err=cublasScopy((*((cublasHandle_t*)cublasH)), Ax, A, Ax, oCol, 1);
if (err!=CUBLAS_STATUS_SUCCESS) {
printf("getMcol_cu() CUBLAS error %d: %s\n", err, cudaGetErrorEnum(err));
return false;
}
return true;
}
EXPORT bool Vscale_cu(int vlen, numtype* v, numtype s){
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
cuVscale_ker<<< gridDim, blockDim>>> (vlen, v, s);
return((cudaGetLastError()==cudaSuccess));
}
EXPORT bool Vcopy_cu(int vlen, numtype* v1, numtype* v2) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
cuVcopy_ker<<< gridDim, blockDim>>> (vlen, v1, v2);
return((cudaGetLastError()==cudaSuccess));
}
EXPORT bool Vadd_cu(int vlen, numtype* v1, numtype scale1, numtype* v2, numtype scale2, numtype* ov) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
cuVplusV_ker<<< gridDim, blockDim>>> (vlen, v1, scale1, v2, scale2, ov);
return((cudaGetLastError()==cudaSuccess));
}
EXPORT bool Vdiff_cu(int vlen, numtype* v1, numtype scale1, numtype* v2, numtype scale2, numtype* ov) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
cuVminusV_ker<<< gridDim, blockDim>>> (vlen, v1, scale1, v2, scale2, ov);
return((cudaGetLastError()==cudaSuccess));
}
EXPORT bool Vsum_cu(int vlen, numtype* v, numtype* ovsum, numtype* ss_d) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
cuVsum_ker<<< gridDim, blockDim>>> (vlen, v, ss_d );
if (cudaMemcpy(ovsum, ss_d, sizeof(numtype), cudaMemcpyDeviceToHost)!=cudaSuccess) return false;
return ((cudaGetLastError()==cudaSuccess));
}
EXPORT bool Vssum_cu(int vlen, numtype* v, numtype* ovssum) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
cuVssum_ker<<< gridDim, blockDim>>> (vlen, v, ovssum);
return ((cudaGetLastError()==cudaSuccess));
}
EXPORT bool Vssum_cu_cublas(void* cublasH, int Vlen, numtype* V, numtype* oVssum, numtype* ss_d) {
if (cublasSnrm2((*(cublasHandle_t*)cublasH), Vlen, V, 1, oVssum)!=CUBLAS_STATUS_SUCCESS) return false;
(*oVssum)=(*oVssum)*(*oVssum);
return true;
}
EXPORT bool Vnorm_cu(void* cublasH, int Vlen, numtype* V, numtype* oVnorm, numtype* ss_d) {
if (cublasSnrm2((*(cublasHandle_t*)cublasH), Vlen, V, 1, oVnorm)!=CUBLAS_STATUS_SUCCESS) return false;
if (cudaMemcpy(oVnorm, ss_d, sizeof(numtype), cudaMemcpyDeviceToHost)!=cudaSuccess) return false;
return true;
}
EXPORT bool Vinit_cu(int vlen, numtype* v, numtype start, numtype inc) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
Vinit_ker<<< gridDim, blockDim>>> (vlen, v, start, inc);
return((cudaGetLastError()==cudaSuccess));
}
EXPORT bool VbyV2V_cu(int vlen, numtype* v1, numtype* v2, numtype* ov) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
VbyV2V_ker<<< gridDim, blockDim>>> (vlen, v1, v2, ov);
return((cudaGetLastError()==cudaSuccess));
}
EXPORT bool VinitRnd_cu(int vlen, numtype* v, numtype rndmin, numtype rndmax, void* cuRandH) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
//-- Generate n floats on device, with values between 0.0 and 1.0, where 0.0 is excluded and 1.0 is included
if(curandGenerateUniform((*(curandGenerator_t*)cuRandH), v, vlen) !=CURAND_STATUS_SUCCESS) return false;
//-- need to scale to rndmin<->rndmax
Vscale<<< gridDim, blockDim>>>(vlen, v, (rndmax-rndmin), rndmax-(rndmax-rndmin)*1);
/*/-- !!!!!!!!!!!!! REMOVE !!!!!!!!!!
numtype* hw=(numtype*)malloc(vlen*sizeof(numtype));
if (cudaMemcpy(hw, v, vlen*sizeof(numtype), cudaMemcpyDeviceToHost)!=cudaSuccess) return false;
char* fname = "C:/temp/rndw.txt";
FILE* f=fopen(fname, "w");
for (int i=0; i<vlen; i++) fprintf(f, "%f\n", hw[i]);
free(hw);
fclose(f);
//--
*/
return((cudaGetLastError()==cudaSuccess));
}
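//-- Element-wise activation kernels: one thread per element, no bounds check (vlen is assumed to be a multiple of the launch size); the ORIG_ variants below keep the guarded form.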
__global__ void cuTanh_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = tanhf(in[i]);
}
__global__ void cudTanh_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = 1-tanhf(in[i])*tanhf(in[i]);
}
__global__ void ORIG_cuTanh_ker(int vlen, numtype* in, numtype* out) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i<vlen) out[i] = tanhf(in[i]);
}
__global__ void ORIG_cudTanh_ker(int vlen, numtype* in, numtype* out) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i<vlen) out[i] = 1-tanhf(in[i])*tanhf(in[i]);
}
__global__ void cuExp4_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = 1/(1+exp(-4*in[i]));
}
__global__ void cudExp4_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = 4*exp(4*in[i])/(pow(exp(4*in[i])+1, 2));
}
__global__ void cuRelu_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = ((in[i] > 0) ? in[i] : 0); // ReLU forward: max(0, x)
}
__global__ void cudRelu_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = ((in[i] > 0) ? 1 : 0); // ReLU derivative: 1 for x > 0, else 0
}
__global__ void cuSoftPlus_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = log(1+exp(in[i]));
}
__global__ void cudSoftPlus_ker(int vlen, numtype* in, numtype* out) {
int i = threadIdx.x+blockIdx.x * blockDim.x;
out[i] = 1/(1+exp(-in[i]));
}
EXPORT bool Tanh_cu(int vlen, numtype* in, numtype* out) {
/* int blockSize=64; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the // maximum occupancy for a full device
int gridSize; // The actual grid size needed, based on input // size
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)cudTanh_ker, 0, vlen);
// Round up according to array size
gridSize = (vlen+blockSize-1)/blockSize;
cudTanh_ker<<< gridSize, blockSize>>> (vlen, in, out);
*/
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
cuTanh_ker<<< gridDim, blockDim>>> (vlen, in, out);
return((cudaGetLastError()==cudaSuccess));
}
EXPORT bool dTanh_cu(int vlen, numtype* in, numtype* out) {
/* int blockSize=64; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the // maximum occupancy for a full device
int gridSize; // The actual grid size needed, based on input // size
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)cudTanh_ker, 0, vlen);
// Round up according to array size
gridSize = (vlen+blockSize-1)/blockSize;
cudTanh_ker<<< gridSize, blockSize>>> (vlen, in, out);
*/
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
cudTanh_ker<<< gridDim, blockDim>>> (vlen, in, out);
return((cudaGetLastError()==cudaSuccess));
}
EXPORT bool Exp4_cu(int vlen, numtype* in, numtype* out) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
cuExp4_ker<<< gridDim, blockDim>>> (vlen, in, out);
return((cudaGetLastError()==cudaSuccess));
}
EXPORT bool dExp4_cu(int vlen, numtype* in, numtype* out) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
cudExp4_ker<<< gridDim, blockDim>>> (vlen, in, out);
return((cudaGetLastError()==cudaSuccess));
}
EXPORT bool Relu_cu(int vlen, numtype* in, numtype* out) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
cuRelu_ker<<< gridDim, blockDim>>> (vlen, in, out);
return((cudaGetLastError()==cudaSuccess));
}
EXPORT bool dRelu_cu(int vlen, numtype* in, numtype* out) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
cudRelu_ker<<< gridDim, blockDim>>> (vlen, in, out);
return((cudaGetLastError()==cudaSuccess));
}
EXPORT bool SoftPlus_cu(int vlen, numtype* in, numtype* out) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
cuSoftPlus_ker<<< gridDim, blockDim>>> (vlen, in, out);
return((cudaGetLastError()==cudaSuccess));
}
EXPORT bool dSoftPlus_cu(int vlen, numtype* in, numtype* out) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = CUDA_BLOCK_SIZE;
gridDim.x = (vlen+blockDim.x-1)/blockDim.x;
cudSoftPlus_ker<<< gridDim, blockDim>>> (vlen, in, out);
return((cudaGetLastError()==cudaSuccess));
}
|
4c27bc7be2a2c88d1a6235fdd6bca2ce7e682db9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <stdint.h>
#ifndef TSIZE
#define TSIZE 32
#endif
#ifndef BSIZE
#define BSIZE 32
#endif
#define PSIZE 4096
#define FPSIZE (4096/sizeof(float))
#define DEVICE_STATIC_INTRINSIC_QUALIFIERS static __device__ __forceinline__
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__)
#define PXL_GLOBAL_PTR "l"
#else
#define PXL_GLOBAL_PTR "r"
#endif
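// Inline-PTX helpers: hint the hardware to prefetch the given global address into the L1 or L2 cache.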
DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_l1(const void* const ptr)
{
asm("prefetch.global.L1 [%0];" : : PXL_GLOBAL_PTR(ptr));
}
DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_uniform(const void* const ptr)
{
asm("prefetchu.L1 [%0];" : : PXL_GLOBAL_PTR(ptr));
}
DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_l2(const void* const ptr)
{
asm("prefetch.global.L2 [%0];" : : PXL_GLOBAL_PTR(ptr));
}
static __device__ __inline__ uint64_t __nano(){
uint64_t mclk;
asm volatile("mov.u64 %0, %%globaltimer;" : "=l"(mclk));
return mclk ;
}
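// Each thread adds a[i]+b[i] at three indices spaced FPSIZE*TSIZE apart (one float per 4 KiB page), issuing an L2 prefetch hint before touching each operand.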
__global__ void foo(float* a, float* b, float* c)
{
size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
size_t tid1 = tid * FPSIZE;
size_t tid2 = tid1 + (FPSIZE * TSIZE);
size_t tid3 = tid2 + (FPSIZE * TSIZE);
if (tid < TSIZE)
{
__prefetch_global_l2(&a[tid1]);
__prefetch_global_l2(&b[tid1]);
__prefetch_global_l2(&c[tid1]);
c[tid1] = a[tid1] + b[tid1];
__prefetch_global_l2(&a[tid2]);
__prefetch_global_l2(&b[tid2]);
__prefetch_global_l2(&c[tid2]);
c[tid2] = a[tid2] + b[tid2];
__prefetch_global_l2(&a[tid3]);
__prefetch_global_l2(&b[tid3]);
__prefetch_global_l2(&c[tid3]);
c[tid3] = a[tid3] + b[tid3];
}
}
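// Round a byte count up to the next 2 MiB boundary (PSIZE * 512 bytes = 2 MiB).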
size_t pad_2MB(size_t val)
{
size_t ret = val / (PSIZE * 512);
size_t diff = val % (PSIZE * 512);
if (diff)
{
ret += 1;
}
return ret * (PSIZE * 512);
}
int main(void)
{
float* a;
float* b;
float* c;
size_t array_size = pad_2MB(3 * 2 * sizeof(float) * TSIZE * FPSIZE);
assert(!hipMallocManaged(&a, array_size));
assert(!hipMallocManaged(&b, array_size));
assert(!hipMallocManaged(&c, array_size));
printf("array_size: %lu\n", array_size);
//assert(!hipMallocManaged(&a, sizeof(float) * TSIZE * FPSIZE));
//assert(!hipMallocManaged(&b, sizeof(float) * TSIZE * FPSIZE));
//assert(!hipMallocManaged(&c, sizeof(float) * TSIZE * FPSIZE));
for (size_t i = 0; i < TSIZE * FPSIZE; i++)
{
a[i] = i;
b[i] = i;
c[i] = i;
}
hipLaunchKernelGGL(( foo), dim3(TSIZE/BSIZE), dim3(BSIZE), 0, 0, a, b, c);
hipDeviceSynchronize();
return 0;
}
| 4c27bc7be2a2c88d1a6235fdd6bca2ce7e682db9.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <assert.h>
#include <stdint.h>
#ifndef TSIZE
#define TSIZE 32
#endif
#ifndef BSIZE
#define BSIZE 32
#endif
#define PSIZE 4096
#define FPSIZE (4096/sizeof(float))
#define DEVICE_STATIC_INTRINSIC_QUALIFIERS static __device__ __forceinline__
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__)
#define PXL_GLOBAL_PTR "l"
#else
#define PXL_GLOBAL_PTR "r"
#endif
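// Inline-PTX helpers: hint the hardware to prefetch the given global address into the L1 or L2 cache.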
DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_l1(const void* const ptr)
{
asm("prefetch.global.L1 [%0];" : : PXL_GLOBAL_PTR(ptr));
}
DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_uniform(const void* const ptr)
{
asm("prefetchu.L1 [%0];" : : PXL_GLOBAL_PTR(ptr));
}
DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_l2(const void* const ptr)
{
asm("prefetch.global.L2 [%0];" : : PXL_GLOBAL_PTR(ptr));
}
static __device__ __inline__ uint64_t __nano(){
uint64_t mclk;
asm volatile("mov.u64 %0, %%globaltimer;" : "=l"(mclk));
return mclk ;
}
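// Each thread adds a[i]+b[i] at three indices spaced FPSIZE*TSIZE apart (one float per 4 KiB page), issuing an L2 prefetch hint before touching each operand.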
__global__ void foo(float* a, float* b, float* c)
{
size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
size_t tid1 = tid * FPSIZE;
size_t tid2 = tid1 + (FPSIZE * TSIZE);
size_t tid3 = tid2 + (FPSIZE * TSIZE);
if (tid < TSIZE)
{
__prefetch_global_l2(&a[tid1]);
__prefetch_global_l2(&b[tid1]);
__prefetch_global_l2(&c[tid1]);
c[tid1] = a[tid1] + b[tid1];
__prefetch_global_l2(&a[tid2]);
__prefetch_global_l2(&b[tid2]);
__prefetch_global_l2(&c[tid2]);
c[tid2] = a[tid2] + b[tid2];
__prefetch_global_l2(&a[tid3]);
__prefetch_global_l2(&b[tid3]);
__prefetch_global_l2(&c[tid3]);
c[tid3] = a[tid3] + b[tid3];
}
}
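// Round a byte count up to the next 2 MiB boundary (PSIZE * 512 bytes = 2 MiB).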
size_t pad_2MB(size_t val)
{
size_t ret = val / (PSIZE * 512);
size_t diff = val % (PSIZE * 512);
if (diff)
{
ret += 1;
}
return ret * (PSIZE * 512);
}
int main(void)
{
float* a;
float* b;
float* c;
size_t array_size = pad_2MB(3 * 2 * sizeof(float) * TSIZE * FPSIZE);
assert(!cudaMallocManaged(&a, array_size));
assert(!cudaMallocManaged(&b, array_size));
assert(!cudaMallocManaged(&c, array_size));
printf("array_size: %lu\n", array_size);
//assert(!cudaMallocManaged(&a, sizeof(float) * TSIZE * FPSIZE));
//assert(!cudaMallocManaged(&b, sizeof(float) * TSIZE * FPSIZE));
//assert(!cudaMallocManaged(&c, sizeof(float) * TSIZE * FPSIZE));
for (size_t i = 0; i < TSIZE * FPSIZE; i++)
{
a[i] = i;
b[i] = i;
c[i] = i;
}
foo<<<TSIZE/BSIZE, BSIZE>>>(a, b, c);
cudaDeviceSynchronize();
return 0;
}
|
fc3f3b1ae1351b7ce6336f6ca9ae0e7a3b58bc7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
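// Verifies DEV_GET_SUB_SCORE_LOCAL: equal bases must score _cudaMatchScore, mismatches -_cudaMismatchScore, and comparisons against 'N' follow the N_PENALTY configuration.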
__global__ void test_DEV_GET_SUB_SCORE_LOCAL(bool *d_good){
*d_good = true;
const uint32_t bases[4] = {'A' & 0xF, 'C' & 0xF, 'G' & 0xF, 'T' & 0xF};
for(int i=0;i<4;i++)
for(int j=0;j<4;j++){
const auto score = DEV_GET_SUB_SCORE_LOCAL(bases[i], bases[j]);
if(i==j){
*d_good &= score==_cudaMatchScore;
} else {
*d_good &= score==-_cudaMismatchScore;
}
}
for(int i=0;i<4;i++){
const auto score1 = DEV_GET_SUB_SCORE_LOCAL(bases[i], 'N' & 0xF);
const auto score2 = DEV_GET_SUB_SCORE_LOCAL('N' & 0xF, bases[i]);
#ifdef N_PENALTY
*d_good &= score1==-N_PENALTY;
*d_good &= score2==-N_PENALTY;
#else
*d_good &= score1==0;
*d_good &= score2==0;
#endif
}
}
// __device__ void compute_local_cell(
// const uint32_t gbase,
// const uint32_t rbase,
// const int32_t p,
// short &e,
// int32_t &h,
// int32_t &f
// ){
// __global__ void test_compute_local_cell(){
// } | fc3f3b1ae1351b7ce6336f6ca9ae0e7a3b58bc7b.cu | #pragma once
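// Verifies DEV_GET_SUB_SCORE_LOCAL: equal bases must score _cudaMatchScore, mismatches -_cudaMismatchScore, and comparisons against 'N' follow the N_PENALTY configuration.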
__global__ void test_DEV_GET_SUB_SCORE_LOCAL(bool *d_good){
*d_good = true;
const uint32_t bases[4] = {'A' & 0xF, 'C' & 0xF, 'G' & 0xF, 'T' & 0xF};
for(int i=0;i<4;i++)
for(int j=0;j<4;j++){
const auto score = DEV_GET_SUB_SCORE_LOCAL(bases[i], bases[j]);
if(i==j){
*d_good &= score==_cudaMatchScore;
} else {
*d_good &= score==-_cudaMismatchScore;
}
}
for(int i=0;i<4;i++){
const auto score1 = DEV_GET_SUB_SCORE_LOCAL(bases[i], 'N' & 0xF);
const auto score2 = DEV_GET_SUB_SCORE_LOCAL('N' & 0xF, bases[i]);
#ifdef N_PENALTY
*d_good &= score1==-N_PENALTY;
*d_good &= score2==-N_PENALTY;
#else
*d_good &= score1==0;
*d_good &= score2==0;
#endif
}
}
// __device__ void compute_local_cell(
// const uint32_t gbase,
// const uint32_t rbase,
// const int32_t p,
// short &e,
// int32_t &h,
// int32_t &f
// ){
// __global__ void test_compute_local_cell(){
// } |
171aa633ec256105299e10354cd312141225d92b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void checkIndex(void)
{
printf("threadIdx: (%d, %d, %d) blockIdx: (%d, %d, %d) blockDim: (%d, %d, %d)"
"gridDim: (%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
int main(int argc, char **argv)
{
int nElem = 6;
dim3 block (3);
dim3 grid ((nElem + block.x - 1) / block.x);
printf("grid.x %d grid.y %d gird.z %d\n", grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
hipLaunchKernelGGL(( checkIndex) , dim3(grid), dim3(block), 0, 0, );
hipDeviceReset();
return(0);
} | 171aa633ec256105299e10354cd312141225d92b.cu | #include <cuda_runtime.h>
#include <stdio.h>
__global__ void checkIndex(void)
{
printf("threadIdx: (%d, %d, %d) blockIdx: (%d, %d, %d) blockDim: (%d, %d, %d)"
"gridDim: (%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
int main(int argc, char **argv)
{
int nElem = 6;
dim3 block (3);
dim3 grid ((nElem + block.x - 1) / block.x);
printf("grid.x %d grid.y %d gird.z %d\n", grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
checkIndex <<<grid, block>>> ();
cudaDeviceReset();
return(0);
} |
18dd0b1059f0ce874df16bc9465444707f755552.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "jacobi_iteration.h"
/* FIXME: Write the device kernels to solve the Jacobi iterations */
/* Use compare and swap to acquire mutex */
__device__ void lock(int *mutex)
{
while (atomicCAS(mutex, 0, 1) != 0);
return;
}
/* Use atomic exchange operation to release mutex */
__device__ void unlock(int *mutex)
{
atomicExch(mutex, 0);
return;
}
__global__ void jacobi_iteration_kernel_naive(float *A, float *naive, float *B, float *new_x, double *ssd, int *mutex)
{
__shared__ double ssd_array[THREAD_BLOCK_SIZE];
/* Find our place in thread block/grid. */
unsigned int threadID = threadIdx.x;
unsigned int dataID = blockIdx.x * blockDim.x + threadIdx.x;
/* Reset ssd to 0 */
if (dataID == 0)
*ssd = 0.0;
/* Perform jacobi */
double sum = -A[dataID * MATRIX_SIZE + dataID] * naive[dataID];
for (int j = 0; j < MATRIX_SIZE; j++) {
sum += A[dataID * MATRIX_SIZE + j] * naive[j];
}
new_x[dataID] = (B[dataID] - sum)/A[dataID * MATRIX_SIZE + dataID];
/* Copy data to shared memory from global memory. */
if (dataID < MATRIX_SIZE)
ssd_array[threadID] = (new_x[dataID] - naive[dataID]) * (new_x[dataID] - naive[dataID]);
else
ssd_array[threadID] = 0.0;
__syncthreads();
/* Parallel reduction */
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride = stride >> 1) {
if(threadID < stride)
ssd_array[threadID] += ssd_array[threadID + stride];
__syncthreads();
}
/* Store result to global ssd. */
if (threadID == 0) {
lock(mutex);
*ssd += ssd_array[0];
unlock(mutex);
}
return;
}
__global__ void jacobi_iteration_kernel_optimized(float *A, float *naive, float *B, float *new_x, double *ssd, int *mutex)
{
__shared__ double ssd_array[THREAD_BLOCK_SIZE];
/* Find our place in thread block/grid. */
unsigned int threadID = threadIdx.x;
unsigned int dataID = blockIdx.x * blockDim.x + threadIdx.x;
/* Reset ssd to 0 */
if (dataID == 0)
*ssd = 0.0;
/* Perform jacobi */
double sum = -A[dataID * MATRIX_SIZE + dataID] * naive[dataID];
for (int j = 0; j < MATRIX_SIZE; j++) {
sum += A[dataID + MATRIX_SIZE * j] * naive[j];
}
new_x[dataID] = (B[dataID] - sum)/A[dataID * MATRIX_SIZE + dataID];
/* Copy data to shared memory from global memory. */
if (dataID < MATRIX_SIZE)
ssd_array[threadID] = (new_x[dataID] - naive[dataID]) * (new_x[dataID] - naive[dataID]);
else
ssd_array[threadID] = 0.0;
__syncthreads();
/* Parallel reduction */
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride = stride >> 1) {
if(threadID < stride)
ssd_array[threadID] += ssd_array[threadID + stride];
__syncthreads();
}
/* Store result to global ssd. */
if (threadID == 0) {
lock(mutex);
*ssd += ssd_array[0];
unlock(mutex);
}
return;
}
| 18dd0b1059f0ce874df16bc9465444707f755552.cu | #include "jacobi_iteration.h"
/* FIXME: Write the device kernels to solve the Jacobi iterations */
/* Use compare and swap to acquire mutex */
__device__ void lock(int *mutex)
{
while (atomicCAS(mutex, 0, 1) != 0);
return;
}
/* Use atomic exchange operation to release mutex */
__device__ void unlock(int *mutex)
{
atomicExch(mutex, 0);
return;
}
__global__ void jacobi_iteration_kernel_naive(float *A, float *naive, float *B, float *new_x, double *ssd, int *mutex)
{
__shared__ double ssd_array[THREAD_BLOCK_SIZE];
/* Find our place in thread block/grid. */
unsigned int threadID = threadIdx.x;
unsigned int dataID = blockIdx.x * blockDim.x + threadIdx.x;
/* Reset ssd to 0 */
if (dataID == 0)
*ssd = 0.0;
/* Perform jacobi */
double sum = -A[dataID * MATRIX_SIZE + dataID] * naive[dataID];
for (int j = 0; j < MATRIX_SIZE; j++) {
sum += A[dataID * MATRIX_SIZE + j] * naive[j];
}
new_x[dataID] = (B[dataID] - sum)/A[dataID * MATRIX_SIZE + dataID];
/* Copy data to shared memory from global memory. */
if (dataID < MATRIX_SIZE)
ssd_array[threadID] = (new_x[dataID] - naive[dataID]) * (new_x[dataID] - naive[dataID]);
else
ssd_array[threadID] = 0.0;
__syncthreads();
/* Parallel reduction */
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride = stride >> 1) {
if(threadID < stride)
ssd_array[threadID] += ssd_array[threadID + stride];
__syncthreads();
}
/* Store result to global ssd. */
if (threadID == 0) {
lock(mutex);
*ssd += ssd_array[0];
unlock(mutex);
}
return;
}
__global__ void jacobi_iteration_kernel_optimized(float *A, float *naive, float *B, float *new_x, double *ssd, int *mutex)
{
__shared__ double ssd_array[THREAD_BLOCK_SIZE];
/* Find our place in thread block/grid. */
unsigned int threadID = threadIdx.x;
unsigned int dataID = blockIdx.x * blockDim.x + threadIdx.x;
/* Reset ssd to 0 */
if (dataID == 0)
*ssd = 0.0;
/* Perform jacobi */
double sum = -A[dataID * MATRIX_SIZE + dataID] * naive[dataID];
for (int j = 0; j < MATRIX_SIZE; j++) {
sum += A[dataID + MATRIX_SIZE * j] * naive[j];
}
new_x[dataID] = (B[dataID] - sum)/A[dataID * MATRIX_SIZE + dataID];
/* Copy data to shared memory from global memory. */
if (dataID < MATRIX_SIZE)
ssd_array[threadID] = (new_x[dataID] - naive[dataID]) * (new_x[dataID] - naive[dataID]);
else
ssd_array[threadID] = 0.0;
__syncthreads();
/* Parallel reduction */
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride = stride >> 1) {
if(threadID < stride)
ssd_array[threadID] += ssd_array[threadID + stride];
__syncthreads();
}
/* Store result to global ssd. */
if (threadID == 0) {
lock(mutex);
*ssd += ssd_array[0];
unlock(mutex);
}
return;
}
|
f43af5808cddd028413de131643a174fa285d2f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <float.h>
#include <THH/THHAtomics.cuh>
#include "cuda_helpers.h"
#include "roi_pool_kernel.h"
namespace vision {
namespace ops {
namespace {
template <typename T>
__global__ void roi_pool_forward_kernel_impl(
int nthreads,
const T* input,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
const T* rois,
T* output,
int* argmax_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = round(offset_rois[1] * spatial_scale);
int roi_start_h = round(offset_rois[2] * spatial_scale);
int roi_end_w = round(offset_rois[3] * spatial_scale);
int roi_end_h = round(offset_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
const T* offset_input =
input + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * width + w;
if (offset_input[input_index] > maxval) {
maxval = offset_input[input_index];
maxidx = input_index;
}
}
}
output[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename T>
__global__ void roi_pool_backward_kernel_impl(
int nthreads,
const T* grad_output,
const int* argmax_data,
int num_rois,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
T* grad_input,
const T* rois,
int n_stride,
int c_stride,
int h_stride,
int w_stride) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
T* grad_input_offset =
grad_input + ((roi_batch_ind * channels + c) * height * width);
int output_offset = n * n_stride + c * c_stride;
const int* argmax_data_offset =
argmax_data + (n * channels + c) * pooled_height * pooled_width;
int argmax = argmax_data_offset[ph * pooled_width + pw];
if (argmax != -1) {
atomicAdd(
grad_input_offset + argmax,
static_cast<T>(
grad_output[output_offset + ph * h_stride + pw * w_stride]));
}
}
}
} // namespace
std::tuple<at::Tensor, at::Tensor> roi_pool_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width) {
TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(
rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "roi_pool_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
at::Tensor output = at::zeros(
{num_rois, channels, pooled_height, pooled_width}, input.options());
at::Tensor argmax = at::zeros(
{num_rois, channels, pooled_height, pooled_width},
input.options().dtype(at::kInt));
auto output_size = num_rois * pooled_height * pooled_width * channels;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(
ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
if (output.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(output, argmax);
}
auto input_ = input.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "roi_pool_forward_cuda", [&] {
hipLaunchKernelGGL(( roi_pool_forward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input_.data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois_.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
argmax.data_ptr<int>());
});
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(output, argmax);
}
at::Tensor roi_pool_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& argmax,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t batch_size,
int64_t channels,
int64_t height,
int64_t width) {
// Check if input tensors are CUDA tensors
TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(argmax.is_cuda(), "argmax must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
argmax_t{argmax, "argmax", 3};
at::CheckedFrom c = "roi_pool_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t, argmax_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
auto num_rois = rois.size(0);
at::Tensor grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(
ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
int n_stride = grad.stride(0);
int c_stride = grad.stride(1);
int h_stride = grad.stride(2);
int w_stride = grad.stride(3);
auto argmax_ = argmax.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "roi_pool_backward_cuda", [&] {
hipLaunchKernelGGL(( roi_pool_backward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.data_ptr<scalar_t>(),
argmax_.data_ptr<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
grad_input.data_ptr<scalar_t>(),
rois_.data_ptr<scalar_t>(),
n_stride,
c_stride,
h_stride,
w_stride);
});
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
} // namespace ops
} // namespace vision
| f43af5808cddd028413de131643a174fa285d2f0.cu | #include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <float.h>
#include <THC/THCAtomics.cuh>
#include "cuda_helpers.h"
#include "roi_pool_kernel.h"
namespace vision {
namespace ops {
namespace {
template <typename T>
__global__ void roi_pool_forward_kernel_impl(
int nthreads,
const T* input,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
const T* rois,
T* output,
int* argmax_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = round(offset_rois[1] * spatial_scale);
int roi_start_h = round(offset_rois[2] * spatial_scale);
int roi_end_w = round(offset_rois[3] * spatial_scale);
int roi_end_h = round(offset_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
const T* offset_input =
input + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * width + w;
if (offset_input[input_index] > maxval) {
maxval = offset_input[input_index];
maxidx = input_index;
}
}
}
output[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename T>
__global__ void roi_pool_backward_kernel_impl(
int nthreads,
const T* grad_output,
const int* argmax_data,
int num_rois,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
T* grad_input,
const T* rois,
int n_stride,
int c_stride,
int h_stride,
int w_stride) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
T* grad_input_offset =
grad_input + ((roi_batch_ind * channels + c) * height * width);
int output_offset = n * n_stride + c * c_stride;
const int* argmax_data_offset =
argmax_data + (n * channels + c) * pooled_height * pooled_width;
int argmax = argmax_data_offset[ph * pooled_width + pw];
if (argmax != -1) {
atomicAdd(
grad_input_offset + argmax,
static_cast<T>(
grad_output[output_offset + ph * h_stride + pw * w_stride]));
}
}
}
} // namespace
std::tuple<at::Tensor, at::Tensor> roi_pool_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width) {
TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(
rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "roi_pool_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::cuda::CUDAGuard device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
at::Tensor output = at::zeros(
{num_rois, channels, pooled_height, pooled_width}, input.options());
at::Tensor argmax = at::zeros(
{num_rois, channels, pooled_height, pooled_width},
input.options().dtype(at::kInt));
auto output_size = num_rois * pooled_height * pooled_width * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(
ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
if (output.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, argmax);
}
auto input_ = input.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "roi_pool_forward_cuda", [&] {
roi_pool_forward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input_.data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois_.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
argmax.data_ptr<int>());
});
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, argmax);
}
at::Tensor roi_pool_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& argmax,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t batch_size,
int64_t channels,
int64_t height,
int64_t width) {
// Check if input tensors are CUDA tensors
TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(argmax.is_cuda(), "argmax must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
argmax_t{argmax, "argmax", 3};
at::CheckedFrom c = "roi_pool_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t, argmax_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::cuda::CUDAGuard device_guard(grad.device());
auto num_rois = rois.size(0);
at::Tensor grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(
ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
int n_stride = grad.stride(0);
int c_stride = grad.stride(1);
int h_stride = grad.stride(2);
int w_stride = grad.stride(3);
auto argmax_ = argmax.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "roi_pool_backward_cuda", [&] {
roi_pool_backward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.data_ptr<scalar_t>(),
argmax_.data_ptr<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
grad_input.data_ptr<scalar_t>(),
rois_.data_ptr<scalar_t>(),
n_stride,
c_stride,
h_stride,
w_stride);
});
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
} // namespace ops
} // namespace vision
|
a214eafc3d2d364d612b233fabd134b101a91ab9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
#define BASETYPE float
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
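// One thread per 4x4 matrix: the 16 elements are staged in shared memory and the determinant is computed by cofactor expansion along the first row, then written to detM.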
__global__ void matDet(BASETYPE *d_matA, BASETYPE *detM, int desp){
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ BASETYPE datos[];
BASETYPE *s_mat = &datos[0];
BASETYPE *s_detAux = &datos[desp];
int offset = (threadIdx.x)*16;
unsigned int i;
for(i = 0; i < 16; i++){
s_mat[(threadIdx.x) * 16 + i]=d_matA[global_id * 16 + i];
}
__syncthreads();
for(i = 0; i < 4; i++){
s_detAux[(threadIdx.x) * 4+i]=0;
}
__syncthreads();
// printf("globalId:%d|%d|%d|%d|%d\n",global_id,(threadIdx.x)*4,(threadIdx.x)*4+1,(threadIdx.x)*4+2,(threadIdx.x)*4+3);
s_detAux[(threadIdx.x)*4] += s_mat[offset] * ( (s_mat[offset+5]*s_mat[offset+10]*s_mat[offset+15])+(s_mat[offset+6]*s_mat[offset+11]*s_mat[offset+13])+(s_mat[offset+7]*s_mat[offset+9]*s_mat[offset+14]) + (-1*(s_mat[offset+7]*s_mat[offset+10]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+11]*s_mat[offset+14])) + (-1*(s_mat[offset+6]*s_mat[offset+9]*s_mat[offset+15])) );
s_detAux[(threadIdx.x)*4+1] += (-1*s_mat[offset+1]) * ( (s_mat[offset+4]*s_mat[offset+10]*s_mat[offset+15])+(s_mat[offset+6]*s_mat[offset+11]*s_mat[offset+12])+(s_mat[offset+7]*s_mat[offset+8]*s_mat[offset+14]) + (-1*(s_mat[offset+7]*s_mat[offset+10]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+11]*s_mat[offset+14])) + (-1*(s_mat[offset+6]*s_mat[offset+8]*s_mat[offset+15])) );
s_detAux[(threadIdx.x)*4+2] += s_mat[offset+2] * ( (s_mat[offset+4]*s_mat[offset+9]*s_mat[offset+15])+(s_mat[offset+5]*s_mat[offset+11]*s_mat[offset+12])+(s_mat[offset+7]*s_mat[offset+8]*s_mat[offset+13]) + (-1*(s_mat[offset+7]*s_mat[offset+9]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+11]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+8]*s_mat[offset+15])) );
s_detAux[(threadIdx.x)*4+3] += (-1*s_mat[offset+3]) * ( (s_mat[offset+4]*s_mat[offset+9]*s_mat[offset+14])+(s_mat[offset+5]*s_mat[offset+10]*s_mat[offset+12])+(s_mat[offset+6]*s_mat[offset+8]*s_mat[offset+13]) + (-1*(s_mat[offset+6]*s_mat[offset+9]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+10]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+8]*s_mat[offset+14])) );
detM[blockIdx.x * blockDim.x + (threadIdx.x)] = s_detAux[(threadIdx.x)*4] + s_detAux[(threadIdx.x)*4+1] + s_detAux[(threadIdx.x)*4+2] + s_detAux[(threadIdx.x)*4+3];
__syncthreads();
}
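// Element-wise block reduction of 4x4 matrices: each thread stages its 16 values in shared memory, a tree reduction sums them, and thread 0 writes one partial 4x4 sum per block back into d_matA.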
__global__ void vecMult(BASETYPE *d_matA,unsigned long n){
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
BASETYPE r_matA1,r_matA2;
extern __shared__ BASETYPE s_mat[];
unsigned int j;
r_matA1=d_matA[global_id * 16];
r_matA2=d_matA[global_id * 16 + 1];
s_mat[threadIdx.x * 16]=r_matA1;
s_mat[threadIdx.x * 16 + 1]=r_matA2;
r_matA1=d_matA[global_id * 16 + 2];
r_matA2=d_matA[global_id * 16 + 3];
s_mat[threadIdx.x * 16 + 2]=r_matA1;
s_mat[threadIdx.x * 16 + 3]=r_matA2;
r_matA1=d_matA[global_id * 16 + 4];
r_matA2=d_matA[global_id * 16 + 5];
s_mat[threadIdx.x * 16 + 4]=r_matA1;
s_mat[threadIdx.x * 16 + 5]=r_matA2;
r_matA1=d_matA[global_id * 16 + 6];
r_matA2=d_matA[global_id * 16 + 7];
s_mat[threadIdx.x * 16 + 6]=r_matA1;
s_mat[threadIdx.x * 16 + 7]=r_matA2;
r_matA1=d_matA[global_id * 16 + 8];
r_matA2=d_matA[global_id * 16 + 9];
s_mat[threadIdx.x * 16 + 8]=r_matA1;
s_mat[threadIdx.x * 16 + 9]=r_matA2;
r_matA1=d_matA[global_id * 16 + 10];
r_matA2=d_matA[global_id * 16 + 11];
s_mat[threadIdx.x * 16 + 10]=r_matA1;
s_mat[threadIdx.x * 16 + 11]=r_matA2;
r_matA1=d_matA[global_id * 16 + 12];
r_matA2=d_matA[global_id * 16 + 13];
s_mat[threadIdx.x * 16 + 12]=r_matA1;
s_mat[threadIdx.x * 16 + 13]=r_matA2;
r_matA1=d_matA[global_id * 16 + 14];
r_matA2=d_matA[global_id * 16 + 15];
s_mat[threadIdx.x * 16 + 14]=r_matA1;
s_mat[threadIdx.x * 16 + 15]=r_matA2;
__syncthreads();
for( j = 1; j < blockDim.x; j *= 2 ){
if( threadIdx.x < blockDim.x / (j * 2)){
s_mat[(threadIdx.x) * 16] += s_mat[((threadIdx.x) * 16) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 1] += s_mat[((threadIdx.x) * 16 + 1) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 2] += s_mat[((threadIdx.x) * 16 + 2) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 3] += s_mat[((threadIdx.x) * 16 + 3) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 4] += s_mat[((threadIdx.x) * 16 + 4) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 5] += s_mat[((threadIdx.x) * 16 + 5) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 6] += s_mat[((threadIdx.x) * 16 + 6) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 7] += s_mat[((threadIdx.x) * 16 + 7) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 8] += s_mat[((threadIdx.x) * 16 + 8) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 9] += s_mat[((threadIdx.x) * 16 + 9) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 10] += s_mat[((threadIdx.x) * 16 + 10) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 11] += s_mat[((threadIdx.x) * 16 + 11) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 12] += s_mat[((threadIdx.x) * 16 + 12) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 13] += s_mat[((threadIdx.x) * 16 + 13) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 14] += s_mat[((threadIdx.x) * 16 + 14) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 15] += s_mat[((threadIdx.x) * 16 + 15) + (blockDim.x / (j * 2)) * 16];
}
__syncthreads();
}
if ((threadIdx.x) == 0){
d_matA[(blockIdx.x * 16)] = s_mat[0];
d_matA[(blockIdx.x * 16) + 1] = s_mat[1];
d_matA[(blockIdx.x * 16) + 2] = s_mat[2];
d_matA[(blockIdx.x * 16) + 3] = s_mat[3];
d_matA[(blockIdx.x * 16) + 4] = s_mat[4];
d_matA[(blockIdx.x * 16) + 5] = s_mat[5];
d_matA[(blockIdx.x * 16) + 6] = s_mat[6];
d_matA[(blockIdx.x * 16) + 7] = s_mat[7];
d_matA[(blockIdx.x * 16) + 8] = s_mat[8];
d_matA[(blockIdx.x * 16) + 9] = s_mat[9];
d_matA[(blockIdx.x * 16) + 10] = s_mat[10];
d_matA[(blockIdx.x * 16) + 11] = s_mat[11];
d_matA[(blockIdx.x * 16) + 12] = s_mat[12];
d_matA[(blockIdx.x * 16) + 13] = s_mat[13];
d_matA[(blockIdx.x * 16) + 14] = s_mat[14];
d_matA[(blockIdx.x * 16) + 15] = s_mat[15];
}
}
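// Second reduction stage: sums cant_m partial 4x4 matrices starting at matrix index offset_m with the same shared-memory tree reduction, compacting the per-block results back into d_matA.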
__global__ void vecMult2(BASETYPE *d_matA,unsigned long n,int offset_m,int cant_m ){
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ BASETYPE s_mat[];
unsigned int j;
if( global_id < n){
s_mat[threadIdx.x * 16]=d_matA[(offset_m * 16) +( global_id * 16) ];
s_mat[threadIdx.x * 16 + 1]=d_matA[(offset_m * 16) +( global_id * 16 + 1) ];
s_mat[threadIdx.x * 16 + 2]=d_matA[(offset_m * 16) +( global_id * 16 + 2) ];
s_mat[threadIdx.x * 16 + 3]=d_matA[(offset_m * 16) +( global_id * 16 + 3) ];
s_mat[threadIdx.x * 16 + 4]=d_matA[(offset_m * 16) +( global_id * 16 + 4) ];
s_mat[threadIdx.x * 16 + 5]=d_matA[(offset_m * 16) +( global_id * 16 + 5) ];
s_mat[threadIdx.x * 16 + 6]=d_matA[(offset_m * 16) +( global_id * 16 + 6) ];
s_mat[threadIdx.x * 16 + 7]=d_matA[(offset_m * 16) +( global_id * 16 + 7) ];
s_mat[threadIdx.x * 16 + 8]=d_matA[(offset_m * 16) +( global_id * 16 + 8) ];
s_mat[threadIdx.x * 16 + 9]=d_matA[(offset_m * 16) +( global_id * 16 + 9) ];
s_mat[threadIdx.x * 16 + 10]=d_matA[(offset_m * 16) +( global_id * 16 + 10) ];
s_mat[threadIdx.x * 16 + 11]=d_matA[(offset_m * 16) +( global_id * 16 + 11) ];
s_mat[threadIdx.x * 16 + 12]=d_matA[(offset_m * 16) +( global_id * 16 + 12) ];
s_mat[threadIdx.x * 16 + 13]=d_matA[(offset_m * 16) +( global_id * 16 + 13) ];
s_mat[threadIdx.x * 16 + 14]=d_matA[(offset_m * 16) +( global_id * 16 + 14) ];
s_mat[threadIdx.x * 16 + 15]=d_matA[(offset_m * 16) +( global_id * 16 + 15) ];
__syncthreads();
for( j = 1; j < cant_m; j *= 2 ){
if( threadIdx.x < cant_m / (j * 2)){
s_mat[(threadIdx.x) * 16] += s_mat[((threadIdx.x) * 16) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 1] += s_mat[((threadIdx.x) * 16 + 1) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 2] += s_mat[((threadIdx.x) * 16 + 2) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 3] += s_mat[((threadIdx.x) * 16 + 3) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 4] += s_mat[((threadIdx.x) * 16 + 4) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 5] += s_mat[((threadIdx.x) * 16 + 5) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 6] += s_mat[((threadIdx.x) * 16 + 6) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 7] += s_mat[((threadIdx.x) * 16 + 7) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 8] += s_mat[((threadIdx.x) * 16 + 8) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 9] += s_mat[((threadIdx.x) * 16 + 9) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 10] += s_mat[((threadIdx.x) * 16 + 10) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 11] += s_mat[((threadIdx.x) * 16 + 11) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 12] += s_mat[((threadIdx.x) * 16 + 12) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 13] += s_mat[((threadIdx.x) * 16 + 13) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 14] += s_mat[((threadIdx.x) * 16 + 14) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 15] += s_mat[((threadIdx.x) * 16 + 15) + (cant_m / (j * 2)) * 16];
}
__syncthreads();
}
if ((threadIdx.x) == 0){
d_matA[(offset_m / blockDim.x) * 16 + (blockIdx.x * 16)] = s_mat[0];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 1] = s_mat[1];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 2] = s_mat[2];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 3] = s_mat[3];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 4] = s_mat[4];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 5] = s_mat[5];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 6] = s_mat[6];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 7] = s_mat[7];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 8] = s_mat[8];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 9] = s_mat[9];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 10] = s_mat[10];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 11] = s_mat[11];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 12] = s_mat[12];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 13] = s_mat[13];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 14] = s_mat[14];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 15] = s_mat[15];
}
}
}
int main(int argc, char *argv[]){
if (argc != 3){
printf("Falta argumento: N\n");
printf("Falta argumento: CUDA_BLK \n");
return 0;
}
// variable declarations
hipError_t error;
unsigned long N = atoi (argv[1]);
unsigned long CUDA_BLK = atoi(argv[2]),GRID_BLK,cant_blk;
unsigned long numBytes = sizeof(BASETYPE)*4*4;
BASETYPE *matrices,*d_matrices,*d_detM,*detM;
double timetick;
unsigned long i,j;
int datos_matDet,datos_vecMult,matDet_desp;
matrices = (BASETYPE *)malloc(numBytes*N);
detM = (BASETYPE *)malloc(sizeof(BASETYPE)*N);
for (i = 0; i < 4*4*N; i++){
matrices[i] = 1;
}
for (i = 0; i < N; i++){
detM[i] = 0;
}
matrices[2] = 220;
matrices[13] = 220;
matrices[7] = 6;
matrices[14] = 6;
// allocate device memory
hipMalloc((void **) &d_matrices, numBytes*N);
hipMalloc((void **) &d_detM, sizeof(BASETYPE)*N);
datos_matDet = numBytes * CUDA_BLK + sizeof(BASETYPE) * 4 * CUDA_BLK;
datos_vecMult = numBytes * CUDA_BLK;
matDet_desp = CUDA_BLK * 16;
cant_blk = N / CUDA_BLK;
dim3 dimBlock(CUDA_BLK);
dim3 dimGrid(cant_blk);
timetick = dwalltime();
hipMemcpy(d_matrices, matrices, numBytes*N, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_detM, detM, sizeof(BASETYPE)*N, hipMemcpyHostToDevice); // CPU -> GPU
hipLaunchKernelGGL(( matDet), dim3(dimGrid), dim3(dimBlock),datos_matDet, 0, d_matrices,d_detM,matDet_desp);
hipDeviceSynchronize();
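// Reduce the per-matrix results block by block until a single 4x4 sum remains;
// vecMult2 folds the leftover matrices in one block when the count is not a multiple of CUDA_BLK.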
for(i = N ; i > 1; i = i / CUDA_BLK){
GRID_BLK = i / CUDA_BLK;
if ((i % CUDA_BLK) == 0){
// printf("primero---------------------------------\n");
dim3 dimGrid(GRID_BLK);
hipLaunchKernelGGL(( vecMult), dim3(dimGrid), dim3(dimBlock),datos_vecMult, 0, d_matrices,i);
hipDeviceSynchronize();
} else{
if(GRID_BLK != 0){
hipLaunchKernelGGL(( vecMult), dim3(dimGrid), dim3(dimBlock),datos_vecMult, 0, d_matrices,i);
hipDeviceSynchronize();
}
// printf("segundo---------------------------------\n");
dim3 dimGrid2(1);
hipLaunchKernelGGL(( vecMult2), dim3(dimGrid2), dim3(dimBlock),datos_vecMult, 0, d_matrices,(i % CUDA_BLK),GRID_BLK * CUDA_BLK,(i % CUDA_BLK));
hipDeviceSynchronize();
i = i + (i % CUDA_BLK);
}
}
/* for(i = N ; i > 1; i = i / CUDA_BLK){
GRID_BLK = i / CUDA_BLK;
dim3 dimGrid(GRID_BLK);
hipLaunchKernelGGL(( vecMult), dim3(dimGrid), dim3(dimBlock),datos_vecMult, 0, d_matrices,i);
hipDeviceSynchronize();
}*/
hipMemcpy(matrices, d_matrices, numBytes, hipMemcpyDeviceToHost); // GPU -> CPU
hipMemcpy(detM, d_detM, sizeof(BASETYPE)*N, hipMemcpyDeviceToHost); // GPU -> CPU
for(i = 1; i < N ; i++){
detM[0] += detM[i];
}
detM[0] = detM[0] / N;
for (i = 0; i < 4*4; i++){
matrices[i] *= detM[0];
}
printf("Tiempo para la GPU: %f\n",dwalltime() - timetick);
error = hipGetLastError();
printf("error: %d\n",error);
printf("%.2lf|\n",detM[0]);
for(i=0; i < 4; i++){
for(j=0; j < 4; j++){
printf("%.2lf|",matrices[i*4+j]);
}
printf("\n");
}
hipFree(d_matrices);
hipFree(d_detM);
free(matrices);
free(detM);
return 0;
}
| a214eafc3d2d364d612b233fabd134b101a91ab9.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
#define BASETYPE float
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
__global__ void matDet(BASETYPE *d_matA, BASETYPE *detM, int desp){
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ BASETYPE datos[];
BASETYPE *s_mat = &datos[0];
BASETYPE *s_detAux = &datos[desp];
int offset = (threadIdx.x)*16;
unsigned int i;
for(i = 0; i < 16; i++){
s_mat[(threadIdx.x) * 16 + i]=d_matA[global_id * 16 + i];
}
__syncthreads();
for(i = 0; i < 4; i++){
s_detAux[(threadIdx.x) * 4+i]=0;
}
__syncthreads();
// printf("globalId:%d|%d|%d|%d|%d\n",global_id,(threadIdx.x)*4,(threadIdx.x)*4+1,(threadIdx.x)*4+2,(threadIdx.x)*4+3);
s_detAux[(threadIdx.x)*4] += s_mat[offset] * ( (s_mat[offset+5]*s_mat[offset+10]*s_mat[offset+15])+(s_mat[offset+6]*s_mat[offset+11]*s_mat[offset+13])+(s_mat[offset+7]*s_mat[offset+9]*s_mat[offset+14]) + (-1*(s_mat[offset+7]*s_mat[offset+10]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+11]*s_mat[offset+14])) + (-1*(s_mat[offset+6]*s_mat[offset+9]*s_mat[offset+15])) );
s_detAux[(threadIdx.x)*4+1] += (-1*s_mat[offset+1]) * ( (s_mat[offset+4]*s_mat[offset+10]*s_mat[offset+15])+(s_mat[offset+6]*s_mat[offset+11]*s_mat[offset+12])+(s_mat[offset+7]*s_mat[offset+8]*s_mat[offset+14]) + (-1*(s_mat[offset+7]*s_mat[offset+10]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+11]*s_mat[offset+14])) + (-1*(s_mat[offset+6]*s_mat[offset+8]*s_mat[offset+15])) );
s_detAux[(threadIdx.x)*4+2] += s_mat[offset+2] * ( (s_mat[offset+4]*s_mat[offset+9]*s_mat[offset+15])+(s_mat[offset+5]*s_mat[offset+11]*s_mat[offset+12])+(s_mat[offset+7]*s_mat[offset+8]*s_mat[offset+13]) + (-1*(s_mat[offset+7]*s_mat[offset+9]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+11]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+8]*s_mat[offset+15])) );
s_detAux[(threadIdx.x)*4+3] += (-1*s_mat[offset+3]) * ( (s_mat[offset+4]*s_mat[offset+9]*s_mat[offset+14])+(s_mat[offset+5]*s_mat[offset+10]*s_mat[offset+12])+(s_mat[offset+6]*s_mat[offset+8]*s_mat[offset+13]) + (-1*(s_mat[offset+6]*s_mat[offset+9]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+10]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+8]*s_mat[offset+14])) );
detM[blockIdx.x * blockDim.x + (threadIdx.x)] = s_detAux[(threadIdx.x)*4] + s_detAux[(threadIdx.x)*4+1] + s_detAux[(threadIdx.x)*4+2] + s_detAux[(threadIdx.x)*4+3];
__syncthreads();
}
__global__ void vecMult(BASETYPE *d_matA,unsigned long n){
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
BASETYPE r_matA1,r_matA2;
extern __shared__ BASETYPE s_mat[];
unsigned int j;
r_matA1=d_matA[global_id * 16];
r_matA2=d_matA[global_id * 16 + 1];
s_mat[threadIdx.x * 16]=r_matA1;
s_mat[threadIdx.x * 16 + 1]=r_matA2;
r_matA1=d_matA[global_id * 16 + 2];
r_matA2=d_matA[global_id * 16 + 3];
s_mat[threadIdx.x * 16 + 2]=r_matA1;
s_mat[threadIdx.x * 16 + 3]=r_matA2;
r_matA1=d_matA[global_id * 16 + 4];
r_matA2=d_matA[global_id * 16 + 5];
s_mat[threadIdx.x * 16 + 4]=r_matA1;
s_mat[threadIdx.x * 16 + 5]=r_matA2;
r_matA1=d_matA[global_id * 16 + 6];
r_matA2=d_matA[global_id * 16 + 7];
s_mat[threadIdx.x * 16 + 6]=r_matA1;
s_mat[threadIdx.x * 16 + 7]=r_matA2;
r_matA1=d_matA[global_id * 16 + 8];
r_matA2=d_matA[global_id * 16 + 9];
s_mat[threadIdx.x * 16 + 8]=r_matA1;
s_mat[threadIdx.x * 16 + 9]=r_matA2;
r_matA1=d_matA[global_id * 16 + 10];
r_matA2=d_matA[global_id * 16 + 11];
s_mat[threadIdx.x * 16 + 10]=r_matA1;
s_mat[threadIdx.x * 16 + 11]=r_matA2;
r_matA1=d_matA[global_id * 16 + 12];
r_matA2=d_matA[global_id * 16 + 13];
s_mat[threadIdx.x * 16 + 12]=r_matA1;
s_mat[threadIdx.x * 16 + 13]=r_matA2;
r_matA1=d_matA[global_id * 16 + 14];
r_matA2=d_matA[global_id * 16 + 15];
s_mat[threadIdx.x * 16 + 14]=r_matA1;
s_mat[threadIdx.x * 16 + 15]=r_matA2;
__syncthreads();
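// Tree reduction in shared memory: each step halves the number of active threads and each
// thread adds its partner's 4x4 matrix element by element, leaving the block-wide sum in s_mat[0..15].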
for( j = 1; j < blockDim.x; j *= 2 ){
if( threadIdx.x < blockDim.x / (j * 2)){
s_mat[(threadIdx.x) * 16] += s_mat[((threadIdx.x) * 16) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 1] += s_mat[((threadIdx.x) * 16 + 1) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 2] += s_mat[((threadIdx.x) * 16 + 2) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 3] += s_mat[((threadIdx.x) * 16 + 3) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 4] += s_mat[((threadIdx.x) * 16 + 4) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 5] += s_mat[((threadIdx.x) * 16 + 5) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 6] += s_mat[((threadIdx.x) * 16 + 6) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 7] += s_mat[((threadIdx.x) * 16 + 7) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 8] += s_mat[((threadIdx.x) * 16 + 8) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 9] += s_mat[((threadIdx.x) * 16 + 9) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 10] += s_mat[((threadIdx.x) * 16 + 10) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 11] += s_mat[((threadIdx.x) * 16 + 11) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 12] += s_mat[((threadIdx.x) * 16 + 12) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 13] += s_mat[((threadIdx.x) * 16 + 13) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 14] += s_mat[((threadIdx.x) * 16 + 14) + (blockDim.x / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 15] += s_mat[((threadIdx.x) * 16 + 15) + (blockDim.x / (j * 2)) * 16];
}
__syncthreads();
}
if ((threadIdx.x) == 0){
d_matA[(blockIdx.x * 16)] = s_mat[0];
d_matA[(blockIdx.x * 16) + 1] = s_mat[1];
d_matA[(blockIdx.x * 16) + 2] = s_mat[2];
d_matA[(blockIdx.x * 16) + 3] = s_mat[3];
d_matA[(blockIdx.x * 16) + 4] = s_mat[4];
d_matA[(blockIdx.x * 16) + 5] = s_mat[5];
d_matA[(blockIdx.x * 16) + 6] = s_mat[6];
d_matA[(blockIdx.x * 16) + 7] = s_mat[7];
d_matA[(blockIdx.x * 16) + 8] = s_mat[8];
d_matA[(blockIdx.x * 16) + 9] = s_mat[9];
d_matA[(blockIdx.x * 16) + 10] = s_mat[10];
d_matA[(blockIdx.x * 16) + 11] = s_mat[11];
d_matA[(blockIdx.x * 16) + 12] = s_mat[12];
d_matA[(blockIdx.x * 16) + 13] = s_mat[13];
d_matA[(blockIdx.x * 16) + 14] = s_mat[14];
d_matA[(blockIdx.x * 16) + 15] = s_mat[15];
}
}
__global__ void vecMult2(BASETYPE *d_matA,unsigned long n,int offset_m,int cant_m ){
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ BASETYPE s_mat[];
unsigned int j;
if( global_id < n){
s_mat[threadIdx.x * 16]=d_matA[(offset_m * 16) +( global_id * 16) ];
s_mat[threadIdx.x * 16 + 1]=d_matA[(offset_m * 16) +( global_id * 16 + 1) ];
s_mat[threadIdx.x * 16 + 2]=d_matA[(offset_m * 16) +( global_id * 16 + 2) ];
s_mat[threadIdx.x * 16 + 3]=d_matA[(offset_m * 16) +( global_id * 16 + 3) ];
s_mat[threadIdx.x * 16 + 4]=d_matA[(offset_m * 16) +( global_id * 16 + 4) ];
s_mat[threadIdx.x * 16 + 5]=d_matA[(offset_m * 16) +( global_id * 16 + 5) ];
s_mat[threadIdx.x * 16 + 6]=d_matA[(offset_m * 16) +( global_id * 16 + 6) ];
s_mat[threadIdx.x * 16 + 7]=d_matA[(offset_m * 16) +( global_id * 16 + 7) ];
s_mat[threadIdx.x * 16 + 8]=d_matA[(offset_m * 16) +( global_id * 16 + 8) ];
s_mat[threadIdx.x * 16 + 9]=d_matA[(offset_m * 16) +( global_id * 16 + 9) ];
s_mat[threadIdx.x * 16 + 10]=d_matA[(offset_m * 16) +( global_id * 16 + 10) ];
s_mat[threadIdx.x * 16 + 11]=d_matA[(offset_m * 16) +( global_id * 16 + 11) ];
s_mat[threadIdx.x * 16 + 12]=d_matA[(offset_m * 16) +( global_id * 16 + 12) ];
s_mat[threadIdx.x * 16 + 13]=d_matA[(offset_m * 16) +( global_id * 16 + 13) ];
s_mat[threadIdx.x * 16 + 14]=d_matA[(offset_m * 16) +( global_id * 16 + 14) ];
s_mat[threadIdx.x * 16 + 15]=d_matA[(offset_m * 16) +( global_id * 16 + 15) ];
__syncthreads();
for( j = 1; j < cant_m; j *= 2 ){
if( threadIdx.x < cant_m / (j * 2)){
s_mat[(threadIdx.x) * 16] += s_mat[((threadIdx.x) * 16) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 1] += s_mat[((threadIdx.x) * 16 + 1) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 2] += s_mat[((threadIdx.x) * 16 + 2) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 3] += s_mat[((threadIdx.x) * 16 + 3) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 4] += s_mat[((threadIdx.x) * 16 + 4) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 5] += s_mat[((threadIdx.x) * 16 + 5) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 6] += s_mat[((threadIdx.x) * 16 + 6) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 7] += s_mat[((threadIdx.x) * 16 + 7) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 8] += s_mat[((threadIdx.x) * 16 + 8) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 9] += s_mat[((threadIdx.x) * 16 + 9) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 10] += s_mat[((threadIdx.x) * 16 + 10) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 11] += s_mat[((threadIdx.x) * 16 + 11) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 12] += s_mat[((threadIdx.x) * 16 + 12) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 13] += s_mat[((threadIdx.x) * 16 + 13) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 14] += s_mat[((threadIdx.x) * 16 + 14) + (cant_m / (j * 2)) * 16];
s_mat[(threadIdx.x) * 16 + 15] += s_mat[((threadIdx.x) * 16 + 15) + (cant_m / (j * 2)) * 16];
}
__syncthreads();
}
if ((threadIdx.x) == 0){
d_matA[(offset_m / blockDim.x) * 16 + (blockIdx.x * 16)] = s_mat[0];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 1] = s_mat[1];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 2] = s_mat[2];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 3] = s_mat[3];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 4] = s_mat[4];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 5] = s_mat[5];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 6] = s_mat[6];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 7] = s_mat[7];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 8] = s_mat[8];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 9] = s_mat[9];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 10] = s_mat[10];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 11] = s_mat[11];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 12] = s_mat[12];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 13] = s_mat[13];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 14] = s_mat[14];
d_matA[((offset_m / blockDim.x) * 16 + (blockIdx.x * 16)) + 15] = s_mat[15];
}
}
}
int main(int argc, char *argv[]){
if (argc != 3){
printf("Falta argumento: N\n");
printf("Falta argumento: CUDA_BLK \n");
return 0;
}
// variable declarations
cudaError_t error;
unsigned long N = atoi (argv[1]);
unsigned long CUDA_BLK = atoi(argv[2]),GRID_BLK,cant_blk;
unsigned long numBytes = sizeof(BASETYPE)*4*4;
BASETYPE *matrices,*d_matrices,*d_detM,*detM;
double timetick;
unsigned long i,j;
int datos_matDet,datos_vecMult,matDet_desp;
matrices = (BASETYPE *)malloc(numBytes*N);
detM = (BASETYPE *)malloc(sizeof(BASETYPE)*N);
for (i = 0; i < 4*4*N; i++){
matrices[i] = 1;
}
for (i = 0; i < N; i++){
detM[i] = 0;
}
matrices[2] = 220;
matrices[13] = 220;
matrices[7] = 6;
matrices[14] = 6;
// allocate device memory
cudaMalloc((void **) &d_matrices, numBytes*N);
cudaMalloc((void **) &d_detM, sizeof(BASETYPE)*N);
datos_matDet = numBytes * CUDA_BLK + sizeof(BASETYPE) * 4 * CUDA_BLK;
datos_vecMult = numBytes * CUDA_BLK;
matDet_desp = CUDA_BLK * 16;
cant_blk = N / CUDA_BLK;
dim3 dimBlock(CUDA_BLK);
dim3 dimGrid(cant_blk);
timetick = dwalltime();
cudaMemcpy(d_matrices, matrices, numBytes*N, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_detM, detM, sizeof(BASETYPE)*N, cudaMemcpyHostToDevice); // CPU -> GPU
matDet<<<dimGrid, dimBlock,datos_matDet>>>(d_matrices,d_detM,matDet_desp);
cudaThreadSynchronize();
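// Reduce the per-matrix results block by block until a single 4x4 sum remains;
// vecMult2 folds the leftover matrices in one block when the count is not a multiple of CUDA_BLK.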
for(i = N ; i > 1; i = i / CUDA_BLK){
GRID_BLK = i / CUDA_BLK;
if ((i % CUDA_BLK) == 0){
// printf("primero---------------------------------\n");
dim3 dimGrid(GRID_BLK);
vecMult<<<dimGrid, dimBlock,datos_vecMult>>>(d_matrices,i);
cudaThreadSynchronize();
} else{
if(GRID_BLK != 0){
vecMult<<<dimGrid, dimBlock,datos_vecMult>>>(d_matrices,i);
cudaThreadSynchronize();
}
// printf("segundo---------------------------------\n");
dim3 dimGrid2(1);
vecMult2<<<dimGrid2, dimBlock,datos_vecMult>>>(d_matrices,(i % CUDA_BLK),GRID_BLK * CUDA_BLK,(i % CUDA_BLK));
cudaThreadSynchronize();
i = i + (i % CUDA_BLK);
}
}
/* for(i = N ; i > 1; i = i / CUDA_BLK){
GRID_BLK = i / CUDA_BLK;
dim3 dimGrid(GRID_BLK);
vecMult<<<dimGrid, dimBlock,datos_vecMult>>>(d_matrices,i);
cudaThreadSynchronize();
}*/
cudaMemcpy(matrices, d_matrices, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
cudaMemcpy(detM, d_detM, sizeof(BASETYPE)*N, cudaMemcpyDeviceToHost); // GPU -> CPU
for(i = 1; i < N ; i++){
detM[0] += detM[i];
}
detM[0] = detM[0] / N;
for (i = 0; i < 4*4; i++){
matrices[i] *= detM[0];
}
printf("Tiempo para la GPU: %f\n",dwalltime() - timetick);
error = cudaGetLastError();
printf("error: %d\n",error);
printf("%.2lf|\n",detM[0]);
for(i=0; i < 4; i++){
for(j=0; j < 4; j++){
printf("%.2lf|",matrices[i*4+j]);
}
printf("\n");
}
cudaFree(d_matrices);
cudaFree(d_detM);
free(matrices);
free(detM);
return 0;
}
|
d688e907065458b47dfe501ac91c984fc6b97224.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright by Contributors
#include <xgboost/linear_updater.h>
#include "../helpers.h"
#include "xgboost/gbm.h"
namespace xgboost {
TEST(Linear, GPUCoordinate) {
dh::safe_cuda(hipSetDevice(0));
auto mat = xgboost::CreateDMatrix(10, 10, 0);
auto updater = std::unique_ptr<xgboost::LinearUpdater>(
xgboost::LinearUpdater::Create("gpu_coord_descent"));
updater->Init({{"eta", "1."}, {"n_gpus", "1"}});
xgboost::HostDeviceVector<xgboost::GradientPair> gpair(
(*mat)->Info().num_row_, xgboost::GradientPair(-5, 1.0));
xgboost::gbm::GBLinearModel model;
model.param.num_feature = (*mat)->Info().num_col_;
model.param.num_output_group = 1;
model.LazyInitModel();
updater->Update(&gpair, (*mat).get(), &model, gpair.Size());
ASSERT_EQ(model.bias()[0], 5.0f);
delete mat;
}
#if defined(XGBOOST_USE_NCCL)
TEST(Linear, MGPU_GPUCoordinate) {
dh::safe_cuda(hipSetDevice(0));
{
auto mat = xgboost::CreateDMatrix(10, 10, 0);
auto updater = std::unique_ptr<xgboost::LinearUpdater>(
xgboost::LinearUpdater::Create("gpu_coord_descent"));
updater->Init({{"eta", "1."}, {"n_gpus", "-1"}});
xgboost::HostDeviceVector<xgboost::GradientPair> gpair(
(*mat)->Info().num_row_, xgboost::GradientPair(-5, 1.0));
xgboost::gbm::GBLinearModel model;
model.param.num_feature = (*mat)->Info().num_col_;
model.param.num_output_group = 1;
model.LazyInitModel();
updater->Update(&gpair, (*mat).get(), &model, gpair.Size());
ASSERT_EQ(model.bias()[0], 5.0f);
delete mat;
}
dh::safe_cuda(hipSetDevice(0));
{
auto mat = xgboost::CreateDMatrix(10, 10, 0);
auto updater = std::unique_ptr<xgboost::LinearUpdater>(
xgboost::LinearUpdater::Create("gpu_coord_descent"));
updater->Init({
{"eta", "1."},
{"n_gpus", "-1"},
{"gpu_id", "1"}});
xgboost::HostDeviceVector<xgboost::GradientPair> gpair(
(*mat)->Info().num_row_, xgboost::GradientPair(-5, 1.0));
xgboost::gbm::GBLinearModel model;
model.param.num_feature = (*mat)->Info().num_col_;
model.param.num_output_group = 1;
model.LazyInitModel();
updater->Update(&gpair, (*mat).get(), &model, gpair.Size());
ASSERT_EQ(model.bias()[0], 5.0f);
delete mat;
}
}
#endif
} // namespace xgboost | d688e907065458b47dfe501ac91c984fc6b97224.cu | // Copyright by Contributors
#include <xgboost/linear_updater.h>
#include "../helpers.h"
#include "xgboost/gbm.h"
namespace xgboost {
TEST(Linear, GPUCoordinate) {
dh::safe_cuda(cudaSetDevice(0));
auto mat = xgboost::CreateDMatrix(10, 10, 0);
auto updater = std::unique_ptr<xgboost::LinearUpdater>(
xgboost::LinearUpdater::Create("gpu_coord_descent"));
updater->Init({{"eta", "1."}, {"n_gpus", "1"}});
xgboost::HostDeviceVector<xgboost::GradientPair> gpair(
(*mat)->Info().num_row_, xgboost::GradientPair(-5, 1.0));
xgboost::gbm::GBLinearModel model;
model.param.num_feature = (*mat)->Info().num_col_;
model.param.num_output_group = 1;
model.LazyInitModel();
updater->Update(&gpair, (*mat).get(), &model, gpair.Size());
ASSERT_EQ(model.bias()[0], 5.0f);
delete mat;
}
#if defined(XGBOOST_USE_NCCL)
TEST(Linear, MGPU_GPUCoordinate) {
dh::safe_cuda(cudaSetDevice(0));
{
auto mat = xgboost::CreateDMatrix(10, 10, 0);
auto updater = std::unique_ptr<xgboost::LinearUpdater>(
xgboost::LinearUpdater::Create("gpu_coord_descent"));
updater->Init({{"eta", "1."}, {"n_gpus", "-1"}});
xgboost::HostDeviceVector<xgboost::GradientPair> gpair(
(*mat)->Info().num_row_, xgboost::GradientPair(-5, 1.0));
xgboost::gbm::GBLinearModel model;
model.param.num_feature = (*mat)->Info().num_col_;
model.param.num_output_group = 1;
model.LazyInitModel();
updater->Update(&gpair, (*mat).get(), &model, gpair.Size());
ASSERT_EQ(model.bias()[0], 5.0f);
delete mat;
}
dh::safe_cuda(cudaSetDevice(0));
{
auto mat = xgboost::CreateDMatrix(10, 10, 0);
auto updater = std::unique_ptr<xgboost::LinearUpdater>(
xgboost::LinearUpdater::Create("gpu_coord_descent"));
updater->Init({
{"eta", "1."},
{"n_gpus", "-1"},
{"gpu_id", "1"}});
xgboost::HostDeviceVector<xgboost::GradientPair> gpair(
(*mat)->Info().num_row_, xgboost::GradientPair(-5, 1.0));
xgboost::gbm::GBLinearModel model;
model.param.num_feature = (*mat)->Info().num_col_;
model.param.num_output_group = 1;
model.LazyInitModel();
updater->Update(&gpair, (*mat).get(), &model, gpair.Size());
ASSERT_EQ(model.bias()[0], 5.0f);
delete mat;
}
}
#endif
} // namespace xgboost |
3511717fc08e7c75dedb7b14e61fe3fddaa37ac2.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/cudafeatures2d.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
#define SOUTH_BORDER 32
#define EAST_BORDER 32
#define RELABELING_ROWS 16
#define RELABELING_COLS 16
using namespace cv;
namespace {
// Returns the root index of the UFTree
__device__ unsigned Find(const int *s_buf, unsigned n) { // n is an index but return value is a label
// You can now call Find on a background pixel
unsigned label = s_buf[n];
if (label) {
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
}
return n + 1;
}
else {
return 0;
}
}
// SetRoot procedure
__device__ void SetRoot(int *labels, unsigned label, unsigned eps) { // label and eps are both labels (not indexes; labels are shifted by one to the right wrt indexes)
int father = labels[label - 1];
if (father > eps) {
labels[label - 1] = eps;
}
}
// atomicRUF procedure
__device__ void atomicRUF(int *labels, unsigned label, unsigned eps) {
if (label > eps) {
unsigned minResult = atomicMin(labels + label - 1, eps);
if (eps > minResult) {
atomicRUF(labels, eps, minResult);
}
else {
if (label > minResult) {
atomicRUF(labels, minResult, eps);
}
}
}
}
__global__ void LocalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned local_index = threadIdx.y * BLOCK_COLS + threadIdx.x;
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned img_index = row * img.step + col;
__shared__ int s_buf[BLOCK_ROWS * BLOCK_COLS];
__shared__ unsigned char s_img[BLOCK_ROWS * BLOCK_COLS];
bool in_limits = (row < img.rows && col < img.cols); // threads outside the image are not processed
// DLP-I
s_img[local_index] = in_limits ? img[img_index] : 0xFF;
unsigned char v = s_img[local_index];
s_buf[local_index] = v ? local_index + 1 : 0;
__syncthreads();
// DLP-SR (optional)
if (threadIdx.y < BLOCK_ROWS - 1 && threadIdx.x < BLOCK_COLS - 1 && row < labels.rows - 1 && col < labels.cols - 1) {
int min = INT32_MAX;
int a[4];
a[0] = s_buf[local_index];
if (a[0] != 0 && a[0] < min)
min = a[0];
a[1] = s_buf[local_index + 1];
if (a[1] != 0 && a[1] < min)
min = a[1];
a[2] = s_buf[local_index + BLOCK_COLS];
if (a[2] != 0 && a[2] < min)
min = a[2];
a[3] = s_buf[local_index + BLOCK_COLS + 1];
if (a[3] != 0 && a[3] < min)
min = a[3];
for (unsigned i = 0; i < 4; i++) {
unsigned int label = a[i];
if (label != 0 && label != min) {
SetRoot(s_buf, label, min);
}
}
}
__syncthreads();
// DLP-R (optional)
if (v && in_limits) {
s_buf[local_index] = Find(s_buf, local_index);
}
__syncthreads();
// DLP-RUF
if (threadIdx.y < BLOCK_ROWS - 1 && threadIdx.x < BLOCK_COLS - 1 && row < labels.rows - 1 && col < labels.cols - 1) {
int min = INT32_MAX;
int a[4];
a[0] = s_buf[local_index];
if (a[0] != 0 && a[0] < min)
min = a[0];
a[1] = s_buf[local_index + 1];
if (a[1] != 0 && a[1] < min)
min = a[1];
a[2] = s_buf[local_index + BLOCK_COLS];
if (a[2] != 0 && a[2] < min)
min = a[2];
a[3] = s_buf[local_index + BLOCK_COLS + 1];
if (a[3] != 0 && a[3] < min)
min = a[3];
for (unsigned i = 0; i < 4; i++) {
unsigned int label = a[i];
if (label != 0 && label != min) {
atomicRUF(s_buf, label, min);
}
}
}
__syncthreads();
// DLP-R
if (v && in_limits) {
s_buf[local_index] = Find(s_buf, local_index);
}
__syncthreads();
// Label translation
if (in_limits) {
if (v) {
unsigned f = Find(s_buf, local_index) - 1;
unsigned f_row = f / BLOCK_COLS;
unsigned f_col = f % BLOCK_COLS;
unsigned global_f = (blockIdx.y * BLOCK_ROWS + f_row) * (labels.step / labels.elem_size) + (blockIdx.x * BLOCK_COLS + f_col);
labels.data[row * labels.step / sizeof(int) + col] = global_f + 1;
}
else {
labels.data[row * labels.step / sizeof(int) + col] = 0;
}
}
}
__global__ void SouthBorderMerge(cuda::PtrStepSzi labels) {
unsigned row = (blockIdx.y + 1) * BLOCK_ROWS - 1;
unsigned col = blockIdx.x * SOUTH_BORDER + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
bool in_limits = (col < labels.cols - 1);
if (in_limits) {
int min = INT32_MAX;
int a[4];
a[0] = labels[labels_index];
if (a[0] != 0 && a[0] < min)
min = a[0];
a[1] = labels[labels_index + 1];
if (a[1] != 0 && a[1] < min)
min = a[1];
a[2] = labels[labels_index + labels.step / labels.elem_size];
if (a[2] != 0 && a[2] < min)
min = a[2];
a[3] = labels[labels_index + labels.step / labels.elem_size + 1];
if (a[3] != 0 && a[3] < min)
min = a[3];
for (unsigned i = 0; i < 4; i++) {
unsigned int label = a[i];
if (label != 0 && label != min) {
atomicRUF(labels, label, min);
}
}
}
}
__global__ void EastBorderMerge(cuda::PtrStepSzi labels) {
unsigned col = (blockIdx.x + 1) * BLOCK_COLS - 1;
unsigned row = blockIdx.y * EAST_BORDER + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
bool in_limits = (row < labels.rows - 1);
if (in_limits) {
int min = INT32_MAX;
int a[4];
a[0] = labels[labels_index];
if (a[0] != 0 && a[0] < min)
min = a[0];
a[1] = labels[labels_index + 1];
if (a[1] != 0 && a[1] < min)
min = a[1];
a[2] = labels[labels_index + labels.step / labels.elem_size];
if (a[2] != 0 && a[2] < min)
min = a[2];
a[3] = labels[labels_index + labels.step / labels.elem_size + 1];
if (a[3] != 0 && a[3] < min)
min = a[3];
for (unsigned i = 0; i < 4; i++) {
unsigned int label = a[i];
if (label != 0 && label != min) {
atomicRUF(labels, label, min);
}
}
}
}
__global__ void Relabeling(cuda::PtrStepSzi labels) {
unsigned row = blockIdx.y * RELABELING_ROWS + threadIdx.y;
unsigned col = blockIdx.x * RELABELING_COLS + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
labels[labels_index] = Find(labels.data, labels_index);
}
}
}
class DLP : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
DLP() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
// Phase 1
// Label the pixels locally within the block
LocalMerge << <grid_size_, block_size_ >> >(d_img_, d_img_labels_);
// Debug image of the first phase
//Mat1i local_labels;
//d_img_labels_.download(local_labels);
// Phase 2
// Link together the union-find trees of the different blocks
SouthBorderMerge << <dim3((d_img_.cols + SOUTH_BORDER - 1) / SOUTH_BORDER, grid_size_.y - 1, 1), SOUTH_BORDER >> > (d_img_labels_);
EastBorderMerge << <dim3(grid_size_.x - 1, (d_img_.rows + EAST_BORDER - 1) / EAST_BORDER, 1), EAST_BORDER >> > (d_img_labels_);
//Mat1i border_labels;
//d_img_labels_.download(border_labels);
// Phase 3
// Collapse the union-find trees onto their roots
grid_size_ = dim3((d_img_.cols + RELABELING_COLS - 1) / RELABELING_COLS, (d_img_.rows + RELABELING_ROWS - 1) / RELABELING_ROWS, 1);
block_size_ = dim3(RELABELING_COLS, RELABELING_ROWS, 1);
Relabeling << <grid_size_, block_size_ >> > (d_img_labels_);
//Mat1i final_labels;
//d_img_labels_.download(final_labels);
hipDeviceSynchronize();
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.size(), CV_32SC1);
perf_.stop();
return perf_.last();
}
double Dealloc() {
perf_.start();
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void LocalScan() {
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
LocalMerge << <grid_size_, block_size_ >> >(d_img_, d_img_labels_);
hipDeviceSynchronize();
}
void GlobalScan() {
SouthBorderMerge << <dim3((d_img_.cols + SOUTH_BORDER - 1) / SOUTH_BORDER, grid_size_.y - 1, 1), SOUTH_BORDER >> > (d_img_labels_);
EastBorderMerge << <dim3(grid_size_.x - 1, (d_img_.rows + EAST_BORDER - 1) / EAST_BORDER, 1), EAST_BORDER >> > (d_img_labels_);
grid_size_ = dim3((d_img_.cols + RELABELING_COLS - 1) / RELABELING_COLS, (d_img_.rows + RELABELING_ROWS - 1) / RELABELING_ROWS, 1);
block_size_ = dim3(RELABELING_COLS, RELABELING_ROWS, 1);
Relabeling << <grid_size_, block_size_ >> > (d_img_labels_);
hipDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
LocalScan();
perf_.stop();
perf_.store(Step(StepType::FIRST_SCAN), perf_.last());
perf_.start();
GlobalScan();
perf_.stop();
perf_.store(Step(StepType::SECOND_SCAN), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(DLP);
| 3511717fc08e7c75dedb7b14e61fe3fddaa37ac2.cu | #include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
#define SOUTH_BORDER 32
#define EAST_BORDER 32
#define RELABELING_ROWS 16
#define RELABELING_COLS 16
using namespace cv;
namespace {
// Returns the root index of the UFTree
__device__ unsigned Find(const int *s_buf, unsigned n) { // n is an index but return value is a label
// You can now call Find on a background pixel
unsigned label = s_buf[n];
if (label) {
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
}
return n + 1;
}
else {
return 0;
}
}
// SetRoot procedure
__device__ void SetRoot(int *labels, unsigned label, unsigned eps) { // label and eps are both labels (not indexes; labels are shifted by one to the right wrt indexes)
int father = labels[label - 1];
if (father > eps) {
labels[label - 1] = eps;
}
}
// atomicRUF procedure
__device__ void atomicRUF(int *labels, unsigned label, unsigned eps) {
if (label > eps) {
unsigned minResult = atomicMin(labels + label - 1, eps);
if (eps > minResult) {
atomicRUF(labels, eps, minResult);
}
else {
if (label > minResult) {
atomicRUF(labels, minResult, eps);
}
}
}
}
__global__ void LocalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned local_index = threadIdx.y * BLOCK_COLS + threadIdx.x;
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned img_index = row * img.step + col;
__shared__ int s_buf[BLOCK_ROWS * BLOCK_COLS];
__shared__ unsigned char s_img[BLOCK_ROWS * BLOCK_COLS];
bool in_limits = (row < img.rows && col < img.cols); // threads outside the image are not processed
// DLP-I
s_img[local_index] = in_limits ? img[img_index] : 0xFF;
unsigned char v = s_img[local_index];
s_buf[local_index] = v ? local_index + 1 : 0;
__syncthreads();
// DLP-SR (optional)
if (threadIdx.y < BLOCK_ROWS - 1 && threadIdx.x < BLOCK_COLS - 1 && row < labels.rows - 1 && col < labels.cols - 1) {
int min = INT32_MAX;
int a[4];
a[0] = s_buf[local_index];
if (a[0] != 0 && a[0] < min)
min = a[0];
a[1] = s_buf[local_index + 1];
if (a[1] != 0 && a[1] < min)
min = a[1];
a[2] = s_buf[local_index + BLOCK_COLS];
if (a[2] != 0 && a[2] < min)
min = a[2];
a[3] = s_buf[local_index + BLOCK_COLS + 1];
if (a[3] != 0 && a[3] < min)
min = a[3];
for (unsigned i = 0; i < 4; i++) {
unsigned int label = a[i];
if (label != 0 && label != min) {
SetRoot(s_buf, label, min);
}
}
}
__syncthreads();
// DLP-R (optional)
if (v && in_limits) {
s_buf[local_index] = Find(s_buf, local_index);
}
__syncthreads();
// DLP-RUF
if (threadIdx.y < BLOCK_ROWS - 1 && threadIdx.x < BLOCK_COLS - 1 && row < labels.rows - 1 && col < labels.cols - 1) {
int min = INT32_MAX;
int a[4];
a[0] = s_buf[local_index];
if (a[0] != 0 && a[0] < min)
min = a[0];
a[1] = s_buf[local_index + 1];
if (a[1] != 0 && a[1] < min)
min = a[1];
a[2] = s_buf[local_index + BLOCK_COLS];
if (a[2] != 0 && a[2] < min)
min = a[2];
a[3] = s_buf[local_index + BLOCK_COLS + 1];
if (a[3] != 0 && a[3] < min)
min = a[3];
for (unsigned i = 0; i < 4; i++) {
unsigned int label = a[i];
if (label != 0 && label != min) {
atomicRUF(s_buf, label, min);
}
}
}
__syncthreads();
// DLP-R
if (v && in_limits) {
s_buf[local_index] = Find(s_buf, local_index);
}
__syncthreads();
// Label translation
if (in_limits) {
if (v) {
unsigned f = Find(s_buf, local_index) - 1;
unsigned f_row = f / BLOCK_COLS;
unsigned f_col = f % BLOCK_COLS;
unsigned global_f = (blockIdx.y * BLOCK_ROWS + f_row) * (labels.step / labels.elem_size) + (blockIdx.x * BLOCK_COLS + f_col);
labels.data[row * labels.step / sizeof(int) + col] = global_f + 1;
}
else {
labels.data[row * labels.step / sizeof(int) + col] = 0;
}
}
}
__global__ void SouthBorderMerge(cuda::PtrStepSzi labels) {
unsigned row = (blockIdx.y + 1) * BLOCK_ROWS - 1;
unsigned col = blockIdx.x * SOUTH_BORDER + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
bool in_limits = (col < labels.cols - 1);
if (in_limits) {
int min = INT32_MAX;
int a[4];
a[0] = labels[labels_index];
if (a[0] != 0 && a[0] < min)
min = a[0];
a[1] = labels[labels_index + 1];
if (a[1] != 0 && a[1] < min)
min = a[1];
a[2] = labels[labels_index + labels.step / labels.elem_size];
if (a[2] != 0 && a[2] < min)
min = a[2];
a[3] = labels[labels_index + labels.step / labels.elem_size + 1];
if (a[3] != 0 && a[3] < min)
min = a[3];
for (unsigned i = 0; i < 4; i++) {
unsigned int label = a[i];
if (label != 0 && label != min) {
atomicRUF(labels, label, min);
}
}
}
}
__global__ void EastBorderMerge(cuda::PtrStepSzi labels) {
unsigned col = (blockIdx.x + 1) * BLOCK_COLS - 1;
unsigned row = blockIdx.y * EAST_BORDER + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
bool in_limits = (row < labels.rows - 1);
if (in_limits) {
int min = INT32_MAX;
int a[4];
a[0] = labels[labels_index];
if (a[0] != 0 && a[0] < min)
min = a[0];
a[1] = labels[labels_index + 1];
if (a[1] != 0 && a[1] < min)
min = a[1];
a[2] = labels[labels_index + labels.step / labels.elem_size];
if (a[2] != 0 && a[2] < min)
min = a[2];
a[3] = labels[labels_index + labels.step / labels.elem_size + 1];
if (a[3] != 0 && a[3] < min)
min = a[3];
for (unsigned i = 0; i < 4; i++) {
unsigned int label = a[i];
if (label != 0 && label != min) {
atomicRUF(labels, label, min);
}
}
}
}
__global__ void Relabeling(cuda::PtrStepSzi labels) {
unsigned row = blockIdx.y * RELABELING_ROWS + threadIdx.y;
unsigned col = blockIdx.x * RELABELING_COLS + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
labels[labels_index] = Find(labels.data, labels_index);
}
}
}
class DLP : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
DLP() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
// Phase 1
// Label the pixels locally within the block
LocalMerge << <grid_size_, block_size_ >> >(d_img_, d_img_labels_);
// Debug image of the first phase
//Mat1i local_labels;
//d_img_labels_.download(local_labels);
// Phase 2
// Link together the union-find trees of the different blocks
SouthBorderMerge << <dim3((d_img_.cols + SOUTH_BORDER - 1) / SOUTH_BORDER, grid_size_.y - 1, 1), SOUTH_BORDER >> > (d_img_labels_);
EastBorderMerge << <dim3(grid_size_.x - 1, (d_img_.rows + EAST_BORDER - 1) / EAST_BORDER, 1), EAST_BORDER >> > (d_img_labels_);
//Mat1i border_labels;
//d_img_labels_.download(border_labels);
// Phase 3
// Collapse the union-find trees onto their roots
grid_size_ = dim3((d_img_.cols + RELABELING_COLS - 1) / RELABELING_COLS, (d_img_.rows + RELABELING_ROWS - 1) / RELABELING_ROWS, 1);
block_size_ = dim3(RELABELING_COLS, RELABELING_ROWS, 1);
Relabeling << <grid_size_, block_size_ >> > (d_img_labels_);
//Mat1i final_labels;
//d_img_labels_.download(final_labels);
cudaDeviceSynchronize();
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.size(), CV_32SC1);
perf_.stop();
return perf_.last();
}
double Dealloc() {
perf_.start();
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void LocalScan() {
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
LocalMerge << <grid_size_, block_size_ >> >(d_img_, d_img_labels_);
cudaDeviceSynchronize();
}
void GlobalScan() {
SouthBorderMerge << <dim3((d_img_.cols + SOUTH_BORDER - 1) / SOUTH_BORDER, grid_size_.y - 1, 1), SOUTH_BORDER >> > (d_img_labels_);
EastBorderMerge << <dim3(grid_size_.x - 1, (d_img_.rows + EAST_BORDER - 1) / EAST_BORDER, 1), EAST_BORDER >> > (d_img_labels_);
grid_size_ = dim3((d_img_.cols + RELABELING_COLS - 1) / RELABELING_COLS, (d_img_.rows + RELABELING_ROWS - 1) / RELABELING_ROWS, 1);
block_size_ = dim3(RELABELING_COLS, RELABELING_ROWS, 1);
Relabeling << <grid_size_, block_size_ >> > (d_img_labels_);
cudaDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
LocalScan();
perf_.stop();
perf_.store(Step(StepType::FIRST_SCAN), perf_.last());
perf_.start();
GlobalScan();
perf_.stop();
perf_.store(Step(StepType::SECOND_SCAN), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(DLP);
|
d5c412f7ffe395cf57b5b70e309b9cf1dbbba9dd.hip | // !!! This is a file automatically generated by hipify!!!
/* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "sparsematrix.h"
#include "utils.h"
static void *cuda_chk_alloc(const size_t n, /* Number of elements */
const size_t size /* Size of each element */
)
{
void *p;
hipError_t error = hipMalloc((void **)&p, n * size);
if (error != hipSuccess)
{
fprintf(stderr, "hipMalloc returned error code %d, line(%d)\n", error,
__LINE__);
exit(EXIT_FAILURE);
}
return p;
}
// x and y is sorted by row
__global__ void initVariableNode(sparseMatrix * const H, float* const l)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= H->n_cols)
return;
for (entry *e = sm_first_in_col(H, idx);
!sm_at_end(e);
e = sm_next_in_col(H, e))
{
e->q = l[idx];
}
}
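// Check-node update (sum-product rule): each check node sends along every edge
// r = 2*atanh(prod tanh(q/2)) taken over its other incident edges, clamped when the result overflows.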
__global__ void iterCheckNode(sparseMatrix * const H)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= H->n_rows)
return;
for (entry * e = sm_first_in_row(H, idx);
!sm_at_end(e);
e = sm_next_in_row(H, e))
{
float product = 1;
for (entry * p = sm_first_in_row(H, idx);
!sm_at_end(p);
p = sm_next_in_row(H, p))
{
if (p == e)
continue;
product *= tanh(p->q / 2);
}
e->r = 2 * atanh(product);
if (isinf(e->r))
{
if (e->r < 0)
e->r = -150;
else
e->r = 150;
}
}
}
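// Variable-node update: the outgoing q on each edge is the channel LLR plus the sum of
// incoming r messages from all the other check nodes.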
__global__ void iterVariableNode(sparseMatrix * const H, float* const l)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= H->n_cols)
return;
float sum = l[idx];
for (entry *e = sm_first_in_col(H, idx);
!sm_at_end(e);
e = sm_next_in_col(H, e))
{
sum += e->r;
}
for (entry *e = sm_first_in_col(H, idx);
!sm_at_end(e);
e = sm_next_in_col(H, e))
{
e->q = sum - e->r;
}
}
__global__ void updateLikelihood(sparseMatrix * const H, float* const l, float* const Q)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= H->n_cols)
return;
float sum = l[idx];
for (entry *e = sm_first_in_col(H, idx);
!sm_at_end(e);
e = sm_next_in_col(H, e))
{
sum += e->r;
}
Q[idx] = sum;
}
__global__ void hardDecision(size_t const N, float* const Q, char* const codeword)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
if (Q[idx] >= 0){
codeword[idx] = '0';
}
else{
codeword[idx] = '1';
}
}
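// Syndrome check: each row sums its codeword bits modulo 2 and accumulates the result into *c,
// so *c stays 0 iff every parity check is satisfied.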
__global__ void check(sparseMatrix * const H, char* const codeword, int* const c)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= H->n_rows)
return;
int sum = 0;
for (entry * e = sm_first_in_row(H, idx);
!sm_at_end(e);
e = sm_next_in_row(H, e))
{
sum += codeword[e->col] - '0';
}
atomicAdd(c, sum % 2);
}
size_t prprp_decode(size_t M, size_t N, size_t nnz, sparseMatrix * const d_H, float* const d_lratio, float *d_Q, char* const d_codeword, size_t const max_iter)
{
// check output
int *d_c = (int *)cuda_chk_alloc(1, sizeof(int));
/* launch kernel */
size_t i;
initVariableNode << <ceil(static_cast<float>(N) / 512.0f), 512 >> >(d_H, d_lratio);
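// Message-passing loop: alternate check-node and variable-node updates until the hard decision
// satisfies all parity checks or max_iter is reached.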
for (i = 0;; i++)
{
iterCheckNode << <ceil(static_cast<float>(M) / 512.0f), 512 >> >(d_H);
checkCudaErrors(hipGetLastError());
iterVariableNode << <ceil(static_cast<float>(N) / 512.0f), 512 >> >(d_H, d_lratio);
checkCudaErrors(hipGetLastError());
updateLikelihood << <ceil(static_cast<float>(N) / 512.0f), 512 >> >(d_H, d_lratio, d_Q);
checkCudaErrors(hipGetLastError());
hardDecision << <ceil(static_cast<float>(N) / 512.0f), 512 >> >(N, d_Q, d_codeword);
checkCudaErrors(hipGetLastError());
int c = 0;
checkCudaErrors(hipMemset(d_c, 0, sizeof(int)));
check << <ceil(static_cast<float>(M) / 512.0f), 512 >> >(d_H, d_codeword, d_c);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(&c, d_c, sizeof(int), hipMemcpyDeviceToHost));
printf("c is %d\n", c);
if (i == max_iter || c == 0)
{
break;
}
}
/* free device memory */
checkCudaErrors(hipFree(d_c));
return i;
}
sparseMatrix *readSparseMatrix(char *path){
FILE *f = fopen(path, "r");
if (f == NULL)
{
fprintf(stderr, "Can't open parity check file: %s\n", path);
fclose(f);
exit(1);
}
int M, N, nnz;
fscanf(f, "%d %d %d", &M, &N, &nnz);
int *x = (int *)malloc(nnz*sizeof(int));
int *y = (int *)malloc(nnz*sizeof(int));
for (int i = 0; i < nnz; i++)
{
fscanf(f, "%d %d", x + i, y + i);
}
fclose(f);
return createSparseMatrix(x, y, M, N, nnz);
}
int main(int argc, char **argv)
{
// check arg count
if (argc != 4)
{
return 1;
}
char *pchk_path = argv[1];
char *rfile_path = argv[2];
char *dfile_path = argv[3];
// open input file
FILE *rfile, *dfile;
rfile = fopen(rfile_path, "r");
if (rfile == NULL)
{
fclose(rfile);
exit(EXIT_FAILURE);
}
dfile = fopen(dfile_path, "w+");
if (dfile == NULL)
{
fclose(dfile);
exit(EXIT_FAILURE);
}
// read parity check file into host memory
sparseMatrix *H = readSparseMatrix(pchk_path);
// abbreviations
int M = H->n_rows;
int N = H->n_cols;
int nnz = H->nnz;
/* allocate host memory */
char *codeword = (char *)malloc(N * sizeof(char)+1);
float *lratio = (float *)malloc(N * sizeof(float));
/* allocate device memory */
// sparse matrix
sparseMatrix *d_H = (sparseMatrix *)cuda_chk_alloc(1, sizeof(sparseMatrix));
// log-likelihood ratio
float *d_lratio = (float *)cuda_chk_alloc(N, sizeof(float));
// Q
float *d_Q = (float *)cuda_chk_alloc(N, sizeof(float));
// hard decision output
char *d_codeword = (char *)cuda_chk_alloc(N, sizeof(char));
// copy sparse matrix into device
cudaCopySparseMatrixH2D(d_H, H);
// read each block, decode and write
for (int block_id = 0;; block_id++)
{
// read likelihood ratio
for (int i = 0; i < N; i++)
{
int c = fscanf(rfile, "%f", &lratio[i]);
if (c == EOF)
{
if (i > 0)
{
printf("Warning: Short block (%d long) at end of received file ignored\n", i);
}
goto done;
}
}
/* copy from host to device */
checkCudaErrors(hipMemcpy(d_lratio, lratio, N * sizeof(float), hipMemcpyHostToDevice));
/* set initial values */
checkCudaErrors(hipMemset(d_codeword, 0, N * sizeof(char)));
checkCudaErrors(hipMemset(d_Q, 0, N * sizeof(float)));
// decode
size_t iters = prprp_decode(M, N, nnz, d_H, d_lratio, d_Q, d_codeword, 50);
// write output
checkCudaErrors(hipMemcpy(codeword, d_codeword, N * sizeof(char), hipMemcpyDeviceToHost));
fprintf(dfile, "%s\n", codeword);
}
done:
// free file handle
fclose(rfile);
fclose(dfile);
// free host memory
freeSparseMatrix(H);
free(codeword);
free(lratio);
// free device memory
cudaFreeSparseMatrix(d_H);
checkCudaErrors(hipFree(d_lratio));
checkCudaErrors(hipFree(d_Q));
checkCudaErrors(hipFree(d_codeword));
// reset device and wait for exit
hipDeviceReset();
return 0;
}
| d5c412f7ffe395cf57b5b70e309b9cf1dbbba9dd.cu | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "sparsematrix.h"
#include "utils.h"
static void *cuda_chk_alloc(const size_t n, /* Number of elements */
const size_t size /* Size of each element */
)
{
void *p;
cudaError_t error = cudaMalloc((void **)&p, n * size);
if (error != cudaSuccess)
{
fprintf(stderr, "cudaMalloc returned error code %d, line(%d)\n", error,
__LINE__);
exit(EXIT_FAILURE);
}
return p;
}
// x and y are sorted by row
__global__ void initVariableNode(sparseMatrix * const H, float* const l)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= H->n_cols)
return;
for (entry *e = sm_first_in_col(H, idx);
!sm_at_end(e);
e = sm_next_in_col(H, e))
{
e->q = l[idx];
}
}
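// Check-node update (sum-product rule): each check node sends along every edge
// r = 2*atanh(prod tanh(q/2)) taken over its other incident edges, clamped when the result overflows.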
__global__ void iterCheckNode(sparseMatrix * const H)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= H->n_rows)
return;
for (entry * e = sm_first_in_row(H, idx);
!sm_at_end(e);
e = sm_next_in_row(H, e))
{
float product = 1;
for (entry * p = sm_first_in_row(H, idx);
!sm_at_end(p);
p = sm_next_in_row(H, p))
{
if (p == e)
continue;
product *= tanh(p->q / 2);
}
e->r = 2 * atanh(product);
if (isinf(e->r))
{
if (e->r < 0)
e->r = -150;
else
e->r = 150;
}
}
}
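// Variable-node update: the outgoing q on each edge is the channel LLR plus the sum of
// incoming r messages from all the other check nodes.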
__global__ void iterVariableNode(sparseMatrix * const H, float* const l)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= H->n_cols)
return;
float sum = l[idx];
for (entry *e = sm_first_in_col(H, idx);
!sm_at_end(e);
e = sm_next_in_col(H, e))
{
sum += e->r;
}
for (entry *e = sm_first_in_col(H, idx);
!sm_at_end(e);
e = sm_next_in_col(H, e))
{
e->q = sum - e->r;
}
}
__global__ void updateLikelihood(sparseMatrix * const H, float* const l, float* const Q)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= H->n_cols)
return;
float sum = l[idx];
for (entry *e = sm_first_in_col(H, idx);
!sm_at_end(e);
e = sm_next_in_col(H, e))
{
sum += e->r;
}
Q[idx] = sum;
}
__global__ void hardDecision(size_t const N, float* const Q, char* const codeword)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
if (Q[idx] >= 0){
codeword[idx] = '0';
}
else{
codeword[idx] = '1';
}
}
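// Syndrome check: each row sums its codeword bits modulo 2 and accumulates the result into *c,
// so *c stays 0 iff every parity check is satisfied.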
__global__ void check(sparseMatrix * const H, char* const codeword, int* const c)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= H->n_rows)
return;
int sum = 0;
for (entry * e = sm_first_in_row(H, idx);
!sm_at_end(e);
e = sm_next_in_row(H, e))
{
sum += codeword[e->col] - '0';
}
atomicAdd(c, sum % 2);
}
size_t prprp_decode(size_t M, size_t N, size_t nnz, sparseMatrix * const d_H, float* const d_lratio, float *d_Q, char* const d_codeword, size_t const max_iter)
{
// check output
int *d_c = (int *)cuda_chk_alloc(1, sizeof(int));
/* launch kernel */
size_t i;
initVariableNode << <ceil(static_cast<float>(N) / 512.0f), 512 >> >(d_H, d_lratio);
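// Message-passing loop: alternate check-node and variable-node updates until the hard decision
// satisfies all parity checks or max_iter is reached.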
for (i = 0;; i++)
{
iterCheckNode << <ceil(static_cast<float>(M) / 512.0f), 512 >> >(d_H);
checkCudaErrors(cudaGetLastError());
iterVariableNode << <ceil(static_cast<float>(N) / 512.0f), 512 >> >(d_H, d_lratio);
checkCudaErrors(cudaGetLastError());
updateLikelihood << <ceil(static_cast<float>(N) / 512.0f), 512 >> >(d_H, d_lratio, d_Q);
checkCudaErrors(cudaGetLastError());
hardDecision << <ceil(static_cast<float>(N) / 512.0f), 512 >> >(N, d_Q, d_codeword);
checkCudaErrors(cudaGetLastError());
int c = 0;
checkCudaErrors(cudaMemset(d_c, 0, sizeof(int)));
check << <ceil(static_cast<float>(M) / 512.0f), 512 >> >(d_H, d_codeword, d_c);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(&c, d_c, sizeof(int), cudaMemcpyDeviceToHost));
printf("c is %d\n", c);
if (i == max_iter || c == 0)
{
break;
}
}
/* free device memory */
checkCudaErrors(cudaFree(d_c));
return i;
}
sparseMatrix *readSparseMatrix(char *path){
FILE *f = fopen(path, "r");
if (f == NULL)
{
fprintf(stderr, "Can't open parity check file: %s\n", path);
fclose(f);
exit(1);
}
int M, N, nnz;
fscanf(f, "%d %d %d", &M, &N, &nnz);
int *x = (int *)malloc(nnz*sizeof(int));
int *y = (int *)malloc(nnz*sizeof(int));
for (int i = 0; i < nnz; i++)
{
fscanf(f, "%d %d", x + i, y + i);
}
fclose(f);
return createSparseMatrix(x, y, M, N, nnz);
}
int main(int argc, char **argv)
{
// check arg count
if (argc != 4)
{
return 1;
}
char *pchk_path = argv[1];
char *rfile_path = argv[2];
char *dfile_path = argv[3];
// open input file
FILE *rfile, *dfile;
rfile = fopen(rfile_path, "r");
if (rfile == NULL)
{
fclose(rfile);
exit(EXIT_FAILURE);
}
dfile = fopen(dfile_path, "w+");
if (dfile == NULL)
{
fclose(dfile);
exit(EXIT_FAILURE);
}
// read parity check file into host memory
sparseMatrix *H = readSparseMatrix(pchk_path);
// abbreviations
int M = H->n_rows;
int N = H->n_cols;
int nnz = H->nnz;
/* allocate host memory */
char *codeword = (char *)malloc(N * sizeof(char)+1);
float *lratio = (float *)malloc(N * sizeof(float));
/* allocate device memory */
// sparse matrix
sparseMatrix *d_H = (sparseMatrix *)cuda_chk_alloc(1, sizeof(sparseMatrix));
// log-likelihood ratio
float *d_lratio = (float *)cuda_chk_alloc(N, sizeof(float));
// Q
float *d_Q = (float *)cuda_chk_alloc(N, sizeof(float));
// hard decision output
char *d_codeword = (char *)cuda_chk_alloc(N, sizeof(char));
// copy sparse matrix into device
cudaCopySparseMatrixH2D(d_H, H);
// read each block, decode and write
for (int block_id = 0;; block_id++)
{
// read likelihood ratio
for (int i = 0; i < N; i++)
{
int c = fscanf(rfile, "%f", &lratio[i]);
if (c == EOF)
{
if (i > 0)
{
printf("Warning: Short block (%d long) at end of received file ignored\n", i);
}
goto done;
}
}
/* copy from host to device */
checkCudaErrors(cudaMemcpy(d_lratio, lratio, N * sizeof(float), cudaMemcpyHostToDevice));
/* set initial values */
checkCudaErrors(cudaMemset(d_codeword, 0, N * sizeof(char)));
checkCudaErrors(cudaMemset(d_Q, 0, N * sizeof(float)));
// decode
size_t iters = prprp_decode(M, N, nnz, d_H, d_lratio, d_Q, d_codeword, 50);
// write output
checkCudaErrors(cudaMemcpy(codeword, d_codeword, N * sizeof(char), cudaMemcpyDeviceToHost));
fprintf(dfile, "%s\n", codeword);
}
done:
// free file handle
fclose(rfile);
fclose(dfile);
// free host memory
freeSparseMatrix(H);
free(codeword);
free(lratio);
// free device memory
cudaFreeSparseMatrix(d_H);
checkCudaErrors(cudaFree(d_lratio));
checkCudaErrors(cudaFree(d_Q));
checkCudaErrors(cudaFree(d_codeword));
// reset device and wait for exit
cudaDeviceReset();
return 0;
}
|
0f3e8655d16b12cdba7af2c95803a7694e3831be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <glog/logging.h>
#include <iostream>
#include "float.h"
#include "raytracer.h"
#define EPSILON 0
#define PI 3.14159265358f
#define chunksize 8
struct rtconfig
{
rgb background;
camera cam;
int width;
int height;
point xgap;
point ygap;
point upperleft;
};
typedef struct rtconfig rtconfig;
__host__ __device__ point cross(const point& p1, const point& p2)
{
point result;
result.x = p1.y*p2.z - p1.z*p2.y;
result.y = p1.z*p2.x - p1.x*p2.z;
result.z = p1.x*p2.y - p1.y*p2.x;
return result;
}
__host__ __device__ float dot(const point& p1, const point& p2)
{
return p1.x*p2.x + p1.y*p2.y + p1.z*p2.z;
}
__host__ __device__ float norm(const point& p)
{
return sqrt(p.x*p.x + p.y*p.y + p.z*p.z);
}
__host__ __device__ float anglebetween(const point& p1, const point& p2)
{
return dot(p1, p2)/(norm(p1)*norm(p2));
}
__host__ __device__ void normalize(point& p)
{
float len = norm(p);
if(len != 0.0f)
{
p.x /= len;
p.y /= len;
p.z /= len;
}
}
__host__ __device__ point operator*(const point& vector, float scalar)
{
point result;
result.x = scalar*vector.x;
result.y = scalar*vector.y;
result.z = scalar*vector.z;
return result;
}
__host__ __device__ point operator*(float scalar, const point& vector)
{
return vector * scalar;
}
__host__ __device__ point operator+(const point& left, const point& right)
{
point result;
result.x = left.x+right.x;
result.y = left.y+right.y;
result.z = left.z+right.z;
return result;
}
__host__ __device__ point operator-(const point& left, const point& right)
{
point result;
result.x = left.x-right.x;
result.y = left.y-right.y;
result.z = left.z-right.z;
return result;
}
__device__ rgb shade(const rgb& color, float fraction)
{
if(fraction < 0.0f) fraction = 0.0f;
if(fraction > 1.0f) fraction = 1.0f;
rgb result;
result.x = color.x*fraction;
result.z = color.z*fraction;
result.y = color.y*fraction;
return result;
}
__host__ __device__ bool intersect(const point& location, const point& direction, const point& normal, const point& p, point& intersection)
{
float t = dot(normal, p-location) / dot(normal,direction);
//wrong direction
if(t < 0.0f)
{
return false;
}
intersection = location+t*direction;
return true;
}
// checks if point p is on the same side of the line AB as C
__host__ __device__ bool inside(const point& p, const point& c, const point& a, const point& b)
{
if( dot(cross(b-a, p-a), cross(b-a, c-a)) >= -EPSILON )
{
return true;
}
return false;
}
__host__ __device__ bool intersect(const ray& r, const triangle& t, point& intersection)
{
//calc intersection with triangle surface
if(!intersect(r.location, r.direction, t.norm, t.A, intersection))
{
return false;
}
//check if intersection is within triangle
if(inside(intersection, t.A, t.B, t.C) && inside(intersection, t.B, t.A, t.C) && inside(intersection, t.C, t.A, t.B) )
{
return true;
}
return false;
}
__device__ void initial_ray(const camera& c, const point& upperleft, int x, int y, point& xgap, point& ygap, ray& r)
{
//place the ray in the middle of the hole (not top left)
point p = upperleft + (x+0.5f) * xgap - (y+0.5f) * ygap;
r.location = p;
r.direction = p-c.location;
normalize(r.direction);
}
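// Walks over all triangles and reports whether the ray hits any of them,
// storing the closest hit triangle in `nearest`. Note that `intersec` holds
// the intersection of the last triangle tested, not necessarily of `nearest`.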
__device__ __host__ bool shootray(const ray& r, int tricount, triangle *triangles, triangle& nearest, point& intersec)
{
float min_distance = FLT_MAX;
bool hit;
float distance;
for(int i = 0; i < tricount; i++)
{
hit = intersect(r, triangles[i], intersec);
distance = norm(intersec-r.location);
if(hit && distance < min_distance && distance >= -EPSILON)
{
nearest = triangles[i];
min_distance = distance;
}
}
return min_distance != FLT_MAX;
}
#if __CPUVERSION__
void render_pixel(rtconfig *config, int tricount, triangle *triangles, int lightcount, point *lights, rgb *resultpixels, int x, int y)
{
#else
__global__ void render_pixel(rtconfig *config, int tricount, triangle *triangles, int lightcount, point *lights, rgb *resultpixels)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
#endif
if(x < config->width && y < config->height)
{
ray r;
initial_ray(config->cam, config->upperleft, x, y, config->xgap, config->ygap, r);
//find nearest intersect triangle
point intersec;
triangle nearest;
resultpixels[y*config->width+x] = config->background;
if(shootray(r, tricount,triangles, nearest, intersec))
{
//set pixel color to color of nearest intersecting triangle
float angle = anglebetween(nearest.norm, r.direction);
float lightintense = fabs(angle);
resultpixels[y*config->width+x] = shade(nearest.color, lightintense);
}
}
}
//calculates the norm for every triangle
__global__ void init_norms(int count, triangle *triangles)
{
#if __GPUVERSION__
int tid = threadIdx.x+blockIdx.x*blockDim.x;
if(tid < count)
#else
for(int tid = 0; tid < count; tid++)
#endif
{
triangle t = triangles[tid];
t.norm = cross(t.A - t.C, t.A - t.B);
normalize( t.norm );
triangles[tid].norm = t.norm;
}
}
void init_ray_gap(const camera& c, int width, int height, point &xgap, point &ygap, point& upperleft)
{
point right = cross(c.up, c.direction);
normalize(right);
point dx = tan(c.hor_angle*PI/360) * c.distance * right;
point dy = tan(c.vert_angle*PI/360) * c.distance * c.up;
point dir = c.direction;
normalize(dir);
dir = dir*c.distance;
upperleft = c.location + dir - dx + dy ;
xgap = dx*(2.0f/width);
ygap = dy*(2.0f/height);
}
void render_image(const scene& s, const int& height, const int& width, rgb* image)
{
//init config
rtconfig config;
config.background = s.background;
config.cam = s.cam;
config.width = width;
config.height = height;
int tricount = s.objects.count;
int lightcount = s.light.count;
#if __GPUVERSION__
hipError_t error;
dim3 threadsPerBlock(chunksize,chunksize);
dim3 blocksPerGrid((width+chunksize-1)/chunksize, (height+chunksize-1)/chunksize);
//copy primitives to device
triangle *d_triangles = NULL;
if(tricount > 0)
{
error = hipMalloc(&d_triangles, tricount*sizeof(triangle));
CHECK_EQ(hipSuccess, error) << "Error at line "<< __LINE__ << ": " << hipGetErrorString(error);
CHECK_NOTNULL(d_triangles);
error = hipMemcpyAsync(d_triangles, s.objects.triangles, tricount*sizeof(triangle), hipMemcpyHostToDevice);
CHECK_EQ(hipSuccess, error) << "Error at line "<< __LINE__ << ": " << hipGetErrorString(error);
}
//copy lights to device
point *d_lights = NULL;
if(lightcount > 0){
error = hipMalloc(&d_lights, lightcount*sizeof(point));
CHECK_EQ(hipSuccess, error) << "Error at line "<< __LINE__ << ": " << hipGetErrorString(error);
CHECK_NOTNULL(d_lights);
error = hipMemcpyAsync(d_lights, s.light.lights, lightcount*sizeof(point), hipMemcpyHostToDevice);
CHECK_EQ(hipSuccess, error) << "Error at line "<< __LINE__ << ": " << hipGetErrorString(error);
}
//calc ray gaps
init_ray_gap(config.cam, config.width, config.height, config.xgap, config.ygap, config.upperleft);
//copy config to device
int csize = sizeof(rtconfig);
rtconfig *d_config;
error = hipMalloc(&d_config, csize);
CHECK_EQ(hipSuccess, error) << "Error at line "<< __LINE__ << ": " << hipGetErrorString(error);
CHECK_NOTNULL(d_config);
error = hipMemcpyAsync(d_config, &config, csize, hipMemcpyHostToDevice);
CHECK_EQ(hipSuccess, error) << "Error at line "<< __LINE__ << ": " << hipGetErrorString(error);
//alloc memory for result
rgb *d_resultcolors;
error = hipMalloc(&d_resultcolors, width*height*sizeof(rgb));
CHECK_EQ(hipSuccess, error) << "Error at line "<< __LINE__ << ": " << hipGetErrorString(error);
CHECK_NOTNULL(d_resultcolors);
error = hipDeviceSynchronize();
CHECK_EQ(hipSuccess, error) << "Error at line "<< __LINE__ << ": " << hipGetErrorString(error);
//calc primitives norms
int n = 512;
dim3 normThreadsPerBlock(n);
dim3 normBlocksPerGrid((tricount + n - 1) / n);
hipLaunchKernelGGL(( init_norms), dim3(normBlocksPerGrid), dim3(normThreadsPerBlock), 0, 0, tricount, d_triangles);
error = hipGetLastError();
CHECK_EQ(hipSuccess, error) << "Error at line "<< __LINE__ << ": " << hipGetErrorString(error);
error = hipDeviceSynchronize();
CHECK_EQ(hipSuccess, error) << "Error at line "<< __LINE__ << ": " << hipGetErrorString(error);
//launch main kernel
hipLaunchKernelGGL(( render_pixel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_config, tricount, d_triangles, lightcount, d_lights, d_resultcolors);
error = hipGetLastError();
CHECK_EQ(hipSuccess, error) << "Error at line "<< __LINE__ << ": " << hipGetErrorString(error);
//copy back results
error = hipMemcpy(image, d_resultcolors, width*height*sizeof(rgb), hipMemcpyDeviceToHost);
CHECK_EQ(hipSuccess, error) << "Error at line "<< __LINE__ << ": " << hipGetErrorString(error);
hipFree(d_triangles);
hipFree(d_lights);
hipFree(d_config);
hipFree(d_resultcolors);
#else
//calc ray gaps
init_norms(tricount, s.objects.triangles);
init_ray_gap(config.cam, config.width, config.height, config.xgap, config.ygap, config.upperleft);
for(int y = 0; y < height; y++)
{
for(int x = 0; x < width; x++)
{
render_pixel(&config, tricount, s.objects.triangles, lightcount, s.light.lights, image, x, y);
}
}
#endif
}
| 0f3e8655d16b12cdba7af2c95803a7694e3831be.cu | #include <cmath>
#include <glog/logging.h>
#include <iostream>
#include "float.h"
#include "raytracer.h"
#define EPSILON 0
#define PI 3.14159265358f
#define chunksize 8
struct rtconfig
{
rgb background;
camera cam;
int width;
int height;
point xgap;
point ygap;
point upperleft;
};
typedef struct rtconfig rtconfig;
__host__ __device__ point cross(const point& p1, const point& p2)
{
point result;
result.x = p1.y*p2.z - p1.z*p2.y;
result.y = p1.z*p2.x - p1.x*p2.z;
result.z = p1.x*p2.y - p1.y*p2.x;
return result;
}
__host__ __device__ float dot(const point& p1, const point& p2)
{
return p1.x*p2.x + p1.y*p2.y + p1.z*p2.z;
}
__host__ __device__ float norm(const point& p)
{
return sqrt(p.x*p.x + p.y*p.y + p.z*p.z);
}
__host__ __device__ float anglebetween(const point& p1, const point& p2)
{
return dot(p1, p2)/(norm(p1)*norm(p2));
}
__host__ __device__ void normalize(point& p)
{
float len = norm(p);
if(len != 0.0f)
{
p.x /= len;
p.y /= len;
p.z /= len;
}
}
__host__ __device__ point operator*(const point& vector, float scalar)
{
point result;
result.x = scalar*vector.x;
result.y = scalar*vector.y;
result.z = scalar*vector.z;
return result;
}
__host__ __device__ point operator*(float scalar, const point& vector)
{
return vector * scalar;
}
__host__ __device__ point operator+(const point& left, const point& right)
{
point result;
result.x = left.x+right.x;
result.y = left.y+right.y;
result.z = left.z+right.z;
return result;
}
__host__ __device__ point operator-(const point& left, const point& right)
{
point result;
result.x = left.x-right.x;
result.y = left.y-right.y;
result.z = left.z-right.z;
return result;
}
__device__ rgb shade(const rgb& color, float fraction)
{
if(fraction < 0.0f) fraction = 0.0f;
if(fraction > 1.0f) fraction = 1.0f;
rgb result;
result.x = color.x*fraction;
result.z = color.z*fraction;
result.y = color.y*fraction;
return result;
}
__host__ __device__ bool intersect(const point& location, const point& direction, const point& normal, const point& p, point& intersection)
{
float t = dot(normal, p-location) / dot(normal,direction);
//wrong direction
if(t < 0.0f)
{
return false;
}
intersection = location+t*direction;
return true;
}
// checks if point p is on the same side of the line AB as C
__host__ __device__ bool inside(const point& p, const point& c, const point& a, const point& b)
{
if( dot(cross(b-a, p-a), cross(b-a, c-a)) >= -EPSILON )
{
return true;
}
return false;
}
__host__ __device__ bool intersect(const ray& r, const triangle& t, point& intersection)
{
//calc intersection with triangle surface
if(!intersect(r.location, r.direction, t.norm, t.A, intersection))
{
return false;
}
//check if intersection is within triangle
if(inside(intersection, t.A, t.B, t.C) && inside(intersection, t.B, t.A, t.C) && inside(intersection, t.C, t.A, t.B) )
{
return true;
}
return false;
}
__device__ void initial_ray(const camera& c, const point& upperleft, int x, int y, point& xgap, point& ygap, ray& r)
{
//place the ray in the middle of the hole (not top left)
point p = upperleft + (x+0.5f) * xgap - (y+0.5f) * ygap;
r.location = p;
r.direction = p-c.location;
normalize(r.direction);
}
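// Walks over all triangles and reports whether the ray hits any of them,
// storing the closest hit triangle in `nearest`. Note that `intersec` holds
// the intersection of the last triangle tested, not necessarily of `nearest`.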
__device__ __host__ bool shootray(const ray& r, int tricount, triangle *triangles, triangle& nearest, point& intersec)
{
float min_distance = FLT_MAX;
bool hit;
float distance;
for(int i = 0; i < tricount; i++)
{
hit = intersect(r, triangles[i], intersec);
distance = norm(intersec-r.location);
if(hit && distance < min_distance && distance >= -EPSILON)
{
nearest = triangles[i];
min_distance = distance;
}
}
return min_distance != FLT_MAX;
}
#if __CPUVERSION__
void render_pixel(rtconfig *config, int tricount, triangle *triangles, int lightcount, point *lights, rgb *resultpixels, int x, int y)
{
#else
__global__ void render_pixel(rtconfig *config, int tricount, triangle *triangles, int lightcount, point *lights, rgb *resultpixels)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
#endif
if(x < config->width && y < config->height)
{
ray r;
initial_ray(config->cam, config->upperleft, x, y, config->xgap, config->ygap, r);
//find nearest intersect triangle
point intersec;
triangle nearest;
resultpixels[y*config->width+x] = config->background;
if(shootray(r, tricount,triangles, nearest, intersec))
{
//set pixel color to color of nearest intersecting triangle
float angle = anglebetween(nearest.norm, r.direction);
float lightintense = fabs(angle);
resultpixels[y*config->width+x] = shade(nearest.color, lightintense);
}
}
}
//calculates the norm for every triangle
__global__ void init_norms(int count, triangle *triangles)
{
#if __GPUVERSION__
int tid = threadIdx.x+blockIdx.x*blockDim.x;
if(tid < count)
#else
for(int tid = 0; tid < count; tid++)
#endif
{
triangle t = triangles[tid];
t.norm = cross(t.A - t.C, t.A - t.B);
normalize( t.norm );
triangles[tid].norm = t.norm;
}
}
void init_ray_gap(const camera& c, int width, int height, point &xgap, point &ygap, point& upperleft)
{
point right = cross(c.up, c.direction);
normalize(right);
point dx = tan(c.hor_angle*PI/360) * c.distance * right;
point dy = tan(c.vert_angle*PI/360) * c.distance * c.up;
point dir = c.direction;
normalize(dir);
dir = dir*c.distance;
upperleft = c.location + dir - dx + dy ;
xgap = dx*(2.0f/width);
ygap = dy*(2.0f/height);
}
void render_image(const scene& s, const int& height, const int& width, rgb* image)
{
//init config
rtconfig config;
config.background = s.background;
config.cam = s.cam;
config.width = width;
config.height = height;
int tricount = s.objects.count;
int lightcount = s.light.count;
#if __GPUVERSION__
cudaError_t error;
dim3 threadsPerBlock(chunksize,chunksize);
dim3 blocksPerGrid((width+chunksize-1)/chunksize, (height+chunksize-1)/chunksize);
//copy primitives to device
triangle *d_triangles = NULL;
if(tricount > 0)
{
error = cudaMalloc(&d_triangles, tricount*sizeof(triangle));
CHECK_EQ(cudaSuccess, error) << "Error at line "<< __LINE__ << ": " << cudaGetErrorString(error);
CHECK_NOTNULL(d_triangles);
error = cudaMemcpyAsync(d_triangles, s.objects.triangles, tricount*sizeof(triangle), cudaMemcpyHostToDevice);
CHECK_EQ(cudaSuccess, error) << "Error at line "<< __LINE__ << ": " << cudaGetErrorString(error);
}
//copy lights to device
point *d_lights = NULL;
if(lightcount > 0){
error = cudaMalloc(&d_lights, lightcount*sizeof(point));
CHECK_EQ(cudaSuccess, error) << "Error at line "<< __LINE__ << ": " << cudaGetErrorString(error);
CHECK_NOTNULL(d_lights);
error = cudaMemcpyAsync(d_lights, s.light.lights, lightcount*sizeof(point), cudaMemcpyHostToDevice);
CHECK_EQ(cudaSuccess, error) << "Error at line "<< __LINE__ << ": " << cudaGetErrorString(error);
}
//calc ray gaps
init_ray_gap(config.cam, config.width, config.height, config.xgap, config.ygap, config.upperleft);
//copy config to device
int csize = sizeof(rtconfig);
rtconfig *d_config;
error = cudaMalloc(&d_config, csize);
CHECK_EQ(cudaSuccess, error) << "Error at line "<< __LINE__ << ": " << cudaGetErrorString(error);
CHECK_NOTNULL(d_config);
error = cudaMemcpyAsync(d_config, &config, csize, cudaMemcpyHostToDevice);
CHECK_EQ(cudaSuccess, error) << "Error at line "<< __LINE__ << ": " << cudaGetErrorString(error);
//alloc memory for result
rgb *d_resultcolors;
error = cudaMalloc(&d_resultcolors, width*height*sizeof(rgb));
CHECK_EQ(cudaSuccess, error) << "Error at line "<< __LINE__ << ": " << cudaGetErrorString(error);
CHECK_NOTNULL(d_resultcolors);
error = cudaThreadSynchronize();
CHECK_EQ(cudaSuccess, error) << "Error at line "<< __LINE__ << ": " << cudaGetErrorString(error);
//calc primitives norms
int n = 512;
dim3 normThreadsPerBlock(n);
dim3 normBlocksPerGrid((tricount + n - 1) / n);
init_norms<<<normBlocksPerGrid, normThreadsPerBlock>>>(tricount, d_triangles);
error = cudaGetLastError();
CHECK_EQ(cudaSuccess, error) << "Error at line "<< __LINE__ << ": " << cudaGetErrorString(error);
error = cudaThreadSynchronize();
CHECK_EQ(cudaSuccess, error) << "Error at line "<< __LINE__ << ": " << cudaGetErrorString(error);
//launch main kernel
render_pixel<<<blocksPerGrid, threadsPerBlock>>>(d_config, tricount, d_triangles, lightcount, d_lights, d_resultcolors);
error = cudaGetLastError();
CHECK_EQ(cudaSuccess, error) << "Error at line "<< __LINE__ << ": " << cudaGetErrorString(error);
//copy back results
error = cudaMemcpy(image, d_resultcolors, width*height*sizeof(rgb), cudaMemcpyDeviceToHost);
CHECK_EQ(cudaSuccess, error) << "Error at line "<< __LINE__ << ": " << cudaGetErrorString(error);
cudaFree(d_triangles);
cudaFree(d_lights);
cudaFree(d_config);
cudaFree(d_resultcolors);
#else
//calc ray gaps
init_norms(tricount, s.objects.triangles);
init_ray_gap(config.cam, config.width, config.height, config.xgap, config.ygap, config.upperleft);
for(int y = 0; y < height; y++)
{
for(int x = 0; x < width; x++)
{
render_pixel(&config, tricount, s.objects.triangles, lightcount, s.light.lights, image, x, y);
}
}
#endif
}
|
194f3bf93b28335074a10e8eacb31090fe9598e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <vector>
#include <list>
#include <stdio.h>
#include "hello.cuh"
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
using namespace std;
class Particle
{
public:
int particle_id;
float particle_velocity;
float particle_density;
};
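// One block per particle: bump the id by 2 and scale velocity/density in place.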
__global__ void kernel_function(Particle *pointer, int total)
{
int index=blockIdx.x;
if(index < total)
{
pointer[index].particle_id=pointer[index].particle_id+2;
pointer[index].particle_velocity=pointer[index].particle_velocity*2;
pointer[index].particle_density=pointer[index].particle_density*3;
}
}
int mainForClass()
{
cout<<endl<<"Main FOR CLASS AAAAAAAAA "<<endl;
std::list<Particle> particle_list;
Particle particle_a;
Particle particle_b;
Particle particle_c;
particle_a.particle_id=1;
particle_a.particle_velocity=1.1;
particle_a.particle_density=1.22;
particle_b.particle_id=2;
particle_b.particle_velocity=2.1;
particle_b.particle_density=2.22;
particle_c.particle_id=3;
particle_c.particle_velocity=3.1;
particle_c.particle_density=3.22;
particle_list.push_back(particle_a);
particle_list.push_back(particle_b);
particle_list.push_back(particle_c);
thrust::device_vector<Particle> device_list(particle_list.begin(), particle_list.end());
Particle *raw_pointer;
raw_pointer=thrust::raw_pointer_cast(&device_list[0]);
hipLaunchKernelGGL(( kernel_function), dim3(3), dim3(1), 0, 0, raw_pointer, device_list.size());
thrust::host_vector<Particle> host_list=device_list;
for(int count=0; count<host_list.size(); count++)
{
printf("%d, %f, %f\n", host_list[count].particle_id, host_list[count].particle_velocity, host_list[count].particle_density);
}
return 0;
}
int mainForFFFF()
{
cout<<endl<<"FFFFFFFFFFFFFUCK "<<endl;
// initialize all ten integers of a device_vector to 1
	thrust::device_vector<int> D(10, 1);
	// set the first seven elements of a vector to 9
	thrust::fill(D.begin(), D.begin() + 7, 9);
	// initialize a host_vector with the first five elements of D
	thrust::host_vector<int> H(D.begin(), D.begin() + 5);
	// set the elements of H to 0, 1, 2, 3, ...
	thrust::sequence(H.begin(), H.end());
	// copy all of H back to the beginning of D
	thrust::copy(H.begin(), H.end(), D.begin());
	// print D
	for (int i = 0; i < D.size(); i++)
		std::cout << "D[" << i << "] = " << D[i] << " ";
cout<<endl;
return 0;
}
| 194f3bf93b28335074a10e8eacb31090fe9598e7.cu | #include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <vector>
#include <list>
#include <stdio.h>
#include "hello.cuh"
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
using namespace std;
class Particle
{
public:
int particle_id;
float particle_velocity;
float particle_density;
};
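// One block per particle: bump the id by 2 and scale velocity/density in place.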
__global__ void kernel_function(Particle *pointer, int total)
{
int index=blockIdx.x;
if(index < total)
{
pointer[index].particle_id=pointer[index].particle_id+2;
pointer[index].particle_velocity=pointer[index].particle_velocity*2;
pointer[index].particle_density=pointer[index].particle_density*3;
}
}
int mainForClass()
{
cout<<endl<<"Main FOR CLASS AAAAAAAAA "<<endl;
std::list<Particle> particle_list;
Particle particle_a;
Particle particle_b;
Particle particle_c;
particle_a.particle_id=1;
particle_a.particle_velocity=1.1;
particle_a.particle_density=1.22;
particle_b.particle_id=2;
particle_b.particle_velocity=2.1;
particle_b.particle_density=2.22;
particle_c.particle_id=3;
particle_c.particle_velocity=3.1;
particle_c.particle_density=3.22;
particle_list.push_back(particle_a);
particle_list.push_back(particle_b);
particle_list.push_back(particle_c);
thrust::device_vector<Particle> device_list(particle_list.begin(), particle_list.end());
Particle *raw_pointer;
raw_pointer=thrust::raw_pointer_cast(&device_list[0]);
kernel_function<<<3, 1>>>(raw_pointer, device_list.size());
thrust::host_vector<Particle> host_list=device_list;
for(int count=0; count<host_list.size(); count++)
{
printf("%d, %f, %f\n", host_list[count].particle_id, host_list[count].particle_velocity, host_list[count].particle_density);
}
return 0;
}
int mainForFFFF()
{
cout<<endl<<"FFFFFFFFFFFFFUCK "<<endl;
// initialize all ten integers of a device_vector to 1
	thrust::device_vector<int> D(10, 1);
	// set the first seven elements of a vector to 9
	thrust::fill(D.begin(), D.begin() + 7, 9);
	// initialize a host_vector with the first five elements of D
	thrust::host_vector<int> H(D.begin(), D.begin() + 5);
	// set the elements of H to 0, 1, 2, 3, ...
	thrust::sequence(H.begin(), H.end());
	// copy all of H back to the beginning of D
	thrust::copy(H.begin(), H.end(), D.begin());
	// print D
	for (int i = 0; i < D.size(); i++)
		std::cout << "D[" << i << "] = " << D[i] << " ";
cout<<endl;
return 0;
}
|
0aca2b81015024d187c5160b76b6a778949e18e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This is a personal academic project. Dear PVS-Studio, please check it.
// PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com
#include <stdio.h>
#include <type_traits>
#include "graph.h"
#include "GPUutils/GPUutils.h"
using namespace std;
namespace Graph_k {
template<typename nodeW, typename edgeW> __global__ void print_d(GraphStruct<nodeW, edgeW>*, bool);
};
/**
 * Allocate device memory for one component of the graph
 * @param nn   number of nodes (or edges) the component must hold
 * @param mode which component to allocate (GPUINIT_* constant)
 */
// TODO: rework the sizes so that they are passed as the first argument
template<typename nodeW, typename edgeW>
void Graph<nodeW, edgeW>::setMemGPU(node_sz nn, int mode) {
hipError_t cuSts;
if (mode == GPUINIT_NODES) {
str = new GraphStruct<nodeW, edgeW>();
cuSts = hipMalloc(&(str->cumulDegs), (nn + 1) * sizeof(node_sz)); cudaCheck(cuSts, __FILE__, __LINE__);
//GPUMemTracker::graphStructSize = sizeof(GraphStruct<nodeW,edgeW>);
//GPUMemTracker::graphDegsSize = (nn+1)*sizeof(node_sz);
}
else if (mode == GPUINIT_EDGES) {
cuSts = hipMalloc(&(str->neighs), str->nEdges * sizeof(node)); cudaCheck(cuSts, __FILE__, __LINE__);
//GPUMemTracker::graphNeighsSize = str->nEdges*sizeof(node);
}
else if (mode == GPUINIT_NODEW) {
cuSts = hipMalloc(&(str->nodeWeights), str->nEdges * sizeof(nodeW)); cudaCheck(cuSts, __FILE__, __LINE__);
//GPUMemTracker::graphNodeWSize = str->nEdges*sizeof(nodeW);
}
else if (mode == GPUINIT_EDGEW) {
cuSts = hipMalloc(&(str->edgeWeights), str->nEdges * sizeof(edgeW)); cudaCheck(cuSts, __FILE__, __LINE__);
//GPUMemTracker::graphEdgeWSize = str->nEdges*sizeof(edgeW);
}
else if (mode == GPUINIT_NODET) {
cuSts = hipMalloc(&(str->nodeThresholds), str->nNodes * sizeof(nodeW)); cudaCheck(cuSts, __FILE__, __LINE__);
//GPUMemTracker::graphNodeTSize = str->nNodes*sizeof(nodeW);
}
}
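// Builds the device CSR graph from the edges provided by the importer:
// adjacency lists are first accumulated on the host, compacted into the
// cumulDegs/neighs/edgeWeights arrays and finally copied to the GPU.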
template<typename nodeW, typename edgeW>
void Graph<nodeW, edgeW>::setupImporterGPU() {
uint32_t nn = fImport->nNodes;
setMemGPU(nn, GPUINIT_NODES);
str->nNodes = nn;
std::unique_ptr<node_sz[]> temp_cumulDegs(new node_sz[nn + 1]);
#ifdef VERBOSEGRAPH
	std::cout << "Creating temporary lists..." << std::endl;
#endif
std::list<uint32_t> ** tempN = new std::list<uint32_t>*[nn];
std::list<edgeW> ** tempW = new std::list<edgeW>*[nn];
for (uint32_t i = 0; i < nn; i++) {
tempN[i] = new std::list<uint32_t>;
tempW[i] = new std::list<edgeW>;
}
	// Read the edges from the graph file
fImport->fRewind();
while (fImport->getNextEdge()) {
if (fImport->edgeIsValid) {
tempN[fImport->srcIdx]->push_back(fImport->dstIdx);
tempW[fImport->srcIdx]->push_back((edgeW)fImport->edgeWgh);
str->nEdges++;
			// also add the reverse edge!
tempN[fImport->dstIdx]->push_back(fImport->srcIdx);
tempW[fImport->dstIdx]->push_back((edgeW)fImport->edgeWgh);
str->nEdges++;
}
}
	// tempN and tempW now hold everything needed to build the graph
	// Start with the cumulDegs
std::fill(temp_cumulDegs.get(), temp_cumulDegs.get() + (nn + 1), 0);
for (uint32_t i = 1; i < (nn + 1); i++)
temp_cumulDegs[i] += (temp_cumulDegs[i - 1] + (uint32_t)(tempN[i - 1]->size()));
setMemGPU(str->nEdges, GPUINIT_EDGES);
setMemGPU(str->nEdges, GPUINIT_EDGEW);
setMemGPU(nn, GPUINIT_NODET);
std::unique_ptr<node[]> temp_neighs(new node[str->nEdges]);
std::unique_ptr<edgeW[]> temp_edgeWeights(new edgeW[str->nEdges]);
std::unique_ptr<nodeW[]> temp_nodeThresholds(new nodeW[str->nNodes]);
for (uint32_t i = 0; i < nn; i++) {
uint32_t j = 0;
for (auto it = tempN[i]->begin(); it != tempN[i]->end(); ++it) {
temp_neighs[temp_cumulDegs[i] + j] = *it;
j++;
}
j = 0;
for (auto it = tempW[i]->begin(); it != tempW[i]->end(); ++it) {
temp_edgeWeights[temp_cumulDegs[i] + j] = *it;
j++;
}
}
// max, min, mean deg
maxDeg = 0;
minDeg = nn;
	for (uint32_t i = 0; i < nn; i++) {
		uint32_t deg_i = (uint32_t)(temp_cumulDegs[i + 1] - temp_cumulDegs[i]);
		if (deg_i > maxDeg)
			maxDeg = deg_i;	// use the host-side degrees: str->cumulDegs is device memory and not filled yet
		if (deg_i < minDeg)
			minDeg = deg_i;
	}
density = (float)str->nEdges / (float)(nn * (nn - 1) / 2);
meanDeg = (float)str->nEdges / (float)nn;
if (minDeg == 0)
connected = false;
else
connected = true;
	// Copy to the GPU
hipMemcpy(str->cumulDegs, temp_cumulDegs.get(), (str->nNodes + 1) * sizeof(node_sz), hipMemcpyHostToDevice);
hipMemcpy(str->neighs, temp_neighs.get(), str->nEdges * sizeof(node), hipMemcpyHostToDevice);
hipMemcpy(str->edgeWeights, temp_edgeWeights.get(), str->nEdges * sizeof(edgeW), hipMemcpyHostToDevice);
hipMemcpy(str->nodeThresholds, temp_nodeThresholds.get(), str->nNodes * sizeof(nodeW), hipMemcpyHostToDevice);
	// free the temporary structures
for (uint32_t i = 0; i < nn; i++) {
delete tempW[i];
delete tempN[i];
}
delete[] tempW;
delete[] tempN;
}
// This setup is held together with spit. It's a miracle if it works.
template<typename nodeW, typename edgeW>
void Graph<nodeW, edgeW>::setupReduxGPU(const uint32_t * const unlabelled, const uint32_t unlabSize, const int32_t * const labels,
GraphStruct<nodeW, edgeW> * const fullGraphStruct, const uint32_t * const f2R, const uint32_t * const r2F, const float * const thresholds) {
setMemGPU(unlabSize, GPUINIT_NODES);
str->nNodes = unlabSize;
str->nEdges = 0;
std::unique_ptr<node_sz[]> temp_cumulDegs(new node_sz[unlabSize + 1]);
std::fill(temp_cumulDegs.get(), temp_cumulDegs.get() + str->nNodes + 1, 0);
for (uint32_t i = 0; i < unlabSize; i++) {
uint32_t nodeInFullGraph = unlabelled[i];
uint32_t nodeInFullGraphDeg = fullGraphStruct->deg(nodeInFullGraph);
uint32_t neighIdxInFullGraphStruct = fullGraphStruct->cumulDegs[nodeInFullGraph];
		// Recompute the degrees of the redux graph
uint32_t tempDeg = 0;
for (uint32_t j = 0; j < nodeInFullGraphDeg; j++) {
			// if the neighbour's label is 0...
if (!labels[fullGraphStruct->neighs[neighIdxInFullGraphStruct + j]])
tempDeg++;
}
temp_cumulDegs[i + 1] += (tempDeg + temp_cumulDegs[i]);
}
	// Now the remaining structures of the reduced graph can be allocated
str->nEdges = temp_cumulDegs[str->nNodes];
setMemGPU(str->nEdges, GPUINIT_EDGES);
setMemGPU(str->nEdges, GPUINIT_EDGEW);
setMemGPU(str->nNodes, GPUINIT_NODET);
std::unique_ptr<node[]> temp_neighs(new node[str->nEdges]);
std::unique_ptr<edgeW[]> temp_edgeWeights(new edgeW[str->nEdges]);
std::unique_ptr<nodeW[]> temp_nodeThresholds(new nodeW[str->nNodes]);
	// Another pass to fill the neighbour list and the associated edge weights
for (uint32_t i = 0; i < unlabSize; i++) {
uint32_t nodeInFullGraph = unlabelled[i];
uint32_t nodeInFullGraphDeg = fullGraphStruct->deg(nodeInFullGraph);
uint32_t neighIdxInFullGraphStruct = fullGraphStruct->cumulDegs[nodeInFullGraph];
uint32_t tempNeighIdx = temp_cumulDegs[i];
for (uint32_t j = 0; j < nodeInFullGraphDeg; j++) {
uint32_t neighInFullGraph = fullGraphStruct->neighs[neighIdxInFullGraphStruct + j];
if (!labels[neighInFullGraph]) {
temp_neighs[tempNeighIdx] = f2R[neighInFullGraph];
temp_edgeWeights[tempNeighIdx] = fullGraphStruct->edgeWeights[neighIdxInFullGraphStruct + j];
tempNeighIdx++;
}
}
		// Now, the threshold taken from each thread's local vector
		// Note: thresholds is already remapped full-->>redux
temp_nodeThresholds[i] = thresholds[i];
}
	// Copy to the GPU
hipMemcpy(str->cumulDegs, temp_cumulDegs.get(), (str->nNodes + 1) * sizeof(node_sz), hipMemcpyHostToDevice);
hipMemcpy(str->neighs, temp_neighs.get(), str->nEdges * sizeof(node), hipMemcpyHostToDevice);
hipMemcpy(str->edgeWeights, temp_edgeWeights.get(), str->nEdges * sizeof(edgeW), hipMemcpyHostToDevice);
hipMemcpy(str->nodeThresholds, temp_nodeThresholds.get(), str->nNodes * sizeof(nodeW), hipMemcpyHostToDevice);
	// Anything else to do? Otherwise...
return;
}
template<typename nodeW, typename edgeW>
Graph<nodeW, edgeW>::Graph(Graph<nodeW, edgeW> * const graph_h) {
GraphStruct<nodeW, edgeW> * const graphStruct_h = graph_h->getStruct();
setMemGPU(graphStruct_h->nNodes, GPUINIT_NODES);
str->nNodes = graphStruct_h->nNodes;
str->nEdges = graphStruct_h->nEdges;
setMemGPU(graphStruct_h->nNodes, GPUINIT_EDGES);
hipMemcpy(str->cumulDegs, graphStruct_h->cumulDegs, (str->nNodes + 1) * sizeof(node_sz), hipMemcpyHostToDevice);
hipMemcpy(str->neighs, graphStruct_h->neighs, str->nEdges * sizeof(node), hipMemcpyHostToDevice);
maxDeg = graph_h->maxDeg;
minDeg = graph_h->minDeg;
meanDeg = graph_h->meanDeg;
density = graph_h->density;
prob = graph_h->prob;
connected = graph_h->connected;
GPUEnabled = true;
}
/**
* Invoke the kernel to print the graph on device
* @param verbose print details
*/
template<typename nodeW, typename edgeW> void Graph<nodeW, edgeW>::print_d(bool verbose) {
	hipLaunchKernelGGL(( Graph_k::print_d), dim3(1), dim3(1), 0, 0, str, verbose);
hipDeviceSynchronize();
}
/**
* Print the graph on device (verbose = 1 for "verbose print")
* @param verbose print the complete graph
*/
template<typename nodeW, typename edgeW>
__global__ void Graph_k::print_d(GraphStruct<nodeW, edgeW>* str, bool verbose) {
printf("** Graph (num node: %d, num edges: %d)\n", str->nNodes, str->nEdges);
if (verbose) {
for (int i = 0; i < str->nNodes; i++) {
printf(" node(%d)[%d]-> ", i, str->cumulDegs[i + 1] - str->cumulDegs[i]);
for (int j = 0; j < str->cumulDegs[i + 1] - str->cumulDegs[i]; j++) {
printf("%d ", str->neighs[str->cumulDegs[i] + j]);
}
printf("\n");
}
printf("\n");
}
}
template<typename nodeW, typename edgeW>
void Graph<nodeW, edgeW>::deleteMemGPU() {
if (str->neighs != nullptr) {
hipFree(str->neighs);
str->neighs = nullptr;
}
if (str->cumulDegs != nullptr) {
hipFree(str->cumulDegs);
str->cumulDegs = nullptr;
}
if (str->nodeWeights != nullptr) {
hipFree(str->nodeWeights);
str->nodeWeights = nullptr;
}
if (str->edgeWeights != nullptr) {
hipFree(str->edgeWeights);
str->edgeWeights = nullptr;
}
if (str->nodeThresholds != nullptr) {
hipFree(str->nodeThresholds);
str->nodeThresholds = nullptr;
}
if (str != nullptr) {
delete str;
str = nullptr;
}
}
// This sucks... we need to fix template declarations
//#ifdef WIN32
template class Graph<float, float>;
//#endif
| 0aca2b81015024d187c5160b76b6a778949e18e6.cu | // This is a personal academic project. Dear PVS-Studio, please check it.
// PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com
#include <stdio.h>
#include <type_traits>
#include "graph.h"
#include "GPUutils/GPUutils.h"
using namespace std;
namespace Graph_k {
template<typename nodeW, typename edgeW> __global__ void print_d(GraphStruct<nodeW, edgeW>*, bool);
};
/**
 * Allocate device memory for one component of the graph
 * @param nn   number of nodes (or edges) the component must hold
 * @param mode which component to allocate (GPUINIT_* constant)
 */
// TODO: rework the sizes so that they are passed as the first argument
template<typename nodeW, typename edgeW>
void Graph<nodeW, edgeW>::setMemGPU(node_sz nn, int mode) {
cudaError cuSts;
if (mode == GPUINIT_NODES) {
str = new GraphStruct<nodeW, edgeW>();
cuSts = cudaMalloc(&(str->cumulDegs), (nn + 1) * sizeof(node_sz)); cudaCheck(cuSts, __FILE__, __LINE__);
//GPUMemTracker::graphStructSize = sizeof(GraphStruct<nodeW,edgeW>);
//GPUMemTracker::graphDegsSize = (nn+1)*sizeof(node_sz);
}
else if (mode == GPUINIT_EDGES) {
cuSts = cudaMalloc(&(str->neighs), str->nEdges * sizeof(node)); cudaCheck(cuSts, __FILE__, __LINE__);
//GPUMemTracker::graphNeighsSize = str->nEdges*sizeof(node);
}
else if (mode == GPUINIT_NODEW) {
cuSts = cudaMalloc(&(str->nodeWeights), str->nEdges * sizeof(nodeW)); cudaCheck(cuSts, __FILE__, __LINE__);
//GPUMemTracker::graphNodeWSize = str->nEdges*sizeof(nodeW);
}
else if (mode == GPUINIT_EDGEW) {
cuSts = cudaMalloc(&(str->edgeWeights), str->nEdges * sizeof(edgeW)); cudaCheck(cuSts, __FILE__, __LINE__);
//GPUMemTracker::graphEdgeWSize = str->nEdges*sizeof(edgeW);
}
else if (mode == GPUINIT_NODET) {
cuSts = cudaMalloc(&(str->nodeThresholds), str->nNodes * sizeof(nodeW)); cudaCheck(cuSts, __FILE__, __LINE__);
//GPUMemTracker::graphNodeTSize = str->nNodes*sizeof(nodeW);
}
}
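// Builds the device CSR graph from the edges provided by the importer:
// adjacency lists are first accumulated on the host, compacted into the
// cumulDegs/neighs/edgeWeights arrays and finally copied to the GPU.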
template<typename nodeW, typename edgeW>
void Graph<nodeW, edgeW>::setupImporterGPU() {
uint32_t nn = fImport->nNodes;
setMemGPU(nn, GPUINIT_NODES);
str->nNodes = nn;
std::unique_ptr<node_sz[]> temp_cumulDegs(new node_sz[nn + 1]);
#ifdef VERBOSEGRAPH
	std::cout << "Creating temporary lists..." << std::endl;
#endif
std::list<uint32_t> ** tempN = new std::list<uint32_t>*[nn];
std::list<edgeW> ** tempW = new std::list<edgeW>*[nn];
for (uint32_t i = 0; i < nn; i++) {
tempN[i] = new std::list<uint32_t>;
tempW[i] = new std::list<edgeW>;
}
	// Read the edges from the graph file
fImport->fRewind();
while (fImport->getNextEdge()) {
if (fImport->edgeIsValid) {
tempN[fImport->srcIdx]->push_back(fImport->dstIdx);
tempW[fImport->srcIdx]->push_back((edgeW)fImport->edgeWgh);
str->nEdges++;
			// also add the reverse edge!
tempN[fImport->dstIdx]->push_back(fImport->srcIdx);
tempW[fImport->dstIdx]->push_back((edgeW)fImport->edgeWgh);
str->nEdges++;
}
}
	// tempN and tempW now hold everything needed to build the graph
	// Start with the cumulDegs
std::fill(temp_cumulDegs.get(), temp_cumulDegs.get() + (nn + 1), 0);
for (uint32_t i = 1; i < (nn + 1); i++)
temp_cumulDegs[i] += (temp_cumulDegs[i - 1] + (uint32_t)(tempN[i - 1]->size()));
setMemGPU(str->nEdges, GPUINIT_EDGES);
setMemGPU(str->nEdges, GPUINIT_EDGEW);
setMemGPU(nn, GPUINIT_NODET);
std::unique_ptr<node[]> temp_neighs(new node[str->nEdges]);
std::unique_ptr<edgeW[]> temp_edgeWeights(new edgeW[str->nEdges]);
std::unique_ptr<nodeW[]> temp_nodeThresholds(new nodeW[str->nNodes]);
for (uint32_t i = 0; i < nn; i++) {
uint32_t j = 0;
for (auto it = tempN[i]->begin(); it != tempN[i]->end(); ++it) {
temp_neighs[temp_cumulDegs[i] + j] = *it;
j++;
}
j = 0;
for (auto it = tempW[i]->begin(); it != tempW[i]->end(); ++it) {
temp_edgeWeights[temp_cumulDegs[i] + j] = *it;
j++;
}
}
// max, min, mean deg
maxDeg = 0;
minDeg = nn;
	for (uint32_t i = 0; i < nn; i++) {
		uint32_t deg_i = (uint32_t)(temp_cumulDegs[i + 1] - temp_cumulDegs[i]);
		if (deg_i > maxDeg)
			maxDeg = deg_i;	// use the host-side degrees: str->cumulDegs is device memory and not filled yet
		if (deg_i < minDeg)
			minDeg = deg_i;
	}
density = (float)str->nEdges / (float)(nn * (nn - 1) / 2);
meanDeg = (float)str->nEdges / (float)nn;
if (minDeg == 0)
connected = false;
else
connected = true;
	// Copy to the GPU
cudaMemcpy(str->cumulDegs, temp_cumulDegs.get(), (str->nNodes + 1) * sizeof(node_sz), cudaMemcpyHostToDevice);
cudaMemcpy(str->neighs, temp_neighs.get(), str->nEdges * sizeof(node), cudaMemcpyHostToDevice);
cudaMemcpy(str->edgeWeights, temp_edgeWeights.get(), str->nEdges * sizeof(edgeW), cudaMemcpyHostToDevice);
cudaMemcpy(str->nodeThresholds, temp_nodeThresholds.get(), str->nNodes * sizeof(nodeW), cudaMemcpyHostToDevice);
	// free the temporary structures
for (uint32_t i = 0; i < nn; i++) {
delete tempW[i];
delete tempN[i];
}
delete[] tempW;
delete[] tempN;
}
// This setup is held together with spit. It's a miracle if it works.
template<typename nodeW, typename edgeW>
void Graph<nodeW, edgeW>::setupReduxGPU(const uint32_t * const unlabelled, const uint32_t unlabSize, const int32_t * const labels,
GraphStruct<nodeW, edgeW> * const fullGraphStruct, const uint32_t * const f2R, const uint32_t * const r2F, const float * const thresholds) {
setMemGPU(unlabSize, GPUINIT_NODES);
str->nNodes = unlabSize;
str->nEdges = 0;
std::unique_ptr<node_sz[]> temp_cumulDegs(new node_sz[unlabSize + 1]);
std::fill(temp_cumulDegs.get(), temp_cumulDegs.get() + str->nNodes + 1, 0);
for (uint32_t i = 0; i < unlabSize; i++) {
uint32_t nodeInFullGraph = unlabelled[i];
uint32_t nodeInFullGraphDeg = fullGraphStruct->deg(nodeInFullGraph);
uint32_t neighIdxInFullGraphStruct = fullGraphStruct->cumulDegs[nodeInFullGraph];
		// Recompute the degrees of the redux graph
uint32_t tempDeg = 0;
for (uint32_t j = 0; j < nodeInFullGraphDeg; j++) {
			// if the neighbour's label is 0...
if (!labels[fullGraphStruct->neighs[neighIdxInFullGraphStruct + j]])
tempDeg++;
}
temp_cumulDegs[i + 1] += (tempDeg + temp_cumulDegs[i]);
}
	// Now the remaining structures of the reduced graph can be allocated
str->nEdges = temp_cumulDegs[str->nNodes];
setMemGPU(str->nEdges, GPUINIT_EDGES);
setMemGPU(str->nEdges, GPUINIT_EDGEW);
setMemGPU(str->nNodes, GPUINIT_NODET);
std::unique_ptr<node[]> temp_neighs(new node[str->nEdges]);
std::unique_ptr<edgeW[]> temp_edgeWeights(new edgeW[str->nEdges]);
std::unique_ptr<nodeW[]> temp_nodeThresholds(new nodeW[str->nNodes]);
	// Another pass to fill the neighbour list and the associated edge weights
for (uint32_t i = 0; i < unlabSize; i++) {
uint32_t nodeInFullGraph = unlabelled[i];
uint32_t nodeInFullGraphDeg = fullGraphStruct->deg(nodeInFullGraph);
uint32_t neighIdxInFullGraphStruct = fullGraphStruct->cumulDegs[nodeInFullGraph];
uint32_t tempNeighIdx = temp_cumulDegs[i];
for (uint32_t j = 0; j < nodeInFullGraphDeg; j++) {
uint32_t neighInFullGraph = fullGraphStruct->neighs[neighIdxInFullGraphStruct + j];
if (!labels[neighInFullGraph]) {
temp_neighs[tempNeighIdx] = f2R[neighInFullGraph];
temp_edgeWeights[tempNeighIdx] = fullGraphStruct->edgeWeights[neighIdxInFullGraphStruct + j];
tempNeighIdx++;
}
}
		// Now, the threshold taken from each thread's local vector
		// Note: thresholds is already remapped full-->>redux
temp_nodeThresholds[i] = thresholds[i];
}
	// Copy to the GPU
cudaMemcpy(str->cumulDegs, temp_cumulDegs.get(), (str->nNodes + 1) * sizeof(node_sz), cudaMemcpyHostToDevice);
cudaMemcpy(str->neighs, temp_neighs.get(), str->nEdges * sizeof(node), cudaMemcpyHostToDevice);
cudaMemcpy(str->edgeWeights, temp_edgeWeights.get(), str->nEdges * sizeof(edgeW), cudaMemcpyHostToDevice);
cudaMemcpy(str->nodeThresholds, temp_nodeThresholds.get(), str->nNodes * sizeof(nodeW), cudaMemcpyHostToDevice);
// Devo fare altro? Altrimenti...
return;
}
template<typename nodeW, typename edgeW>
Graph<nodeW, edgeW>::Graph(Graph<nodeW, edgeW> * const graph_h) {
GraphStruct<nodeW, edgeW> * const graphStruct_h = graph_h->getStruct();
setMemGPU(graphStruct_h->nNodes, GPUINIT_NODES);
str->nNodes = graphStruct_h->nNodes;
str->nEdges = graphStruct_h->nEdges;
setMemGPU(graphStruct_h->nNodes, GPUINIT_EDGES);
cudaMemcpy(str->cumulDegs, graphStruct_h->cumulDegs, (str->nNodes + 1) * sizeof(node_sz), cudaMemcpyHostToDevice);
cudaMemcpy(str->neighs, graphStruct_h->neighs, str->nEdges * sizeof(node), cudaMemcpyHostToDevice);
maxDeg = graph_h->maxDeg;
minDeg = graph_h->minDeg;
meanDeg = graph_h->meanDeg;
density = graph_h->density;
prob = graph_h->prob;
connected = graph_h->connected;
GPUEnabled = true;
}
/**
* Invoke the kernel to print the graph on device
* @param verbose print details
*/
template<typename nodeW, typename edgeW> void Graph<nodeW, edgeW>::print_d(bool verbose) {
	Graph_k::print_d<<<1, 1>>>(str, verbose);
cudaDeviceSynchronize();
}
/**
* Print the graph on device (verbose = 1 for "verbose print")
* @param verbose print the complete graph
*/
template<typename nodeW, typename edgeW>
__global__ void Graph_k::print_d(GraphStruct<nodeW, edgeW>* str, bool verbose) {
printf("** Graph (num node: %d, num edges: %d)\n", str->nNodes, str->nEdges);
if (verbose) {
for (int i = 0; i < str->nNodes; i++) {
printf(" node(%d)[%d]-> ", i, str->cumulDegs[i + 1] - str->cumulDegs[i]);
for (int j = 0; j < str->cumulDegs[i + 1] - str->cumulDegs[i]; j++) {
printf("%d ", str->neighs[str->cumulDegs[i] + j]);
}
printf("\n");
}
printf("\n");
}
}
template<typename nodeW, typename edgeW>
void Graph<nodeW, edgeW>::deleteMemGPU() {
if (str->neighs != nullptr) {
cudaFree(str->neighs);
str->neighs = nullptr;
}
if (str->cumulDegs != nullptr) {
cudaFree(str->cumulDegs);
str->cumulDegs = nullptr;
}
if (str->nodeWeights != nullptr) {
cudaFree(str->nodeWeights);
str->nodeWeights = nullptr;
}
if (str->edgeWeights != nullptr) {
cudaFree(str->edgeWeights);
str->edgeWeights = nullptr;
}
if (str->nodeThresholds != nullptr) {
cudaFree(str->nodeThresholds);
str->nodeThresholds = nullptr;
}
if (str != nullptr) {
delete str;
str = nullptr;
}
}
// This sucks... we need to fix template declarations
//#ifdef WIN32
template class Graph<float, float>;
//#endif
|
ced4d5e8c72bda690d48c467cabcbb744f71b018.hip | // !!! This is a file automatically generated by hipify!!!
#include "Hornet.hpp"
#include "Core/GPUHornet/BatchUpdate.cuh"
#include "Util/BatchFunctions.hpp"
#include <Host/FileUtil.hpp> //xlib::extract_filepath_noextension
#include <Device/Util/CudaUtil.cuh> //xlib::deviceInfo
#include <algorithm> //std:.generate
#include <chrono> //std::chrono
#include <random> //std::mt19937_64
#include <hip/hip_runtime_api.h>
//nvprof --profile-from-start off --log-file log.txt --print-gpu-trace
using namespace hornets_nest;
using namespace timer;
using namespace std::string_literals;
using HornetGPU = hornets_nest::gpu::Hornet<EMPTY, EMPTY>;
void exec(int argc, char* argv[]);
/**
* @brief Example tester for Hornet
*/
int main(int argc, char* argv[]) {
exec(argc, argv);
hipDeviceReset();
}
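// Reads the input graph, builds a Hornet instance on the GPU and, when a
// batch size is supplied, generates an edge batch and times its insertion.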
void exec(int argc, char* argv[]) {
using namespace graph::structure_prop;
using namespace graph::parsing_prop;
xlib::device_info();
graph::GraphStd<vid_t, eoff_t> graph;
graph.read(argv[1]);
auto weights = new int[graph.nE()];
std::iota(weights, weights + graph.nE(), 0);
//--------------------------------------------------------------------------
HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(),
graph.csr_out_edges());
HornetGPU hornet_gpu(hornet_init);
std::cout << "------------------------------------------------" <<std::endl;
//--------------------------------------------------------------------------
using namespace batch_gen_property;
if (argc == 3) {
int batch_size = std::stoi(argv[2]);
#ifdef TEST
batch_size = 100;
#endif
vid_t* batch_src, *batch_dst;
cuMallocHost(batch_src, batch_size);
cuMallocHost(batch_dst, batch_size);
#ifdef TEST
for (int i = 0; i < batch_size - 10; ++i) {
batch_src[i] = 33;
batch_dst[i] = 8;
}
for (int i = batch_size - 10; i < batch_size; ++i) {
batch_src[i] = 33;
batch_dst[i] = 8;
}
#else
generateBatch(graph, batch_size, batch_src, batch_dst,
BatchGenType::INSERT, UNIQUE);
#endif
gpu::BatchUpdate batch_update(batch_src, batch_dst, batch_size);
//batch_update.print();
std::cout << "------------------------------------------------" <<std::endl;
using namespace gpu::batch_property;
hornet_gpu.reserveBatchOpResource(batch_size);
hornet_gpu.print();
std::cout << "------------------------------------------------" <<std::endl;
hipProfilerStart();
Timer<DEVICE> TM(3);
TM.start();
hornet_gpu.insertEdgeBatch(batch_update);
//hornet_gpu.deleteEdgeBatch(batch_update);
TM.stop();
//TM.print("Insertion "s + std::to_string(batch_size) + ": ");
hipProfilerStop();
//hornet_gpu.check_sorted_adjs();
//delete[] batch_src;
//delete[] batch_dst;
cuFreeHost(batch_src);
cuFreeHost(batch_dst);
//batch_update.print();
hornet_gpu.print();
}
delete[] weights;
}
| ced4d5e8c72bda690d48c467cabcbb744f71b018.cu | #include "Hornet.hpp"
#include "Core/GPUHornet/BatchUpdate.cuh"
#include "Util/BatchFunctions.hpp"
#include <Host/FileUtil.hpp> //xlib::extract_filepath_noextension
#include <Device/Util/CudaUtil.cuh> //xlib::deviceInfo
#include <algorithm> //std:.generate
#include <chrono> //std::chrono
#include <random> //std::mt19937_64
#include <cuda_profiler_api.h>
//nvprof --profile-from-start off --log-file log.txt --print-gpu-trace
using namespace hornets_nest;
using namespace timer;
using namespace std::string_literals;
using HornetGPU = hornets_nest::gpu::Hornet<EMPTY, EMPTY>;
void exec(int argc, char* argv[]);
/**
* @brief Example tester for Hornet
*/
int main(int argc, char* argv[]) {
exec(argc, argv);
cudaDeviceReset();
}
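// Reads the input graph, builds a Hornet instance on the GPU and, when a
// batch size is supplied, generates an edge batch and times its insertion.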
void exec(int argc, char* argv[]) {
using namespace graph::structure_prop;
using namespace graph::parsing_prop;
xlib::device_info();
graph::GraphStd<vid_t, eoff_t> graph;
graph.read(argv[1]);
auto weights = new int[graph.nE()];
std::iota(weights, weights + graph.nE(), 0);
//--------------------------------------------------------------------------
HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(),
graph.csr_out_edges());
HornetGPU hornet_gpu(hornet_init);
std::cout << "------------------------------------------------" <<std::endl;
//--------------------------------------------------------------------------
using namespace batch_gen_property;
if (argc == 3) {
int batch_size = std::stoi(argv[2]);
#ifdef TEST
batch_size = 100;
#endif
vid_t* batch_src, *batch_dst;
cuMallocHost(batch_src, batch_size);
cuMallocHost(batch_dst, batch_size);
#ifdef TEST
for (int i = 0; i < batch_size - 10; ++i) {
batch_src[i] = 33;
batch_dst[i] = 8;
}
for (int i = batch_size - 10; i < batch_size; ++i) {
batch_src[i] = 33;
batch_dst[i] = 8;
}
#else
generateBatch(graph, batch_size, batch_src, batch_dst,
BatchGenType::INSERT, UNIQUE);
#endif
gpu::BatchUpdate batch_update(batch_src, batch_dst, batch_size);
//batch_update.print();
std::cout << "------------------------------------------------" <<std::endl;
using namespace gpu::batch_property;
hornet_gpu.reserveBatchOpResource(batch_size);
hornet_gpu.print();
std::cout << "------------------------------------------------" <<std::endl;
cudaProfilerStart();
Timer<DEVICE> TM(3);
TM.start();
hornet_gpu.insertEdgeBatch(batch_update);
//hornet_gpu.deleteEdgeBatch(batch_update);
TM.stop();
//TM.print("Insertion "s + std::to_string(batch_size) + ": ");
cudaProfilerStop();
//hornet_gpu.check_sorted_adjs();
//delete[] batch_src;
//delete[] batch_dst;
cuFreeHost(batch_src);
cuFreeHost(batch_dst);
//batch_update.print();
hornet_gpu.print();
}
delete[] weights;
}
|
77bcb1f09dbc5e7ca76065b83e8fd75b9bfd9a98.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <time.h>
#include <hip/hip_runtime.h>
#include <math.h>
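// One thread per key: write the key into the slot selected by its current
// hash function, then advance the key's function index (round-robin over the
// t functions) so a different slot is tried if the key gets evicted later.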
__global__ void HashingKernel(int *cuda_hash_table,int *cuda_a_list, int *cuda_b_list, int *cuda_random_value,int* cuda_func_index, int n, int p,int *cuda_kicked_list,int t,int flag){
int key = threadIdx.x + blockDim.x*blockIdx.x;
int index = cuda_func_index[key];
int hash_value = (unsigned(cuda_a_list[index] * unsigned(cuda_random_value[key]) + cuda_b_list[index]) % p) % n;
if (cuda_kicked_list[key]==1 || flag==0){
cuda_hash_table[hash_value] = cuda_random_value[key];
cuda_func_index[key] = (cuda_func_index[key] + 1) % t;
}
}
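// One thread per key: re-hash with the function used in the previous round
// and check whether the key still owns that slot; if another key overwrote
// it, mark the key as kicked so it is reinserted in the next round.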
__global__ void CheckKickKernel(int *cuda_hash_table,int *cuda_a_list, int *cuda_b_list, int *cuda_random_value,int* cuda_func_index, int n, int p,int *cuda_kicked_list,int t){
int key = threadIdx.x + blockDim.x*blockIdx.x;
int index = (cuda_func_index[key]+t-1)%t;
int hash_value = (unsigned(cuda_a_list[index] * unsigned(cuda_random_value[key]) + cuda_b_list[index]) % p) % n;
if (cuda_hash_table[hash_value]==cuda_random_value[key]){
cuda_kicked_list[key] = 0;
}else{
cuda_kicked_list[key] = 1;
}
}
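// One thread per query: probe the slot of each of the t hash functions and
// report a hit if any of them holds the queried value.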
__global__ void LookUpKernel(int *cuda_hash_table,int *cuda_a_list,int *cuda_b_list, int t,int n,int p, int *cuda_lookup_table,int *cuda_results){
int key = threadIdx.x + blockDim.x*blockIdx.x;
int i,hash_value;
for(i = 0;i < t;i++){
hash_value = (unsigned(cuda_a_list[i] * unsigned(cuda_lookup_table[key]) + cuda_b_list[i]) % p) % n;
if (cuda_hash_table[hash_value] == cuda_lookup_table[key]){
cuda_results[key] = 1;
break;
}
}
}
int main(int argc,char const *argv[]){
int i;
unsigned int input_size;
unsigned int s;
int t;
unsigned int n;
unsigned int p;
unsigned int bound_length;
int *a_list;
int *b_list;
int *random_value;
int *kicked_list;
int *hash_table;
int *func_index;
int *cuda_a_list;
int *cuda_b_list;
int *cuda_random_value;
int *cuda_hash_table;
int *cuda_kicked_list;
int *cuda_func_index;
int block_num;
int block_size;
int sum;
int flag;
float ts;
clock_t start,end;
s = atoi(argv[1]);
t = atoi(argv[2]);
ts = (float)strtod(argv[4],NULL);
input_size = pow(2,s);
if (s == 24 ){
if (t==2){
input_size-=pow(2,22);
}else if (t == 3){
input_size-=pow(2,15);
}
}
// n = unsigned(pow(2,25));
n = (int)(input_size*ts);
//printf("%d\n",n);
p = 85000173;
bound_length = (int)4*log(n);
block_num = input_size/256;
block_size = 256;
hipMalloc((void **) &cuda_a_list,sizeof(int)*t);
hipMalloc((void **) &cuda_b_list,sizeof(int)*t);
hipMalloc((void **) &cuda_hash_table,sizeof(int)*n);
hipMalloc((void **) &cuda_random_value,sizeof(int)*input_size);
hipMalloc((void **) &cuda_kicked_list,sizeof(int)*input_size);
hipMalloc((void **) &cuda_func_index,sizeof(int)*input_size);
a_list = (int*)malloc(sizeof(int)*t);
b_list = (int*)malloc(sizeof(int)*t);
hash_table = (int*)malloc(sizeof(int)*n);
random_value = (int*)malloc(sizeof(int)*input_size);
kicked_list = (int*)malloc(sizeof(int)*input_size);
func_index = (int*)malloc(sizeof(int)*input_size);
srand(time(0));
for (i = 0;i < t;i++){
a_list[i] = (unsigned)rand()%10000;
b_list[i] = (unsigned)rand()%10000;
while (a_list[i]==0){
a_list[i]=(unsigned)rand();
}
while (b_list[i]==0){
b_list[i]=(unsigned)rand();
}
}
for (i = 0;i < input_size;i++){
random_value[i] =unsigned( rand());
while (random_value[i]==0) {
random_value[i] = unsigned(rand());
}
}
memset(hash_table,0,sizeof(int)*n);
memset(kicked_list,0,sizeof(int)*input_size);
memset(func_index,0,sizeof(int)*input_size);
hipMemcpy(cuda_a_list,a_list,sizeof(int)*t,hipMemcpyHostToDevice);
hipMemcpy(cuda_b_list,b_list,sizeof(int)*t,hipMemcpyHostToDevice);
hipMemcpy(cuda_hash_table,hash_table,sizeof(int)*n,hipMemcpyHostToDevice);
hipMemcpy(cuda_random_value,random_value,sizeof(int)*input_size,hipMemcpyHostToDevice);
hipMemcpy(cuda_kicked_list,kicked_list,sizeof(int)*input_size,hipMemcpyHostToDevice);
hipMemcpy(cuda_func_index,func_index,sizeof(int)*input_size,hipMemcpyHostToDevice);
int count = 0;
int base = pow(2,24);
start=clock();
int first = 0;
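	// Repeat parallel insert + conflict-check rounds until no key remains
	// kicked out; if the number of kicked keys stops improving for more than
	// 8*bound_length rounds, restart with freshly drawn hash functions.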
while(1){
if (first == 0){
flag = 0;
}else{
flag =1;
}
sum = 0;
first = 1;
hipLaunchKernelGGL(( HashingKernel), dim3(block_num),dim3(block_size), 0, 0, cuda_hash_table,cuda_a_list,cuda_b_list,cuda_random_value,cuda_func_index,n,p,cuda_kicked_list,t,flag);
hipLaunchKernelGGL(( CheckKickKernel), dim3(block_num),dim3(block_size), 0, 0, cuda_hash_table,cuda_a_list,cuda_b_list,cuda_random_value,cuda_func_index,n,p,cuda_kicked_list,t);
hipMemcpy(kicked_list,cuda_kicked_list,sizeof(int)*input_size,hipMemcpyDeviceToHost);
for (i = 0;i<input_size;i++){
sum+=kicked_list[i];
}
// printf("sum=%d,base=%d\n",sum,base);
if(sum < base){
count = 0;
base = sum;
}else{
count += 1;
}
//printf("base = %d\n",base );
if (sum == 0){
break;
}
if(count > 8*bound_length){
count = 0;
first = 0;
// printf("------------------------Restart!------------------------\n");
base = pow(2,24);
for (i = 0;i < t;i++){
a_list[i] = rand();
b_list[i] = rand();
while (a_list[i]==0){
a_list[i]=rand();
}
while (b_list[i]==0){
b_list[i]=rand();
}
}
memset(hash_table,0,sizeof(int)*n);
memset(kicked_list,0,sizeof(int)*input_size);
memset(func_index,0,sizeof(int)*input_size);
hipMemcpy(cuda_a_list,a_list,sizeof(int)*t,hipMemcpyHostToDevice);
hipMemcpy(cuda_b_list,b_list,sizeof(int)*t,hipMemcpyHostToDevice);
hipMemcpy(cuda_hash_table,hash_table,sizeof(int)*n,hipMemcpyHostToDevice);
hipMemcpy(cuda_kicked_list,kicked_list,sizeof(int)*input_size,hipMemcpyHostToDevice);
hipMemcpy(cuda_func_index,func_index,sizeof(int)*input_size,hipMemcpyHostToDevice);
}
}
end=clock();
hipMemcpy(hash_table,cuda_hash_table,sizeof(int)*n,hipMemcpyDeviceToHost);
hipMemcpy(func_index,cuda_func_index,sizeof(int)*input_size,hipMemcpyDeviceToHost);
printf("%f\n",(double)(end-start)/CLOCKS_PER_SEC );
//##########################################################################################
// Experiment 2
// printf("%d\n", input_size);
if (argc == 4 && input_size>pow(2,23)){
int counter;
float percent = float(100-10*atoi(argv[3]))/100.0;
int *results;
int *lookup_table;
int *cuda_results;
int *cuda_lookup_table;
// printf("Insertion Finished. Start Exp2:\n");
// printf("percent=%f\n",percent );
lookup_table = (int*)malloc(sizeof(int)*input_size);
results = (int *)malloc(sizeof(int)*input_size);
memset(lookup_table,0,sizeof(int)*input_size);
memset(results,0,sizeof(int)*input_size);
hipMalloc((void **) &cuda_results,sizeof(int)*input_size);
hipMalloc((void **) &cuda_lookup_table,sizeof(int)*input_size);
for (i=0;i<input_size;i++){
if (i<(int)(input_size*percent)){
lookup_table[i] = random_value[rand()%input_size];
}else{
lookup_table[i] = rand();
}
}
hipMemcpy(cuda_results,results,sizeof(int)*input_size,hipMemcpyHostToDevice);
hipMemcpy(cuda_lookup_table,lookup_table,sizeof(int)*input_size,hipMemcpyHostToDevice);
counter = 0;
start = clock();
hipLaunchKernelGGL(( LookUpKernel), dim3(block_num),dim3(block_size), 0, 0, cuda_hash_table,cuda_a_list,cuda_b_list,t,n,p,cuda_lookup_table,cuda_results);
end = clock();
hipMemcpy(results,cuda_results,sizeof(int)*input_size,hipMemcpyDeviceToHost);
for(i =0;i<input_size;i++){
counter += results[i];
}
if (counter>=(int)(input_size*percent)){
// printf("counter = %d,percent = %d\n", counter,(int)(input_size*percent));
printf("%f\n",(double)(end-start)/CLOCKS_PER_SEC);
}
free(lookup_table);
free(results);
hipFree(cuda_results);
hipFree(cuda_lookup_table);
}
free(a_list);
free(b_list);
free(hash_table);
free(random_value);
free(func_index);
free(kicked_list);
hipFree(cuda_a_list);
hipFree(cuda_b_list);
hipFree(cuda_hash_table);
hipFree(cuda_random_value);
hipFree(cuda_func_index);
hipFree(cuda_kicked_list);
return 0;
}
| 77bcb1f09dbc5e7ca76065b83e8fd75b9bfd9a98.cu | #include <stdlib.h>
#include <stdio.h>
#include <string>
#include <time.h>
#include <cuda.h>
#include <math.h>
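// One thread per key: write the key into the slot selected by its current
// hash function, then advance the key's function index (round-robin over the
// t functions) so a different slot is tried if the key gets evicted later.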
__global__ void HashingKernel(int *cuda_hash_table,int *cuda_a_list, int *cuda_b_list, int *cuda_random_value,int* cuda_func_index, int n, int p,int *cuda_kicked_list,int t,int flag){
int key = threadIdx.x + blockDim.x*blockIdx.x;
int index = cuda_func_index[key];
int hash_value = (unsigned(cuda_a_list[index] * unsigned(cuda_random_value[key]) + cuda_b_list[index]) % p) % n;
if (cuda_kicked_list[key]==1 || flag==0){
cuda_hash_table[hash_value] = cuda_random_value[key];
cuda_func_index[key] = (cuda_func_index[key] + 1) % t;
}
}
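// One thread per key: re-hash with the function used in the previous round
// and check whether the key still owns that slot; if another key overwrote
// it, mark the key as kicked so it is reinserted in the next round.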
__global__ void CheckKickKernel(int *cuda_hash_table,int *cuda_a_list, int *cuda_b_list, int *cuda_random_value,int* cuda_func_index, int n, int p,int *cuda_kicked_list,int t){
int key = threadIdx.x + blockDim.x*blockIdx.x;
int index = (cuda_func_index[key]+t-1)%t;
int hash_value = (unsigned(cuda_a_list[index] * unsigned(cuda_random_value[key]) + cuda_b_list[index]) % p) % n;
if (cuda_hash_table[hash_value]==cuda_random_value[key]){
cuda_kicked_list[key] = 0;
}else{
cuda_kicked_list[key] = 1;
}
}
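// One thread per query: probe the slot of each of the t hash functions and
// report a hit if any of them holds the queried value.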
__global__ void LookUpKernel(int *cuda_hash_table,int *cuda_a_list,int *cuda_b_list, int t,int n,int p, int *cuda_lookup_table,int *cuda_results){
int key = threadIdx.x + blockDim.x*blockIdx.x;
int i,hash_value;
for(i = 0;i < t;i++){
hash_value = (unsigned(cuda_a_list[i] * unsigned(cuda_lookup_table[key]) + cuda_b_list[i]) % p) % n;
if (cuda_hash_table[hash_value] == cuda_lookup_table[key]){
cuda_results[key] = 1;
break;
}
}
}
int main(int argc,char const *argv[]){
int i;
unsigned int input_size;
unsigned int s;
int t;
unsigned int n;
unsigned int p;
unsigned int bound_length;
int *a_list;
int *b_list;
int *random_value;
int *kicked_list;
int *hash_table;
int *func_index;
int *cuda_a_list;
int *cuda_b_list;
int *cuda_random_value;
int *cuda_hash_table;
int *cuda_kicked_list;
int *cuda_func_index;
int block_num;
int block_size;
int sum;
int flag;
float ts;
clock_t start,end;
s = atoi(argv[1]);
t = atoi(argv[2]);
ts = (float)strtod(argv[4],NULL);
input_size = pow(2,s);
if (s == 24 ){
if (t==2){
input_size-=pow(2,22);
}else if (t == 3){
input_size-=pow(2,15);
}
}
// n = unsigned(pow(2,25));
n = (int)(input_size*ts);
//printf("%d\n",n);
p = 85000173;
bound_length = (int)4*log(n);
block_num = input_size/256;
block_size = 256;
cudaMalloc((void **) &cuda_a_list,sizeof(int)*t);
cudaMalloc((void **) &cuda_b_list,sizeof(int)*t);
cudaMalloc((void **) &cuda_hash_table,sizeof(int)*n);
cudaMalloc((void **) &cuda_random_value,sizeof(int)*input_size);
cudaMalloc((void **) &cuda_kicked_list,sizeof(int)*input_size);
cudaMalloc((void **) &cuda_func_index,sizeof(int)*input_size);
a_list = (int*)malloc(sizeof(int)*t);
b_list = (int*)malloc(sizeof(int)*t);
hash_table = (int*)malloc(sizeof(int)*n);
random_value = (int*)malloc(sizeof(int)*input_size);
kicked_list = (int*)malloc(sizeof(int)*input_size);
func_index = (int*)malloc(sizeof(int)*input_size);
srand(time(0));
for (i = 0;i < t;i++){
a_list[i] = (unsigned)rand()%10000;
b_list[i] = (unsigned)rand()%10000;
while (a_list[i]==0){
a_list[i]=(unsigned)rand();
}
while (b_list[i]==0){
b_list[i]=(unsigned)rand();
}
}
for (i = 0;i < input_size;i++){
random_value[i] =unsigned( rand());
while (random_value[i]==0) {
random_value[i] = unsigned(rand());
}
}
memset(hash_table,0,sizeof(int)*n);
memset(kicked_list,0,sizeof(int)*input_size);
memset(func_index,0,sizeof(int)*input_size);
cudaMemcpy(cuda_a_list,a_list,sizeof(int)*t,cudaMemcpyHostToDevice);
cudaMemcpy(cuda_b_list,b_list,sizeof(int)*t,cudaMemcpyHostToDevice);
cudaMemcpy(cuda_hash_table,hash_table,sizeof(int)*n,cudaMemcpyHostToDevice);
cudaMemcpy(cuda_random_value,random_value,sizeof(int)*input_size,cudaMemcpyHostToDevice);
cudaMemcpy(cuda_kicked_list,kicked_list,sizeof(int)*input_size,cudaMemcpyHostToDevice);
cudaMemcpy(cuda_func_index,func_index,sizeof(int)*input_size,cudaMemcpyHostToDevice);
int count = 0;
int base = pow(2,24);
start=clock();
int first = 0;
while(1){
if (first == 0){
flag = 0;
}else{
flag =1;
}
sum = 0;
first = 1;
HashingKernel<<<block_num,block_size>>>(cuda_hash_table,cuda_a_list,cuda_b_list,cuda_random_value,cuda_func_index,n,p,cuda_kicked_list,t,flag);
CheckKickKernel<<<block_num,block_size>>>(cuda_hash_table,cuda_a_list,cuda_b_list,cuda_random_value,cuda_func_index,n,p,cuda_kicked_list,t);
cudaMemcpy(kicked_list,cuda_kicked_list,sizeof(int)*input_size,cudaMemcpyDeviceToHost);
for (i = 0;i<input_size;i++){
sum+=kicked_list[i];
}
// printf("sum=%d,base=%d\n",sum,base);
if(sum < base){
count = 0;
base = sum;
}else{
count += 1;
}
//printf("base = %d\n",base );
if (sum == 0){
break;
}
if(count > 8*bound_length){
count = 0;
first = 0;
// printf("------------------------Restart!------------------------\n");
base = pow(2,24);
for (i = 0;i < t;i++){
a_list[i] = rand();
b_list[i] = rand();
while (a_list[i]==0){
a_list[i]=rand();
}
while (b_list[i]==0){
b_list[i]=rand();
}
}
memset(hash_table,0,sizeof(int)*n);
memset(kicked_list,0,sizeof(int)*input_size);
memset(func_index,0,sizeof(int)*input_size);
cudaMemcpy(cuda_a_list,a_list,sizeof(int)*t,cudaMemcpyHostToDevice);
cudaMemcpy(cuda_b_list,b_list,sizeof(int)*t,cudaMemcpyHostToDevice);
cudaMemcpy(cuda_hash_table,hash_table,sizeof(int)*n,cudaMemcpyHostToDevice);
cudaMemcpy(cuda_kicked_list,kicked_list,sizeof(int)*input_size,cudaMemcpyHostToDevice);
cudaMemcpy(cuda_func_index,func_index,sizeof(int)*input_size,cudaMemcpyHostToDevice);
}
}
end=clock();
cudaMemcpy(hash_table,cuda_hash_table,sizeof(int)*n,cudaMemcpyDeviceToHost);
cudaMemcpy(func_index,cuda_func_index,sizeof(int)*input_size,cudaMemcpyDeviceToHost);
printf("%f\n",(double)(end-start)/CLOCKS_PER_SEC );
//##########################################################################################
// Experiment 2
// printf("%d\n", input_size);
if (argc == 4 && input_size>pow(2,23)){
int counter;
float percent = float(100-10*atoi(argv[3]))/100.0;
int *results;
int *lookup_table;
int *cuda_results;
int *cuda_lookup_table;
// printf("Insertion Finished. Start Exp2:\n");
// printf("percent=%f\n",percent );
lookup_table = (int*)malloc(sizeof(int)*input_size);
results = (int *)malloc(sizeof(int)*input_size);
memset(lookup_table,0,sizeof(int)*input_size);
memset(results,0,sizeof(int)*input_size);
cudaMalloc((void **) &cuda_results,sizeof(int)*input_size);
cudaMalloc((void **) &cuda_lookup_table,sizeof(int)*input_size);
for (i=0;i<input_size;i++){
if (i<(int)(input_size*percent)){
lookup_table[i] = random_value[rand()%input_size];
}else{
lookup_table[i] = rand();
}
}
cudaMemcpy(cuda_results,results,sizeof(int)*input_size,cudaMemcpyHostToDevice);
cudaMemcpy(cuda_lookup_table,lookup_table,sizeof(int)*input_size,cudaMemcpyHostToDevice);
counter = 0;
start = clock();
LookUpKernel<<<block_num,block_size>>>(cuda_hash_table,cuda_a_list,cuda_b_list,t,n,p,cuda_lookup_table,cuda_results);
end = clock();
cudaMemcpy(results,cuda_results,sizeof(int)*input_size,cudaMemcpyDeviceToHost);
for(i =0;i<input_size;i++){
counter += results[i];
}
if (counter>=(int)(input_size*percent)){
// printf("counter = %d,percent = %d\n", counter,(int)(input_size*percent));
printf("%f\n",(double)(end-start)/CLOCKS_PER_SEC);
}
free(lookup_table);
free(results);
cudaFree(cuda_results);
cudaFree(cuda_lookup_table);
}
free(a_list);
free(b_list);
free(hash_table);
free(random_value);
free(func_index);
free(kicked_list);
cudaFree(cuda_a_list);
cudaFree(cuda_b_list);
cudaFree(cuda_hash_table);
cudaFree(cuda_random_value);
cudaFree(cuda_func_index);
cudaFree(cuda_kicked_list);
return 0;
}
|
9cc1ff6daffb0e5652a08cbf655324d85b129f5c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define N (1024 * 1024)
__global__ void kernel(float *dA)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float x = 2.0f * 3.1415926f * (float) idx / (float) N;
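  // each thread writes sin(sqrt(2*pi*idx/N)) into its own element of dA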
dA[idx] = sinf(sqrtf(x));
}
int main(int argc, char *argv[])
{
float *hA, *dA;
hA = (float*)malloc(N * sizeof(float));
hipMalloc((void**)&dA, N * sizeof(float));
hipLaunchKernelGGL(( kernel) , dim3(N/512), dim3(512) , 0, 0, dA);
hipMemcpy(hA, dA, N * sizeof(float), hipMemcpyDeviceToHost);
for (int idx = 0; idx < N; idx++)
printf("a[%d] = %.5f\n", idx, hA[idx]);
free(hA);
hipFree(dA);
return EXIT_SUCCESS;
}
| 9cc1ff6daffb0e5652a08cbf655324d85b129f5c.cu | #include <stdio.h>
#include <stdlib.h>
#define N (1024 * 1024)
__global__ void kernel(float *dA)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float x = 2.0f * 3.1415926f * (float) idx / (float) N;
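  // each thread writes sin(sqrt(2*pi*idx/N)) into its own element of dA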
dA[idx] = sinf(sqrtf(x));
}
int main(int argc, char *argv[])
{
float *hA, *dA;
hA = (float*)malloc(N * sizeof(float));
cudaMalloc((void**)&dA, N * sizeof(float));
kernel <<< N/512, 512 >>> (dA);
cudaMemcpy(hA, dA, N * sizeof(float), cudaMemcpyDeviceToHost);
for (int idx = 0; idx < N; idx++)
printf("a[%d] = %.5f\n", idx, hA[idx]);
free(hA);
cudaFree(dA);
return EXIT_SUCCESS;
}
|
245f2c6d25c908291c441f09bd172bfaac2f7830.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/hip/HIPBlas.h>
namespace at { namespace native {
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm");
return legacy::cuda::_th_baddbmm(b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm_out");
return legacy::cuda::_th_baddbmm_out(result, b_self, batch1, batch2, beta, alpha);
}
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
return legacy::cuda::_th_addmm(b_self, mat1, mat2, beta, alpha);
}
Tensor& addmm_cuda_out(Tensor &result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return legacy::cuda::_th_addmm_out(result, b_self, mat1, mat2, beta, alpha);
}
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
Tensor addbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm");
return legacy::cuda::_th_addbmm(b_self, batch1, batch2, beta, alpha);
}
Tensor& addbmm_cuda_out(Tensor& result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm_out");
return legacy::cuda::_th_addbmm_out(result, self, batch1, batch2, beta, alpha);
}
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
return legacy::cuda::_th_bmm(self, mat2);
}
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
return legacy::cuda::_th_bmm_out(result, batch1, batch2);
}
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
Tensor tensor_;
IntArrayRef tensor_strides = tensor.strides();
if ((tensor_strides[0] == 1) && (tensor_strides[1] != 0)) {
tensor_ = tensor;
transpose_tensor = false;
} else if ((tensor_strides[1] == 1) && (tensor_strides[0] != 0)) {
tensor_ = tensor;
transpose_tensor = true;
} else {
transpose_tensor = true;
tensor_ = tensor.clone(at::MemoryFormat::Contiguous);
}
return tensor_;
}
// Check https://github.com/pytorch/pytorch/issues/22078
// for information about the bug. We don't know the exact conditions that trigger it,
// but using Sgemm or Hgemm on Maxwell or Pascal seems to be a
// necessary condition.
static void checkCuda90Bug(int i_m, int i_n, int i_k)
{
#if TORCH_HIP_VERSION < 9200 && TORCH_HIP_VERSION >= 9000
static std::once_flag alreadyWarned;
const int LIMIT = 1 << 21;
if (i_m > LIMIT || i_n > LIMIT || i_k > LIMIT) {
hipDeviceProp_t* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major == 5 || prop->major == 6) {
std::call_once(alreadyWarned, []() {
TORCH_WARN("Matrix multiplication for dimensions larger than 2^21 has known bugs on your combination of CUDA version and device type. Please consider upgrading to CUDA 9.2 or later.");
});
}
}
#endif
}
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
TORCH_CHECK(
(mat1.dim() == 2) && (mat2.dim() == 2) &&
(self.dim() == 2) && (result.dim() == 2),
"tensors must be 2-D"
);
IntArrayRef mat1_sizes = mat1.sizes();
IntArrayRef mat2_sizes = mat2.sizes();
IntArrayRef self_sizes = self.sizes();
TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
TORCH_CHECK(self_sizes[0] == mat1_sizes[0], "self dim 0 must match mat1 dim 0");
TORCH_CHECK(self_sizes[1] == mat2_sizes[1], "self dim 1 must match mat2 dim 1");
// If self and result either point to the same data or if beta is zero,
// we can avoid copying self into result. Otherwise, we need to copy.
if (beta.to<double>() != 0.0) {
if ((result.data_ptr() != self.data_ptr()) || (result.strides() != self.strides())) {
result.copy_(self);
}
}
IntArrayRef result_sizes = result.sizes();
if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
return result;
}
bool transpose_result;
Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
bool transpose_mat1;
bool transpose_mat2;
Tensor mat1_ = transpose_result ? mat2 : mat1;
Tensor mat2_ = transpose_result ? mat1 : mat2;
mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
if (transpose_result) {
transpose_mat1 = !transpose_mat1;
transpose_mat2 = !transpose_mat2;
mat1_sizes = mat1_.sizes();
mat2_sizes = mat2_.sizes();
}
int64_t m = mat1_sizes[transpose_result ? 1 : 0];
int64_t k = mat1_sizes[transpose_result ? 0 : 1];
int64_t n = mat2_sizes[transpose_result ? 0 : 1];
int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
at::ScalarType scalar_type = self.scalar_type();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
if (scalar_type == at::ScalarType::Half || scalar_type == at::ScalarType::Float) {
checkCuda90Bug(static_cast<int>(m), static_cast<int>(n), static_cast<int>(k));
}
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
scalar_t* result_ptr = result_.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
transpose_mat1 ? 't' : 'n',
transpose_mat2 ? 't' : 'n',
m, n, k,
alpha_val,
mat1_ptr, mat1_ld,
mat2_ptr, mat2_ld,
beta_val,
result_ptr, result_ld
);
});
if (result.data_ptr() != result_.data_ptr()) {
result.copy_(result_);
}
return result;
}
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
result.resize_({ self.size(0), mat2.size(1) });
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options());
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
} }
| 245f2c6d25c908291c441f09bd172bfaac2f7830.cu | #include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/cuda/CUDABlas.h>
namespace at { namespace native {
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm");
return legacy::cuda::_th_baddbmm(b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm_out");
return legacy::cuda::_th_baddbmm_out(result, b_self, batch1, batch2, beta, alpha);
}
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
return legacy::cuda::_th_addmm(b_self, mat1, mat2, beta, alpha);
}
Tensor& addmm_cuda_out(Tensor &result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return legacy::cuda::_th_addmm_out(result, b_self, mat1, mat2, beta, alpha);
}
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
Tensor addbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm");
return legacy::cuda::_th_addbmm(b_self, batch1, batch2, beta, alpha);
}
Tensor& addbmm_cuda_out(Tensor& result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm_out");
return legacy::cuda::_th_addbmm_out(result, self, batch1, batch2, beta, alpha);
}
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
return legacy::cuda::_th_bmm(self, mat2);
}
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
return legacy::cuda::_th_bmm_out(result, batch1, batch2);
}
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
Tensor tensor_;
IntArrayRef tensor_strides = tensor.strides();
if ((tensor_strides[0] == 1) && (tensor_strides[1] != 0)) {
tensor_ = tensor;
transpose_tensor = false;
} else if ((tensor_strides[1] == 1) && (tensor_strides[0] != 0)) {
tensor_ = tensor;
transpose_tensor = true;
} else {
transpose_tensor = true;
tensor_ = tensor.clone(at::MemoryFormat::Contiguous);
}
return tensor_;
}
// Check https://github.com/pytorch/pytorch/issues/22078
// for information about the bug. We don't know the exact conditions that trigger it,
// but using Sgemm or Hgemm on Maxwell or Pascal seems to be a
// necessary condition.
static void checkCuda90Bug(int i_m, int i_n, int i_k)
{
#if CUDA_VERSION < 9200 && CUDA_VERSION >= 9000
static std::once_flag alreadyWarned;
const int LIMIT = 1 << 21;
if (i_m > LIMIT || i_n > LIMIT || i_k > LIMIT) {
cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major == 5 || prop->major == 6) {
std::call_once(alreadyWarned, []() {
TORCH_WARN("Matrix multiplication for dimensions larger than 2^21 has known bugs on your combination of CUDA version and device type. Please consider upgrading to CUDA 9.2 or later.");
});
}
}
#endif
}
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
TORCH_CHECK(
(mat1.dim() == 2) && (mat2.dim() == 2) &&
(self.dim() == 2) && (result.dim() == 2),
"tensors must be 2-D"
);
IntArrayRef mat1_sizes = mat1.sizes();
IntArrayRef mat2_sizes = mat2.sizes();
IntArrayRef self_sizes = self.sizes();
TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
TORCH_CHECK(self_sizes[0] == mat1_sizes[0], "self dim 0 must match mat1 dim 0");
TORCH_CHECK(self_sizes[1] == mat2_sizes[1], "self dim 1 must match mat2 dim 1");
// If self and result either point to the same data or if beta is zero,
// we can avoid copying self into result. Otherwise, we need to copy.
if (beta.to<double>() != 0.0) {
if ((result.data_ptr() != self.data_ptr()) || (result.strides() != self.strides())) {
result.copy_(self);
}
}
IntArrayRef result_sizes = result.sizes();
if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
return result;
}
bool transpose_result;
Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
bool transpose_mat1;
bool transpose_mat2;
Tensor mat1_ = transpose_result ? mat2 : mat1;
Tensor mat2_ = transpose_result ? mat1 : mat2;
mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
if (transpose_result) {
transpose_mat1 = !transpose_mat1;
transpose_mat2 = !transpose_mat2;
mat1_sizes = mat1_.sizes();
mat2_sizes = mat2_.sizes();
}
int64_t m = mat1_sizes[transpose_result ? 1 : 0];
int64_t k = mat1_sizes[transpose_result ? 0 : 1];
int64_t n = mat2_sizes[transpose_result ? 0 : 1];
int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
at::ScalarType scalar_type = self.scalar_type();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
if (scalar_type == at::ScalarType::Half || scalar_type == at::ScalarType::Float) {
checkCuda90Bug(static_cast<int>(m), static_cast<int>(n), static_cast<int>(k));
}
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
scalar_t* result_ptr = result_.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
transpose_mat1 ? 't' : 'n',
transpose_mat2 ? 't' : 'n',
m, n, k,
alpha_val,
mat1_ptr, mat1_ld,
mat2_ptr, mat2_ld,
beta_val,
result_ptr, result_ld
);
});
if (result.data_ptr() != result_.data_ptr()) {
result.copy_(result_);
}
return result;
}
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
result.resize_({ self.size(0), mat2.size(1) });
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options());
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
} }
|
729a39a6538399340f96adc9e31f50a192f61c5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void add(int n, float *cRarr, float *cIarr, int *result) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
float cR = cRarr[i];
float cI = cIarr[i];
int n = 0;
float x = 0;
float y = 0;
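    // escape-time iteration z -> z*z + c with c = (cR, cI); stops when y*y >= 4 or after 255 steps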
for(n = 0; (y*y) < 4 && n < 255; n++) {
float xNew = (x * x) - (y * y) + cR;
y = (2 * x * y) + cI;
x = xNew;
}
result[i] = n;
}
} | 729a39a6538399340f96adc9e31f50a192f61c5e.cu | extern "C"
__global__ void add(int n, float *cRarr, float *cIarr, int *result) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
float cR = cRarr[i];
float cI = cIarr[i];
int n = 0;
float x = 0;
float y = 0;
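    // escape-time iteration z -> z*z + c with c = (cR, cI); stops when y*y >= 4 or after 255 steps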
for(n = 0; (y*y) < 4 && n < 255; n++) {
float xNew = (x * x) - (y * y) + cR;
y = (2 * x * y) + cI;
x = xNew;
}
result[i] = n;
}
} |
1bacbb9a8de72d2a491b762a1a48da3f0884366c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "star2d4r-512-4-128_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 33
#define BENCH_RAD 4
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 9 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.11111f * A[t%2][i-4][j] + 0.06251f * A[t%2][i-3][j] + 0.06255f * A[t%2][i-2][j] +
0.06245f * A[t%2][i-1][j] + 0.22222f * A[t%2][i][j-4] + 0.06252f * A[t%2][i][j-3] +
0.06249f * A[t%2][i][j-2] + 0.06244f * A[t%2][i][j-1] + 0.25005f * A[t%2][i][j] +
0.06248f * A[t%2][i][j+1] + 0.06243f * A[t%2][i][j+2] + 0.06253f * A[t%2][i][j+3] -
0.22220f * A[t%2][i][j+4] + 0.06246f * A[t%2][i+1][j] + 0.06242f * A[t%2][i+2][j] +
0.06254f * A[t%2][i+3][j] - 0.11110f * A[t%2][i-4][j];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 1bacbb9a8de72d2a491b762a1a48da3f0884366c.cu | #include <assert.h>
#include <stdio.h>
#include "star2d4r-512-4-128_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 33
#define BENCH_RAD 4
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 9 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.11111f * A[t%2][i-4][j] + 0.06251f * A[t%2][i-3][j] + 0.06255f * A[t%2][i-2][j] +
0.06245f * A[t%2][i-1][j] + 0.22222f * A[t%2][i][j-4] + 0.06252f * A[t%2][i][j-3] +
0.06249f * A[t%2][i][j-2] + 0.06244f * A[t%2][i][j-1] + 0.25005f * A[t%2][i][j] +
0.06248f * A[t%2][i][j+1] + 0.06243f * A[t%2][i][j+2] + 0.06253f * A[t%2][i][j+3] -
0.22220f * A[t%2][i][j+4] + 0.06246f * A[t%2][i+1][j] + 0.06242f * A[t%2][i+2][j] +
0.06254f * A[t%2][i+3][j] - 0.11110f * A[t%2][i-4][j];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
618355ab4710a2de3ae3028240f2ab9dab2fbccf.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
#include "rocblas.h"
#include <hiprand/hiprand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
#include "kcArrayFunctions.h"
//poison log likelihood for one observation
__device__ KC_FP_TYPE lh(KC_FP_TYPE y, KC_FP_TYPE x, KC_FP_TYPE g, KC_FP_TYPE dt, KC_FP_TYPE sh) {
KC_FP_TYPE logex = KC_MAX(((g*x+sh)>80)?(g*x+sh):KC_LOG(1.0+KC_EXP(g*x+sh)),1e-30);
return y*(KC_LOG(logex)+KC_LOG(dt)) - dt*logex - KC_GAMMALN(y+1.0);
}
//sums up log likelihood of each trial given model parameters
__global__ void kcSumGBfinal(const KC_FP_TYPE * log_p_tr, KC_FP_TYPE * log_p, const int NT) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx < 1) {
log_p[0] = 0;
for(int ii = 0; ii < NT; ii++) {
log_p[0] += log_p_tr[ii];
}
}
}
//averages log likelihood of each simulated path
// (one thread for each trial)
__global__ void kcSumGBlogpTr(const KC_FP_TYPE * log_p, KC_FP_TYPE * log_p_tr, const int NT, const int nSims) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < NT) {
log_p_tr[idx] = 0;
KC_FP_TYPE trSum = 0;
KC_FP_TYPE log_x = 0;
log_p_tr[idx] = KC_SQRT(-1.0);
//computes log( 1/nSims * \sum exp( log p(y | sim paths)) ) for a single trial
    // does the sum in a slightly more numerically stable way than just blindly exponentiating all the log likelihoods
for(int ii = 0; ii < nSims && isnan(log_p_tr[idx]);ii++) {
trSum = 1 ;
log_x = log_p[ii*NT+idx];
for(int kk = 0; kk < ii; kk++) {
trSum += KC_EXP(log_p[kk*NT+idx] - log_x);
}
for(int kk = ii+1; kk < nSims; kk++) {
trSum += KC_EXP(log_p[kk*NT+idx] - log_x);
}
if(trSum > 1e-25 && !isnan(trSum) && !isinf(trSum)) {
log_p_tr[idx] = log_x-KC_LOG((double)nSims)+KC_LOG(trSum);
break;
}
}
}
}
//simulates a ramping (diffusion-to-bound) path for each trial and computes likelihood
__global__ void kcSimGBPaths(const KC_FP_TYPE * y, const int * trIdx, const int * betaIdx, KC_FP_TYPE * xx, const KC_FP_TYPE * b,const KC_FP_TYPE w2,const KC_FP_TYPE l_0, const KC_FP_TYPE g, const KC_FP_TYPE dt, KC_FP_TYPE * log_p, const int NT, const int TT, const int sim, KC_FP_TYPE * spe) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < NT ) {
int trNum = idx;
int T1 = trIdx[trNum];
//xx contains zero mean Gaussian noise of variance \omega^2
xx[T1] += l_0; //xx[T1] now contains initial point for simulated diffusion trajectory for this trial
int currIdx = sim*(NT)+idx;
log_p[currIdx] = lh(y[T1],xx[T1],g,dt,spe[T1]);
for(int ii = T1+1; ii < trIdx[trNum+1];ii++) {
      //propagates particle forward in time
xx[ii] = (xx[ii-1] >= 1.0)?1.0:KC_MIN(xx[ii] + xx[ii-1]+b[betaIdx[ii]],1.0);
//log likelihood of single observation (bin) y[ii] given diffusion path is at x[ii]
log_p[currIdx] += lh(y[ii],xx[ii],g,dt,spe[ii]);
}
}
}
//Estimates the log probability of a set of spike trains under the ramping model given a set of fixed parameters
// This estimation is made by Monte Carlo simulations from the model to integrate out the latent variable
//args
// 0 = y (observations)
// 1 = trIdx (array of indices marking where each trial starts in y, y being indexed at 0. Includes final value that should be length of y)
// 2 = betaIdxVector (array that gives coherence used in each bin of y. i.e., accesses the beta value used at each timepoint. values begin at 0 instead of 1 to be consistent with C, unlike MATLAB)
// 3 = spike history effect (same size as y)
// 4 = beta values
// 5 = w (variance of diffusion process)
// 6 = l_0 (starting lambda value)
// 7 = g (absorbing boundary effective height)
// 8 = dt (bin size in seconds)
// 9 = number of samples to use to estimate log probability of observations (I recommend using at least 1000)
//outputs (left-hand side)
// 0 = log p(y|\theta)
// 1 = log p(y|\theta) for each individual trial
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
hipError_t ce;
//load up trial data
unsigned int TT = kcGetArrayNumEl(prhs[0]);
KC_FP_TYPE * y = kcGetArrayData(prhs[0]);
int * trIdx = kcGetArrayDataInt(prhs[1]);
unsigned int NT = kcGetArrayNumEl(prhs[1])-1;
int * betaIdx = kcGetArrayDataInt(prhs[2],TT);
// load spike history effect
KC_FP_TYPE * spe = kcGetArrayData(prhs[3]);
//how many simulations to use to estimate log p(y|\theta)
int trialsToSim = (int)mxGetScalar(prhs[9]);
//load up parameters to simulate model
if(mxGetClassID(prhs[4]) != KC_FP_TYPE_MATLAB) {
mexErrMsgTxt("Beta input wrong floating point type (kcSimGaussianBound)!");
}
KC_FP_TYPE * b = (KC_FP_TYPE *)mxGetPr(prhs[4]);
int numBetas = mxGetNumberOfElements(prhs[4]);
KC_FP_TYPE * b_gpu;
ce = hipMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas);
if(ce != hipSuccess) {
mexPrintf("Error allocating space for betas on device - first allocation in function (kcSimGaussianBound) ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
}
checkCudaErrors(hipMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,hipMemcpyHostToDevice));
KC_FP_TYPE w = mxGetScalar(prhs[5]);
KC_FP_TYPE l_0 = mxGetScalar(prhs[6]);
KC_FP_TYPE g = mxGetScalar(prhs[7]);
KC_FP_TYPE dt = mxGetScalar(prhs[8]);
//setup CUDA variables + random number generator
int randSize = TT + (((TT)%2==0)?0:1);
KC_FP_TYPE * xx;
checkCudaErrors(hipMalloc((void**)&xx,randSize*sizeof(KC_FP_TYPE)));
hiprandGenerator_t curandGen = 0;
hiprandStatus_t hiprandStatus_t;
hiprandStatus_t = hiprandCreateGenerator(&curandGen, HIPRAND_RNG_PSEUDO_DEFAULT);
if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) {
mexPrintf("CURAND-1 error %d\n",(int)hiprandStatus_t);
mexErrMsgTxt("CUDA errors");
}
struct timeval now;
gettimeofday(&now,NULL);
unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec);
hiprandStatus_t = hiprandSetPseudoRandomGeneratorSeed(curandGen, mySeed);
if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) {
mexPrintf("CURAND-2 error %d\n",(int)hiprandStatus_t);
mexErrMsgTxt("CUDA errors");
}
int blockSize = 2;
int nBlocks = NT/blockSize + ((NT%blockSize==0)?0:1);
int blockSizeT = 2;
int nBlocksT = NT/blockSizeT + ((NT%blockSizeT==0)?0:1);
    //allocates space on GPU for simulating the likelihood
KC_FP_TYPE * log_p;
//KC_FP_TYPE * log_p_2;
KC_FP_TYPE * log_p_tr;
KC_FP_TYPE * sum_log_p;
checkCudaErrors(hipMalloc((void**)&log_p,sizeof(KC_FP_TYPE)*NT*trialsToSim));
//checkCudaErrors(hipMalloc((void**)&log_p_2,sizeof(KC_FP_TYPE)*NT*trialsToSim));
checkCudaErrors(hipMalloc((void**)&log_p_tr,sizeof(KC_FP_TYPE)*NT));
checkCudaErrors(hipMalloc((void**)&sum_log_p,sizeof(KC_FP_TYPE)*1));
// generate AR1 noise
for(int kk = 0; kk < trialsToSim; kk++) {
//generates zero mean Gaussian noise with correct variance
hiprandStatus_t = KC_RANDOM_NORMAL_FUNCTION(curandGen,xx,randSize,0,KC_SQRT(w));
if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) {
mexPrintf("CURAND gen error %d\n",(int)hiprandStatus_t);
mexErrMsgTxt("CUDA errors");
}
//checkCudaErrors(hipDeviceSynchronize());
//calculate path + logP
hipLaunchKernelGGL(( kcSimGBPaths), dim3(nBlocks),dim3(blockSize), 0, 0, y,trIdx,betaIdx,xx,b_gpu,w,l_0,g,dt,log_p,NT,TT,kk,spe);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error in simulating of kcSimGaussianBound.cu ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA errors");
}
}
// log_p_2 = log_p;
//average likelihood of each sampled path to get log p(y|\theta) for each trial
hipLaunchKernelGGL(( kcSumGBlogpTr), dim3(nBlocksT),dim3(blockSizeT), 0, 0, log_p,log_p_tr,NT,trialsToSim);
checkCudaErrors(hipDeviceSynchronize());
//sums up log likelihood of each trial
hipLaunchKernelGGL(( kcSumGBfinal), dim3(1),dim3(1), 0, 0, log_p_tr,sum_log_p,NT);
checkCudaErrors(hipDeviceSynchronize());
//copy back to host
if(nlhs > 0) {
plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(hipMemcpy((KC_FP_TYPE *)mxGetPr(plhs[0]),sum_log_p,1*sizeof(KC_FP_TYPE),hipMemcpyDeviceToHost));
}
if(nlhs > 1) {
plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(hipMemcpy((KC_FP_TYPE *)mxGetPr(plhs[1]),log_p_tr,NT*sizeof(KC_FP_TYPE),hipMemcpyDeviceToHost));
}
//free up CUDA variables
checkCudaErrors(hiprandDestroyGenerator(curandGen));
checkCudaErrors(hipFree(xx));
checkCudaErrors(hipFree(b_gpu));
checkCudaErrors(hipFree(log_p));
checkCudaErrors(hipFree(log_p_tr));
checkCudaErrors(hipFree(sum_log_p));
}
| 618355ab4710a2de3ae3028240f2ab9dab2fbccf.cu | #include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda_runtime.h>
#include <cusparse_v2.h>
#include "cublas_v2.h"
#include <curand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
#include "kcArrayFunctions.h"
//Poisson log likelihood for one observation
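//the conditional rate is the soft-rectified drive log(1+exp(g*x+sh)); the (g*x+sh)>80 branch avoids overflow in the exponential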
__device__ KC_FP_TYPE lh(KC_FP_TYPE y, KC_FP_TYPE x, KC_FP_TYPE g, KC_FP_TYPE dt, KC_FP_TYPE sh) {
KC_FP_TYPE logex = KC_MAX(((g*x+sh)>80)?(g*x+sh):KC_LOG(1.0+KC_EXP(g*x+sh)),1e-30);
return y*(KC_LOG(logex)+KC_LOG(dt)) - dt*logex - KC_GAMMALN(y+1.0);
}
//sums up log likelihood of each trial given model parameters
__global__ void kcSumGBfinal(const KC_FP_TYPE * log_p_tr, KC_FP_TYPE * log_p, const int NT) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx < 1) {
log_p[0] = 0;
for(int ii = 0; ii < NT; ii++) {
log_p[0] += log_p_tr[ii];
}
}
}
//averages log likelihood of each simulated path
// (one thread for each trial)
__global__ void kcSumGBlogpTr(const KC_FP_TYPE * log_p, KC_FP_TYPE * log_p_tr, const int NT, const int nSims) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < NT) {
log_p_tr[idx] = 0;
KC_FP_TYPE trSum = 0;
KC_FP_TYPE log_x = 0;
log_p_tr[idx] = KC_SQRT(-1.0);
//computes log( 1/nSims * \sum exp( log p(y | sim paths)) ) for a single trial
// does the sum in a slightly more numerically stable way than just blindly exponentiating all the log likelihoods
for(int ii = 0; ii < nSims && isnan(log_p_tr[idx]);ii++) {
trSum = 1 ;
log_x = log_p[ii*NT+idx];
for(int kk = 0; kk < ii; kk++) {
trSum += KC_EXP(log_p[kk*NT+idx] - log_x);
}
for(int kk = ii+1; kk < nSims; kk++) {
trSum += KC_EXP(log_p[kk*NT+idx] - log_x);
}
if(trSum > 1e-25 && !isnan(trSum) && !isinf(trSum)) {
log_p_tr[idx] = log_x-KC_LOG((double)nSims)+KC_LOG(trSum);
break;
}
}
}
}
//simulates a ramping (diffusion-to-bound) path for each trial and computes likelihood
__global__ void kcSimGBPaths(const KC_FP_TYPE * y, const int * trIdx, const int * betaIdx, KC_FP_TYPE * xx, const KC_FP_TYPE * b,const KC_FP_TYPE w2,const KC_FP_TYPE l_0, const KC_FP_TYPE g, const KC_FP_TYPE dt, KC_FP_TYPE * log_p, const int NT, const int TT, const int sim, KC_FP_TYPE * spe) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < NT ) {
int trNum = idx;
int T1 = trIdx[trNum];
//xx contains zero mean Gaussian noise of variance \omega^2
xx[T1] += l_0; //xx[T1] now contains initial point for simulated diffusion trajectory for this trial
int currIdx = sim*(NT)+idx;
log_p[currIdx] = lh(y[T1],xx[T1],g,dt,spe[T1]);
for(int ii = T1+1; ii < trIdx[trNum+1];ii++) {
//propagates the particle forward in time
xx[ii] = (xx[ii-1] >= 1.0)?1.0:KC_MIN(xx[ii] + xx[ii-1]+b[betaIdx[ii]],1.0);
//log likelihood of single observation (bin) y[ii] given diffusion path is at x[ii]
log_p[currIdx] += lh(y[ii],xx[ii],g,dt,spe[ii]);
}
}
}
//Estimates the log probability of a set of spike trains under the ramping model given a set of fixed parameters
// This estimation is made by Monte Carlo simulations from the model to integrate out latent variable
//args
// 0 = y (observations)
// 1 = trIdx (array of trial start indices into y, 0-indexed; includes a final entry equal to the length of y)
// 2 = betaIdxVector (array giving the coherence used at each bin of y, i.e., which beta value applies at each timepoint; values begin at 0 instead of 1 to be consistent with C, unlike MATLAB)
// 3 = spike history effect (same size as y)
// 4 = beta values
// 5 = w (variance of diffusion process)
// 6 = l_0 (starting lambda value)
// 7 = g (absorbing boundary effective height)
// 8 = dt (bin size in seconds)
// 9 = number of samples to use to estimate log probability of observations (I recommend using at least 1000)
//outputs (left-hand side)
// 0 = log p(y|\theta)
// 1 = log p(y|\theta) for each individual trial
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
cudaError_t ce;
//load up trial data
unsigned int TT = kcGetArrayNumEl(prhs[0]);
KC_FP_TYPE * y = kcGetArrayData(prhs[0]);
int * trIdx = kcGetArrayDataInt(prhs[1]);
unsigned int NT = kcGetArrayNumEl(prhs[1])-1;
int * betaIdx = kcGetArrayDataInt(prhs[2],TT);
// load spike history effect
KC_FP_TYPE * spe = kcGetArrayData(prhs[3]);
//how many simulations to use to estimate log p(y|\theta)
int trialsToSim = (int)mxGetScalar(prhs[9]);
//load up parameters to simulate model
if(mxGetClassID(prhs[4]) != KC_FP_TYPE_MATLAB) {
mexErrMsgTxt("Beta input wrong floating point type (kcSimGaussianBound)!");
}
KC_FP_TYPE * b = (KC_FP_TYPE *)mxGetPr(prhs[4]);
int numBetas = mxGetNumberOfElements(prhs[4]);
KC_FP_TYPE * b_gpu;
ce = cudaMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas);
if(ce != cudaSuccess) {
mexPrintf("Error allocating space for betas on device - first allocation in function (kcSimGaussianBound) ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
}
checkCudaErrors(cudaMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,cudaMemcpyHostToDevice));
KC_FP_TYPE w = mxGetScalar(prhs[5]);
KC_FP_TYPE l_0 = mxGetScalar(prhs[6]);
KC_FP_TYPE g = mxGetScalar(prhs[7]);
KC_FP_TYPE dt = mxGetScalar(prhs[8]);
//setup CUDA variables + random number generator
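//note: randSize pads TT up to an even count; the Box-Muller-based normal generator used below expects an even number of outputs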
int randSize = TT + (((TT)%2==0)?0:1);
KC_FP_TYPE * xx;
checkCudaErrors(cudaMalloc((void**)&xx,randSize*sizeof(KC_FP_TYPE)));
curandGenerator_t curandGen = 0;
curandStatus_t curandStatus;
curandStatus = curandCreateGenerator(&curandGen, CURAND_RNG_PSEUDO_DEFAULT);
if(curandStatus != CURAND_STATUS_SUCCESS ) {
mexPrintf("CURAND-1 error %d\n",(int)curandStatus);
mexErrMsgTxt("CUDA errors");
}
struct timeval now;
gettimeofday(&now,NULL);
unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec);
curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, mySeed);
if(curandStatus != CURAND_STATUS_SUCCESS ) {
mexPrintf("CURAND-2 error %d\n",(int)curandStatus);
mexErrMsgTxt("CUDA errors");
}
int blockSize = 2;
int nBlocks = NT/blockSize + ((NT%blockSize==0)?0:1);
int blockSizeT = 2;
int nBlocksT = NT/blockSizeT + ((NT%blockSizeT==0)?0:1);
//allocates space on the GPU for simulating the likelihood
KC_FP_TYPE * log_p;
//KC_FP_TYPE * log_p_2;
KC_FP_TYPE * log_p_tr;
KC_FP_TYPE * sum_log_p;
checkCudaErrors(cudaMalloc((void**)&log_p,sizeof(KC_FP_TYPE)*NT*trialsToSim));
//checkCudaErrors(cudaMalloc((void**)&log_p_2,sizeof(KC_FP_TYPE)*NT*trialsToSim));
checkCudaErrors(cudaMalloc((void**)&log_p_tr,sizeof(KC_FP_TYPE)*NT));
checkCudaErrors(cudaMalloc((void**)&sum_log_p,sizeof(KC_FP_TYPE)*1));
// generate AR1 noise
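//each pass draws a fresh Gaussian noise vector, simulates one latent path per trial, and stores that simulation's NT per-trial log likelihoods in log_p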
for(int kk = 0; kk < trialsToSim; kk++) {
//generates zero mean Gaussian noise with correct variance
curandStatus = KC_RANDOM_NORMAL_FUNCTION(curandGen,xx,randSize,0,KC_SQRT(w));
if(curandStatus != CURAND_STATUS_SUCCESS ) {
mexPrintf("CURAND gen error %d\n",(int)curandStatus);
mexErrMsgTxt("CUDA errors");
}
//checkCudaErrors(cudaDeviceSynchronize());
//calculate path + logP
kcSimGBPaths<<<nBlocks,blockSize>>>(y,trIdx,betaIdx,xx,b_gpu,w,l_0,g,dt,log_p,NT,TT,kk,spe);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error in simulating of kcSimGaussianBound.cu ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA errors");
}
}
// log_p_2 = log_p;
//average likelihood of each sampled path to get log p(y|\theta) for each trial
kcSumGBlogpTr<<<nBlocksT,blockSizeT>>>(log_p,log_p_tr,NT,trialsToSim);
checkCudaErrors(cudaDeviceSynchronize());
//sums up log likelihood of each trial
kcSumGBfinal<<<1,1>>>(log_p_tr,sum_log_p,NT);
checkCudaErrors(cudaDeviceSynchronize());
//copy back to host
if(nlhs > 0) {
plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(cudaMemcpy((KC_FP_TYPE *)mxGetPr(plhs[0]),sum_log_p,1*sizeof(KC_FP_TYPE),cudaMemcpyDeviceToHost));
}
if(nlhs > 1) {
plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(cudaMemcpy((KC_FP_TYPE *)mxGetPr(plhs[1]),log_p_tr,NT*sizeof(KC_FP_TYPE),cudaMemcpyDeviceToHost));
}
//free up CUDA variables
checkCudaErrors(curandDestroyGenerator(curandGen));
checkCudaErrors(cudaFree(xx));
checkCudaErrors(cudaFree(b_gpu));
checkCudaErrors(cudaFree(log_p));
checkCudaErrors(cudaFree(log_p_tr));
checkCudaErrors(cudaFree(sum_log_p));
}
|
3329f852afe51b2a4cace70be8e2ff5cf7f22132.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "NormalDistributionsTransform.h"
#include "debug.h"
#include <cmath>
namespace gpu {
void GNormalDistributionsTransform::setInputTarget(float *target_x, float *target_y, float *target_z, int points_number)
{
GRegistration::setInputTarget(target_x, target_y, target_z, points_number);
if (points_number != 0)
voxel_grid_.setInput(target_x_, target_y_, target_z_, target_points_number_);
}
void GNormalDistributionsTransform::computeTransformation(Eigen::Matrix<float, 4, 4> &guess)
{
nr_iterations_ = 0;
converged_ = false;
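//precompute the constants of the exponential score approximation: gauss_c1/gauss_c2 weight the normal and uniform (outlier_ratio_) components, and gauss_d1_/gauss_d2_ are reused by the derivative kernels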
double gauss_c1, gauss_c2, gauss_d3;
gauss_c1 = 10 * ( 1 - outlier_ratio_);
gauss_c2 = outlier_ratio_ / pow(resolution_, 3);
gauss_d3 = - log(gauss_c2);
gauss_d1_ = -log(gauss_c1 + gauss_c2) - gauss_d3;
gauss_d2_ = -2 * log((-log(gauss_c1 * exp(-0.5) + gauss_c2) - gauss_d3) / gauss_d1_);
if (guess != Eigen::Matrix4f::Identity()) {
final_transformation_ = guess;
transformPointCloud(x_, y_, z_, trans_x_, trans_y_, trans_z_, points_number_, guess);
}
Eigen::Transform<float, 3, Eigen::Affine, Eigen::ColMajor> eig_transformation;
eig_transformation.matrix() = final_transformation_;
Eigen::Matrix<double, 6, 1> p, delta_p, score_gradient;
Eigen::Vector3f init_translation = eig_transformation.translation();
Eigen::Vector3f init_rotation = eig_transformation.rotation().eulerAngles(0, 1, 2);
p << init_translation(0), init_translation(1), init_translation(2), init_rotation(0), init_rotation(1), init_rotation(2);
Eigen::Matrix<double, 6, 6> hessian;
double score = 0;
double delta_p_norm;
score = computeDerivatives(score_gradient, hessian, trans_x_, trans_y_, trans_z_, points_number_, p);
while (!converged_) {
previous_transformation_ = transformation_;
Eigen::JacobiSVD<Eigen::Matrix<double, 6, 6>> sv(hessian, Eigen::ComputeFullU | Eigen::ComputeFullV);
delta_p = sv.solve(-score_gradient);
delta_p_norm = delta_p.norm();
if (delta_p_norm == 0 || delta_p_norm != delta_p_norm) {
trans_probability_ = score / static_cast<double>(points_number_);
converged_ = delta_p_norm == delta_p_norm;
return;
}
delta_p.normalize();
delta_p_norm = computeStepLengthMT(p, delta_p, delta_p_norm, step_size_, transformation_epsilon_ / 2, score, score_gradient, hessian, trans_x_, trans_y_, trans_z_, points_number_);
delta_p *= delta_p_norm;
transformation_ = (Eigen::Translation<float, 3>(static_cast<float>(delta_p(0)), static_cast<float>(delta_p(1)), static_cast<float>(delta_p(2))) *
Eigen::AngleAxis<float>(static_cast<float>(delta_p(3)), Eigen::Vector3f::UnitX()) *
Eigen::AngleAxis<float>(static_cast<float>(delta_p(4)), Eigen::Vector3f::UnitY()) *
Eigen::AngleAxis<float>(static_cast<float>(delta_p(5)), Eigen::Vector3f::UnitZ())).matrix();
p = p + delta_p;
//No visualizer update here
if (nr_iterations_ > max_iterations_ || (nr_iterations_ && (::fabs(delta_p_norm) < transformation_epsilon_)))
converged_ = true;
nr_iterations_++;
}
trans_probability_ = score / static_cast<double>(points_number_);
}
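//gives each point its own rows x cols matrix: matrix i points into the shared matrix_buff at offset i with an element stride of matrix_num, and every entry is zeroed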
extern "C" __global__ void matrixListInit(MatrixDevice *matrix, double *matrix_buff, int matrix_num, int rows, int cols)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < matrix_num; i += stride) {
matrix[i].setRows(rows);
matrix[i].setCols(cols);
matrix[i].setOffset(matrix_num);
matrix[i].setBuffer(matrix_buff + i);
for (int j = 0; j < rows; j++) {
for (int k = 0; k < cols; k++) {
matrix[i](j, k) = 0;
}
}
}
}
extern "C" __global__ void computePointDerivatives(float *x, float *y, float *z, int points_num,
int *valid_points, int valid_points_num,
MatrixDevice j_ang_a, MatrixDevice j_ang_b, MatrixDevice j_ang_c, MatrixDevice j_ang_d,
MatrixDevice j_ang_e, MatrixDevice j_ang_f, MatrixDevice j_ang_g, MatrixDevice j_ang_h,
MatrixDevice h_ang_a2, MatrixDevice h_ang_a3, MatrixDevice h_ang_b2, MatrixDevice h_ang_b3, MatrixDevice h_ang_c2,
MatrixDevice h_ang_c3, MatrixDevice h_ang_d1, MatrixDevice h_ang_d2, MatrixDevice h_ang_d3, MatrixDevice h_ang_e1,
MatrixDevice h_ang_e2, MatrixDevice h_ang_e3, MatrixDevice h_ang_f1, MatrixDevice h_ang_f2, MatrixDevice h_ang_f3,
MatrixDevice *point_gradients, MatrixDevice *point_hessians, bool compute_hessian)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
//Original coordinates
float o_x = x[pid];
float o_y = y[pid];
float o_z = z[pid];
MatrixDevice pg = point_gradients[i]; //3x6 Matrix
//Compute point derivatives
pg(1, 3) = o_x * j_ang_a(0) + o_y * j_ang_a(1) + o_z * j_ang_a(2);
pg(2, 3) = o_x * j_ang_b(0) + o_y * j_ang_b(1) + o_z * j_ang_b(2);
pg(0, 4) = o_x * j_ang_c(0) + o_y * j_ang_c(1) + o_z * j_ang_c(2);
pg(1, 4) = o_x * j_ang_d(0) + o_y * j_ang_d(1) + o_z * j_ang_d(2);
pg(2, 4) = o_x * j_ang_e(0) + o_y * j_ang_e(1) + o_z * j_ang_e(2);
pg(0, 5) = o_x * j_ang_f(0) + o_y * j_ang_f(1) + o_z * j_ang_f(2);
pg(1, 5) = o_x * j_ang_g(0) + o_y * j_ang_g(1) + o_z * j_ang_g(2);
pg(2, 5) = o_x * j_ang_h(0) + o_y * j_ang_h(1) + o_z * j_ang_h(2);
if (compute_hessian) {
MatrixDevice ph = point_hessians[i]; //18x6 Matrix
ph(9, 3) = 0;
ph(10, 3) = o_x * h_ang_a2(0) + o_y * h_ang_a2(1) + o_z * h_ang_a2(2);
ph(11, 3) = o_x * h_ang_a3(0) + o_y * h_ang_a3(1) + o_z * h_ang_a3(2);
ph(12, 3) = ph(9, 4) = 0;
ph(13, 3) = ph(10, 4) = o_x * h_ang_b2(0) + o_y * h_ang_b2(1) + o_z * h_ang_b2(2);
ph(14, 3) = ph(11, 4) = o_x * h_ang_b3(0) + o_y * h_ang_b3(1) + o_z * h_ang_b3(2);
ph(15, 3) = 0;
ph(16, 3) = ph(9, 5) = o_x * h_ang_c2(0) + o_y * h_ang_c2(1) + o_z * h_ang_c2(2);
ph(17, 3) = ph(10, 5) = o_x * h_ang_c3(0) + o_y * h_ang_c3(1) + o_z * h_ang_c3(2);
ph(12, 4) = o_x * h_ang_d1(0) + o_y * h_ang_d1(1) + o_z * h_ang_d1(2);
ph(13, 4) = o_x * h_ang_d2(0) + o_y * h_ang_d2(1) + o_z * h_ang_d2(2);
ph(14, 4) = o_x * h_ang_d3(0) + o_y * h_ang_d3(1) + o_z * h_ang_d3(2);
ph(15, 4) = ph(12, 5) = o_x * h_ang_e1(0) + o_y * h_ang_e1(1) + o_z * h_ang_e1(2);
ph(16, 4) = ph(13, 5) = o_x * h_ang_e2(0) + o_y * h_ang_e2(1) + o_z * h_ang_e2(2);
ph(17, 4) = ph(14, 5) = o_x * h_ang_e3(0) + o_y * h_ang_e3(1) + o_z * h_ang_e3(2);
ph(15, 5) = o_x * h_ang_f1(0) + o_y * h_ang_f1(1) + o_z * h_ang_f1(2);
ph(16, 5) = o_x * h_ang_f2(0) + o_y * h_ang_f2(1) + o_z * h_ang_f2(2);
ph(17, 5) = o_x * h_ang_f3(0) + o_y * h_ang_f3(1) + o_z * h_ang_f3(2);
}
}
}
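//per-point kernel: walks the voxels returned by the radius search and accumulates the score, score gradient and (optionally) Hessian contribution of each valid point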
extern "C" __global__ void computeDerivative(float *trans_x, float *trans_y, float *trans_z, int points_num,
int *valid_points, int *voxel_id, int valid_points_num,
GVoxel *grid, double gauss_d1, double gauss_d2,
MatrixDevice *point_gradients, MatrixDevice *point_hessians,
MatrixDevice *score_gradients, MatrixDevice *hessians,
double *score, bool compute_hessian)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
//Transformed coordinates
float t_x = trans_x[pid];
float t_y = trans_y[pid];
float t_z = trans_z[pid];
MatrixDevice pg = point_gradients[i]; //3x6 Matrix
MatrixDevice sg = score_gradients[i]; //6x1 Matrix
double score_inc = 0;
for ( int vid = voxel_id[i]; vid < voxel_id[i + 1]; vid++) {
GVoxel *voxel = grid + vid;
MatrixDevice centroid = voxel->centroid();
MatrixDevice icov = voxel->inverseCovariance(); //3x3 matrix
double cov_dxd_pi_x, cov_dxd_pi_y, cov_dxd_pi_z;
t_x -= centroid(0);
t_y -= centroid(1);
t_z -= centroid(2);
double e_x_cov_x = expf(-gauss_d2 * ((t_x * icov(0, 0) + t_y * icov(1, 0) + t_z * icov(2, 0)) * t_x
+ ((t_x * icov(0, 1) + t_y * icov(1, 1) + t_z * icov(2, 1)) * t_y)
+ ((t_x * icov(0, 2) + t_y * icov(1, 2) + t_z * icov(2, 2)) * t_z)) / 2);
score_inc += -gauss_d1 * e_x_cov_x;
e_x_cov_x *= gauss_d2;
e_x_cov_x *= gauss_d1;
for (int n = 0; n < 6; n++) {
cov_dxd_pi_x = icov(0, 0) * pg(0, n) + icov(0, 1) * pg(1, n) + icov(0, 2) * pg(2, n);
cov_dxd_pi_y = icov(1, 0) * pg(0, n) + icov(1, 1) * pg(1, n) + icov(1, 2) * pg(2, n);
cov_dxd_pi_z = icov(2, 0) * pg(0, n) + icov(2, 1) * pg(1, n) + icov(2, 2) * pg(2, n);
sg(n) += (t_x * cov_dxd_pi_x + t_y * cov_dxd_pi_y + t_z * cov_dxd_pi_z) * e_x_cov_x;
//Compute hessian
if (compute_hessian) {
MatrixDevice ph = point_hessians[i]; //18x6 Matrix
MatrixDevice h = hessians[i]; //6x6 Matrix
for (int p = 0; p < h.cols(); p++) {
h(n, p) += e_x_cov_x * (-gauss_d2 * (t_x * cov_dxd_pi_x + t_y * cov_dxd_pi_y + t_z * cov_dxd_pi_z) *
(t_x * (icov(0, 0) * pg(0, p) + icov(0, 1) * pg(1, p) + icov(0, 2) * pg(2, p))
+ t_y * (icov(1, 0) * pg(0, p) + icov(1, 1) * pg(1, p) + icov(1, 2) * pg(2, p))
+ t_z * (icov(2, 0) * pg(0, p) + icov(2, 1) * pg(1, p) + icov(2, 2) * pg(2, p)))
+ (t_x * (icov(0, 0) * ph(3 * n, p) + icov(0, 1) * ph(3 * n + 1, p) + icov(0, 2) * ph(3 * n + 2, p))
+ t_y * (icov(1, 0) * ph(3 * n, p) + icov(1, 1) * ph(3 * n + 1, p) + icov(1, 2) * ph(3 * n + 2, p))
+ t_z * (icov(2, 0) * ph(3 * n, p) + icov(2, 1) * ph(3 * n + 1, p) + icov(2, 2) * ph(3 * n + 2, p)))
+ (pg(0, p) * cov_dxd_pi_x + pg(1, p) * cov_dxd_pi_y + pg(2, p) * cov_dxd_pi_z));
}
}
}
}
score[i] = score_inc;
}
}
/* Compute sum of a list of matrices */
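//one step of a pairwise reduction: entry i absorbs entry i + half_size; the host calls this repeatedly with a halving half_size until only element 0 remains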
extern "C" __global__ void matrixSum(MatrixDevice *matrix_list, int full_size, int half_size)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < half_size; i += stride) {
MatrixDevice left = matrix_list[i];
MatrixDevice right = (i + half_size < full_size) ? matrix_list[i + half_size] : MatrixDevice();
if (!right.isEmpty()) {
for (int j = 0; j < left.rows(); j++) {
for (int k = 0; k < left.cols(); k++) {
left(j, k) += right(j, k);
}
}
}
}
}
extern "C" __global__ void sumScore(double *score, int full_size, int half_size)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < half_size; i += stride) {
score[i] += (i + half_size < full_size) ? score[i + half_size] : 0;
}
}
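//host-side driver: runs the voxel radius search, launches the per-point derivative kernels, tree-reduces the per-point results and copies the summed score, gradient and Hessian back to the host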
double GNormalDistributionsTransform::computeDerivatives(Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian,
float *trans_x, float *trans_y, float *trans_z,
int points_num, Eigen::Matrix<double, 6, 1> pose, bool compute_hessian)
{
MatrixHost p(6, 1);
for (int i = 0; i < 6; i++) {
p(i) = pose(i, 0);
}
//Compute Angle Derivatives
computeAngleDerivatives(p, compute_hessian);
//Radius Search
voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX);
int *valid_points = voxel_grid_.getValidPoints();
int *voxel_id = voxel_grid_.getVoxelIds();
int search_size = voxel_grid_.getSearchResultSize();
int valid_points_num = voxel_grid_.getValidPointsNum();
GVoxel *voxel_list = voxel_grid_.getVoxelList();
int voxel_num = voxel_grid_.getVoxelNum();
float max_x = voxel_grid_.getMaxX();
float max_y = voxel_grid_.getMaxY();
float max_z = voxel_grid_.getMaxZ();
float min_x = voxel_grid_.getMinX();
float min_y = voxel_grid_.getMinY();
float min_z = voxel_grid_.getMinZ();
float voxel_x = voxel_grid_.getVoxelX();
float voxel_y = voxel_grid_.getVoxelY();
float voxel_z = voxel_grid_.getVoxelZ();
int max_b_x = voxel_grid_.getMaxBX();
int max_b_y = voxel_grid_.getMaxBY();
int max_b_z = voxel_grid_.getMaxBZ();
int min_b_x = voxel_grid_.getMinBX();
int min_b_y = voxel_grid_.getMinBY();
int min_b_z = voxel_grid_.getMinBZ();
//Update score gradient and hessian matrix
MatrixDevice *gradients_list, *hessians_list, *points_gradient, *points_hessian;
checkCudaErrors(hipMalloc(&gradients_list, sizeof(MatrixDevice) * valid_points_num));
checkCudaErrors(hipMalloc(&hessians_list, sizeof(MatrixDevice) * valid_points_num));
checkCudaErrors(hipMalloc(&points_gradient, sizeof(MatrixDevice) * valid_points_num));
checkCudaErrors(hipMalloc(&points_hessian, sizeof(MatrixDevice) * valid_points_num));
double *gradient_buff, *hessian_buff, *points_gradient_buff, *points_hessian_buff, *score;
checkCudaErrors(hipMalloc(&gradient_buff, sizeof(double) * valid_points_num * 6));
checkCudaErrors(hipMalloc(&hessian_buff, sizeof(double) * valid_points_num * 6 * 6));
checkCudaErrors(hipMalloc(&points_gradient_buff, sizeof(double) * valid_points_num * 3 * 6));
checkCudaErrors(hipMalloc(&points_hessian_buff, sizeof(double) * valid_points_num * 18 * 6));
checkCudaErrors(hipMalloc(&score, sizeof(double) * valid_points_num));
int block_x = (valid_points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_points_num;
int grid_x = (valid_points_num - 1) / block_x + 1;
hipLaunchKernelGGL(( matrixListInit), dim3(grid_x), dim3(block_x), 0, 0, gradients_list, gradient_buff, valid_points_num, 1, 6);
hipLaunchKernelGGL(( matrixListInit), dim3(grid_x), dim3(block_x), 0, 0, hessians_list, hessian_buff, valid_points_num, 6, 6);
hipLaunchKernelGGL(( matrixListInit), dim3(grid_x), dim3(block_x), 0, 0, points_gradient, points_gradient_buff, valid_points_num, 3, 6);
hipLaunchKernelGGL(( matrixListInit), dim3(grid_x), dim3(block_x), 0, 0, points_hessian, points_hessian_buff, valid_points_num, 18, 6);
hipLaunchKernelGGL(( computePointDerivatives), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_,
valid_points, valid_points_num,
dj_ang_a_, dj_ang_b_, dj_ang_c_, dj_ang_d_,
dj_ang_e_, dj_ang_f_, dj_ang_g_, dj_ang_h_,
dh_ang_a2_, dh_ang_a3_, dh_ang_b2_, dh_ang_b3_, dh_ang_c2_,
dh_ang_c3_, dh_ang_d1_, dh_ang_d2_, dh_ang_d3_, dh_ang_e1_,
dh_ang_e2_, dh_ang_e3_, dh_ang_f1_, dh_ang_f2_, dh_ang_f3_,
points_gradient, points_hessian, compute_hessian);
hipLaunchKernelGGL(( computeDerivative), dim3(grid_x), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, points_num,
valid_points, voxel_id, valid_points_num,
voxel_list, gauss_d1_, gauss_d2_,
points_gradient, points_hessian,
gradients_list, hessians_list,
score, compute_hessian);
checkCudaErrors(hipDeviceSynchronize());
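//tree-reduce the per-point score gradients, Hessians and scores down to element 0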
int full_size = valid_points_num;
int half_size = (full_size - 1) / 2 + 1;
while (full_size > 1) {
block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size;
grid_x = (half_size - 1) / block_x + 1;
hipLaunchKernelGGL(( matrixSum), dim3(grid_x), dim3(block_x), 0, 0, gradients_list, full_size, half_size);
hipLaunchKernelGGL(( matrixSum), dim3(grid_x), dim3(block_x), 0, 0, hessians_list, full_size, half_size);
hipLaunchKernelGGL(( sumScore), dim3(grid_x), dim3(block_x), 0, 0, score, full_size, half_size);
full_size = half_size;
half_size = (full_size - 1) / 2 + 1;
}
checkCudaErrors(hipDeviceSynchronize());
MatrixDevice dscore_g(6, 1), dhessian(6, 6);
MatrixHost hscore_g(6, 1), hhessian(6, 6);
checkCudaErrors(hipMemcpy(&dscore_g, gradients_list, sizeof(MatrixDevice), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&dhessian, hessians_list, sizeof(MatrixDevice), hipMemcpyDeviceToHost));
hscore_g.moveToHost(dscore_g);
hhessian.moveToHost(dhessian);
for (int i = 0; i < 6; i++) {
score_gradient(i, 0) = hscore_g(i, 0);
}
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 6; j++) {
hessian(i, j) = hhessian(i, j);
}
}
double score_inc;
checkCudaErrors(hipMemcpy(&score_inc, score, sizeof(double), hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(gradients_list));
checkCudaErrors(hipFree(hessians_list));
checkCudaErrors(hipFree(points_gradient));
checkCudaErrors(hipFree(points_hessian));
checkCudaErrors(hipFree(gradient_buff));
checkCudaErrors(hipFree(hessian_buff));
checkCudaErrors(hipFree(points_hessian_buff));
checkCudaErrors(hipFree(points_gradient_buff));
checkCudaErrors(hipFree(score));
return score_inc;
}
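//precomputes the angle-derivative vectors consumed by computePointDerivatives; angles near zero use the small-angle values cos=1, sin=0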
void GNormalDistributionsTransform::computeAngleDerivatives(MatrixHost pose, bool compute_hessian)
{
double cx, cy, cz, sx, sy, sz;
if (fabs(pose(3)) < 10e-5) {
cx = 1.0;
sx = 0.0;
} else {
cx = cos(pose(3));
sx = sin(pose(3));
}
if (fabs(pose(4)) < 10e-5) {
cy = 1.0;
sy = 0.0;
} else {
cy = cos(pose(4));
sy = sin(pose(4));
}
if (fabs(pose(5)) < 10e-5) {
cz = 1.0;
sz = 0.0;
} else {
cz = cos(pose(5));
sz = sin(pose(5));
}
j_ang_a_ = MatrixHost(3, 1);
j_ang_b_ = MatrixHost(3, 1);
j_ang_c_ = MatrixHost(3, 1);
j_ang_d_ = MatrixHost(3, 1);
j_ang_e_ = MatrixHost(3, 1);
j_ang_f_ = MatrixHost(3, 1);
j_ang_g_ = MatrixHost(3, 1);
j_ang_h_ = MatrixHost(3, 1);
j_ang_a_(0) = -sx * sz + cx * sy * cz;
j_ang_a_(1) = -sx * cz - cx * sy * sz;
j_ang_a_(2) = -cx * cy;
j_ang_b_(0) = cx * sz + sx * sy * cz;
j_ang_b_(1) = cx * cz - sx * sy * sz;
j_ang_b_(2) = -sx * cy;
j_ang_c_(0) = -sy * cz;
j_ang_c_(1) = sy * sz;
j_ang_c_(2) = cy;
j_ang_d_(0) = sx * cy * cz;
j_ang_d_(1) = -sx * cy * sz;
j_ang_d_(2) = sx * sy;
j_ang_e_(0) = -cx * cy * cz;
j_ang_e_(1) = cx * cy * sz;
j_ang_e_(2) = -cx * sy;
j_ang_f_(0) = -cy * sz;
j_ang_f_(1) = -cy * cz;
j_ang_f_(2) = 0;
j_ang_g_(0) = cx * cz - sx * sy * sz;
j_ang_g_(1) = -cx * sz - sx * sy * cz;
j_ang_g_(2) = 0;
j_ang_h_(0) = sx * cz + cx * sy * sz;
j_ang_h_(1) = cx * sy * cz - sx * sz;
j_ang_h_(2) = 0;
dj_ang_a_ = MatrixDevice(3, 1);
dj_ang_b_ = MatrixDevice(3, 1);
dj_ang_c_ = MatrixDevice(3, 1);
dj_ang_d_ = MatrixDevice(3, 1);
dj_ang_e_ = MatrixDevice(3, 1);
dj_ang_f_ = MatrixDevice(3, 1);
dj_ang_g_ = MatrixDevice(3, 1);
dj_ang_h_ = MatrixDevice(3, 1);
j_ang_a_.moveToGpu(dj_ang_a_);
j_ang_b_.moveToGpu(dj_ang_b_);
j_ang_c_.moveToGpu(dj_ang_c_);
j_ang_d_.moveToGpu(dj_ang_d_);
j_ang_e_.moveToGpu(dj_ang_e_);
j_ang_f_.moveToGpu(dj_ang_f_);
j_ang_g_.moveToGpu(dj_ang_g_);
j_ang_h_.moveToGpu(dj_ang_h_);
if (compute_hessian) {
h_ang_a2_ = MatrixHost(3, 1);
h_ang_a3_ = MatrixHost(3, 1);
h_ang_b2_ = MatrixHost(3, 1);
h_ang_b3_ = MatrixHost(3, 1);
h_ang_c2_ = MatrixHost(3, 1);
h_ang_c3_ = MatrixHost(3, 1);
h_ang_d1_ = MatrixHost(3, 1);
h_ang_d2_ = MatrixHost(3, 1);
h_ang_d3_ = MatrixHost(3, 1);
h_ang_e1_ = MatrixHost(3, 1);
h_ang_e2_ = MatrixHost(3, 1);
h_ang_e3_ = MatrixHost(3, 1);
h_ang_f1_ = MatrixHost(3, 1);
h_ang_f2_ = MatrixHost(3, 1);
h_ang_f3_ = MatrixHost(3, 1);
h_ang_a2_(0) = -cx * sz - sx * sy * cz;
h_ang_a2_(1) = -cx * cz + sx * sy * sz;
h_ang_a2_(2) = sx * cy;
h_ang_a3_(0) = -sx * sz + cx * sy * cz;
h_ang_a3_(1) = -cx * sy * sz - sx * cz;
h_ang_a3_(2) = -cx * cy;
h_ang_b2_(0) = cx * cy * cz;
h_ang_b2_(1) = -cx * cy * sz;
h_ang_b2_(2) = cx * sy;
h_ang_b3_(0) = sx * cy * cz;
h_ang_b3_(1) = -sx * cy * sz;
h_ang_b3_(2) = sx * sy;
h_ang_c2_(0) = -sx * cz - cx * sy * sz;
h_ang_c2_(1) = sx * sz - cx * sy * cz;
h_ang_c2_(2) = 0;
h_ang_c3_(0) = cx * cz - sx * sy * sz;
h_ang_c3_(1) = -sx * sy * cz - cx * sz;
h_ang_c3_(2) = 0;
h_ang_d1_(0) = -cy * cz;
h_ang_d1_(1) = cy * sz;
h_ang_d1_(2) = sy;
h_ang_d2_(0) = -sx * sy * cz;
h_ang_d2_(1) = sx * sy * sz;
h_ang_d2_(2) = sx * cy;
h_ang_d3_(0) = cx * sy * cz;
h_ang_d3_(1) = -cx * sy * sz;
h_ang_d3_(2) = -cx * cy;
h_ang_e1_(0) = sy * sz;
h_ang_e1_(1) = sy * cz;
h_ang_e1_(2) = 0;
h_ang_e2_(0) = -sx * cy * sz;
h_ang_e2_(1) = -sx * cy * cz;
h_ang_e2_(2) = 0;
h_ang_e3_(0) = cx * cy * sz;
h_ang_e3_(1) = cx * cy * cz;
h_ang_e3_(2) = 0;
h_ang_f1_(0) = -cy * cz;
h_ang_f1_(1) = cy * sz;
h_ang_f1_(2) = 0;
h_ang_f2_(0) = -cx * sz - sx * sy * cz;
h_ang_f2_(1) = -cx * cz + sx * sy * sz;
h_ang_f2_(2) = 0;
h_ang_f3_(0) = -sx * sz + cx * sy * cz;
h_ang_f3_(1) = -cx * sy * sz - sx * cz;
h_ang_f3_(2) = 0;
dh_ang_a2_ = MatrixDevice(3, 1);
dh_ang_a3_ = MatrixDevice(3, 1);
dh_ang_b2_ = MatrixDevice(3, 1);
dh_ang_b3_ = MatrixDevice(3, 1);
dh_ang_c2_ = MatrixDevice(3, 1);
dh_ang_c3_ = MatrixDevice(3, 1);
dh_ang_d1_ = MatrixDevice(3, 1);
dh_ang_d2_ = MatrixDevice(3, 1);
dh_ang_d3_ = MatrixDevice(3, 1);
dh_ang_e1_ = MatrixDevice(3, 1);
dh_ang_e2_ = MatrixDevice(3, 1);
dh_ang_e3_ = MatrixDevice(3, 1);
dh_ang_f1_ = MatrixDevice(3, 1);
dh_ang_f2_ = MatrixDevice(3, 1);
dh_ang_f3_ = MatrixDevice(3, 1);
h_ang_a2_.moveToGpu(dh_ang_a2_);
h_ang_a3_.moveToGpu(dh_ang_a3_);
h_ang_b2_.moveToGpu(dh_ang_b2_);
h_ang_b3_.moveToGpu(dh_ang_b3_);
h_ang_c2_.moveToGpu(dh_ang_c2_);
h_ang_c3_.moveToGpu(dh_ang_c3_);
h_ang_d1_.moveToGpu(dh_ang_d1_);
h_ang_d2_.moveToGpu(dh_ang_d2_);
h_ang_d3_.moveToGpu(dh_ang_d3_);
h_ang_e1_.moveToGpu(dh_ang_e1_);
h_ang_e2_.moveToGpu(dh_ang_e2_);
h_ang_e3_.moveToGpu(dh_ang_e3_);
h_ang_f1_.moveToGpu(dh_ang_f1_);
h_ang_f2_.moveToGpu(dh_ang_f2_);
h_ang_f3_.moveToGpu(dh_ang_f3_);
}
}
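//applies the 3x4 rigid transform to every point, one point per thread in a grid-stride loop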
extern "C" __global__ void gpuTransform(float *in_x, float *in_y, float *in_z,
float *trans_x, float *trans_y, float *trans_z,
int point_num, MatrixDevice transform)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
float x, y, z;
for (int i = idx; i < point_num; i += stride) {
x = in_x[i];
y = in_y[i];
z = in_z[i];
trans_x[i] = transform(0, 0) * x + transform(0, 1) * y + transform(0, 2) * z + transform(0, 3);
trans_y[i] = transform(1, 0) * x + transform(1, 1) * y + transform(1, 2) * z + transform(1, 3);
trans_z[i] = transform(2, 0) * x + transform(2, 1) * y + transform(2, 2) * z + transform(2, 3);
}
}
void GNormalDistributionsTransform::transformPointCloud(float *in_x, float *in_y, float *in_z,
float *trans_x, float *trans_y, float *trans_z,
int points_number, Eigen::Matrix<float, 4, 4> transform)
{
Eigen::Transform<float, 3, Eigen::Affine> t(transform);
MatrixHost htrans(3, 4);
MatrixDevice dtrans(3, 4);
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 4; j++) {
htrans(i, j) = t(i, j);
}
}
htrans.moveToGpu(dtrans);
if (points_number > 0) {
int block_x = (points_number <= BLOCK_SIZE_X) ? points_number : BLOCK_SIZE_X;
int grid_x = (points_number - 1) / block_x + 1;
hipLaunchKernelGGL(( gpuTransform), dim3(grid_x), dim3(block_x) , 0, 0, in_x, in_y, in_z, trans_x, trans_y, trans_z, points_number, dtrans);
checkCudaErrors(hipDeviceSynchronize());
}
}
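//More-Thuente line search along step_dir: each trial step length retransforms the source cloud and re-evaluates score and gradient; the Hessian is recomputed once a step is accepted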
double GNormalDistributionsTransform::computeStepLengthMT(const Eigen::Matrix<double, 6, 1> &x, Eigen::Matrix<double, 6, 1> &step_dir,
double step_init, double step_max, double step_min, double &score,
Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian,
float *trans_x, float *trans_y, float *trans_z, int points_num)
{
double phi_0 = -score;
double d_phi_0 = -(score_gradient.dot(step_dir));
Eigen::Matrix<double, 6, 1> x_t;
if (d_phi_0 >= 0) {
if (d_phi_0 == 0)
return 0;
else {
d_phi_0 *= -1;
step_dir *= -1;
}
}
int max_step_iterations = 10;
int step_iterations = 0;
double mu = 1.e-4;
double nu = 0.9;
double a_l = 0, a_u = 0;
double f_l = auxilaryFunction_PsiMT(a_l, phi_0, phi_0, d_phi_0, mu);
double g_l = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu);
double f_u = auxilaryFunction_PsiMT(a_u, phi_0, phi_0, d_phi_0, mu);
double g_u = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu);
bool interval_converged = (step_max - step_min) > 0, open_interval = true;
double a_t = step_init;
a_t = ::min(a_t, step_max);
a_t = ::max(a_t, step_min);
x_t = x + step_dir * a_t;
final_transformation_ = (Eigen::Translation<float, 3>(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2))) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ())).matrix();
transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_num, final_transformation_);
score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t, true);
double phi_t = -score;
double d_phi_t = -(score_gradient.dot(step_dir));
double psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu);
double d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu);
while (!interval_converged && step_iterations < max_step_iterations && !(psi_t <= 0 && d_phi_t <= -nu * d_phi_0)) {
if (open_interval) {
a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t);
} else {
a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t);
}
a_t = (a_t < step_max) ? a_t : step_max;
a_t = (a_t > step_min) ? a_t : step_min;
x_t = x + step_dir * a_t;
final_transformation_ = (Eigen::Translation<float, 3>(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2))) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ())).matrix();
transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_num, final_transformation_);
score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t, false);
phi_t = -score;
d_phi_t = -(score_gradient.dot(step_dir));
psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu);
d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu);
if (open_interval && (psi_t <= 0 && d_psi_t >= 0)) {
open_interval = false;
f_l += phi_0 - mu * d_phi_0 * a_l;
g_l += mu * d_phi_0;
f_u += phi_0 - mu * d_phi_0 * a_u;
g_u += mu * d_phi_0;
}
if (open_interval) {
interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t);
} else {
interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t);
}
step_iterations++;
}
if (step_iterations)
computeHessian(hessian, trans_x, trans_y, trans_z, points_num, x_t);
return a_t;
}
//Copied from ndt.hpp
double GNormalDistributionsTransform::trialValueSelectionMT (double a_l, double f_l, double g_l,
double a_u, double f_u, double g_u,
double a_t, double f_t, double g_t)
{
// Case 1 in Trial Value Selection [More, Thuente 1994]
if (f_t > f_l) {
// Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
double w = std::sqrt (z * z - g_t * g_l);
// Equation 2.4.56 [Sun, Yuan 2006]
double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
// Calculate the minimizer of the quadratic that interpolates f_l, f_t and g_l
// Equation 2.4.2 [Sun, Yuan 2006]
double a_q = a_l - 0.5 * (a_l - a_t) * g_l / (g_l - (f_l - f_t) / (a_l - a_t));
if (std::fabs (a_c - a_l) < std::fabs (a_q - a_l))
return (a_c);
else
return (0.5 * (a_q + a_c));
}
// Case 2 in Trial Value Selection [More, Thuente 1994]
else if (g_t * g_l < 0) {
// Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
double w = std::sqrt (z * z - g_t * g_l);
// Equation 2.4.56 [Sun, Yuan 2006]
double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
// Calculate the minimizer of the quadratic that interpolates f_l, g_l and g_t
// Equation 2.4.5 [Sun, Yuan 2006]
double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l;
if (std::fabs (a_c - a_t) >= std::fabs (a_s - a_t))
return (a_c);
else
return (a_s);
}
// Case 3 in Trial Value Selection [More, Thuente 1994]
else if (std::fabs (g_t) <= std::fabs (g_l)) {
// Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
double w = std::sqrt (z * z - g_t * g_l);
double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
// Calculate the minimizer of the quadratic that interpolates g_l and g_t
// Equation 2.4.5 [Sun, Yuan 2006]
double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l;
double a_t_next;
if (std::fabs (a_c - a_t) < std::fabs (a_s - a_t))
a_t_next = a_c;
else
a_t_next = a_s;
if (a_t > a_l)
return (std::min (a_t + 0.66 * (a_u - a_t), a_t_next));
else
return (std::max (a_t + 0.66 * (a_u - a_t), a_t_next));
}
// Case 4 in Trial Value Selection [More, Thuente 1994]
else {
// Calculate the minimizer of the cubic that interpolates f_u, f_t, g_u and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_u) / (a_t - a_u) - g_t - g_u;
double w = std::sqrt (z * z - g_t * g_u);
// Equation 2.4.56 [Sun, Yuan 2006]
return (a_u + (a_t - a_u) * (w - g_u - z) / (g_t - g_u + 2 * w));
}
}
//Copied from ndt.hpp
double GNormalDistributionsTransform::updateIntervalMT (double &a_l, double &f_l, double &g_l,
double &a_u, double &f_u, double &g_u,
double a_t, double f_t, double g_t)
{
// Case U1 in Update Algorithm and Case a in Modified Update Algorithm [More, Thuente 1994]
if (f_t > f_l) {
a_u = a_t;
f_u = f_t;
g_u = g_t;
return (false);
}
// Case U2 in Update Algorithm and Case b in Modified Update Algorithm [More, Thuente 1994]
else if (g_t * (a_l - a_t) > 0) {
a_l = a_t;
f_l = f_t;
g_l = g_t;
return (false);
}
// Case U3 in Update Algorithm and Case c in Modified Update Algorithm [More, Thuente 1994]
else if (g_t * (a_l - a_t) < 0) {
a_u = a_l;
f_u = f_l;
g_u = g_l;
a_l = a_t;
f_l = f_t;
g_l = g_t;
return (false);
}
// Interval Converged
else
return (true);
}
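//per-point kernel used when only the Hessian is needed: same voxel loop as computeDerivative, but accumulates Hessian terms only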
extern "C" __global__ void updateHessian(float *trans_x, float *trans_y, float *trans_z, int points_num,
int *valid_points, int *voxel_id, int valid_points_num,
GVoxel *grid, float gauss_d1, float gauss_d2,
MatrixDevice *point_gradients, MatrixDevice *point_hessians, MatrixDevice *hessians)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
//Transformed coordinates
float t_x = trans_x[pid];
float t_y = trans_y[pid];
float t_z = trans_z[pid];
MatrixDevice pg = point_gradients[i]; //3x6 Matrix
MatrixDevice ph = point_hessians[i]; //18x6 Matrix
MatrixDevice h = hessians[i]; //6x6 Matrix
double score_inc = 0;
for ( int vid = voxel_id[i]; vid < voxel_id[i + 1]; vid++) {
GVoxel *voxel = grid + vid;
MatrixDevice centroid = voxel->centroid();
MatrixDevice icov = voxel->inverseCovariance(); //3x3 matrix
float cov_dxd_pi_x, cov_dxd_pi_y, cov_dxd_pi_z;
t_x -= centroid(0);
t_y -= centroid(1);
t_z -= centroid(2);
double e_x_cov_x = expf(-gauss_d2 * ((t_x * icov(0, 0) + t_y * icov(1, 0) + t_z * icov(2, 0)) * t_x
+ ((t_x * icov(0, 1) + t_y * icov(1, 1) + t_z * icov(2, 1)) * t_y)
+ ((t_x * icov(0, 2) + t_y * icov(1, 2) + t_z * icov(2, 2)) * t_z)) / 2);
score_inc += -gauss_d1 * e_x_cov_x;
e_x_cov_x *= gauss_d2;
e_x_cov_x *= gauss_d1;
for (int n = 0; n < 6; n++) {
cov_dxd_pi_x = icov(0, 0) * pg(0, n) + icov(0, 1) * pg(1, n) + icov(0, 2) * pg(2, n);
cov_dxd_pi_y = icov(1, 0) * pg(0, n) + icov(1, 1) * pg(1, n) + icov(1, 2) * pg(2, n);
cov_dxd_pi_z = icov(2, 0) * pg(0, n) + icov(2, 1) * pg(1, n) + icov(2, 2) * pg(2, n);
//Compute hessian
for (int p = 0; p < h.cols(); p++) {
h(n, p) += e_x_cov_x * (-gauss_d2 * (t_x * cov_dxd_pi_x + t_y * cov_dxd_pi_y + t_z * cov_dxd_pi_z) *
(t_x * (icov(0, 0) * pg(0, p) + icov(0, 1) * pg(1, p) + icov(0, 2) * pg(2, p))
+ t_y * (icov(1, 0) * pg(0, p) + icov(1, 1) * pg(1, p) + icov(1, 2) * pg(2, p))
+ t_z * (icov(2, 0) * pg(0, p) + icov(2, 1) * pg(1, p) + icov(2, 2) * pg(2, p)))
+ (t_x * (icov(0, 0) * ph(3 * n, p) + icov(0, 1) * ph(3 * n + 1, p) + icov(0, 2) * ph(3 * n + 2, p))
+ t_y * (icov(1, 0) * ph(3 * n, p) + icov(1, 1) * ph(3 * n + 1, p) + icov(1, 2) * ph(3 * n + 2, p))
+ t_z * (icov(2, 0) * ph(3 * n, p) + icov(2, 1) * ph(3 * n + 1, p) + icov(2, 2) * ph(3 * n + 2, p)))
+ (pg(0, p) * cov_dxd_pi_x + pg(1, p) * cov_dxd_pi_y + pg(2, p) * cov_dxd_pi_z));
}
}
}
}
}
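//host-side driver for the Hessian-only update performed at the accepted line-search step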
void GNormalDistributionsTransform::computeHessian (Eigen::Matrix<double, 6, 6> &hessian, float *trans_x, float *trans_y, float *trans_z, int points_num, Eigen::Matrix<double, 6, 1> &p)
{
//Radius Search
voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX);
int *valid_points = voxel_grid_.getValidPoints();
int *voxel_id = voxel_grid_.getVoxelIds();
int search_size = voxel_grid_.getSearchResultSize();
int valid_points_num = voxel_grid_.getValidPointsNum();
GVoxel *voxel_list = voxel_grid_.getVoxelList();
int voxel_num = voxel_grid_.getVoxelNum();
float max_x = voxel_grid_.getMaxX();
float max_y = voxel_grid_.getMaxY();
float max_z = voxel_grid_.getMaxZ();
float min_x = voxel_grid_.getMinX();
float min_y = voxel_grid_.getMinY();
float min_z = voxel_grid_.getMinZ();
float voxel_x = voxel_grid_.getVoxelX();
float voxel_y = voxel_grid_.getVoxelY();
float voxel_z = voxel_grid_.getVoxelZ();
int max_b_x = voxel_grid_.getMaxBX();
int max_b_y = voxel_grid_.getMaxBY();
int max_b_z = voxel_grid_.getMaxBZ();
int min_b_x = voxel_grid_.getMinBX();
int min_b_y = voxel_grid_.getMinBY();
int min_b_z = voxel_grid_.getMinBZ();
//Update score gradient and hessian matrix
MatrixDevice *hessians_list, *points_gradient, *points_hessian;
checkCudaErrors(hipMalloc(&hessians_list, sizeof(MatrixDevice) * valid_points_num));
checkCudaErrors(hipMalloc(&points_gradient, sizeof(MatrixDevice) * valid_points_num));
checkCudaErrors(hipMalloc(&points_hessian, sizeof(MatrixDevice) * valid_points_num));
double *hessian_buff, *points_gradient_buff, *points_hessian_buff;
checkCudaErrors(hipMalloc(&hessian_buff, sizeof(double) * valid_points_num * 6 * 6));
checkCudaErrors(hipMalloc(&points_gradient_buff, sizeof(double) * valid_points_num * 3 * 6));
checkCudaErrors(hipMalloc(&points_hessian_buff, sizeof(double) * valid_points_num * 18 * 6));
int block_x = (valid_points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_points_num;
int grid_x = (valid_points_num - 1) / block_x + 1;
hipLaunchKernelGGL(( matrixListInit), dim3(grid_x), dim3(block_x), 0, 0, hessians_list, hessian_buff, valid_points_num, 6, 6);
hipLaunchKernelGGL(( matrixListInit), dim3(grid_x), dim3(block_x), 0, 0, points_gradient, points_gradient_buff, valid_points_num, 3, 6);
hipLaunchKernelGGL(( matrixListInit), dim3(grid_x), dim3(block_x), 0, 0, points_hessian, points_hessian_buff, valid_points_num, 18, 6);
hipLaunchKernelGGL(( computePointDerivatives), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_,
valid_points, valid_points_num,
dj_ang_a_, dj_ang_b_, dj_ang_c_, dj_ang_d_,
dj_ang_e_, dj_ang_f_, dj_ang_g_, dj_ang_h_,
dh_ang_a2_, dh_ang_a3_, dh_ang_b2_, dh_ang_b3_, dh_ang_c2_,
dh_ang_c3_, dh_ang_d1_, dh_ang_d2_, dh_ang_d3_, dh_ang_e1_,
dh_ang_e2_, dh_ang_e3_, dh_ang_f1_, dh_ang_f2_, dh_ang_f3_,
points_gradient, points_hessian, true);
hipLaunchKernelGGL(( updateHessian), dim3(grid_x), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, points_num,
valid_points, voxel_id, valid_points_num,
voxel_list, gauss_d1_, gauss_d2_,
points_gradient, points_hessian, hessians_list);
checkCudaErrors(hipDeviceSynchronize());
int full_size = valid_points_num;
int half_size = (full_size - 1) / 2 + 1;
while (full_size > 1) {
block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size;
grid_x = (half_size - 1) / block_x + 1;
hipLaunchKernelGGL(( matrixSum), dim3(grid_x), dim3(block_x), 0, 0, hessians_list, full_size, half_size);
full_size = half_size;
half_size = (full_size - 1) / 2 + 1;
}
checkCudaErrors(hipDeviceSynchronize());
MatrixDevice dhessian(6, 6);
MatrixHost hhessian(6, 6);
checkCudaErrors(hipMemcpy(&dhessian, hessians_list, sizeof(MatrixDevice), hipMemcpyDeviceToHost));
hhessian.moveToHost(dhessian);
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 6; j++) {
hessian(i, j) = hhessian(i, j);
}
}
checkCudaErrors(hipFree(hessians_list));
checkCudaErrors(hipFree(points_gradient));
checkCudaErrors(hipFree(points_hessian));
checkCudaErrors(hipFree(hessian_buff));
checkCudaErrors(hipFree(points_hessian_buff));
checkCudaErrors(hipFree(points_gradient_buff));
}
}
| 3329f852afe51b2a4cace70be8e2ff5cf7f22132.cu | #include "NormalDistributionsTransform.h"
#include "debug.h"
#include <cmath>
namespace gpu {
void GNormalDistributionsTransform::setInputTarget(float *target_x, float *target_y, float *target_z, int points_number)
{
GRegistration::setInputTarget(target_x, target_y, target_z, points_number);
if (points_number != 0)
voxel_grid_.setInput(target_x_, target_y_, target_z_, target_points_number_);
}
void GNormalDistributionsTransform::computeTransformation(Eigen::Matrix<float, 4, 4> &guess)
{
nr_iterations_ = 0;
converged_ = false;
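//precompute the constants of the exponential score approximation: gauss_c1/gauss_c2 weight the normal and uniform (outlier_ratio_) components, and gauss_d1_/gauss_d2_ are reused by the derivative kernels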
double gauss_c1, gauss_c2, gauss_d3;
gauss_c1 = 10 * ( 1 - outlier_ratio_);
gauss_c2 = outlier_ratio_ / pow(resolution_, 3);
gauss_d3 = - log(gauss_c2);
gauss_d1_ = -log(gauss_c1 + gauss_c2) - gauss_d3;
gauss_d2_ = -2 * log((-log(gauss_c1 * exp(-0.5) + gauss_c2) - gauss_d3) / gauss_d1_);
if (guess != Eigen::Matrix4f::Identity()) {
final_transformation_ = guess;
transformPointCloud(x_, y_, z_, trans_x_, trans_y_, trans_z_, points_number_, guess);
}
Eigen::Transform<float, 3, Eigen::Affine, Eigen::ColMajor> eig_transformation;
eig_transformation.matrix() = final_transformation_;
Eigen::Matrix<double, 6, 1> p, delta_p, score_gradient;
Eigen::Vector3f init_translation = eig_transformation.translation();
Eigen::Vector3f init_rotation = eig_transformation.rotation().eulerAngles(0, 1, 2);
p << init_translation(0), init_translation(1), init_translation(2), init_rotation(0), init_rotation(1), init_rotation(2);
Eigen::Matrix<double, 6, 6> hessian;
double score = 0;
double delta_p_norm;
score = computeDerivatives(score_gradient, hessian, trans_x_, trans_y_, trans_z_, points_number_, p);
while (!converged_) {
previous_transformation_ = transformation_;
Eigen::JacobiSVD<Eigen::Matrix<double, 6, 6>> sv(hessian, Eigen::ComputeFullU | Eigen::ComputeFullV);
delta_p = sv.solve(-score_gradient);
delta_p_norm = delta_p.norm();
if (delta_p_norm == 0 || delta_p_norm != delta_p_norm) {
trans_probability_ = score / static_cast<double>(points_number_);
converged_ = delta_p_norm == delta_p_norm;
return;
}
delta_p.normalize();
delta_p_norm = computeStepLengthMT(p, delta_p, delta_p_norm, step_size_, transformation_epsilon_ / 2, score, score_gradient, hessian, trans_x_, trans_y_, trans_z_, points_number_);
delta_p *= delta_p_norm;
transformation_ = (Eigen::Translation<float, 3>(static_cast<float>(delta_p(0)), static_cast<float>(delta_p(1)), static_cast<float>(delta_p(2))) *
Eigen::AngleAxis<float>(static_cast<float>(delta_p(3)), Eigen::Vector3f::UnitX()) *
Eigen::AngleAxis<float>(static_cast<float>(delta_p(4)), Eigen::Vector3f::UnitY()) *
Eigen::AngleAxis<float>(static_cast<float>(delta_p(5)), Eigen::Vector3f::UnitZ())).matrix();
p = p + delta_p;
//No visualizer update here
if (nr_iterations_ > max_iterations_ || (nr_iterations_ && (std::fabs(delta_p_norm) < transformation_epsilon_)))
converged_ = true;
nr_iterations_++;
}
trans_probability_ = score / static_cast<double>(points_number_);
}
extern "C" __global__ void matrixListInit(MatrixDevice *matrix, double *matrix_buff, int matrix_num, int rows, int cols)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < matrix_num; i += stride) {
matrix[i].setRows(rows);
matrix[i].setCols(cols);
matrix[i].setOffset(matrix_num);
matrix[i].setBuffer(matrix_buff + i);
for (int j = 0; j < rows; j++) {
for (int k = 0; k < cols; k++) {
matrix[i](j, k) = 0;
}
}
}
}
extern "C" __global__ void computePointDerivatives(float *x, float *y, float *z, int points_num,
int *valid_points, int valid_points_num,
MatrixDevice j_ang_a, MatrixDevice j_ang_b, MatrixDevice j_ang_c, MatrixDevice j_ang_d,
MatrixDevice j_ang_e, MatrixDevice j_ang_f, MatrixDevice j_ang_g, MatrixDevice j_ang_h,
MatrixDevice h_ang_a2, MatrixDevice h_ang_a3, MatrixDevice h_ang_b2, MatrixDevice h_ang_b3, MatrixDevice h_ang_c2,
MatrixDevice h_ang_c3, MatrixDevice h_ang_d1, MatrixDevice h_ang_d2, MatrixDevice h_ang_d3, MatrixDevice h_ang_e1,
MatrixDevice h_ang_e2, MatrixDevice h_ang_e3, MatrixDevice h_ang_f1, MatrixDevice h_ang_f2, MatrixDevice h_ang_f3,
MatrixDevice *point_gradients, MatrixDevice *point_hessians, bool compute_hessian)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
//Original coordinates
float o_x = x[pid];
float o_y = y[pid];
float o_z = z[pid];
MatrixDevice pg = point_gradients[i]; //3x6 Matrix
//Compute point derivatives
pg(1, 3) = o_x * j_ang_a(0) + o_y * j_ang_a(1) + o_z * j_ang_a(2);
pg(2, 3) = o_x * j_ang_b(0) + o_y * j_ang_b(1) + o_z * j_ang_b(2);
pg(0, 4) = o_x * j_ang_c(0) + o_y * j_ang_c(1) + o_z * j_ang_c(2);
pg(1, 4) = o_x * j_ang_d(0) + o_y * j_ang_d(1) + o_z * j_ang_d(2);
pg(2, 4) = o_x * j_ang_e(0) + o_y * j_ang_e(1) + o_z * j_ang_e(2);
pg(0, 5) = o_x * j_ang_f(0) + o_y * j_ang_f(1) + o_z * j_ang_f(2);
pg(1, 5) = o_x * j_ang_g(0) + o_y * j_ang_g(1) + o_z * j_ang_g(2);
pg(2, 5) = o_x * j_ang_h(0) + o_y * j_ang_h(1) + o_z * j_ang_h(2);
if (compute_hessian) {
MatrixDevice ph = point_hessians[i]; //18x6 Matrix
ph(9, 3) = 0;
ph(10, 3) = o_x * h_ang_a2(0) + o_y * h_ang_a2(1) + o_z * h_ang_a2(2);
ph(11, 3) = o_x * h_ang_a3(0) + o_y * h_ang_a3(1) + o_z * h_ang_a3(2);
ph(12, 3) = ph(9, 4) = 0;
ph(13, 3) = ph(10, 4) = o_x * h_ang_b2(0) + o_y * h_ang_b2(1) + o_z * h_ang_b2(2);
ph(14, 3) = ph(11, 4) = o_x * h_ang_b3(0) + o_y * h_ang_b3(1) + o_z * h_ang_b3(2);
ph(15, 3) = 0;
ph(16, 3) = ph(9, 5) = o_x * h_ang_c2(0) + o_y * h_ang_c2(1) + o_z * h_ang_c2(2);
ph(17, 3) = ph(10, 5) = o_x * h_ang_c3(0) + o_y * h_ang_c3(1) + o_z * h_ang_c3(2);
ph(12, 4) = o_x * h_ang_d1(0) + o_y * h_ang_d1(1) + o_z * h_ang_d1(2);
ph(13, 4) = o_x * h_ang_d2(0) + o_y * h_ang_d2(1) + o_z * h_ang_d2(2);
ph(14, 4) = o_x * h_ang_d3(0) + o_y * h_ang_d3(1) + o_z * h_ang_d3(2);
ph(15, 4) = ph(12, 5) = o_x * h_ang_e1(0) + o_y * h_ang_e1(1) + o_z * h_ang_e1(2);
ph(16, 4) = ph(13, 5) = o_x * h_ang_e2(0) + o_y * h_ang_e2(1) + o_z * h_ang_e2(2);
ph(17, 4) = ph(14, 5) = o_x * h_ang_e3(0) + o_y * h_ang_e3(1) + o_z * h_ang_e3(2);
ph(15, 5) = o_x * h_ang_f1(0) + o_y * h_ang_f1(1) + o_z * h_ang_f1(2);
ph(16, 5) = o_x * h_ang_f2(0) + o_y * h_ang_f2(1) + o_z * h_ang_f2(2);
ph(17, 5) = o_x * h_ang_f3(0) + o_y * h_ang_f3(1) + o_z * h_ang_f3(2);
}
}
}
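//per-point kernel: walks the voxels returned by the radius search and accumulates the score, score gradient and (optionally) Hessian contribution of each valid point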
extern "C" __global__ void computeDerivative(float *trans_x, float *trans_y, float *trans_z, int points_num,
int *valid_points, int *voxel_id, int valid_points_num,
GVoxel *grid, double gauss_d1, double gauss_d2,
MatrixDevice *point_gradients, MatrixDevice *point_hessians,
MatrixDevice *score_gradients, MatrixDevice *hessians,
double *score, bool compute_hessian)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
//Transformed coordinates
float t_x = trans_x[pid];
float t_y = trans_y[pid];
float t_z = trans_z[pid];
MatrixDevice pg = point_gradients[i]; //3x6 Matrix
MatrixDevice sg = score_gradients[i]; //6x1 Matrix
double score_inc = 0;
for ( int vid = voxel_id[i]; vid < voxel_id[i + 1]; vid++) {
GVoxel *voxel = grid + vid;
MatrixDevice centroid = voxel->centroid();
MatrixDevice icov = voxel->inverseCovariance(); //3x3 matrix
double cov_dxd_pi_x, cov_dxd_pi_y, cov_dxd_pi_z;
t_x -= centroid(0);
t_y -= centroid(1);
t_z -= centroid(2);
double e_x_cov_x = expf(-gauss_d2 * ((t_x * icov(0, 0) + t_y * icov(1, 0) + t_z * icov(2, 0)) * t_x
+ ((t_x * icov(0, 1) + t_y * icov(1, 1) + t_z * icov(2, 1)) * t_y)
+ ((t_x * icov(0, 2) + t_y * icov(1, 2) + t_z * icov(2, 2)) * t_z)) / 2);
score_inc += -gauss_d1 * e_x_cov_x;
e_x_cov_x *= gauss_d2;
e_x_cov_x *= gauss_d1;
for (int n = 0; n < 6; n++) {
cov_dxd_pi_x = icov(0, 0) * pg(0, n) + icov(0, 1) * pg(1, n) + icov(0, 2) * pg(2, n);
cov_dxd_pi_y = icov(1, 0) * pg(0, n) + icov(1, 1) * pg(1, n) + icov(1, 2) * pg(2, n);
cov_dxd_pi_z = icov(2, 0) * pg(0, n) + icov(2, 1) * pg(1, n) + icov(2, 2) * pg(2, n);
sg(n) += (t_x * cov_dxd_pi_x + t_y * cov_dxd_pi_y + t_z * cov_dxd_pi_z) * e_x_cov_x;
//Compute hessian
if (compute_hessian) {
MatrixDevice ph = point_hessians[i]; //18x6 Matrix
MatrixDevice h = hessians[i]; //6x6 Matrix
for (int p = 0; p < h.cols(); p++) {
h(n, p) += e_x_cov_x * (-gauss_d2 * (t_x * cov_dxd_pi_x + t_y * cov_dxd_pi_y + t_z * cov_dxd_pi_z) *
(t_x * (icov(0, 0) * pg(0, p) + icov(0, 1) * pg(1, p) + icov(0, 2) * pg(2, p))
+ t_y * (icov(1, 0) * pg(0, p) + icov(1, 1) * pg(1, p) + icov(1, 2) * pg(2, p))
+ t_z * (icov(2, 0) * pg(0, p) + icov(2, 1) * pg(1, p) + icov(2, 2) * pg(2, p)))
+ (t_x * (icov(0, 0) * ph(3 * n, p) + icov(0, 1) * ph(3 * n + 1, p) + icov(0, 2) * ph(3 * n + 2, p))
+ t_y * (icov(1, 0) * ph(3 * n, p) + icov(1, 1) * ph(3 * n + 1, p) + icov(1, 2) * ph(3 * n + 2, p))
+ t_z * (icov(2, 0) * ph(3 * n, p) + icov(2, 1) * ph(3 * n + 1, p) + icov(2, 2) * ph(3 * n + 2, p)))
+ (pg(0, p) * cov_dxd_pi_x + pg(1, p) * cov_dxd_pi_y + pg(2, p) * cov_dxd_pi_z));
}
}
}
}
score[i] = score_inc;
}
}
/* Compute sum of a list of matrices */
extern "C" __global__ void matrixSum(MatrixDevice *matrix_list, int full_size, int half_size)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < half_size; i += stride) {
MatrixDevice left = matrix_list[i];
MatrixDevice right = (i + half_size < full_size) ? matrix_list[i + half_size] : MatrixDevice();
if (!right.isEmpty()) {
for (int j = 0; j < left.rows(); j++) {
for (int k = 0; k < left.cols(); k++) {
left(j, k) += right(j, k);
}
}
}
}
}
extern "C" __global__ void sumScore(double *score, int full_size, int half_size)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < half_size; i += stride) {
score[i] += (i + half_size < full_size) ? score[i + half_size] : 0;
}
}
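//host-side driver: runs the voxel radius search, launches the per-point derivative kernels, tree-reduces the per-point results and copies the summed score, gradient and Hessian back to the host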
double GNormalDistributionsTransform::computeDerivatives(Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian,
float *trans_x, float *trans_y, float *trans_z,
int points_num, Eigen::Matrix<double, 6, 1> pose, bool compute_hessian)
{
MatrixHost p(6, 1);
for (int i = 0; i < 6; i++) {
p(i) = pose(i, 0);
}
//Compute Angle Derivatives
computeAngleDerivatives(p, compute_hessian);
//Radius Search
voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX);
int *valid_points = voxel_grid_.getValidPoints();
int *voxel_id = voxel_grid_.getVoxelIds();
int search_size = voxel_grid_.getSearchResultSize();
int valid_points_num = voxel_grid_.getValidPointsNum();
GVoxel *voxel_list = voxel_grid_.getVoxelList();
int voxel_num = voxel_grid_.getVoxelNum();
float max_x = voxel_grid_.getMaxX();
float max_y = voxel_grid_.getMaxY();
float max_z = voxel_grid_.getMaxZ();
float min_x = voxel_grid_.getMinX();
float min_y = voxel_grid_.getMinY();
float min_z = voxel_grid_.getMinZ();
float voxel_x = voxel_grid_.getVoxelX();
float voxel_y = voxel_grid_.getVoxelY();
float voxel_z = voxel_grid_.getVoxelZ();
int max_b_x = voxel_grid_.getMaxBX();
int max_b_y = voxel_grid_.getMaxBY();
int max_b_z = voxel_grid_.getMaxBZ();
int min_b_x = voxel_grid_.getMinBX();
int min_b_y = voxel_grid_.getMinBY();
int min_b_z = voxel_grid_.getMinBZ();
//Update score gradient and hessian matrix
MatrixDevice *gradients_list, *hessians_list, *points_gradient, *points_hessian;
checkCudaErrors(cudaMalloc(&gradients_list, sizeof(MatrixDevice) * valid_points_num));
checkCudaErrors(cudaMalloc(&hessians_list, sizeof(MatrixDevice) * valid_points_num));
checkCudaErrors(cudaMalloc(&points_gradient, sizeof(MatrixDevice) * valid_points_num));
checkCudaErrors(cudaMalloc(&points_hessian, sizeof(MatrixDevice) * valid_points_num));
double *gradient_buff, *hessian_buff, *points_gradient_buff, *points_hessian_buff, *score;
checkCudaErrors(cudaMalloc(&gradient_buff, sizeof(double) * valid_points_num * 6));
checkCudaErrors(cudaMalloc(&hessian_buff, sizeof(double) * valid_points_num * 6 * 6));
checkCudaErrors(cudaMalloc(&points_gradient_buff, sizeof(double) * valid_points_num * 3 * 6));
checkCudaErrors(cudaMalloc(&points_hessian_buff, sizeof(double) * valid_points_num * 18 * 6));
checkCudaErrors(cudaMalloc(&score, sizeof(double) * valid_points_num));
int block_x = (valid_points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_points_num;
int grid_x = (valid_points_num - 1) / block_x + 1;
matrixListInit<<<grid_x, block_x>>>(gradients_list, gradient_buff, valid_points_num, 1, 6);
matrixListInit<<<grid_x, block_x>>>(hessians_list, hessian_buff, valid_points_num, 6, 6);
matrixListInit<<<grid_x, block_x>>>(points_gradient, points_gradient_buff, valid_points_num, 3, 6);
matrixListInit<<<grid_x, block_x>>>(points_hessian, points_hessian_buff, valid_points_num, 18, 6);
computePointDerivatives<<<grid_x, block_x>>>(x_, y_, z_, points_number_,
valid_points, valid_points_num,
dj_ang_a_, dj_ang_b_, dj_ang_c_, dj_ang_d_,
dj_ang_e_, dj_ang_f_, dj_ang_g_, dj_ang_h_,
dh_ang_a2_, dh_ang_a3_, dh_ang_b2_, dh_ang_b3_, dh_ang_c2_,
dh_ang_c3_, dh_ang_d1_, dh_ang_d2_, dh_ang_d3_, dh_ang_e1_,
dh_ang_e2_, dh_ang_e3_, dh_ang_f1_, dh_ang_f2_, dh_ang_f3_,
points_gradient, points_hessian, compute_hessian);
computeDerivative<<<grid_x, block_x>>>(trans_x, trans_y, trans_z, points_num,
valid_points, voxel_id, valid_points_num,
voxel_list, gauss_d1_, gauss_d2_,
points_gradient, points_hessian,
gradients_list, hessians_list,
score, compute_hessian);
checkCudaErrors(cudaDeviceSynchronize());
int full_size = valid_points_num;
int half_size = (full_size - 1) / 2 + 1;
while (full_size > 1) {
block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size;
grid_x = (half_size - 1) / block_x + 1;
matrixSum<<<grid_x, block_x>>>(gradients_list, full_size, half_size);
matrixSum<<<grid_x, block_x>>>(hessians_list, full_size, half_size);
sumScore<<<grid_x, block_x>>>(score, full_size, half_size);
full_size = half_size;
half_size = (full_size - 1) / 2 + 1;
}
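// Pairwise tree reduction over the per-point contributions; afterwards element 0 of
// gradients_list, hessians_list and score is expected to hold the totals over all valid points.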
checkCudaErrors(cudaDeviceSynchronize());
MatrixDevice dscore_g(6, 1), dhessian(6, 6);
MatrixHost hscore_g(6, 1), hhessian(6, 6);
checkCudaErrors(cudaMemcpy(&dscore_g, gradients_list, sizeof(MatrixDevice), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&dhessian, hessians_list, sizeof(MatrixDevice), cudaMemcpyDeviceToHost));
hscore_g.moveToHost(dscore_g);
hhessian.moveToHost(dhessian);
for (int i = 0; i < 6; i++) {
score_gradient(i, 0) = hscore_g(i, 0);
}
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 6; j++) {
hessian(i, j) = hhessian(i, j);
}
}
double score_inc;
checkCudaErrors(cudaMemcpy(&score_inc, score, sizeof(double), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(gradients_list));
checkCudaErrors(cudaFree(hessians_list));
checkCudaErrors(cudaFree(points_gradient));
checkCudaErrors(cudaFree(points_hessian));
checkCudaErrors(cudaFree(gradient_buff));
checkCudaErrors(cudaFree(hessian_buff));
checkCudaErrors(cudaFree(points_hessian_buff));
checkCudaErrors(cudaFree(points_gradient_buff));
checkCudaErrors(cudaFree(score));
return score_inc;
}
void GNormalDistributionsTransform::computeAngleDerivatives(MatrixHost pose, bool compute_hessian)
{
double cx, cy, cz, sx, sy, sz;
if (fabs(pose(3)) < 10e-5) {
cx = 1.0;
sx = 0.0;
} else {
cx = cos(pose(3));
sx = sin(pose(3));
}
if (fabs(pose(4)) < 10e-5) {
cy = 1.0;
sy = 0.0;
} else {
cy = cos(pose(4));
sy = sin(pose(4));
}
if (fabs(pose(5)) < 10e-5) {
cz = 1.0;
sz = 0.0;
} else {
cz = cos(pose(5));
sz = sin(pose(5));
}
j_ang_a_ = MatrixHost(3, 1);
j_ang_b_ = MatrixHost(3, 1);
j_ang_c_ = MatrixHost(3, 1);
j_ang_d_ = MatrixHost(3, 1);
j_ang_e_ = MatrixHost(3, 1);
j_ang_f_ = MatrixHost(3, 1);
j_ang_g_ = MatrixHost(3, 1);
j_ang_h_ = MatrixHost(3, 1);
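// Precomputed first-order angular derivative components of a transformed point with respect
// to roll, pitch and yaw (cf. the corresponding precomputed terms in PCL's ndt.hpp).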
j_ang_a_(0) = -sx * sz + cx * sy * cz;
j_ang_a_(1) = -sx * cz - cx * sy * sz;
j_ang_a_(2) = -cx * cy;
j_ang_b_(0) = cx * sz + sx * sy * cz;
j_ang_b_(1) = cx * cz - sx * sy * sz;
j_ang_b_(2) = -sx * cy;
j_ang_c_(0) = -sy * cz;
j_ang_c_(1) = sy * sz;
j_ang_c_(2) = cy;
j_ang_d_(0) = sx * cy * cz;
j_ang_d_(1) = -sx * cy * sz;
j_ang_d_(2) = sx * sy;
j_ang_e_(0) = -cx * cy * cz;
j_ang_e_(1) = cx * cy * sz;
j_ang_e_(2) = -cx * sy;
j_ang_f_(0) = -cy * sz;
j_ang_f_(1) = -cy * cz;
j_ang_f_(2) = 0;
j_ang_g_(0) = cx * cz - sx * sy * sz;
j_ang_g_(1) = -cx * sz - sx * sy * cz;
j_ang_g_(2) = 0;
j_ang_h_(0) = sx * cz + cx * sy * sz;
j_ang_h_(1) = cx * sy * cz - sx * sz;
j_ang_h_(2) = 0;
dj_ang_a_ = MatrixDevice(3, 1);
dj_ang_b_ = MatrixDevice(3, 1);
dj_ang_c_ = MatrixDevice(3, 1);
dj_ang_d_ = MatrixDevice(3, 1);
dj_ang_e_ = MatrixDevice(3, 1);
dj_ang_f_ = MatrixDevice(3, 1);
dj_ang_g_ = MatrixDevice(3, 1);
dj_ang_h_ = MatrixDevice(3, 1);
j_ang_a_.moveToGpu(dj_ang_a_);
j_ang_b_.moveToGpu(dj_ang_b_);
j_ang_c_.moveToGpu(dj_ang_c_);
j_ang_d_.moveToGpu(dj_ang_d_);
j_ang_e_.moveToGpu(dj_ang_e_);
j_ang_f_.moveToGpu(dj_ang_f_);
j_ang_g_.moveToGpu(dj_ang_g_);
j_ang_h_.moveToGpu(dj_ang_h_);
if (compute_hessian) {
h_ang_a2_ = MatrixHost(3, 1);
h_ang_a3_ = MatrixHost(3, 1);
h_ang_b2_ = MatrixHost(3, 1);
h_ang_b3_ = MatrixHost(3, 1);
h_ang_c2_ = MatrixHost(3, 1);
h_ang_c3_ = MatrixHost(3, 1);
h_ang_d1_ = MatrixHost(3, 1);
h_ang_d2_ = MatrixHost(3, 1);
h_ang_d3_ = MatrixHost(3, 1);
h_ang_e1_ = MatrixHost(3, 1);
h_ang_e2_ = MatrixHost(3, 1);
h_ang_e3_ = MatrixHost(3, 1);
h_ang_f1_ = MatrixHost(3, 1);
h_ang_f2_ = MatrixHost(3, 1);
h_ang_f3_ = MatrixHost(3, 1);
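// Second-order angular derivative components, only needed when the Hessian is requested.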
h_ang_a2_(0) = -cx * sz - sx * sy * cz;
h_ang_a2_(1) = -cx * cz + sx * sy * sz;
h_ang_a2_(2) = sx * cy;
h_ang_a3_(0) = -sx * sz + cx * sy * cz;
h_ang_a3_(1) = -cx * sy * sz - sx * cz;
h_ang_a3_(2) = -cx * cy;
h_ang_b2_(0) = cx * cy * cz;
h_ang_b2_(1) = -cx * cy * sz;
h_ang_b2_(2) = cx * sy;
h_ang_b3_(0) = sx * cy * cz;
h_ang_b3_(1) = -sx * cy * sz;
h_ang_b3_(2) = sx * sy;
h_ang_c2_(0) = -sx * cz - cx * sy * sz;
h_ang_c2_(1) = sx * sz - cx * sy * cz;
h_ang_c2_(2) = 0;
h_ang_c3_(0) = cx * cz - sx * sy * sz;
h_ang_c3_(1) = -sx * sy * cz - cx * sz;
h_ang_c3_(2) = 0;
h_ang_d1_(0) = -cy * cz;
h_ang_d1_(1) = cy * sz;
h_ang_d1_(2) = sy;
h_ang_d2_(0) = -sx * sy * cz;
h_ang_d2_(1) = sx * sy * sz;
h_ang_d2_(2) = sx * cy;
h_ang_d3_(0) = cx * sy * cz;
h_ang_d3_(1) = -cx * sy * sz;
h_ang_d3_(2) = -cx * cy;
h_ang_e1_(0) = sy * sz;
h_ang_e1_(1) = sy * cz;
h_ang_e1_(2) = 0;
h_ang_e2_(0) = -sx * cy * sz;
h_ang_e2_(1) = -sx * cy * cz;
h_ang_e2_(2) = 0;
h_ang_e3_(0) = cx * cy * sz;
h_ang_e3_(1) = cx * cy * cz;
h_ang_e3_(2) = 0;
h_ang_f1_(0) = -cy * cz;
h_ang_f1_(1) = cy * sz;
h_ang_f1_(2) = 0;
h_ang_f2_(0) = -cx * sz - sx * sy * cz;
h_ang_f2_(1) = -cx * cz + sx * sy * sz;
h_ang_f2_(2) = 0;
h_ang_f3_(0) = -sx * sz + cx * sy * cz;
h_ang_f3_(1) = -cx * sy * sz - sx * cz;
h_ang_f3_(2) = 0;
dh_ang_a2_ = MatrixDevice(3, 1);
dh_ang_a3_ = MatrixDevice(3, 1);
dh_ang_b2_ = MatrixDevice(3, 1);
dh_ang_b3_ = MatrixDevice(3, 1);
dh_ang_c2_ = MatrixDevice(3, 1);
dh_ang_c3_ = MatrixDevice(3, 1);
dh_ang_d1_ = MatrixDevice(3, 1);
dh_ang_d2_ = MatrixDevice(3, 1);
dh_ang_d3_ = MatrixDevice(3, 1);
dh_ang_e1_ = MatrixDevice(3, 1);
dh_ang_e2_ = MatrixDevice(3, 1);
dh_ang_e3_ = MatrixDevice(3, 1);
dh_ang_f1_ = MatrixDevice(3, 1);
dh_ang_f2_ = MatrixDevice(3, 1);
dh_ang_f3_ = MatrixDevice(3, 1);
h_ang_a2_.moveToGpu(dh_ang_a2_);
h_ang_a3_.moveToGpu(dh_ang_a3_);
h_ang_b2_.moveToGpu(dh_ang_b2_);
h_ang_b3_.moveToGpu(dh_ang_b3_);
h_ang_c2_.moveToGpu(dh_ang_c2_);
h_ang_c3_.moveToGpu(dh_ang_c3_);
h_ang_d1_.moveToGpu(dh_ang_d1_);
h_ang_d2_.moveToGpu(dh_ang_d2_);
h_ang_d3_.moveToGpu(dh_ang_d3_);
h_ang_e1_.moveToGpu(dh_ang_e1_);
h_ang_e2_.moveToGpu(dh_ang_e2_);
h_ang_e3_.moveToGpu(dh_ang_e3_);
h_ang_f1_.moveToGpu(dh_ang_f1_);
h_ang_f2_.moveToGpu(dh_ang_f2_);
h_ang_f3_.moveToGpu(dh_ang_f3_);
}
}
extern "C" __global__ void gpuTransform(float *in_x, float *in_y, float *in_z,
float *trans_x, float *trans_y, float *trans_z,
int point_num, MatrixDevice transform)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
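// Grid-stride loop: each thread applies the 3x4 affine transform to every stride-th point.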
float x, y, z;
for (int i = idx; i < point_num; i += stride) {
x = in_x[i];
y = in_y[i];
z = in_z[i];
trans_x[i] = transform(0, 0) * x + transform(0, 1) * y + transform(0, 2) * z + transform(0, 3);
trans_y[i] = transform(1, 0) * x + transform(1, 1) * y + transform(1, 2) * z + transform(1, 3);
trans_z[i] = transform(2, 0) * x + transform(2, 1) * y + transform(2, 2) * z + transform(2, 3);
}
}
void GNormalDistributionsTransform::transformPointCloud(float *in_x, float *in_y, float *in_z,
float *trans_x, float *trans_y, float *trans_z,
int points_number, Eigen::Matrix<float, 4, 4> transform)
{
Eigen::Transform<float, 3, Eigen::Affine> t(transform);
MatrixHost htrans(3, 4);
MatrixDevice dtrans(3, 4);
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 4; j++) {
htrans(i, j) = t(i, j);
}
}
htrans.moveToGpu(dtrans);
if (points_number > 0) {
int block_x = (points_number <= BLOCK_SIZE_X) ? points_number : BLOCK_SIZE_X;
int grid_x = (points_number - 1) / block_x + 1;
gpuTransform<<<grid_x, block_x >>>(in_x, in_y, in_z, trans_x, trans_y, trans_z, points_number, dtrans);
checkCudaErrors(cudaDeviceSynchronize());
}
}
double GNormalDistributionsTransform::computeStepLengthMT(const Eigen::Matrix<double, 6, 1> &x, Eigen::Matrix<double, 6, 1> &step_dir,
double step_init, double step_max, double step_min, double &score,
Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian,
float *trans_x, float *trans_y, float *trans_z, int points_num)
{
double phi_0 = -score;
double d_phi_0 = -(score_gradient.dot(step_dir));
Eigen::Matrix<double, 6, 1> x_t;
if (d_phi_0 >= 0) {
if (d_phi_0 == 0)
return 0;
else {
d_phi_0 *= -1;
step_dir *= -1;
}
}
int max_step_iterations = 10;
int step_iterations = 0;
double mu = 1.e-4;
double nu = 0.9;
double a_l = 0, a_u = 0;
double f_l = auxilaryFunction_PsiMT(a_l, phi_0, phi_0, d_phi_0, mu);
double g_l = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu);
double f_u = auxilaryFunction_PsiMT(a_u, phi_0, phi_0, d_phi_0, mu);
double g_u = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu);
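// Assuming the usual More-Thuente auxiliary function psi(a) = phi(a) - phi(0) - mu * a * phi'(0),
// f_l/g_l and f_u/g_u hold psi values and derivatives at the bracket endpoints a_l and a_u.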
bool interval_converged = (step_max - step_min) < 0, open_interval = true;
double a_t = step_init;
a_t = std::min(a_t, step_max);
a_t = std::max(a_t, step_min);
x_t = x + step_dir * a_t;
final_transformation_ = (Eigen::Translation<float, 3>(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2))) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ())).matrix();
transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_num, final_transformation_);
score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t, true);
double phi_t = -score;
double d_phi_t = -(score_gradient.dot(step_dir));
double psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu);
double d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu);
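// Iterate until the interval converges or the More-Thuente conditions hold:
// sufficient decrease (psi_t <= 0) and the curvature condition d_phi_t <= -nu * d_phi_0.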
while (!interval_converged && step_iterations < max_step_iterations && !(psi_t <= 0 && d_phi_t <= -nu * d_phi_0)) {
if (open_interval) {
a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t);
} else {
a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t);
}
a_t = (a_t < step_max) ? a_t : step_max;
a_t = (a_t > step_min) ? a_t : step_min;
x_t = x + step_dir * a_t;
final_transformation_ = (Eigen::Translation<float, 3>(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2))) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ())).matrix();
transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_num, final_transformation_);
score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t, false);
phi_t = -score;
d_phi_t = -(score_gradient.dot(step_dir));
psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu);
d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu);
if (open_interval && (psi_t <= 0 && d_psi_t >= 0)) {
open_interval = false;
f_l += phi_0 - mu * d_phi_0 * a_l;
g_l += mu * d_phi_0;
f_u += phi_0 - mu * d_phi_0 * a_u;
g_u += mu * d_phi_0;
}
if (open_interval) {
interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t);
} else {
interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t);
}
step_iterations++;
}
if (step_iterations)
computeHessian(hessian, trans_x, trans_y, trans_z, points_num, x_t);
return a_t;
}
//Copied from ndt.hpp
double GNormalDistributionsTransform::trialValueSelectionMT (double a_l, double f_l, double g_l,
double a_u, double f_u, double g_u,
double a_t, double f_t, double g_t)
{
// Case 1 in Trial Value Selection [More, Thuente 1994]
if (f_t > f_l) {
// Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
double w = std::sqrt (z * z - g_t * g_l);
// Equation 2.4.56 [Sun, Yuan 2006]
double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
// Calculate the minimizer of the quadratic that interpolates f_l, f_t and g_l
// Equation 2.4.2 [Sun, Yuan 2006]
double a_q = a_l - 0.5 * (a_l - a_t) * g_l / (g_l - (f_l - f_t) / (a_l - a_t));
if (std::fabs (a_c - a_l) < std::fabs (a_q - a_l))
return (a_c);
else
return (0.5 * (a_q + a_c));
}
// Case 2 in Trial Value Selection [More, Thuente 1994]
else if (g_t * g_l < 0) {
// Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
double w = std::sqrt (z * z - g_t * g_l);
// Equation 2.4.56 [Sun, Yuan 2006]
double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
// Calculate the minimizer of the quadratic that interpolates f_l, g_l and g_t
// Equation 2.4.5 [Sun, Yuan 2006]
double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l;
if (std::fabs (a_c - a_t) >= std::fabs (a_s - a_t))
return (a_c);
else
return (a_s);
}
// Case 3 in Trial Value Selection [More, Thuente 1994]
else if (std::fabs (g_t) <= std::fabs (g_l)) {
// Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
double w = std::sqrt (z * z - g_t * g_l);
double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
// Calculate the minimizer of the quadratic that interpolates g_l and g_t
// Equation 2.4.5 [Sun, Yuan 2006]
double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l;
double a_t_next;
if (std::fabs (a_c - a_t) < std::fabs (a_s - a_t))
a_t_next = a_c;
else
a_t_next = a_s;
if (a_t > a_l)
return (std::min (a_t + 0.66 * (a_u - a_t), a_t_next));
else
return (std::max (a_t + 0.66 * (a_u - a_t), a_t_next));
}
// Case 4 in Trial Value Selection [More, Thuente 1994]
else {
// Calculate the minimizer of the cubic that interpolates f_u, f_t, g_u and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_u) / (a_t - a_u) - g_t - g_u;
double w = std::sqrt (z * z - g_t * g_u);
// Equation 2.4.56 [Sun, Yuan 2006]
return (a_u + (a_t - a_u) * (w - g_u - z) / (g_t - g_u + 2 * w));
}
}
//Copied from ndt.hpp
double GNormalDistributionsTransform::updateIntervalMT (double &a_l, double &f_l, double &g_l,
double &a_u, double &f_u, double &g_u,
double a_t, double f_t, double g_t)
{
// Case U1 in Update Algorithm and Case a in Modified Update Algorithm [More, Thuente 1994]
if (f_t > f_l) {
a_u = a_t;
f_u = f_t;
g_u = g_t;
return (false);
}
// Case U2 in Update Algorithm and Case b in Modified Update Algorithm [More, Thuente 1994]
else if (g_t * (a_l - a_t) > 0) {
a_l = a_t;
f_l = f_t;
g_l = g_t;
return (false);
}
// Case U3 in Update Algorithm and Case c in Modified Update Algorithm [More, Thuente 1994]
else if (g_t * (a_l - a_t) < 0) {
a_u = a_l;
f_u = f_l;
g_u = g_l;
a_l = a_t;
f_l = f_t;
g_l = g_t;
return (false);
}
// Interval Converged
else
return (true);
}
extern "C" __global__ void updateHessian(float *trans_x, float *trans_y, float *trans_z, int points_num,
int *valid_points, int *voxel_id, int valid_points_num,
GVoxel *grid, float gauss_d1, float gauss_d2,
MatrixDevice *point_gradients, MatrixDevice *point_hessians, MatrixDevice *hessians)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
//Transformed coordinates
float t_x = trans_x[pid];
float t_y = trans_y[pid];
float t_z = trans_z[pid];
MatrixDevice pg = point_gradients[i]; //3x6 Matrix
MatrixDevice ph = point_hessians[i]; //18x6 Matrix
MatrixDevice h = hessians[i]; //6x6 Matrix
double score_inc = 0;
for ( int vid = voxel_id[i]; vid < voxel_id[i + 1]; vid++) {
GVoxel *voxel = grid + vid;
MatrixDevice centroid = voxel->centroid();
MatrixDevice icov = voxel->inverseCovariance(); //3x3 matrix
float cov_dxd_pi_x, cov_dxd_pi_y, cov_dxd_pi_z;
t_x -= centroid(0);
t_y -= centroid(1);
t_z -= centroid(2);
double e_x_cov_x = expf(-gauss_d2 * ((t_x * icov(0, 0) + t_y * icov(1, 0) + t_z * icov(2, 0)) * t_x
+ ((t_x * icov(0, 1) + t_y * icov(1, 1) + t_z * icov(2, 1)) * t_y)
+ ((t_x * icov(0, 2) + t_y * icov(1, 2) + t_z * icov(2, 2)) * t_z)) / 2);
score_inc += -gauss_d1 * e_x_cov_x;
e_x_cov_x *= gauss_d2;
e_x_cov_x *= gauss_d1;
for (int n = 0; n < 6; n++) {
cov_dxd_pi_x = icov(0, 0) * pg(0, n) + icov(0, 1) * pg(1, n) + icov(0, 2) * pg(2, n);
cov_dxd_pi_y = icov(1, 0) * pg(0, n) + icov(1, 1) * pg(1, n) + icov(1, 2) * pg(2, n);
cov_dxd_pi_z = icov(2, 0) * pg(0, n) + icov(2, 1) * pg(1, n) + icov(2, 2) * pg(2, n);
//Compute hessian
for (int p = 0; p < h.cols(); p++) {
h(n, p) += e_x_cov_x * (-gauss_d2 * (t_x * cov_dxd_pi_x + t_y * cov_dxd_pi_y + t_z * cov_dxd_pi_z) *
(t_x * (icov(0, 0) * pg(0, p) + icov(0, 1) * pg(1, p) + icov(0, 2) * pg(2, p))
+ t_y * (icov(1, 0) * pg(0, p) + icov(1, 1) * pg(1, p) + icov(1, 2) * pg(2, p))
+ t_z * (icov(2, 0) * pg(0, p) + icov(2, 1) * pg(1, p) + icov(2, 2) * pg(2, p)))
+ (t_x * (icov(0, 0) * ph(3 * n, p) + icov(0, 1) * ph(3 * n + 1, p) + icov(0, 2) * ph(3 * n + 2, p))
+ t_y * (icov(1, 0) * ph(3 * n, p) + icov(1, 1) * ph(3 * n + 1, p) + icov(1, 2) * ph(3 * n + 2, p))
+ t_z * (icov(2, 0) * ph(3 * n, p) + icov(2, 1) * ph(3 * n + 1, p) + icov(2, 2) * ph(3 * n + 2, p)))
+ (pg(0, p) * cov_dxd_pi_x + pg(1, p) * cov_dxd_pi_y + pg(2, p) * cov_dxd_pi_z));
}
}
}
}
}
void GNormalDistributionsTransform::computeHessian (Eigen::Matrix<double, 6, 6> &hessian, float *trans_x, float *trans_y, float *trans_z, int points_num, Eigen::Matrix<double, 6, 1> &p)
{
//Radius Search
voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX);
int *valid_points = voxel_grid_.getValidPoints();
int *voxel_id = voxel_grid_.getVoxelIds();
int search_size = voxel_grid_.getSearchResultSize();
int valid_points_num = voxel_grid_.getValidPointsNum();
GVoxel *voxel_list = voxel_grid_.getVoxelList();
int voxel_num = voxel_grid_.getVoxelNum();
float max_x = voxel_grid_.getMaxX();
float max_y = voxel_grid_.getMaxY();
float max_z = voxel_grid_.getMaxZ();
float min_x = voxel_grid_.getMinX();
float min_y = voxel_grid_.getMinY();
float min_z = voxel_grid_.getMinZ();
float voxel_x = voxel_grid_.getVoxelX();
float voxel_y = voxel_grid_.getVoxelY();
float voxel_z = voxel_grid_.getVoxelZ();
int max_b_x = voxel_grid_.getMaxBX();
int max_b_y = voxel_grid_.getMaxBY();
int max_b_z = voxel_grid_.getMaxBZ();
int min_b_x = voxel_grid_.getMinBX();
int min_b_y = voxel_grid_.getMinBY();
int min_b_z = voxel_grid_.getMinBZ();
//Update score gradient and hessian matrix
MatrixDevice *hessians_list, *points_gradient, *points_hessian;
checkCudaErrors(cudaMalloc(&hessians_list, sizeof(MatrixDevice) * valid_points_num));
checkCudaErrors(cudaMalloc(&points_gradient, sizeof(MatrixDevice) * valid_points_num));
checkCudaErrors(cudaMalloc(&points_hessian, sizeof(MatrixDevice) * valid_points_num));
double *hessian_buff, *points_gradient_buff, *points_hessian_buff;
checkCudaErrors(cudaMalloc(&hessian_buff, sizeof(double) * valid_points_num * 6 * 6));
checkCudaErrors(cudaMalloc(&points_gradient_buff, sizeof(double) * valid_points_num * 3 * 6));
checkCudaErrors(cudaMalloc(&points_hessian_buff, sizeof(double) * valid_points_num * 18 * 6));
int block_x = (valid_points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_points_num;
int grid_x = (valid_points_num - 1) / block_x + 1;
matrixListInit<<<grid_x, block_x>>>(hessians_list, hessian_buff, valid_points_num, 6, 6);
matrixListInit<<<grid_x, block_x>>>(points_gradient, points_gradient_buff, valid_points_num, 3, 6);
matrixListInit<<<grid_x, block_x>>>(points_hessian, points_hessian_buff, valid_points_num, 18, 6);
computePointDerivatives<<<grid_x, block_x>>>(x_, y_, z_, points_number_,
valid_points, valid_points_num,
dj_ang_a_, dj_ang_b_, dj_ang_c_, dj_ang_d_,
dj_ang_e_, dj_ang_f_, dj_ang_g_, dj_ang_h_,
dh_ang_a2_, dh_ang_a3_, dh_ang_b2_, dh_ang_b3_, dh_ang_c2_,
dh_ang_c3_, dh_ang_d1_, dh_ang_d2_, dh_ang_d3_, dh_ang_e1_,
dh_ang_e2_, dh_ang_e3_, dh_ang_f1_, dh_ang_f2_, dh_ang_f3_,
points_gradient, points_hessian, true);
updateHessian<<<grid_x, block_x>>>(trans_x, trans_y, trans_z, points_num,
valid_points, voxel_id, valid_points_num,
voxel_list, gauss_d1_, gauss_d2_,
points_gradient, points_hessian, hessians_list);
checkCudaErrors(cudaDeviceSynchronize());
int full_size = valid_points_num;
int half_size = (full_size - 1) / 2 + 1;
while (full_size > 1) {
block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size;
grid_x = (half_size - 1) / block_x + 1;
matrixSum<<<grid_x, block_x>>>(hessians_list, full_size, half_size);
full_size = half_size;
half_size = (full_size - 1) / 2 + 1;
}
checkCudaErrors(cudaDeviceSynchronize());
MatrixDevice dhessian(6, 6);
MatrixHost hhessian(6, 6);
checkCudaErrors(cudaMemcpy(&dhessian, hessians_list, sizeof(MatrixDevice), cudaMemcpyDeviceToHost));
hhessian.moveToHost(dhessian);
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 6; j++) {
hessian(i, j) = hhessian(i, j);
}
}
checkCudaErrors(cudaFree(hessians_list));
checkCudaErrors(cudaFree(points_gradient));
checkCudaErrors(cudaFree(points_hessian));
checkCudaErrors(cudaFree(hessian_buff));
checkCudaErrors(cudaFree(points_hessian_buff));
checkCudaErrors(cudaFree(points_gradient_buff));
}
}
|
11fcd0647e106d4c000c37e44ad86830d6165d0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Tobias Ribizel
@generated from sparse/blas/magma_zsampleselect.cu, normal z -> s, Wed Jan 2 14:18:54 2019
*/
#include "magma_sampleselect.h"
#include <cstdint>
#define PRECISION_s
namespace magma_sampleselect {
__global__ void compute_abs(const float* __restrict__ in, float* __restrict__ out, int32_t size) {
auto idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= size) {
return;
}
auto v = in[idx];
out[idx] = real(v) * real(v) + imag(v) * imag(v);
}
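// Note: for this real (single precision) instantiation imag(v) is zero, so out stores |v|^2;
// the selected threshold is square-rooted on the host after selection.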
} // namespace magma_sampleselect
using namespace magma_sampleselect;
/**
Purpose
-------
This routine selects a threshold separating the subset_size smallest
magnitude elements from the rest.
Arguments
---------
@param[in]
total_size magma_int_t
size of array val
@param[in]
subset_size magma_int_t
number of smallest elements to separate
@param[in]
val float
array containing the values
@param[out]
thrs float*
computed threshold
@param[inout]
tmp_ptr magma_ptr*
pointer to pointer to temporary storage.
May be reallocated during execution.
@param[inout]
tmp_size magma_int_t*
pointer to size of temporary storage.
May be increased during execution.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_ssampleselect(
magma_int_t total_size,
magma_int_t subset_size,
float *val,
float *thrs,
magma_ptr *tmp_ptr,
magma_int_t *tmp_size,
magma_queue_t queue )
{
magma_int_t info = 0;
magma_int_t num_blocks = magma_ceildiv(total_size, block_size);
magma_int_t required_size = sizeof(float) * (total_size * 2 + searchtree_size)
+ sizeof(int32_t) * sampleselect_alloc_size(total_size);
auto realloc_result = realloc_if_necessary(tmp_ptr, tmp_size, required_size);
float* gputmp1 = (float*)*tmp_ptr;
float* gputmp2 = gputmp1 + total_size;
float* gputree = gputmp2 + total_size;
float* gpuresult = gputree + searchtree_size;
int32_t* gpuints = (int32_t*)(gpuresult + 1);
CHECK(realloc_result);
hipLaunchKernelGGL(( compute_abs), dim3(num_blocks), dim3(block_size), 0, queue->cuda_stream(),
val, gputmp1, total_size);
hipLaunchKernelGGL(( sampleselect), dim3(1), dim3(1), 0, queue->cuda_stream(),
gputmp1, gputmp2, gputree, gpuints, total_size, subset_size, gpuresult);
magma_sgetvector(1, gpuresult, 1, thrs, 1, queue );
*thrs = std::sqrt(*thrs);
cleanup:
return info;
}
/**
Purpose
-------
This routine selects an approximate threshold separating the subset_size
smallest magnitude elements from the rest.
Arguments
---------
@param[in]
total_size magma_int_t
size of array val
@param[in]
subset_size magma_int_t
number of smallest elements to separate
@param[in]
val float
array containing the values
@param[out]
thrs float*
computed threshold
@param[inout]
tmp_ptr magma_ptr*
pointer to pointer to temporary storage.
May be reallocated during execution.
@param[inout]
tmp_size magma_int_t*
pointer to size of temporary storage.
May be increased during execution.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_ssampleselect_approx(
magma_int_t total_size,
magma_int_t subset_size,
float *val,
float *thrs,
magma_ptr *tmp_ptr,
magma_int_t *tmp_size,
magma_queue_t queue )
{
magma_int_t info = 0;
auto num_blocks = magma_ceildiv(total_size, block_size);
auto local_work = (total_size + num_threads - 1) / num_threads;
auto required_size = sizeof(float) * (total_size + searchtree_size)
+ sizeof(int32_t) * (searchtree_width * (num_grouped_blocks + 1) + 1);
auto realloc_result = realloc_if_necessary(tmp_ptr, tmp_size, required_size);
float* gputmp = (float*)*tmp_ptr;
float* gputree = gputmp + total_size;
uint32_t* gpubucketidx = (uint32_t*)(gputree + searchtree_size);
int32_t* gpurankout = (int32_t*)(gpubucketidx + 1);
int32_t* gpucounts = gpurankout + 1;
int32_t* gpulocalcounts = gpucounts + searchtree_width;
uint32_t bucketidx{};
CHECK(realloc_result);
hipLaunchKernelGGL(( compute_abs), dim3(num_blocks), dim3(block_size), 0, queue->cuda_stream(),
val, gputmp, total_size);
hipLaunchKernelGGL(( build_searchtree), dim3(1), dim3(sample_size), 0, queue->cuda_stream(),
gputmp, gputree, total_size);
hipLaunchKernelGGL(( count_buckets), dim3(num_grouped_blocks), dim3(block_size), 0, queue->cuda_stream(),
gputmp, gputree, gpulocalcounts, total_size, local_work);
hipLaunchKernelGGL(( reduce_counts), dim3(searchtree_width), dim3(num_grouped_blocks), 0, queue->cuda_stream(),
gpulocalcounts, gpucounts, num_grouped_blocks);
hipLaunchKernelGGL(( sampleselect_findbucket), dim3(1), dim3(searchtree_width / 2), 0, queue->cuda_stream(),
gpucounts, subset_size, gpubucketidx, gpurankout);
magma_getvector(1, sizeof(uint32_t), gpubucketidx, 1, &bucketidx, 1, queue);
magma_sgetvector(1, gputree + searchtree_width - 1 + bucketidx, 1, thrs, 1, queue);
*thrs = std::sqrt(*thrs);
cleanup:
return info;
}
| 11fcd0647e106d4c000c37e44ad86830d6165d0b.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Tobias Ribizel
@generated from sparse/blas/magma_zsampleselect.cu, normal z -> s, Wed Jan 2 14:18:54 2019
*/
#include "magma_sampleselect.h"
#include <cstdint>
#define PRECISION_s
namespace magma_sampleselect {
__global__ void compute_abs(const float* __restrict__ in, float* __restrict__ out, int32_t size) {
auto idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= size) {
return;
}
auto v = in[idx];
out[idx] = real(v) * real(v) + imag(v) * imag(v);
}
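// Note: for this real (single precision) instantiation imag(v) is zero, so out stores |v|^2;
// the selected threshold is square-rooted on the host after selection.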
} // namespace magma_sampleselect
using namespace magma_sampleselect;
/**
Purpose
-------
This routine selects a threshold separating the subset_size smallest
magnitude elements from the rest.
Arguments
---------
@param[in]
total_size magma_int_t
size of array val
@param[in]
subset_size magma_int_t
number of smallest elements to separate
@param[in]
val float
array containing the values
@param[out]
thrs float*
computed threshold
@param[inout]
tmp_ptr magma_ptr*
pointer to pointer to temporary storage.
May be reallocated during execution.
@param[inout]
tmp_size magma_int_t*
pointer to size of temporary storage.
May be increased during execution.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_ssampleselect(
magma_int_t total_size,
magma_int_t subset_size,
float *val,
float *thrs,
magma_ptr *tmp_ptr,
magma_int_t *tmp_size,
magma_queue_t queue )
{
magma_int_t info = 0;
magma_int_t num_blocks = magma_ceildiv(total_size, block_size);
magma_int_t required_size = sizeof(float) * (total_size * 2 + searchtree_size)
+ sizeof(int32_t) * sampleselect_alloc_size(total_size);
auto realloc_result = realloc_if_necessary(tmp_ptr, tmp_size, required_size);
float* gputmp1 = (float*)*tmp_ptr;
float* gputmp2 = gputmp1 + total_size;
float* gputree = gputmp2 + total_size;
float* gpuresult = gputree + searchtree_size;
int32_t* gpuints = (int32_t*)(gpuresult + 1);
CHECK(realloc_result);
compute_abs<<<num_blocks, block_size, 0, queue->cuda_stream()>>>
(val, gputmp1, total_size);
sampleselect<<<1, 1, 0, queue->cuda_stream()>>>
(gputmp1, gputmp2, gputree, gpuints, total_size, subset_size, gpuresult);
magma_sgetvector(1, gpuresult, 1, thrs, 1, queue );
*thrs = std::sqrt(*thrs);
cleanup:
return info;
}
/**
Purpose
-------
This routine selects an approximate threshold separating the subset_size
smallest magnitude elements from the rest.
Arguments
---------
@param[in]
total_size magma_int_t
size of array val
@param[in]
subset_size magma_int_t
number of smallest elements to separate
@param[in]
val float
array containing the values
@param[out]
thrs float*
computed threshold
@param[inout]
tmp_ptr magma_ptr*
pointer to pointer to temporary storage.
May be reallocated during execution.
@param[inout]
tmp_size magma_int_t*
pointer to size of temporary storage.
May be increased during execution.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_ssampleselect_approx(
magma_int_t total_size,
magma_int_t subset_size,
float *val,
float *thrs,
magma_ptr *tmp_ptr,
magma_int_t *tmp_size,
magma_queue_t queue )
{
magma_int_t info = 0;
auto num_blocks = magma_ceildiv(total_size, block_size);
auto local_work = (total_size + num_threads - 1) / num_threads;
auto required_size = sizeof(float) * (total_size + searchtree_size)
+ sizeof(int32_t) * (searchtree_width * (num_grouped_blocks + 1) + 1);
auto realloc_result = realloc_if_necessary(tmp_ptr, tmp_size, required_size);
float* gputmp = (float*)*tmp_ptr;
float* gputree = gputmp + total_size;
uint32_t* gpubucketidx = (uint32_t*)(gputree + searchtree_size);
int32_t* gpurankout = (int32_t*)(gpubucketidx + 1);
int32_t* gpucounts = gpurankout + 1;
int32_t* gpulocalcounts = gpucounts + searchtree_width;
uint32_t bucketidx{};
CHECK(realloc_result);
compute_abs<<<num_blocks, block_size, 0, queue->cuda_stream()>>>
(val, gputmp, total_size);
build_searchtree<<<1, sample_size, 0, queue->cuda_stream()>>>
(gputmp, gputree, total_size);
count_buckets<<<num_grouped_blocks, block_size, 0, queue->cuda_stream()>>>
(gputmp, gputree, gpulocalcounts, total_size, local_work);
reduce_counts<<<searchtree_width, num_grouped_blocks, 0, queue->cuda_stream()>>>
(gpulocalcounts, gpucounts, num_grouped_blocks);
sampleselect_findbucket<<<1, searchtree_width / 2, 0, queue->cuda_stream()>>>
(gpucounts, subset_size, gpubucketidx, gpurankout);
magma_getvector(1, sizeof(uint32_t), gpubucketidx, 1, &bucketidx, 1, queue);
magma_sgetvector(1, gputree + searchtree_width - 1 + bucketidx, 1, thrs, 1, queue);
*thrs = std::sqrt(*thrs);
cleanup:
return info;
}
|
a3528894d62aeeaaeabc33a35c38fe76c1748044.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <cuml/tsa/arima_common.h>
#include <cuml/tsa/batched_arima.hpp>
#include <raft/handle.hpp>
#include <raft/random/rng.cuh>
#include <raft/cudart_utils.h>
#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace Arima {
struct ArimaParams {
TimeSeriesParams data;
ARIMAOrder order;
};
template <typename DataT>
class ArimaLoglikelihood : public TsFixtureRandom<DataT> {
public:
ArimaLoglikelihood(const std::string& name, const ArimaParams& p)
: TsFixtureRandom<DataT>(name, p.data), order(p.order)
{
}
// Note: public function because of the __device__ lambda
void runBenchmark(::benchmark::State& state) override
{
using MLCommon::Bench::CudaEventTimer;
auto& handle = *this->handle;
auto stream = handle.get_stream();
auto counting = thrust::make_counting_iterator(0);
// Generate random parameters
int N = order.complexity();
raft::random::Rng gpu_gen(this->params.seed, raft::random::GenPhilox);
gpu_gen.uniform(param, N * this->params.batch_size, -1.0, 1.0, stream);
// Set sigma2 parameters to 1.0
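// Parameters are laid out as one contiguous block of N values per batch member;
// the last entry of each block is the sigma2 term.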
DataT* x = param; // copy the object attribute for thrust
thrust::for_each(thrust::hip::par.on(stream),
counting,
counting + this->params.batch_size,
[=] __device__(int bid) { x[(bid + 1) * N - 1] = 1.0; });
CUDA_CHECK(hipStreamSynchronize(stream));
// Benchmark loop
this->loopOnState(state, [this]() {
ARIMAMemory<double> arima_mem(order, this->params.batch_size, this->params.n_obs, temp_mem);
// Evaluate log-likelihood
batched_loglike(*this->handle,
arima_mem,
this->data.X,
this->params.batch_size,
this->params.n_obs,
order,
param,
loglike,
residual,
true,
false);
});
}
void allocateBuffers(const ::benchmark::State& state)
{
Fixture::allocateBuffers(state);
auto& handle = *this->handle;
auto stream = handle.get_stream();
auto allocator = handle.get_device_allocator();
// Buffer for the model parameters
param = (DataT*)allocator->allocate(
order.complexity() * this->params.batch_size * sizeof(DataT), stream);
// Buffers for the log-likelihood and residuals
loglike = (DataT*)allocator->allocate(this->params.batch_size * sizeof(DataT), stream);
residual = (DataT*)allocator->allocate(
this->params.batch_size * this->params.n_obs * sizeof(DataT), stream);
// Temporary memory
size_t temp_buf_size =
ARIMAMemory<double>::compute_size(order, this->params.batch_size, this->params.n_obs);
temp_mem = (char*)allocator->allocate(temp_buf_size, stream);
}
void deallocateBuffers(const ::benchmark::State& state)
{
Fixture::deallocateBuffers(state);
auto& handle = *this->handle;
auto stream = handle.get_stream();
auto allocator = handle.get_device_allocator();
allocator->deallocate(
param, order.complexity() * this->params.batch_size * sizeof(DataT), stream);
allocator->deallocate(loglike, this->params.batch_size * sizeof(DataT), stream);
allocator->deallocate(
residual, this->params.batch_size * this->params.n_obs * sizeof(DataT), stream);
}
protected:
ARIMAOrder order;
DataT* param;
DataT* loglike;
DataT* residual;
char* temp_mem;
};
std::vector<ArimaParams> getInputs()
{
struct std::vector<ArimaParams> out;
ArimaParams p;
p.data.seed = 12345ULL;
std::vector<ARIMAOrder> list_order = {{1, 1, 1, 0, 0, 0, 0, 0},
{1, 1, 1, 1, 1, 1, 4, 0},
{1, 1, 1, 1, 1, 1, 12, 0},
{1, 1, 1, 1, 1, 1, 24, 0},
{1, 1, 1, 1, 1, 1, 52, 0}};
std::vector<int> list_batch_size = {10, 100, 1000, 10000};
std::vector<int> list_n_obs = {200, 500, 1000};
for (auto& order : list_order) {
for (auto& batch_size : list_batch_size) {
for (auto& n_obs : list_n_obs) {
p.order = order;
p.data.batch_size = batch_size;
p.data.n_obs = n_obs;
out.push_back(p);
}
}
}
return out;
}
ML_BENCH_REGISTER(ArimaParams, ArimaLoglikelihood<double>, "arima", getInputs());
} // namespace Arima
} // namespace Bench
} // namespace ML
| a3528894d62aeeaaeabc33a35c38fe76c1748044.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <cuml/tsa/arima_common.h>
#include <cuml/tsa/batched_arima.hpp>
#include <raft/handle.hpp>
#include <raft/random/rng.cuh>
#include <raft/cudart_utils.h>
#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace Arima {
struct ArimaParams {
TimeSeriesParams data;
ARIMAOrder order;
};
template <typename DataT>
class ArimaLoglikelihood : public TsFixtureRandom<DataT> {
public:
ArimaLoglikelihood(const std::string& name, const ArimaParams& p)
: TsFixtureRandom<DataT>(name, p.data), order(p.order)
{
}
// Note: public function because of the __device__ lambda
void runBenchmark(::benchmark::State& state) override
{
using MLCommon::Bench::CudaEventTimer;
auto& handle = *this->handle;
auto stream = handle.get_stream();
auto counting = thrust::make_counting_iterator(0);
// Generate random parameters
int N = order.complexity();
raft::random::Rng gpu_gen(this->params.seed, raft::random::GenPhilox);
gpu_gen.uniform(param, N * this->params.batch_size, -1.0, 1.0, stream);
// Set sigma2 parameters to 1.0
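// Parameters are laid out as one contiguous block of N values per batch member;
// the last entry of each block is the sigma2 term.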
DataT* x = param; // copy the object attribute for thrust
thrust::for_each(thrust::cuda::par.on(stream),
counting,
counting + this->params.batch_size,
[=] __device__(int bid) { x[(bid + 1) * N - 1] = 1.0; });
CUDA_CHECK(cudaStreamSynchronize(stream));
// Benchmark loop
this->loopOnState(state, [this]() {
ARIMAMemory<double> arima_mem(order, this->params.batch_size, this->params.n_obs, temp_mem);
// Evaluate log-likelihood
batched_loglike(*this->handle,
arima_mem,
this->data.X,
this->params.batch_size,
this->params.n_obs,
order,
param,
loglike,
residual,
true,
false);
});
}
void allocateBuffers(const ::benchmark::State& state)
{
Fixture::allocateBuffers(state);
auto& handle = *this->handle;
auto stream = handle.get_stream();
auto allocator = handle.get_device_allocator();
// Buffer for the model parameters
param = (DataT*)allocator->allocate(
order.complexity() * this->params.batch_size * sizeof(DataT), stream);
// Buffers for the log-likelihood and residuals
loglike = (DataT*)allocator->allocate(this->params.batch_size * sizeof(DataT), stream);
residual = (DataT*)allocator->allocate(
this->params.batch_size * this->params.n_obs * sizeof(DataT), stream);
// Temporary memory
size_t temp_buf_size =
ARIMAMemory<double>::compute_size(order, this->params.batch_size, this->params.n_obs);
temp_mem = (char*)allocator->allocate(temp_buf_size, stream);
}
void deallocateBuffers(const ::benchmark::State& state)
{
Fixture::deallocateBuffers(state);
auto& handle = *this->handle;
auto stream = handle.get_stream();
auto allocator = handle.get_device_allocator();
allocator->deallocate(
param, order.complexity() * this->params.batch_size * sizeof(DataT), stream);
allocator->deallocate(loglike, this->params.batch_size * sizeof(DataT), stream);
allocator->deallocate(
residual, this->params.batch_size * this->params.n_obs * sizeof(DataT), stream);
}
protected:
ARIMAOrder order;
DataT* param;
DataT* loglike;
DataT* residual;
char* temp_mem;
};
std::vector<ArimaParams> getInputs()
{
struct std::vector<ArimaParams> out;
ArimaParams p;
p.data.seed = 12345ULL;
std::vector<ARIMAOrder> list_order = {{1, 1, 1, 0, 0, 0, 0, 0},
{1, 1, 1, 1, 1, 1, 4, 0},
{1, 1, 1, 1, 1, 1, 12, 0},
{1, 1, 1, 1, 1, 1, 24, 0},
{1, 1, 1, 1, 1, 1, 52, 0}};
std::vector<int> list_batch_size = {10, 100, 1000, 10000};
std::vector<int> list_n_obs = {200, 500, 1000};
for (auto& order : list_order) {
for (auto& batch_size : list_batch_size) {
for (auto& n_obs : list_n_obs) {
p.order = order;
p.data.batch_size = batch_size;
p.data.n_obs = n_obs;
out.push_back(p);
}
}
}
return out;
}
ML_BENCH_REGISTER(ArimaParams, ArimaLoglikelihood<double>, "arima", getInputs());
} // namespace Arima
} // namespace Bench
} // namespace ML
|
2d7125cc57e392562ca024daee172a787dd367cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include <hip/device_functions.h>
#include <stdio.h>
#include <time.h>
#include <cstdlib>
#include <math.h>
const int BLOCK_SIZE = 32;
const int N = 1024;
const int RANDOM_NUMBER = 101;
__global__ void mulMatrixDevice(int* C, int* A, int* B, int N){
int blockX = blockIdx.x;
int blockY = blockIdx.y;
int threadX = threadIdx.x;
int threadY = threadIdx.y;
int aBegin = N * BLOCK_SIZE * blockY;
int bBegin = BLOCK_SIZE * blockX;
int aEnd = aBegin + N - 1;
int aStep = BLOCK_SIZE;
int bStep = BLOCK_SIZE * N;
int sum = 0;
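// Tiled multiplication: each block stages BLOCK_SIZE x BLOCK_SIZE tiles of A and B in
// shared memory and accumulates partial dot products across the tiles.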
for (int indexA = aBegin, indexB = bBegin; indexA <= aEnd; indexA += aStep, indexB += bStep){
__shared__ int As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE];
As[threadY][threadX] = A[indexA + N * threadY + threadX];
Bs[threadY][threadX] = B[indexB + N * threadY + threadX];
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; k++)
sum += As[threadY][k] * Bs[k][threadX];
__syncthreads();
}
int c = N * BLOCK_SIZE * blockY + BLOCK_SIZE * blockX;
C[c + N * threadY + threadX] = sum;
}
void mulMatrixHost(int* a, int *b, int* c, int n) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
int sum = 0;
for (int k = 0; k < n; k++)
sum = sum + a[i * n + k] * b[k * n + j];
c[i * n + j] = sum;
}
}
}
int* randomMatrix(int* matrix, int n) {
srand(time(NULL));
for (int i = 0; i < n; ++i) {
matrix[i] = rand() % RANDOM_NUMBER - round(RANDOM_NUMBER/2);
}
return matrix;
}
bool checkMatrix(int* matrix1, int* matrix2, int size) {
for (int i = 0; i < size; ++i) {
if (matrix1[i] != matrix2[i])
return false;
}
return true;
}
int main(){
int matrixSize = N*N;
int* matrixA = new int[matrixSize];
int* matrixB = new int[matrixSize];
int* hostMatrixC = new int[matrixSize];
int* deviceMatrixC = new int[matrixSize];
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipMalloc((void**)&dev_a, matrixSize * sizeof(int));
hipMalloc((void**)&dev_b, matrixSize * sizeof(int));
hipMalloc((void**)&dev_c, matrixSize * sizeof(int));
randomMatrix(matrixA, matrixSize);
randomMatrix(matrixB, matrixSize);
clock_t time = clock();
mulMatrixHost(matrixA, matrixB, hostMatrixC, N);
double hostTime = double(clock() - time) * 1000 / CLOCKS_PER_SEC;
time = clock();
hipMemcpy(dev_a, matrixA, matrixSize * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, matrixB, matrixSize * sizeof(int), hipMemcpyHostToDevice);
mulMatrixDevice << <dim3(N / BLOCK_SIZE, N / BLOCK_SIZE), dim3(BLOCK_SIZE, BLOCK_SIZE) >> >(dev_c, dev_a, dev_b, N);
hipDeviceSynchronize();
hipMemcpy(deviceMatrixC, dev_c, matrixSize * sizeof(int), hipMemcpyDeviceToHost);
double deviceTime = double(clock() - time) * 1000 / CLOCKS_PER_SEC;
if (checkMatrix(hostMatrixC, deviceMatrixC, matrixSize)) {
printf("CPU: %f\n", hostTime);
printf("GPU: %f\n", deviceTime);
}
else {
printf("Matrixs not equals!!!");
}
delete[] matrixA;
delete[] matrixB;
delete[] deviceMatrixC;
delete[] hostMatrixC;
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
system("pause");
return 0;
}
| 2d7125cc57e392562ca024daee172a787dd367cc.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include <device_functions.h>
#include <stdio.h>
#include <time.h>
#include <cstdlib>
#include <math.h>
const int BLOCK_SIZE = 32;
const int N = 1024;
const int RANDOM_NUMBER = 101;
__global__ void mulMatrixDevice(int* C, int* A, int* B, int N){
int blockX = blockIdx.x;
int blockY = blockIdx.y;
int threadX = threadIdx.x;
int threadY = threadIdx.y;
int aBegin = N * BLOCK_SIZE * blockY;
int bBegin = BLOCK_SIZE * blockX;
int aEnd = aBegin + N - 1;
int aStep = BLOCK_SIZE;
int bStep = BLOCK_SIZE * N;
int sum = 0;
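// Tiled multiplication: each block stages BLOCK_SIZE x BLOCK_SIZE tiles of A and B in
// shared memory and accumulates partial dot products across the tiles.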
for (int indexA = aBegin, indexB = bBegin; indexA <= aEnd; indexA += aStep, indexB += bStep){
__shared__ int As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE];
As[threadY][threadX] = A[indexA + N * threadY + threadX];
Bs[threadY][threadX] = B[indexB + N * threadY + threadX];
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; k++)
sum += As[threadY][k] * Bs[k][threadX];
__syncthreads();
}
int c = N * BLOCK_SIZE * blockY + BLOCK_SIZE * blockX;
C[c + N * threadY + threadX] = sum;
}
void mulMatrixHost(int* a, int *b, int* c, int n) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
int sum = 0;
for (int k = 0; k < n; k++)
sum = sum + a[i * n + k] * b[k * n + j];
c[i * n + j] = sum;
}
}
}
int* randomMatrix(int* matrix, int n) {
srand(time(NULL));
for (int i = 0; i < n; ++i) {
matrix[i] = rand() % RANDOM_NUMBER - round(RANDOM_NUMBER/2);
}
return matrix;
}
bool checkMatrix(int* matrix1, int* matrix2, int size) {
for (int i = 0; i < size; ++i) {
if (matrix1[i] != matrix2[i])
return false;
}
return true;
}
int main(){
int matrixSize = N*N;
int* matrixA = new int[matrixSize];
int* matrixB = new int[matrixSize];
int* hostMatrixC = new int[matrixSize];
int* deviceMatrixC = new int[matrixSize];
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaMalloc((void**)&dev_a, matrixSize * sizeof(int));
cudaMalloc((void**)&dev_b, matrixSize * sizeof(int));
cudaMalloc((void**)&dev_c, matrixSize * sizeof(int));
randomMatrix(matrixA, matrixSize);
randomMatrix(matrixB, matrixSize);
clock_t time = clock();
mulMatrixHost(matrixA, matrixB, hostMatrixC, N);
double hostTime = double(clock() - time) * 1000 / CLOCKS_PER_SEC;
time = clock();
cudaMemcpy(dev_a, matrixA, matrixSize * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, matrixB, matrixSize * sizeof(int), cudaMemcpyHostToDevice);
mulMatrixDevice << <dim3(N / BLOCK_SIZE, N / BLOCK_SIZE), dim3(BLOCK_SIZE, BLOCK_SIZE) >> >(dev_c, dev_a, dev_b, N);
cudaDeviceSynchronize();
cudaMemcpy(deviceMatrixC, dev_c, matrixSize * sizeof(int), cudaMemcpyDeviceToHost);
double deviceTime = double(clock() - time) * 1000 / CLOCKS_PER_SEC;
if (checkMatrix(hostMatrixC, deviceMatrixC, matrixSize)) {
printf("CPU: %f\n", hostTime);
printf("GPU: %f\n", deviceTime);
}
else {
printf("Matrixs not equals!!!");
}
delete[] matrixA;
delete[] matrixB;
delete[] deviceMatrixC;
delete[] hostMatrixC;
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
system("pause");
return 0;
}
|
35ab6250c099b1fdd3acb5aa3e5644962d216493.hip | // !!! This is a file automatically generated by hipify!!!
#include <jni.h>
#include <hip/hip_runtime.h>
extern "C" JNIEXPORT void JNICALL Java_ppl_delite_runtime_executor_CudaExecutionThread_initializeThread(JNIEnv* env, jobject obj, jint threadId, jint numThreads);
JNIEXPORT void JNICALL Java_ppl_delite_runtime_executor_CudaExecutionThread_initializeThread(JNIEnv* env, jobject obj, jint threadId, jint numThreads) {
// choose device num
if(hipSuccess != hipSetDevice(threadId)) {
printf("FATAL : GPU device could not be initialized. \n");
exit(1);
}
//reset the device
if(hipSuccess != hipDeviceReset()) {
printf("FATAL : hipDeviceReset() failed \n");
exit(1);
}
//set device options
if(hipSuccess != hipSetDeviceFlags(hipDeviceScheduleBlockingSync)) {
printf("FATAL : GPU device has crashed (cudaSetDviceFlags). \n");
exit(1);
}
//called to initialize the device (can take a while)
if(hipSuccess != hipDeviceSynchronize()) {
printf("FATAL : GPU device has crashed (hipDeviceSynchronize). \n");
exit(1);
}
}
| 35ab6250c099b1fdd3acb5aa3e5644962d216493.cu | #include <jni.h>
#include <cuda_runtime.h>
extern "C" JNIEXPORT void JNICALL Java_ppl_delite_runtime_executor_CudaExecutionThread_initializeThread(JNIEnv* env, jobject obj, jint threadId, jint numThreads);
JNIEXPORT void JNICALL Java_ppl_delite_runtime_executor_CudaExecutionThread_initializeThread(JNIEnv* env, jobject obj, jint threadId, jint numThreads) {
// choose device num
if(cudaSuccess != cudaSetDevice(threadId)) {
printf("FATAL : GPU device could not be initialized. \n");
exit(1);
}
//reset the device
if(cudaSuccess != cudaDeviceReset()) {
printf("FATAL : cudaDeviceReset() failed \n");
exit(1);
}
//set device options
if(cudaSuccess != cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync)) {
printf("FATAL : GPU device has crashed (cudaSetDviceFlags). \n");
exit(1);
}
//called to initialize the device (can take a while)
if(cudaSuccess != cudaThreadSynchronize()) {
printf("FATAL : GPU device has crashed (cudaThreadSynchronize). \n");
exit(1);
}
}
|
70111d43f166daf7b601b337e8edb6fc2d152306.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Device/Util/Timer.cuh>
#include "Static/KCore/CoreNumbers.cuh"
#include <fstream>
#include <chrono>
#include <roctracer/roctx.h>
#define DELETE 1
using namespace timer;
namespace hornets_nest {
KCore::KCore(HornetGraph &hornet) : // Constructor
StaticAlgorithm(hornet),
vqueue(hornet),
peel_vqueue(hornet),
active_queue(hornet),
iter_queue(hornet),
vertex_frontier(hornet),
load_balancing(hornet)
{
gpu::allocate(vertex_pres, hornet.nV()); // Creates arrays of length n for whether vertices are present, their current degree, and their color
gpu::allocate(vertex_color, hornet.nV());
gpu::allocate(vertex_deg, hornet.nV());
gpu::allocate(vertex_core_number, hornet.nV()); // Keep track of core numbers of vertices
gpu::allocate(vertex_nbhr_pointer, hornet.nV()); // Keep track of neighbor list pointers of vertices
gpu::allocate(hd_data().src, hornet.nE()); // Allocate space for endpoints of edges and counter
gpu::allocate(hd_data().dst, hornet.nE());
gpu::allocate(hd_data().counter, 1);
gpu::allocate(edge_in_clique, hornet.nE());
gpu::allocate(vertex_nbhr_offsets, hornet.nV());
gpu::allocate(device_clique_size, 1);
}
KCore::~KCore() { // Destructor, frees up all GPU memory used by the algorithm
gpu::free(vertex_pres);
gpu::free(vertex_color);
gpu::free(vertex_deg);
gpu::free(vertex_core_number);
gpu::free(vertex_nbhr_pointer);
gpu::free(hd_data().src);
gpu::free(hd_data().dst);
gpu::free(edge_in_clique);
gpu::free(vertex_nbhr_offsets);
gpu::free(device_clique_size);
}
// Why are we creating separate structs for all of these? is it because each struct can only have 1 operator?
// I think that these structs are defined to do things with the member data of KCore object. But why structs?
struct ActiveVertices { // Create data structure to keep track of active vertices
vid_t *vertex_pres; // What is vid_t? Is it a vertex array?
vid_t *deg;
TwoLevelQueue<vid_t> active_queue; // What is a TwoLevelQueue? I think it has something to do with load balancing...
OPERATOR(Vertex &v) { // What does OPERATOR mean? Does it just do this for everything in the struct?
vid_t id = v.id(); // I think that it simply looks at a given vertex and if it has nonzero degree, pops it in the active queue, marks it as present, and adds its degree to the degree array.
if (v.degree() > 0) {
vertex_pres[id] = 1;
active_queue.insert(id);
deg[id] = v.degree();
}
}
};
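// Collect the vertices whose current core number equals curr_coreness into vertex_frontier.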
struct FixedCoreNumVertices{
uint32_t *core_number;
uint32_t curr_coreness;
TwoLevelQueue<vid_t> vertex_frontier;
OPERATOR(Vertex &v){
vid_t id = v.id();
if(core_number[id] == curr_coreness){
vertex_frontier.insert(id);
}
}
};
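// Reserve a contiguous range of `degree` slots per vertex in the per-edge clique-membership
// array by atomically advancing a shared counter.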
struct InitializeOffsets{
int *vertex_nbhr_offsets;
// vid_t *vertex_degrees;
HostDeviceVar<KCoreData> hd;
OPERATOR(Vertex &v){
uint32_t deg = v.degree();
vid_t id = v.id();
vertex_nbhr_offsets[id] = atomicAdd(hd().counter, deg);
// atomicAdd(hd().counter, deg);
}
};
// bool check_clique(Vertex &v, Vertex &u){
// #pragma omp parallel for
// bool is_clique = true;
// for (Edge::iterator i = v.edge_begin(); i != v.edge_end(); i++){
// bool found = false;
// if (WeightT *i.weight() == 1){
// vid_t id = *i.src_id();
// #pragma omp parallel for
// for (Edge::iterator j = u.edge_begin(); j != u.edge_end(); j++){
// if (*j.dst_id() == id){
// bool found = true;
// }
// }
// if (!found){
// is_clique = false;
// }
// }
// }
// return is_clique;
// }
// __device__ __forceinline__
// void workPerBlock(vid_t numVertices,
// vid_t* __restrict__ outMpStart,
// vid_t* __restrict__ outMpEnd,
// int blockSize) {
// vid_t verticesPerMp = numVertices / gridDim.x;
// vid_t remainderBlocks = numVertices % gridDim.x;
// vid_t extraVertexBlocks = (blockIdx.x > remainderBlocks) ? remainderBlocks
// : blockIdx.x;
// vid_t regularVertexBlocks = (blockIdx.x > remainderBlocks) ?
// blockIdx.x - remainderBlocks : 0;
// vid_t mpStart = (verticesPerMp + 1) * extraVertexBlocks +
// verticesPerMp * regularVertexBlocks;
// *outMpStart = mpStart;
// *outMpEnd = mpStart + verticesPerMp + (blockIdx.x < remainderBlocks);
// }
// template<typename HornetDevice>
// __global__ void GetLocalClique(
// HornetDevice hornet ,
// uint32_t *core_number,
// int *vertex_nbhr_offsets,
// bool *edge_in_clique,
// uint32_t max_clique_size,
// int N){
// int k = threadIdx.x + blockIdx.x *blockDim.x;
// if (k>=N) return;
// for
// }
// run(){
// const int BLOCK_SIZE = 256;
// int blocks = (elements)/BLOCK_SIZE + (((elements)%BLOCK_SIZE)?1:0);
// GetLocalClique<<<blocks,BLOCK_SIZE>>>(hornet.device_side(),...,elements);
// }
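// Cache each vertex's neighbor-list pointer and degree so later operators can read adjacency
// without re-querying the vertex object.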
struct GetPointersAndDegrees{
vid_t **vertex_nbhr_pointer;
vid_t *deg;
OPERATOR(Vertex &v){
vid_t id = v.id();
vertex_nbhr_pointer[id] = v.neighbor_ptr();
deg[id] = v.degree();
}
};
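// Greedy clique heuristic: for each vertex, admit a neighbor into the local clique only if it is
// adjacent to every neighbor already admitted; the largest size found is recorded in
// device_clique_size via atomicMax.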
struct GetLocalClique{
uint32_t *core_number;
int *vertex_nbhr_offsets;
bool *edge_in_clique;
vid_t **vertex_nbhr_pointer;
vid_t *deg;
uint32_t *device_clique_size;
OPERATOR(Vertex &v){
// construct std::set of neighbors of current vertex
/* I want a vertex properties that include:
whether a vertex was visited on current sweep and
whether it's in the max clique
*/
uint32_t curr_size = 1;
// Make sure vertex has coreness >= max_clique_size before inserting
vid_t* vNeighPtr = v.neighbor_ptr();
vid_t v_id = v.id();
vid_t length_v = deg[v_id];
int offset = vertex_nbhr_offsets[v_id];
for (vid_t i = 0; i < length_v; i++){
vid_t u_id = vNeighPtr[i];
// Get nbhr info for u
vid_t* uNeighPtr = vertex_nbhr_pointer[u_id];
vid_t length_u = deg[u_id];
// Ignore this vertex if core number is too low
// if (length_u < (vid_t)device_clique_size) continue;
// Loop through neighbors of v currently in clique and check to see if also nbhrs of u
// #pragma omp parallel for
bool is_clique = true;
bool found = false;
for (vid_t j = 0; j < length_v; j++){
found = false;
if (edge_in_clique[offset - length_v + j]){
vid_t w_id = vNeighPtr[j];
// if (deg[w_id] < (vid_t)device_clique_size) continue;
// #pragma omp parallel for
for (vid_t k = 0; k < length_u; k++){
if (uNeighPtr[k] == w_id){
found = true;
break;
}
}
if (!found){
is_clique = false;
break;
}
}
}
// Check if nbhrs with coreness >= max_clique_size are part of a clique
// If so, increment clique size
if (is_clique){
edge_in_clique[offset + i - length_v] = true;
curr_size += 1;
// printf("Adding vertex to clique \n");
atomicMax(device_clique_size, curr_size);
// if (v_id < 100000) printf("Vertex added!\n");
}
}
}
};
struct PeelVertices { // Data structure to keep track of vertices to peel off
vid_t *vertex_pres;
vid_t *deg;
uint32_t peel;
TwoLevelQueue<vid_t> peel_queue;
TwoLevelQueue<vid_t> iter_queue;
OPERATOR(Vertex &v) { // Mark present vertices with insufficicnt degree for peeling
vid_t id = v.id();
if (vertex_pres[id] == 1 && deg[id] <= peel) {
vertex_pres[id] = 2;
peel_queue.insert(id);
iter_queue.insert(id);
}
}
};
struct CoreRemoveVertices { // Data structure to keep track of vertices to peel off
vid_t *vertex_pres;
uint32_t *core_number;
uint32_t peel;
OPERATOR(Vertex &v) { // Mark present vertices with insufficicnt degree for peeling
vid_t id = v.id();
if (vertex_pres[id] >= 1 && core_number[id] < peel) {
vertex_pres[id] = 0;
}
}
};
struct RemovePres { // Struct to remove vertices marked by PeelVertices
vid_t *vertex_pres;
OPERATOR(Vertex &v) {
vid_t id = v.id();
if (vertex_pres[id] == 2) {
vertex_pres[id] = 0;
}
}
};
struct DecrementDegree { // Struct to decrement degrees of every vertex attached to a removed vertex
vid_t *deg;
OPERATOR(Vertex &v, Edge &e) {
vid_t src = v.id();
vid_t dst = e.dst_id();
atomicAdd(&deg[src], -1);
atomicAdd(&deg[dst], -1);
}
};
struct UpdateCoreNumber{ // Update the core number of each vertex peeled off in current iteration
uint32_t *core_number;
vid_t *vertex_pres;
uint32_t peel;
OPERATOR(Vertex &v){
vid_t id = v.id();
if (vertex_pres[id] == 2){
core_number[id] = peel;
}
}
};
struct ExtractSubgraph { // Struct to extract subgraph of vertices that get peeled off? (Why not the other way around?)
HostDeviceVar<KCoreData> hd;
vid_t *vertex_pres;
OPERATOR(Vertex &v, Edge &e) {
vid_t src = v.id();
vid_t dst = e.dst_id();
if (vertex_pres[src] == 2 && vertex_pres[dst] == 2) {
int spot = atomicAdd(hd().counter, 1); // What do atomic operations do?
hd().src[spot] = src; // We do still keep the vertex numbers of the marked vertices
hd().dst[spot] = dst;
}
}
};
struct GetDegOne { // Mark all vertices of degree 1
TwoLevelQueue<vid_t> vqueue;
vid_t *vertex_color;
OPERATOR(Vertex &v) {
vid_t id = v.id();
if (v.degree() == 1) {
vqueue.insert(id);
vertex_color[id] = 1;
}
}
};
struct DegOneEdges {
HostDeviceVar<KCoreData> hd;
vid_t *vertex_color;
OPERATOR(Vertex &v, Edge &e) { // I think that this edge should be coming from the given vertex
vid_t src = v.id();
vid_t dst = e.dst_id();
if (vertex_color[src] || vertex_color[dst]) { // If one of the endpoints has degree 1, add to subgraph hd
int spot = atomicAdd(hd().counter, 1);
hd().src[spot] = src;
hd().dst[spot] = dst;
if (!vertex_color[src] || !vertex_color[dst]) { // Get different subgraph for ones for which both endpoints are not degree 1
int spot_rev = atomicAdd(hd().counter, 1);
hd().src[spot_rev] = dst;
hd().dst[spot_rev] = src;
}
}
}
};
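// Collect edges with at least one endpoint no longer present into hd for deletion;
// when only one endpoint was removed, the reverse direction is queued too so both
// copies of the undirected edge get deleted.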
struct SmallCoreEdges {
vid_t *vertex_pres;
HostDeviceVar<KCoreData> hd;
OPERATOR(Vertex &v, Edge &e){
vid_t src = v.id();
vid_t dst = e.dst_id();
if (!vertex_pres[src] || !vertex_pres[dst]){
int spot = atomicAdd(hd().counter, 1);
hd().src[spot] = src;
hd().dst[spot] = dst;
if (vertex_pres[src] || vertex_pres[dst]){
int spot_rev = atomicAdd(hd().counter, 1);
hd().src[spot_rev] = dst;
hd().dst[spot_rev] = src;
}
}
}
};
struct ResetWeight { // Reset edge weight to 0 after finding current iteration of cliques
OPERATOR(Vertex &v, Edge &e){
e.set_weight(0);
}
};
void KCore::reset() { // What does this do?
vqueue.swap();
peel_vqueue.swap();
active_queue.swap();
iter_queue.swap();
}
// Delete all edges in given batch
void oper_bidirect_batch(HornetGraph &hornet, vid_t *src, vid_t *dst,
int size, uint8_t op) {
gpu::BatchUpdate batch_update(src, dst, size, gpu::BatchType::DEVICE); //What does specifying this GPU BatchUpdate object do?
// Delete edges in the forward and backward directions.
hornet.deleteEdgeBatch(batch_update, gpu::batch_property::IN_PLACE); // What do you mean "in forward and backward directions?"
}
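// Standard k-core peeling: repeatedly mark and remove every vertex whose remaining
// degree is <= the current peel value, decrementing neighbor degrees as vertices go;
// a vertex's core number is the peel value at which it is removed.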
void get_core_numbers(HornetGraph &hornet,
TwoLevelQueue<vid_t> &peel_queue,
TwoLevelQueue<vid_t> &active_queue,
TwoLevelQueue<vid_t> &iter_queue,
load_balancing::VertexBased1 load_balancing,
vid_t *deg,
vid_t *vertex_pres,
uint32_t *core_number,
uint32_t *max_peel){
forAllVertices(hornet, ActiveVertices { vertex_pres, deg, active_queue }); // Get active vertices in parallel (puts in input queue)
active_queue.swap(); // Swap input to output queue
int n_active = active_queue.size();
uint32_t peel = 0;
while (n_active > 0) {
// Why do we use a particular queue in forAllVertices? Does it go through all vertices in this queue?
forAllVertices(hornet, active_queue,
PeelVertices { vertex_pres, deg, peel, peel_queue, iter_queue} );
iter_queue.swap();
n_active -= iter_queue.size();
if (iter_queue.size() == 0) {
peel++; // Once we have removed all vertices with core <= current peel, increment peel
peel_queue.swap();
// Shouldn't this be the peel_queue? If not, why?
// Would this be faster if it were peel_queue?
forAllVertices(hornet, peel_queue, UpdateCoreNumber { core_number, vertex_pres, peel });
forAllVertices(hornet, peel_queue, RemovePres { vertex_pres }); // Why do we never update the active queue? Does this modify its data in some way?
} else {
forAllEdges(hornet, iter_queue, DecrementDegree { deg }, load_balancing); // Go through vertices in iter_queue and decrement the degree of their nbhrs
}
}
*max_peel = peel;
}
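// One pass of the clique heuristic: gather vertices whose core number equals *peel,
// grow local cliques among them, then drop vertices whose core number is below the
// current clique size and queue their incident edges for deletion in hd.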
void max_clique_heuristic(HornetGraph &hornet,
HostDeviceVar<KCoreData>& hd,
TwoLevelQueue<vid_t> &vertex_frontier,
load_balancing::VertexBased1 load_balancing,
vid_t *vertex_pres,
uint32_t *core_number,
int *vertex_nbhr_offsets,
vid_t **vertex_nbhr_pointer,
vid_t *vertex_degree,
bool *edge_in_clique,
uint32_t *device_clique_size,
uint32_t *max_clique_size,
uint32_t *peel,
int *batch_size){
// // uint32_t curr_peel = *peel;
uint32_t clique_size = 0;
clique_size++;
// while (vertex_frontier.size() == 0){
// std::cout << "Peel: " << *peel << std::endl;
forAllVertices(hornet, FixedCoreNumVertices{ core_number, *peel, vertex_frontier });
// std::cout << "Vertex Frontier Size before swap: " << vertex_frontier.size() << std::endl;
vertex_frontier.swap();
// std::cout << "Vertex Frontier Size after swap: " << vertex_frontier.size() << std::endl;
if (vertex_frontier.size() > 0) {
// Get clique numbers of vertices of frontier core number
forAllVertices(hornet, vertex_frontier, GetLocalClique { core_number, vertex_nbhr_offsets, edge_in_clique, vertex_nbhr_pointer, vertex_degree, device_clique_size });
// Remove vertices without sufficiently high core number
// uint32_t *curr_max = clique_size;
forAllVertices(hornet, CoreRemoveVertices { vertex_pres, core_number, clique_size });
forAllEdges(hornet, SmallCoreEdges { vertex_pres, hd }, load_balancing);
}
// peel--;
// }
int size = 0;
hipMemcpy(&size, hd().counter, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&clique_size, hd().counter, sizeof(int), hipMemcpyDeviceToHost);
*batch_size = size;
*max_clique_size = clique_size;
// std::cout << "Max Clique Found: " << max_clique_size << std::endl;
}
void KCore::run() {
omp_set_num_threads(72);
vid_t *src = new vid_t[hornet.nE()];
vid_t *dst = new vid_t[hornet.nE()];
uint32_t len = hornet.nE() / 2 + 1;
// uint32_t peel = new uint32_t;
uint32_t ne = hornet.nE();
std::cout << "ne: " << ne << std::endl;
// uint32_t max_clique_size = new uint32_t;
auto pres = vertex_pres;
auto deg = vertex_deg;
auto color = vertex_color;
auto core_number = vertex_core_number;
auto offsets = vertex_nbhr_offsets;
vid_t** nbhr_pointer = vertex_nbhr_pointer;
auto clique_edges = edge_in_clique;
uint32_t* temp_clique_size = device_clique_size;
gpu::memsetZero(temp_clique_size);
// What does this do?
forAllnumV(hornet, [=] __device__ (int i){ pres[i] = 0; } );
forAllnumV(hornet, [=] __device__ (int i){ deg[i] = 0; } );
forAllnumV(hornet, [=] __device__ (int i){ color[i] = 0; } );
forAllnumV(hornet, [=] __device__ (int i){ offsets[i] = 0; } );
// forAllnumV(hornet, [=] __device__ (int* i){ nbhr_pointer[i] = 0; } );
forAllnumE(hornet, [=] __device__ (int i){ clique_edges[i] = false; } );
Timer<DEVICE> TM;
Timer<DEVICE> Tclique;
TM.start();
forAllVertices(hornet, InitializeOffsets { offsets, hd_data });
/* Begin degree 1 vertex preprocessing optimization */
// Find vertices of degree 1.
forAllVertices(hornet, GetDegOne { vqueue, vertex_color });
vqueue.swap();
gpu::memsetZero(hd_data().counter);
// Find the edges incident to these vertices.
gpu::memsetZero(hd_data().counter); // reset counter.
forAllEdges(hornet, vqueue,
DegOneEdges { hd_data, vertex_color }, load_balancing);
// Mark edges with peel 1.
int peel_one_count = 0;
hipMemcpy(&peel_one_count, hd_data().counter, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(src, hd_data().src, hornet.nV() * sizeof(vid_t),
hipMemcpyDeviceToHost);
hipMemcpy(dst, hd_data().dst, hornet.nV() * sizeof(vid_t),
hipMemcpyDeviceToHost);
// Delete peel 1 edges.
oper_bidirect_batch(hornet, hd_data().src, hd_data().dst, peel_one_count, DELETE);
// Get vertex core numbers
uint32_t peel = 0;
uint32_t max_clique_size = 0;
max_clique_size++;
// uint32_t temp_clique_size;
get_core_numbers(hornet, peel_vqueue, active_queue, iter_queue,
load_balancing, deg, vertex_pres, core_number, &peel);
gpu::memsetZero(hd_data().counter);
TM.stop();
TM.print("CoreNumbers");
// // Get active vertices (with clique number > 0)
// forAllVertices(hornet, CoreActiveVertices { vertex_pres, core_number, active_queue }); // Get active vertices in parallel (puts in input queue)
// active_queue.swap(); // Swap input to output queue
// int n_active = active_queue.size();
Tclique.start();
// Begin actual clique heuristic algorithm
int iter = 1;
while (peel >= max_clique_size) {
int batch_size = 0;
// std::cout << "Vertex Pointers Not Initialized Yet" << std::endl;
forAllVertices(hornet, GetPointersAndDegrees { nbhr_pointer, deg });
// forAllVertices(hornet, InitializeOffsets { offsets, hd_data });
// std::cout << "Initialized Vertex Pointers" << std::endl;
// auto start = std::chrono::high_resolution_clock::now();
max_clique_heuristic(hornet, hd_data, vertex_frontier, load_balancing,
vertex_pres, vertex_core_number, offsets, nbhr_pointer,
deg, clique_edges, temp_clique_size, &max_clique_size, &peel, &batch_size);
// auto finish = std::chrono::high_resolution_clock::now();
// std::chrono::duration<double> elapsed = finish - start;
// std::cout << "Iteration " << iter << ": " << elapsed.count() << "s. \n";
hipMemcpy(&max_clique_size, temp_clique_size, sizeof(int), hipMemcpyDeviceToHost);
// // atomicMax(&max_clique_size, temp_clique_size);
// if (temp_clique_size > max_clique_size) {
// max_clique_size = temp_clique_size;
// }
// std::cout << "CurrentMaxClique: " << max_clique_size << "\n";
if (batch_size > 0){
oper_bidirect_batch(hornet, hd_data().src, hd_data().dst, batch_size, DELETE);
gpu::memsetZero(hd_data().counter);
}
peel--;
iter++;
}
Tclique.stop();
Tclique.print("CliqueHeuristic");
std::cout << "MaxCliqueFound: " << max_clique_size << std::endl;
}
void KCore::release() {
gpu::free(vertex_pres);
gpu::free(vertex_color);
gpu::free(vertex_deg);
gpu::free(vertex_core_number);
// gpu::free(vertex_clique_number);
gpu::free(hd_data().src);
gpu::free(hd_data().dst);
hd_data().src = nullptr;
hd_data().dst = nullptr;
}
}
| 70111d43f166daf7b601b337e8edb6fc2d152306.cu | #include <Device/Util/Timer.cuh>
#include "Static/KCore/CoreNumbers.cuh"
#include <fstream>
#include <chrono>
#include <nvToolsExt.h>
#define DELETE 1
using namespace timer;
namespace hornets_nest {
KCore::KCore(HornetGraph &hornet) : // Constructor
StaticAlgorithm(hornet),
vqueue(hornet),
peel_vqueue(hornet),
active_queue(hornet),
iter_queue(hornet),
vertex_frontier(hornet),
load_balancing(hornet)
{
gpu::allocate(vertex_pres, hornet.nV()); // Creates arrays of length n for whether vertices are present, their current degree, and their color
gpu::allocate(vertex_color, hornet.nV());
gpu::allocate(vertex_deg, hornet.nV());
gpu::allocate(vertex_core_number, hornet.nV()); // Keep track of core numbers of vertices
gpu::allocate(vertex_nbhr_pointer, hornet.nV()); // Keep track of clique numbers of vertices
gpu::allocate(hd_data().src, hornet.nE()); // Allocate space for endpoints of edges and counter
gpu::allocate(hd_data().dst, hornet.nE());
gpu::allocate(hd_data().counter, 1);
gpu::allocate(edge_in_clique, hornet.nE());
gpu::allocate(vertex_nbhr_offsets, hornet.nV());
gpu::allocate(device_clique_size, 1);
}
KCore::~KCore() { // Deconstructor, frees up all GPU memory used by algorithm
gpu::free(vertex_pres);
gpu::free(vertex_color);
gpu::free(vertex_deg);
gpu::free(vertex_core_number);
gpu::free(vertex_nbhr_pointer);
gpu::free(hd_data().src);
gpu::free(hd_data().dst);
gpu::free(edge_in_clique);
gpu::free(vertex_nbhr_offsets);
gpu::free(device_clique_size);
}
// Why are we creating separate structs for all of these? Is it because each struct can only have 1 operator?
// I think that these structs are defined to do things with the member data of KCore object. But why structs?
struct ActiveVertices { // Create data structure to keep track of active vertices
vid_t *vertex_pres; // What is vid_t? Is it a vertex array?
vid_t *deg;
TwoLevelQueue<vid_t> active_queue; // What is a TwoLevelQueue? I think it has something to do with load balancing...
OPERATOR(Vertex &v) { // What does OPERATOR mean? Does it just do this for everything in the struct?
vid_t id = v.id(); // I think that it simply looks at a given vertex and if it has nonzero degree, pops it in the active queue, marks it as present, and adds its degree to the degree array.
if (v.degree() > 0) {
vertex_pres[id] = 1;
active_queue.insert(id);
deg[id] = v.degree();
}
}
};
struct FixedCoreNumVertices{
uint32_t *core_number;
uint32_t curr_coreness;
TwoLevelQueue<vid_t> vertex_frontier;
OPERATOR(Vertex &v){
vid_t id = v.id();
if(core_number[id] == curr_coreness){
vertex_frontier.insert(id);
}
}
};
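// Reserve deg(v) consecutive slots in the per-edge arrays for each vertex by
// atomically advancing a global counter; the offsets are compact but not in vertex order.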
struct InitializeOffsets{
int *vertex_nbhr_offsets;
// vid_t *vertex_degrees;
HostDeviceVar<KCoreData> hd;
OPERATOR(Vertex &v){
uint32_t deg = v.degree();
vid_t id = v.id();
vertex_nbhr_offsets[id] = atomicAdd(hd().counter, deg);
// atomicAdd(hd().counter, deg);
}
};
// bool check_clique(Vertex &v, Vertex &u){
// #pragma omp parallel for
// bool is_clique = true;
// for (Edge::iterator i = v.edge_begin(); i != v.edge_end(); i++){
// bool found = false;
// if (WeightT *i.weight() == 1){
// vid_t id = *i.src_id();
// #pragma omp parallel for
// for (Edge::iterator j = u.edge_begin(); j != u.edge_end(); j++){
// if (*j.dst_id() == id){
// bool found = true;
// }
// }
// if (!found){
// is_clique = false;
// }
// }
// }
// return is_clique;
// }
// __device__ __forceinline__
// void workPerBlock(vid_t numVertices,
// vid_t* __restrict__ outMpStart,
// vid_t* __restrict__ outMpEnd,
// int blockSize) {
// vid_t verticesPerMp = numVertices / gridDim.x;
// vid_t remainderBlocks = numVertices % gridDim.x;
// vid_t extraVertexBlocks = (blockIdx.x > remainderBlocks) ? remainderBlocks
// : blockIdx.x;
// vid_t regularVertexBlocks = (blockIdx.x > remainderBlocks) ?
// blockIdx.x - remainderBlocks : 0;
// vid_t mpStart = (verticesPerMp + 1) * extraVertexBlocks +
// verticesPerMp * regularVertexBlocks;
// *outMpStart = mpStart;
// *outMpEnd = mpStart + verticesPerMp + (blockIdx.x < remainderBlocks);
// }
// template<typename HornetDevice>
// __global__ void GetLocalClique(
// HornetDevice hornet ,
// uint32_t *core_number,
// int *vertex_nbhr_offsets,
// bool *edge_in_clique,
// uint32_t max_clique_size,
// int N){
// int k = threadIdx.x + blockIdx.x *blockDim.x;
// if (k>=N) return;
// for
// }
// run(){
// const int BLOCK_SIZE = 256;
// int blocks = (elements)/BLOCK_SIZE + (((elements)%BLOCK_SIZE)?1:0);
// GetLocalClique<<<blocks,BLOCK_SIZE>>>(hornet.device_side(),...,elements);
// }
struct GetPointersAndDegrees{
vid_t **vertex_nbhr_pointer;
vid_t *deg;
OPERATOR(Vertex &v){
vid_t id = v.id();
vertex_nbhr_pointer[id] = v.neighbor_ptr();
deg[id] = v.degree();
}
};
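// Greedy local clique growth: for each neighbor u of v, require that u is adjacent to
// every neighbor of v whose edge is already marked in edge_in_clique; if so, mark the
// edge (v,u) as in the clique and push the running size into device_clique_size via atomicMax.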
struct GetLocalClique{
uint32_t *core_number;
int *vertex_nbhr_offsets;
bool *edge_in_clique;
vid_t **vertex_nbhr_pointer;
vid_t *deg;
uint32_t *device_clique_size;
OPERATOR(Vertex &v){
// construct std::set of neighbors of current vertex
/* I want vertex properties that include:
whether a vertex was visited on current sweep and
whether it's in the max clique
*/
uint32_t curr_size = 1;
// Make sure vertex has coreness >= max_clique_size before inserting
vid_t* vNeighPtr = v.neighbor_ptr();
vid_t v_id = v.id();
vid_t length_v = deg[v_id];
int offset = vertex_nbhr_offsets[v_id];
for (vid_t i = 0; i < length_v; i++){
vid_t u_id = vNeighPtr[i];
// Get nbhr info for u
vid_t* uNeighPtr = vertex_nbhr_pointer[u_id];
vid_t length_u = deg[u_id];
// Ignore this vertex if core number is too low
// if (length_u < (vid_t)device_clique_size) continue;
// Loop through nbhrs of v currently in the clique and check whether they are also nbhrs of u
// #pragma omp parallel for
bool is_clique = true;
bool found = false;
for (vid_t j = 0; j < length_v; j++){
found = false;
if (edge_in_clique[offset - length_v + j]){
vid_t w_id = vNeighPtr[j];
// if (deg[w_id] < (vid_t)device_clique_size) continue;
// #pragma omp parallel for
for (vid_t k = 0; k < length_u; k++){
if (uNeighPtr[k] == w_id){
found = true;
break;
}
}
if (!found){
is_clique = false;
break;
}
}
}
// Check if nbhrs with coreness >= max_clique_size are part of a clique
// If so, increment clique size
if (is_clique){
edge_in_clique[offset + i - length_v] = true;
curr_size += 1;
// printf("Adding vertex to clique \n");
atomicMax(device_clique_size, curr_size);
// if (v_id < 100000) printf("Vertex added!\n");
}
}
}
};
struct PeelVertices { // Data structure to keep track of vertices to peel off
vid_t *vertex_pres;
vid_t *deg;
uint32_t peel;
TwoLevelQueue<vid_t> peel_queue;
TwoLevelQueue<vid_t> iter_queue;
OPERATOR(Vertex &v) { // Mark present vertices with insufficient degree for peeling
vid_t id = v.id();
if (vertex_pres[id] == 1 && deg[id] <= peel) {
vertex_pres[id] = 2;
peel_queue.insert(id);
iter_queue.insert(id);
}
}
};
struct CoreRemoveVertices { // Data structure to keep track of vertices to peel off
vid_t *vertex_pres;
uint32_t *core_number;
uint32_t peel;
OPERATOR(Vertex &v) { // Mark present vertices with insufficient degree for peeling
vid_t id = v.id();
if (vertex_pres[id] >= 1 && core_number[id] < peel) {
vertex_pres[id] = 0;
}
}
};
struct RemovePres { // Struct to remove vertices marked by PeelVertices
vid_t *vertex_pres;
OPERATOR(Vertex &v) {
vid_t id = v.id();
if (vertex_pres[id] == 2) {
vertex_pres[id] = 0;
}
}
};
struct DecrementDegree { // Struct to decrement degrees of every vertex attached to a removed vertex
vid_t *deg;
OPERATOR(Vertex &v, Edge &e) {
vid_t src = v.id();
vid_t dst = e.dst_id();
atomicAdd(&deg[src], -1);
atomicAdd(&deg[dst], -1);
}
};
struct UpdateCoreNumber{ // Update the core number of each vertex peeled off in current iteration
uint32_t *core_number;
vid_t *vertex_pres;
uint32_t peel;
OPERATOR(Vertex &v){
vid_t id = v.id();
if (vertex_pres[id] == 2){
core_number[id] = peel;
}
}
};
struct ExtractSubgraph { // Struct to extract subgraph of vertices that get peeled off? (Why not the other way around?)
HostDeviceVar<KCoreData> hd;
vid_t *vertex_pres;
OPERATOR(Vertex &v, Edge &e) {
vid_t src = v.id();
vid_t dst = e.dst_id();
if (vertex_pres[src] == 2 && vertex_pres[dst] == 2) {
int spot = atomicAdd(hd().counter, 1); // What do atomic operations do?
hd().src[spot] = src; // We do still keep the vertex numbers of the marked vertices
hd().dst[spot] = dst;
}
}
};
struct GetDegOne { // Mark all vertices of degree 1
TwoLevelQueue<vid_t> vqueue;
vid_t *vertex_color;
OPERATOR(Vertex &v) {
vid_t id = v.id();
if (v.degree() == 1) {
vqueue.insert(id);
vertex_color[id] = 1;
}
}
};
struct DegOneEdges {
HostDeviceVar<KCoreData> hd;
vid_t *vertex_color;
OPERATOR(Vertex &v, Edge &e) { // I think that this edge should be coming from the given vertex
vid_t src = v.id();
vid_t dst = e.dst_id();
if (vertex_color[src] || vertex_color[dst]) { // If one of the endpoints has degree 1, add to subgraph hd
int spot = atomicAdd(hd().counter, 1);
hd().src[spot] = src;
hd().dst[spot] = dst;
if (!vertex_color[src] || !vertex_color[dst]) { // Get different subgraph for ones for which both endpoints are not degree 1
int spot_rev = atomicAdd(hd().counter, 1);
hd().src[spot_rev] = dst;
hd().dst[spot_rev] = src;
}
}
}
};
struct SmallCoreEdges {
vid_t *vertex_pres;
HostDeviceVar<KCoreData> hd;
OPERATOR(Vertex &v, Edge &e){
vid_t src = v.id();
vid_t dst = e.dst_id();
if (!vertex_pres[src] || !vertex_pres[dst]){
int spot = atomicAdd(hd().counter, 1);
hd().src[spot] = src;
hd().dst[spot] = dst;
if (vertex_pres[src] || vertex_pres[dst]){
int spot_rev = atomicAdd(hd().counter, 1);
hd().src[spot_rev] = dst;
hd().dst[spot_rev] = src;
}
}
}
};
struct ResetWeight { // Reset edge weight to 0 after finding current iteration of cliques
OPERATOR(Vertex &v, Edge &e){
e.set_weight(0);
}
};
void KCore::reset() { // What does this do?
vqueue.swap();
peel_vqueue.swap();
active_queue.swap();
iter_queue.swap();
}
// Delete all edges in given batch
void oper_bidirect_batch(HornetGraph &hornet, vid_t *src, vid_t *dst,
int size, uint8_t op) {
gpu::BatchUpdate batch_update(src, dst, size, gpu::BatchType::DEVICE); //What does specifying this GPU BatchUpdate object do?
// Delete edges in the forward and backward directions.
hornet.deleteEdgeBatch(batch_update, gpu::batch_property::IN_PLACE); // What do you mean "in forward and backward directions?"
}
void get_core_numbers(HornetGraph &hornet,
TwoLevelQueue<vid_t> &peel_queue,
TwoLevelQueue<vid_t> &active_queue,
TwoLevelQueue<vid_t> &iter_queue,
load_balancing::VertexBased1 load_balancing,
vid_t *deg,
vid_t *vertex_pres,
uint32_t *core_number,
uint32_t *max_peel){
forAllVertices(hornet, ActiveVertices { vertex_pres, deg, active_queue }); // Get active vertices in parallel (puts in input queue)
active_queue.swap(); // Swap input to output queue
int n_active = active_queue.size();
uint32_t peel = 0;
while (n_active > 0) {
// Why do we use a particular queue in forAllVertices? Does it go through all vertices in this queue?
forAllVertices(hornet, active_queue,
PeelVertices { vertex_pres, deg, peel, peel_queue, iter_queue} );
iter_queue.swap();
n_active -= iter_queue.size();
if (iter_queue.size() == 0) {
peel++; // Once we have removed all vertices with core <= current peel, increment peel
peel_queue.swap();
// Shouldn't this be the peel_queue? If not, why?
// Would this be faster if it were peel_queue?
forAllVertices(hornet, peel_queue, UpdateCoreNumber { core_number, vertex_pres, peel });
forAllVertices(hornet, peel_queue, RemovePres { vertex_pres }); // Why do we never update the active queue? Does this modify its data in some way?
} else {
forAllEdges(hornet, iter_queue, DecrementDegree { deg }, load_balancing); // Go through vertices in iter_queue and decrement the degree of their nbhrs
}
}
*max_peel = peel;
}
void max_clique_heuristic(HornetGraph &hornet,
HostDeviceVar<KCoreData>& hd,
TwoLevelQueue<vid_t> &vertex_frontier,
load_balancing::VertexBased1 load_balancing,
vid_t *vertex_pres,
uint32_t *core_number,
int *vertex_nbhr_offsets,
vid_t **vertex_nbhr_pointer,
vid_t *vertex_degree,
bool *edge_in_clique,
uint32_t *device_clique_size,
uint32_t *max_clique_size,
uint32_t *peel,
int *batch_size){
// // uint32_t curr_peel = *peel;
uint32_t clique_size = 0;
clique_size++;
// while (vertex_frontier.size() == 0){
// std::cout << "Peel: " << *peel << std::endl;
forAllVertices(hornet, FixedCoreNumVertices{ core_number, *peel, vertex_frontier });
// std::cout << "Vertex Frontier Size before swap: " << vertex_frontier.size() << std::endl;
vertex_frontier.swap();
// std::cout << "Vertex Frontier Size after swap: " << vertex_frontier.size() << std::endl;
if (vertex_frontier.size() > 0) {
// Get clique numbers of vertices of frontier core number
forAllVertices(hornet, vertex_frontier, GetLocalClique { core_number, vertex_nbhr_offsets, edge_in_clique, vertex_nbhr_pointer, vertex_degree, device_clique_size });
// Remove vertices without sufficiently high core number
// uint32_t *curr_max = clique_size;
forAllVertices(hornet, CoreRemoveVertices { vertex_pres, core_number, clique_size });
forAllEdges(hornet, SmallCoreEdges { vertex_pres, hd }, load_balancing);
}
// peel--;
// }
int size = 0;
cudaMemcpy(&size, hd().counter, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&clique_size, hd().counter, sizeof(int), cudaMemcpyDeviceToHost);
*batch_size = size;
*max_clique_size = clique_size;
// std::cout << "Max Clique Found: " << max_clique_size << std::endl;
}
void KCore::run() {
omp_set_num_threads(72);
vid_t *src = new vid_t[hornet.nE()];
vid_t *dst = new vid_t[hornet.nE()];
uint32_t len = hornet.nE() / 2 + 1;
// uint32_t peel = new uint32_t;
uint32_t ne = hornet.nE();
std::cout << "ne: " << ne << std::endl;
// uint32_t max_clique_size = new uint32_t;
auto pres = vertex_pres;
auto deg = vertex_deg;
auto color = vertex_color;
auto core_number = vertex_core_number;
auto offsets = vertex_nbhr_offsets;
vid_t** nbhr_pointer = vertex_nbhr_pointer;
auto clique_edges = edge_in_clique;
uint32_t* temp_clique_size = device_clique_size;
gpu::memsetZero(temp_clique_size);
// What does this do?
forAllnumV(hornet, [=] __device__ (int i){ pres[i] = 0; } );
forAllnumV(hornet, [=] __device__ (int i){ deg[i] = 0; } );
forAllnumV(hornet, [=] __device__ (int i){ color[i] = 0; } );
forAllnumV(hornet, [=] __device__ (int i){ offsets[i] = 0; } );
// forAllnumV(hornet, [=] __device__ (int* i){ nbhr_pointer[i] = 0; } );
forAllnumE(hornet, [=] __device__ (int i){ clique_edges[i] = false; } );
Timer<DEVICE> TM;
Timer<DEVICE> Tclique;
TM.start();
forAllVertices(hornet, InitializeOffsets { offsets, hd_data });
/* Begin degree 1 vertex preprocessing optimization */
// Find vertices of degree 1.
forAllVertices(hornet, GetDegOne { vqueue, vertex_color });
vqueue.swap();
gpu::memsetZero(hd_data().counter);
// Find the edges incident to these vertices.
gpu::memsetZero(hd_data().counter); // reset counter.
forAllEdges(hornet, vqueue,
DegOneEdges { hd_data, vertex_color }, load_balancing);
// Mark edges with peel 1.
int peel_one_count = 0;
cudaMemcpy(&peel_one_count, hd_data().counter, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(src, hd_data().src, hornet.nV() * sizeof(vid_t),
cudaMemcpyDeviceToHost);
cudaMemcpy(dst, hd_data().dst, hornet.nV() * sizeof(vid_t),
cudaMemcpyDeviceToHost);
// Delete peel 1 edges.
oper_bidirect_batch(hornet, hd_data().src, hd_data().dst, peel_one_count, DELETE);
// Get vertex core numbers
uint32_t peel = 0;
uint32_t max_clique_size = 0;
max_clique_size++;
// uint32_t temp_clique_size;
get_core_numbers(hornet, peel_vqueue, active_queue, iter_queue,
load_balancing, deg, vertex_pres, core_number, &peel);
gpu::memsetZero(hd_data().counter);
TM.stop();
TM.print("CoreNumbers");
// // Get active vertices (with clique number > 0)
// forAllVertices(hornet, CoreActiveVertices { vertex_pres, core_number, active_queue }); // Get active vertices in parallel (puts in input queue)
// active_queue.swap(); // Swap input to output queue
// int n_active = active_queue.size();
Tclique.start();
// Begin actual clique heuristic algorithm
int iter = 1;
while (peel >= max_clique_size) {
int batch_size = 0;
// std::cout << "Vertex Pointers Not Initialized Yet" << std::endl;
forAllVertices(hornet, GetPointersAndDegrees { nbhr_pointer, deg });
// forAllVertices(hornet, InitializeOffsets { offsets, hd_data });
// std::cout << "Initialized Vertex Pointers" << std::endl;
// auto start = std::chrono::high_resolution_clock::now();
max_clique_heuristic(hornet, hd_data, vertex_frontier, load_balancing,
vertex_pres, vertex_core_number, offsets, nbhr_pointer,
deg, clique_edges, temp_clique_size, &max_clique_size, &peel, &batch_size);
// auto finish = std::chrono::high_resolution_clock::now();
// std::chrono::duration<double> elapsed = finish - start;
// std::cout << "Iteration " << iter << ": " << elapsed.count() << "s. \n";
cudaMemcpy(&max_clique_size, temp_clique_size, sizeof(int), cudaMemcpyDeviceToHost);
// // atomicMax(&max_clique_size, temp_clique_size);
// if (temp_clique_size > max_clique_size) {
// max_clique_size = temp_clique_size;
// }
// std::cout << "CurrentMaxClique: " << max_clique_size << "\n";
if (batch_size > 0){
oper_bidirect_batch(hornet, hd_data().src, hd_data().dst, batch_size, DELETE);
gpu::memsetZero(hd_data().counter);
}
peel--;
iter++;
}
Tclique.stop();
Tclique.print("CliqueHeuristic");
std::cout << "MaxCliqueFound: " << max_clique_size << std::endl;
}
void KCore::release() {
gpu::free(vertex_pres);
gpu::free(vertex_color);
gpu::free(vertex_deg);
gpu::free(vertex_core_number);
// gpu::free(vertex_clique_number);
gpu::free(hd_data().src);
gpu::free(hd_data().dst);
hd_data().src = nullptr;
hd_data().dst = nullptr;
}
}
|
9737c888d90d8be0963564d8943b5c8b6df93a34.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander
#include "NeighborListGPUBinned.cuh"
#include "TextureTools.h"
/*! \file NeighborListGPUBinned.cu
\brief Defines GPU kernel code for O(N) neighbor list generation on the GPU
*/
//! Texture for reading d_cell_xyzf
scalar4_tex_t cell_xyzf_1d_tex;
//! Warp-centric scan (Kepler and later)
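//! Computes a prefix sum over NT consecutive lanes with __shfl: returns this lane's
//! exclusive prefix of x and writes the NT-lane total to *total.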
template<int NT>
struct warp_scan_sm30
{
__device__ static int Scan(int tid, unsigned char x, unsigned char* total)
{
unsigned int laneid;
//This command gets the lane ID within the current warp
asm("mov.u32 %0, %%laneid;" : "=r"(laneid));
int first = laneid - tid;
#pragma unroll
for(int offset = 1; offset < NT; offset += offset)
{
int y = __shfl(x,(first + tid - offset) &(WARP_SIZE -1));
if(tid >= offset) x += y;
}
// all threads get the total from the last thread in the cta
*total = __shfl(x,first + NT - 1);
// shift by one (exclusive scan)
int y = __shfl(x,(first + tid - 1) &(WARP_SIZE-1));
x = tid ? y : 0;
return x;
}
};
//! Kernel call for generating neighbor list on the GPU (Kepler optimized version)
/*! \tparam flags Set bit 1 to enable body filtering. Set bit 2 to enable diameter filtering.
\param d_nlist Neighbor list data structure to write
\param d_n_neigh Number of neighbors to write
\param d_last_updated_pos Particle positions at this update are written to this array
\param d_conditions Conditions array for writing overflow condition
\param d_Nmax Maximum number of neighbors per type
\param d_head_list List of indexes to access \a d_nlist
\param d_pos Particle positions
\param d_body Particle body indices
\param d_diameter Particle diameters
\param N Number of particles
\param d_cell_size Number of particles in each cell
\param d_cell_xyzf Cell contents (xyzf array from CellList with flag=type)
\param d_cell_tdb Cell contents (tdb array from CellList)
\param d_cell_adj Cell adjacency list
\param ci Cell indexer for indexing cells
\param cli Cell list indexer for indexing into d_cell_xyzf
\param cadji Adjacent cell indexer listing the 27 neighboring cells
\param box Simulation box dimensions
\param d_r_cut Cutoff radius stored by pair type r_cut(i,j)
\param r_buff The maximum radius for which to include particles as neighbors
\param ntypes Number of particle types
\param ghost_width Width of ghost cell layer
\note optimized for Kepler
*/
template<unsigned char flags, int threads_per_particle>
__global__ void gpu_compute_nlist_binned_kernel(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width)
{
bool filter_body = flags & 1;
bool diameter_shift = flags & 2;
// cache the r_listsq parameters into shared memory
Index2D typpair_idx(ntypes);
const unsigned int num_typ_parameters = typpair_idx.getNumElements();
// shared data for per type pair parameters
extern __shared__ unsigned char s_data[];
// pointer for the r_listsq data
Scalar *s_r_list = (Scalar *)(&s_data[0]);
unsigned int *s_Nmax = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters]);
// load in the per type pair r_list
for (unsigned int cur_offset = 0; cur_offset < num_typ_parameters; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < num_typ_parameters)
{
Scalar r_cut = d_r_cut[cur_offset + threadIdx.x];
// force the r_list(i,j) to a skippable value if r_cut(i,j) is skippable
s_r_list[cur_offset + threadIdx.x] = (r_cut > Scalar(0.0)) ? r_cut+r_buff : Scalar(-1.0);
}
if (cur_offset + threadIdx.x < ntypes)
{
s_Nmax[cur_offset + threadIdx.x] = d_Nmax[cur_offset + threadIdx.x];
}
}
__syncthreads();
// each set of threads_per_particle threads is going to compute the neighbor list for a single particle
int my_pidx;
if (gridDim.y > 1)
{
// fermi workaround
my_pidx = (blockIdx.x + blockIdx.y*65535) * (blockDim.x/threads_per_particle) + threadIdx.x/threads_per_particle;
}
else
{
my_pidx = blockIdx.x * (blockDim.x/threads_per_particle) + threadIdx.x/threads_per_particle;
}
// one thread per particle
if (my_pidx >= N) return;
Scalar4 my_postype = d_pos[my_pidx];
Scalar3 my_pos = make_scalar3(my_postype.x, my_postype.y, my_postype.z);
unsigned int my_type = __scalar_as_int(my_postype.w);
unsigned int my_body = d_body[my_pidx];
Scalar my_diam = d_diameter[my_pidx];
unsigned int my_head = d_head_list[my_pidx];
Scalar3 f = box.makeFraction(my_pos, ghost_width);
// find the bin each particle belongs in
int ib = (int)(f.x * ci.getW());
int jb = (int)(f.y * ci.getH());
int kb = (int)(f.z * ci.getD());
uchar3 periodic = box.getPeriodic();
// need to handle the case where the particle is exactly at the box hi
if (ib == ci.getW() && periodic.x)
ib = 0;
if (jb == ci.getH() && periodic.y)
jb = 0;
if (kb == ci.getD() && periodic.z)
kb = 0;
int my_cell = ci(ib,jb,kb);
// index of current neighbor
unsigned int cur_adj = 0;
// current cell
unsigned int neigh_cell = d_cell_adj[cadji(cur_adj, my_cell)];
// size of current cell
unsigned int neigh_size = d_cell_size[neigh_cell];
// current index in cell
int cur_offset = threadIdx.x % threads_per_particle;
bool done = false;
// total number of neighbors
unsigned int nneigh = 0;
while (! done)
{
// initialize with default
unsigned int neighbor;
unsigned char has_neighbor = 0;
// advance neighbor cell
while (cur_offset >= neigh_size && !done )
{
cur_offset -= neigh_size;
cur_adj++;
if (cur_adj < cadji.getW())
{
neigh_cell = d_cell_adj[cadji(cur_adj, my_cell)];
neigh_size = d_cell_size[neigh_cell];
}
else
// we are past the end of the cell neighbors
done = true;
}
// if the first thread in the cta has no work, terminate the loop
if (done && !(threadIdx.x % threads_per_particle)) break;
if (!done)
{
Scalar4 cur_xyzf = texFetchScalar4(d_cell_xyzf, cell_xyzf_1d_tex, cli(cur_offset, neigh_cell));
Scalar4 cur_tdb = d_cell_tdb[cli(cur_offset, neigh_cell)];
// advance cur_offset
cur_offset += threads_per_particle;
unsigned int neigh_type = __scalar_as_int(cur_tdb.x);
// Only do the hard work if the particle should be included by r_cut(i,j)
Scalar r_list = s_r_list[typpair_idx(my_type,neigh_type)];
if (r_list > Scalar(0.0))
{
Scalar neigh_diam = cur_tdb.y;
unsigned int neigh_body = __scalar_as_int(cur_tdb.z);
Scalar3 neigh_pos = make_scalar3(cur_xyzf.x,
cur_xyzf.y,
cur_xyzf.z);
int cur_neigh = __scalar_as_int(cur_xyzf.w);
// compute the distance between the two particles
Scalar3 dx = my_pos - neigh_pos;
// wrap the periodic boundary conditions
dx = box.minImage(dx);
// compute dr squared
Scalar drsq = dot(dx,dx);
bool excluded = (my_pidx == cur_neigh);
if (filter_body && my_body != 0xffffffff)
excluded = excluded | (my_body == neigh_body);
Scalar sqshift = Scalar(0.0);
if (diameter_shift)
{
const Scalar delta = (my_diam + neigh_diam) * Scalar(0.5) - Scalar(1.0);
// r^2 < (r_list + delta)^2
// r^2 < r_listsq + delta^2 + 2*r_list*delta
sqshift = (delta + Scalar(2.0) * r_list) * delta;
}
// store result in shared memory
if (drsq <= (r_list*r_list + sqshift) && !excluded)
{
neighbor = cur_neigh;
has_neighbor = 1;
}
}
}
// no syncthreads here, we assume threads_per_particle < warp size
// scan over flags
int k = 0;
#if (__CUDA_ARCH__ >= 300)
unsigned char n = 1;
k = warp_scan_sm30<threads_per_particle>::Scan(threadIdx.x % threads_per_particle, has_neighbor, &n);
#endif
if (has_neighbor && (nneigh + k) < s_Nmax[my_type])
d_nlist[my_head + nneigh + k] = neighbor;
// increment total neighbor count
#if (__CUDA_ARCH__ >= 300)
nneigh += n;
#else
if (has_neighbor)
nneigh++;
#endif
} // end while
if (threadIdx.x % threads_per_particle == 0)
{
// flag if we need to grow the neighbor list
if (nneigh >= s_Nmax[my_type])
atomicMax(&d_conditions[my_type], nneigh);
d_n_neigh[my_pidx] = nneigh;
d_last_updated_pos[my_pidx] = my_postype;
}
}
//! determine maximum possible block size
template<typename T>
int get_max_block_size(T func)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)func);
int max_threads = attr.maxThreadsPerBlock;
// number of threads has to be multiple of warp size
max_threads -= max_threads % max_threads_per_particle;
return max_threads;
}
void gpu_nlist_binned_bind_texture(const Scalar4 *d_cell_xyzf, unsigned int n_elements)
{
// bind the position texture
cell_xyzf_1d_tex.normalized = false;
cell_xyzf_1d_tex.filterMode = hipFilterModePoint;
hipBindTexture(0, cell_xyzf_1d_tex, d_cell_xyzf, sizeof(Scalar4)*n_elements);
}
//! recursive template to launch neighborlist with given template parameters
/* \tparam cur_tpp Number of threads per particle (assumed to be power of two) */
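/* The recursion halves cur_tpp until it matches the requested tpp and then launches the
kernel with that thread-per-particle count; the specialization for
min_threads_per_particle/2 below terminates the recursion without launching. */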
template<int cur_tpp>
inline void launcher(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width,
const unsigned int compute_capability,
unsigned int tpp,
bool filter_body,
bool diameter_shift,
unsigned int block_size)
{
// shared memory = r_listsq + Nmax + stuff needed for neighborlist (computed below)
Index2D typpair_idx(ntypes);
unsigned int shared_size = sizeof(Scalar)*typpair_idx.getNumElements() + sizeof(unsigned int)*ntypes;
if (tpp == cur_tpp && cur_tpp != 0)
{
if (!diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<0,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<0,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
else if (!diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<1,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<1,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
else if (diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<2,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<2,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
else if (diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<3,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<3,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
}
else
{
launcher<cur_tpp/2>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
compute_capability,
tpp,
filter_body,
diameter_shift,
block_size
);
}
}
//! template specialization to terminate recursion
template<>
inline void launcher<min_threads_per_particle/2>(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width,
const unsigned int compute_capability,
unsigned int tpp,
bool filter_body,
bool diameter_shift,
unsigned int block_size)
{ }
hipError_t gpu_compute_nlist_binned(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D& ci,
const Index2D& cli,
const Index2D& cadji,
const BoxDim& box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const unsigned int threads_per_particle,
const unsigned int block_size,
bool filter_body,
bool diameter_shift,
const Scalar3& ghost_width,
const unsigned int compute_capability)
{
launcher<max_threads_per_particle>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
compute_capability,
threads_per_particle,
filter_body,
diameter_shift,
block_size
);
return hipSuccess;
}
| 9737c888d90d8be0963564d8943b5c8b6df93a34.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander
#include "NeighborListGPUBinned.cuh"
#include "TextureTools.h"
/*! \file NeighborListGPUBinned.cu
\brief Defines GPU kernel code for O(N) neighbor list generation on the GPU
*/
//! Texture for reading d_cell_xyzf
scalar4_tex_t cell_xyzf_1d_tex;
//! Warp-centric scan (Kepler and later)
template<int NT>
struct warp_scan_sm30
{
__device__ static int Scan(int tid, unsigned char x, unsigned char* total)
{
unsigned int laneid;
//This command gets the lane ID within the current warp
asm("mov.u32 %0, %%laneid;" : "=r"(laneid));
int first = laneid - tid;
#pragma unroll
for(int offset = 1; offset < NT; offset += offset)
{
int y = __shfl(x,(first + tid - offset) &(WARP_SIZE -1));
if(tid >= offset) x += y;
}
// all threads get the total from the last thread in the cta
*total = __shfl(x,first + NT - 1);
// shift by one (exclusive scan)
int y = __shfl(x,(first + tid - 1) &(WARP_SIZE-1));
x = tid ? y : 0;
return x;
}
};
//! Kernel call for generating neighbor list on the GPU (Kepler optimized version)
/*! \tparam flags Set bit 1 to enable body filtering. Set bit 2 to enable diameter filtering.
\param d_nlist Neighbor list data structure to write
\param d_n_neigh Number of neighbors to write
\param d_last_updated_pos Particle positions at this update are written to this array
\param d_conditions Conditions array for writing overflow condition
\param d_Nmax Maximum number of neighbors per type
\param d_head_list List of indexes to access \a d_nlist
\param d_pos Particle positions
\param d_body Particle body indices
\param d_diameter Particle diameters
\param N Number of particles
\param d_cell_size Number of particles in each cell
\param d_cell_xyzf Cell contents (xyzf array from CellList with flag=type)
\param d_cell_tdb Cell contents (tdb array from CellList)
\param d_cell_adj Cell adjacency list
\param ci Cell indexer for indexing cells
\param cli Cell list indexer for indexing into d_cell_xyzf
\param cadji Adjacent cell indexer listing the 27 neighboring cells
\param box Simulation box dimensions
\param d_r_cut Cutoff radius stored by pair type r_cut(i,j)
\param r_buff The maximum radius for which to include particles as neighbors
\param ntypes Number of particle types
\param ghost_width Width of ghost cell layer
\note optimized for Kepler
*/
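// Each group of threads_per_particle threads cooperatively builds the list for one
// particle: lanes stride through the adjacent cells' contents, apply the r_list test,
// and compact their hits into d_nlist (via the warp scan on sm_30 and later).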
template<unsigned char flags, int threads_per_particle>
__global__ void gpu_compute_nlist_binned_kernel(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width)
{
bool filter_body = flags & 1;
bool diameter_shift = flags & 2;
// cache the r_listsq parameters into shared memory
Index2D typpair_idx(ntypes);
const unsigned int num_typ_parameters = typpair_idx.getNumElements();
// shared data for per type pair parameters
extern __shared__ unsigned char s_data[];
// pointer for the r_listsq data
Scalar *s_r_list = (Scalar *)(&s_data[0]);
unsigned int *s_Nmax = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters]);
// load in the per type pair r_list
for (unsigned int cur_offset = 0; cur_offset < num_typ_parameters; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < num_typ_parameters)
{
Scalar r_cut = d_r_cut[cur_offset + threadIdx.x];
// force the r_list(i,j) to a skippable value if r_cut(i,j) is skippable
s_r_list[cur_offset + threadIdx.x] = (r_cut > Scalar(0.0)) ? r_cut+r_buff : Scalar(-1.0);
}
if (cur_offset + threadIdx.x < ntypes)
{
s_Nmax[cur_offset + threadIdx.x] = d_Nmax[cur_offset + threadIdx.x];
}
}
__syncthreads();
// each set of threads_per_particle threads is going to compute the neighbor list for a single particle
int my_pidx;
if (gridDim.y > 1)
{
// fermi workaround
my_pidx = (blockIdx.x + blockIdx.y*65535) * (blockDim.x/threads_per_particle) + threadIdx.x/threads_per_particle;
}
else
{
my_pidx = blockIdx.x * (blockDim.x/threads_per_particle) + threadIdx.x/threads_per_particle;
}
// one thread per particle
if (my_pidx >= N) return;
Scalar4 my_postype = d_pos[my_pidx];
Scalar3 my_pos = make_scalar3(my_postype.x, my_postype.y, my_postype.z);
unsigned int my_type = __scalar_as_int(my_postype.w);
unsigned int my_body = d_body[my_pidx];
Scalar my_diam = d_diameter[my_pidx];
unsigned int my_head = d_head_list[my_pidx];
Scalar3 f = box.makeFraction(my_pos, ghost_width);
// find the bin each particle belongs in
int ib = (int)(f.x * ci.getW());
int jb = (int)(f.y * ci.getH());
int kb = (int)(f.z * ci.getD());
uchar3 periodic = box.getPeriodic();
// need to handle the case where the particle is exactly at the box hi
if (ib == ci.getW() && periodic.x)
ib = 0;
if (jb == ci.getH() && periodic.y)
jb = 0;
if (kb == ci.getD() && periodic.z)
kb = 0;
int my_cell = ci(ib,jb,kb);
// index of current neighbor
unsigned int cur_adj = 0;
// current cell
unsigned int neigh_cell = d_cell_adj[cadji(cur_adj, my_cell)];
// size of current cell
unsigned int neigh_size = d_cell_size[neigh_cell];
// current index in cell
int cur_offset = threadIdx.x % threads_per_particle;
bool done = false;
// total number of neighbors
unsigned int nneigh = 0;
while (! done)
{
// initialize with default
unsigned int neighbor;
unsigned char has_neighbor = 0;
// advance neighbor cell
while (cur_offset >= neigh_size && !done )
{
cur_offset -= neigh_size;
cur_adj++;
if (cur_adj < cadji.getW())
{
neigh_cell = d_cell_adj[cadji(cur_adj, my_cell)];
neigh_size = d_cell_size[neigh_cell];
}
else
// we are past the end of the cell neighbors
done = true;
}
// if the first thread in the cta has no work, terminate the loop
if (done && !(threadIdx.x % threads_per_particle)) break;
if (!done)
{
Scalar4 cur_xyzf = texFetchScalar4(d_cell_xyzf, cell_xyzf_1d_tex, cli(cur_offset, neigh_cell));
Scalar4 cur_tdb = d_cell_tdb[cli(cur_offset, neigh_cell)];
// advance cur_offset
cur_offset += threads_per_particle;
unsigned int neigh_type = __scalar_as_int(cur_tdb.x);
// Only do the hard work if the particle should be included by r_cut(i,j)
Scalar r_list = s_r_list[typpair_idx(my_type,neigh_type)];
if (r_list > Scalar(0.0))
{
Scalar neigh_diam = cur_tdb.y;
unsigned int neigh_body = __scalar_as_int(cur_tdb.z);
Scalar3 neigh_pos = make_scalar3(cur_xyzf.x,
cur_xyzf.y,
cur_xyzf.z);
int cur_neigh = __scalar_as_int(cur_xyzf.w);
// compute the distance between the two particles
Scalar3 dx = my_pos - neigh_pos;
// wrap the periodic boundary conditions
dx = box.minImage(dx);
// compute dr squared
Scalar drsq = dot(dx,dx);
bool excluded = (my_pidx == cur_neigh);
if (filter_body && my_body != 0xffffffff)
excluded = excluded | (my_body == neigh_body);
Scalar sqshift = Scalar(0.0);
if (diameter_shift)
{
const Scalar delta = (my_diam + neigh_diam) * Scalar(0.5) - Scalar(1.0);
// r^2 < (r_list + delta)^2
// r^2 < r_listsq + delta^2 + 2*r_list*delta
sqshift = (delta + Scalar(2.0) * r_list) * delta;
}
// store result in shared memory
if (drsq <= (r_list*r_list + sqshift) && !excluded)
{
neighbor = cur_neigh;
has_neighbor = 1;
}
}
}
// no syncthreads here, we assume threads_per_particle < warp size
// scan over flags
int k = 0;
#if (__CUDA_ARCH__ >= 300)
unsigned char n = 1;
k = warp_scan_sm30<threads_per_particle>::Scan(threadIdx.x % threads_per_particle, has_neighbor, &n);
#endif
if (has_neighbor && (nneigh + k) < s_Nmax[my_type])
d_nlist[my_head + nneigh + k] = neighbor;
// increment total neighbor count
#if (__CUDA_ARCH__ >= 300)
nneigh += n;
#else
if (has_neighbor)
nneigh++;
#endif
} // end while
if (threadIdx.x % threads_per_particle == 0)
{
// flag if we need to grow the neighbor list
if (nneigh >= s_Nmax[my_type])
atomicMax(&d_conditions[my_type], nneigh);
d_n_neigh[my_pidx] = nneigh;
d_last_updated_pos[my_pidx] = my_postype;
}
}
//! determine maximum possible block size
template<typename T>
int get_max_block_size(T func)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)func);
int max_threads = attr.maxThreadsPerBlock;
// number of threads has to be multiple of warp size
max_threads -= max_threads % max_threads_per_particle;
return max_threads;
}
void gpu_nlist_binned_bind_texture(const Scalar4 *d_cell_xyzf, unsigned int n_elements)
{
// bind the position texture
cell_xyzf_1d_tex.normalized = false;
cell_xyzf_1d_tex.filterMode = cudaFilterModePoint;
cudaBindTexture(0, cell_xyzf_1d_tex, d_cell_xyzf, sizeof(Scalar4)*n_elements);
}
//! recursive template to launch neighborlist with given template parameters
/* \tparam cur_tpp Number of threads per particle (assumed to be power of two) */
template<int cur_tpp>
inline void launcher(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width,
const unsigned int compute_capability,
unsigned int tpp,
bool filter_body,
bool diameter_shift,
unsigned int block_size)
{
// shared memory = r_listsq + Nmax + stuff needed for neighborlist (computed below)
Index2D typpair_idx(ntypes);
unsigned int shared_size = sizeof(Scalar)*typpair_idx.getNumElements() + sizeof(unsigned int)*ntypes;
if (tpp == cur_tpp && cur_tpp != 0)
{
if (!diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<0,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
gpu_compute_nlist_binned_kernel<0,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
else if (!diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<1,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
gpu_compute_nlist_binned_kernel<1,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
else if (diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<2,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
gpu_compute_nlist_binned_kernel<2,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
else if (diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<3,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
gpu_compute_nlist_binned_kernel<3,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
}
else
{
launcher<cur_tpp/2>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
compute_capability,
tpp,
filter_body,
diameter_shift,
block_size
);
}
}
//! template specialization to terminate recursion
template<>
inline void launcher<min_threads_per_particle/2>(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width,
const unsigned int compute_capability,
unsigned int tpp,
bool filter_body,
bool diameter_shift,
unsigned int block_size)
{ }
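//! The launcher() recursion above is an instance of a common idiom: matching a runtime
//! threads-per-particle value against a compile-time candidate, halving the candidate until
//! they agree. A minimal, self-contained sketch of the idiom (illustrative names only, not
//! part of this file's API; the actual kernel launch is elided):
template<int cur_tpp_sketch>
inline void launcher_sketch(unsigned int tpp)
    {
    if (tpp == cur_tpp_sketch)
        {
        // here the kernel compiled for cur_tpp_sketch threads per particle would be launched,
        // e.g. some_kernel<cur_tpp_sketch><<<grid, block, shared>>>(...);
        }
    else
        {
        // recurse at compile time with the next smaller power of two
        launcher_sketch<cur_tpp_sketch/2>(tpp);
        }
    }
//! terminates the compile-time recursion of the sketch above
template<>
inline void launcher_sketch<0>(unsigned int)
    { }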
cudaError_t gpu_compute_nlist_binned(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D& ci,
const Index2D& cli,
const Index2D& cadji,
const BoxDim& box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const unsigned int threads_per_particle,
const unsigned int block_size,
bool filter_body,
bool diameter_shift,
const Scalar3& ghost_width,
const unsigned int compute_capability)
{
launcher<max_threads_per_particle>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
compute_capability,
threads_per_particle,
filter_body,
diameter_shift,
block_size
);
return cudaSuccess;
}
|
58db6059d91cca850a04a0f87354063ce3e504b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
* Refactor `loop` to be a CUDA Kernel. The new kernel should
* only do the work of 1 iteration of the original loop.
*/
__global__ void loop()
{
printf("This is iteration number %d\n", threadIdx.x + blockIdx.x * blockDim.x);
}
int main()
{
/*
* When refactoring `loop` to launch as a kernel, be sure
* to use the execution configuration to control how many
* "iterations" to perform.
*
* For this exercise, be sure to use more than 1 block in
* the execution configuration.
*/
hipLaunchKernelGGL(( loop), dim3(2),dim3(5), 0, 0, );
hipDeviceSynchronize();
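  /*
   * Note: kernel launches are asynchronous, so the synchronize call above is what keeps
   * the host from leaving main() before the 2 blocks x 5 threads = 10 "iterations"
   * have flushed their printf output from the device.
   */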
}
| 58db6059d91cca850a04a0f87354063ce3e504b8.cu | #include <stdio.h>
/*
* Refactor `loop` to be a CUDA Kernel. The new kernel should
* only do the work of 1 iteration of the original loop.
*/
__global__ void loop()
{
printf("This is iteration number %d\n", threadIdx.x + blockIdx.x * blockDim.x);
}
int main()
{
/*
* When refactoring `loop` to launch as a kernel, be sure
* to use the execution configuration to control how many
* "iterations" to perform.
*
* For this exercise, be sure to use more than 1 block in
* the execution configuration.
*/
loop<<<2,5>>>();
cudaDeviceSynchronize();
}
|
37c96089dd2b2a1b805e00d753e9429492bf4656.hip | // !!! This is a file automatically generated by hipify!!!
//
// desc: transaction delivery.
// date: 2018-4-8
// author: Xie Shangwei
//
//
#include <stdio.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#include "tpcc_table.h"
#include "tx.h"
#include "utility.h"
#include "table_operator.h"
__device__
void tx_delivery(){
printf("********** transaction : delivery *********\n");
long D_ID;
long W_ID;
long D_W_ID;
long O_CARRIER_ID;
//generate parameters.
int rid = d_random(0, 20);
struct district *dis_p;
rid = table_scan(DISTRICT, LONG, 0, 0, NO, NULL, rid);
if(rid != -1)
dis_p = (struct district *)get(DISTRICT, rid);
else{
printf("rid = -1, can not get the content.\n");
}
//dis_p = (struct district *)content;
//printf("ok\n");
//d_memcpy(&tmp_district, content, sizeof(struct district));
D_W_ID = dis_p->D_W_ID;
D_ID = dis_p->D_ID;
W_ID = D_W_ID;
O_CARRIER_ID = d_random(1, 11);
printf("D_ID %ld\n", dis_p->D_ID);
printf("parameters:\n");
printf("D_W_ID : %ld\n", D_W_ID);
printf("D_ID : %ld\n", D_ID);
printf("W_ID : %ld\n", W_ID);
printf("O_CARRIER_ID : %ld\n", O_CARRIER_ID);
// transaction begin.
// first step.
struct new_order tmp_no;
struct new_order *no_p;
int offset_DID = (long)&tmp_no.NO_D_ID - (long)&tmp_no.NO_O_ID;
int start_id = 0;
rid = table_scan(NEW_ORDER, LONG , 0, offset_DID, EQ, &D_ID, start_id);
while(rid != -1){
no_p = (struct new_order *)get(NEW_ORDER, rid);
if(no_p->NO_W_ID == W_ID){
break;
            }
            // advance the scan, otherwise this loop never terminates when no row matches
            rid = table_scan(NEW_ORDER, LONG , 0, offset_DID, EQ, &D_ID, rid+1);
        }
if(rid == -1)
printf("there is no new_order satisfy condition\n");
long NO_O_ID = no_p->NO_O_ID;
mark_slot_free(d_new_orders_flag, rid);
printf("NO_O_ID : %ld\n", NO_O_ID);
//return ;
// delete record in the new_order table.
struct order *or_p;
start_id = 0;
//int offset_O_D_ID = (long)&tmp_or.O_D_ID - (long)&tmp_or.O_ID;
rid = table_scan(ORDER, LONG, 0, 0, EQ, &NO_O_ID, start_id);
// printf("rid : %d\n", rid);
// or_p = (struct order *)get(ORDER, rid);
// printf(" wid : %ld , did : %ld, customer_id : %ld\n, ", or_p->O_W_ID, or_p->O_D_ID, or_p->O_C_ID);
//return;
while(rid != -1){
or_p = (struct order *)get(ORDER, rid);
if(or_p->O_W_ID == W_ID &&
or_p->O_D_ID == D_ID ){
// printf("to be break\n");
break;
}
//printf("wont break\n");
rid = table_scan(ORDER, LONG, 0, 0, EQ, &NO_O_ID, rid+1);
//printf("here rid = %ld\n", rid);
}
//printf("rid = %ld\n", rid);
//return;
if(rid == -1)
printf("there is no record in table order satisfy condition.\n");
long O_C_ID = or_p->O_C_ID;
printf("O_C_ID : %ld\n", O_C_ID);
//int offse_O_CARRIER_ID = (long)&tmp_or.O_CARRIER_ID - (long)&tmp_or.O_ID;
//d_memcpy((void *)or_p+offse_O_CARRIER_ID, &O_CARRIER_ID, sizeof(long));
or_p->O_CARRIER_ID = O_CARRIER_ID;
printf("after set , carrier id is : %ld\n", (struct order *)or_p->O_CARRIER_ID);
// next step operate in table order_line.
start_id = 0;
struct order_line *orl_p;
//int offset_OL_W_ID = (long)&tmp_orl.OL_W_ID - (long)&tmp_orl.OL_O_ID;
double sum_of_amout = 0;
rid = table_scan(ORDER_LINE, LONG, 0, 0, EQ, &NO_O_ID, start_id);
printf("order line rid = %ld\n", rid);
//return;
while(rid != -1){
orl_p = (struct order_line *)get(ORDER_LINE, rid);
if(orl_p->OL_D_ID == D_ID &&
orl_p->OL_W_ID == W_ID ){
orl_p -> OL_DELIVERY_D = 1234567;
sum_of_amout += orl_p->OL_AMOUNT;
//printf("here rid = %ld, amout : %lf\n", rid, orl_p->OL_AMOUNT);
}
rid = table_scan(ORDER_LINE, LONG, 0, 0, EQ, &NO_O_ID, rid+1);
}
printf("the OL_AMOUNT : %lf\n", sum_of_amout);
//return;
start_id = 0;
struct customer *cus_p;
//int offset_C_W_ID = (long)&cus.C_W_ID - (long)&cus.C_ID;
rid = table_scan(CUSTOMER, LONG, 0, 0, EQ, &O_C_ID, start_id);
//return;
while(rid != -1){
cus_p = (struct customer *)get(CUSTOMER, rid);
if(cus_p->C_W_ID == W_ID &&
cus_p->C_D_ID == D_ID){
cus_p->C_BALANCE += sum_of_amout;
cus_p->C_DELIVERY_CNT++;
//printf("custmer rid : %d\n", rid);
break;
}
rid = table_scan(CUSTOMER, LONG, 0, 0, EQ, &O_C_ID, rid+1);
}
if(rid == -1)
printf("there is no record satisfy condition.\n");
printf("customer id : %d\n", rid);
printf("********* delivery process over! *********\n");
} | 37c96089dd2b2a1b805e00d753e9429492bf4656.cu | //
// desc: transaction delivery.
// date: 2018-4-8
// author: Xie Shangwei
//
//
#include <stdio.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#include "tpcc_table.h"
#include "tx.h"
#include "utility.h"
#include "table_operator.h"
__device__
void tx_delivery(){
printf("********** transaction : delivery *********\n");
long D_ID;
long W_ID;
long D_W_ID;
long O_CARRIER_ID;
//generate parameters.
int rid = d_random(0, 20);
struct district *dis_p;
rid = table_scan(DISTRICT, LONG, 0, 0, NO, NULL, rid);
if(rid != -1)
dis_p = (struct district *)get(DISTRICT, rid);
else{
printf("rid = -1, can not get the content.\n");
}
//dis_p = (struct district *)content;
//printf("ok\n");
//d_memcpy(&tmp_district, content, sizeof(struct district));
D_W_ID = dis_p->D_W_ID;
D_ID = dis_p->D_ID;
W_ID = D_W_ID;
O_CARRIER_ID = d_random(1, 11);
printf("D_ID %ld\n", dis_p->D_ID);
printf("parameters:\n");
printf("D_W_ID : %ld\n", D_W_ID);
printf("D_ID : %ld\n", D_ID);
printf("W_ID : %ld\n", W_ID);
printf("O_CARRIER_ID : %ld\n", O_CARRIER_ID);
// transaction begin.
// first step.
struct new_order tmp_no;
struct new_order *no_p;
int offset_DID = (long)&tmp_no.NO_D_ID - (long)&tmp_no.NO_O_ID;
int start_id = 0;
rid = table_scan(NEW_ORDER, LONG , 0, offset_DID, EQ, &D_ID, start_id);
while(rid != -1){
no_p = (struct new_order *)get(NEW_ORDER, rid);
if(no_p->NO_W_ID == W_ID){
break;
            }
            // advance the scan, otherwise this loop never terminates when no row matches
            rid = table_scan(NEW_ORDER, LONG , 0, offset_DID, EQ, &D_ID, rid+1);
        }
if(rid == -1)
printf("there is no new_order satisfy condition\n");
long NO_O_ID = no_p->NO_O_ID;
mark_slot_free(d_new_orders_flag, rid);
printf("NO_O_ID : %ld\n", NO_O_ID);
//return ;
// delete record in the new_order table.
struct order *or_p;
start_id = 0;
//int offset_O_D_ID = (long)&tmp_or.O_D_ID - (long)&tmp_or.O_ID;
rid = table_scan(ORDER, LONG, 0, 0, EQ, &NO_O_ID, start_id);
// printf("rid : %d\n", rid);
// or_p = (struct order *)get(ORDER, rid);
// printf(" wid : %ld , did : %ld, customer_id : %ld\n, ", or_p->O_W_ID, or_p->O_D_ID, or_p->O_C_ID);
//return;
while(rid != -1){
or_p = (struct order *)get(ORDER, rid);
if(or_p->O_W_ID == W_ID &&
or_p->O_D_ID == D_ID ){
// printf("to be break\n");
break;
}
//printf("wont break\n");
rid = table_scan(ORDER, LONG, 0, 0, EQ, &NO_O_ID, rid+1);
//printf("here rid = %ld\n", rid);
}
//printf("rid = %ld\n", rid);
//return;
if(rid == -1)
printf("there is no record in table order satisfy condition.\n");
long O_C_ID = or_p->O_C_ID;
printf("O_C_ID : %ld\n", O_C_ID);
//int offse_O_CARRIER_ID = (long)&tmp_or.O_CARRIER_ID - (long)&tmp_or.O_ID;
//d_memcpy((void *)or_p+offse_O_CARRIER_ID, &O_CARRIER_ID, sizeof(long));
or_p->O_CARRIER_ID = O_CARRIER_ID;
printf("after set , carrier id is : %ld\n", (struct order *)or_p->O_CARRIER_ID);
// next step operate in table order_line.
start_id = 0;
struct order_line *orl_p;
//int offset_OL_W_ID = (long)&tmp_orl.OL_W_ID - (long)&tmp_orl.OL_O_ID;
double sum_of_amout = 0;
rid = table_scan(ORDER_LINE, LONG, 0, 0, EQ, &NO_O_ID, start_id);
printf("order line rid = %ld\n", rid);
//return;
while(rid != -1){
orl_p = (struct order_line *)get(ORDER_LINE, rid);
if(orl_p->OL_D_ID == D_ID &&
orl_p->OL_W_ID == W_ID ){
orl_p -> OL_DELIVERY_D = 1234567;
sum_of_amout += orl_p->OL_AMOUNT;
//printf("here rid = %ld, amout : %lf\n", rid, orl_p->OL_AMOUNT);
}
rid = table_scan(ORDER_LINE, LONG, 0, 0, EQ, &NO_O_ID, rid+1);
}
printf("the OL_AMOUNT : %lf\n", sum_of_amout);
//return;
start_id = 0;
struct customer *cus_p;
//int offset_C_W_ID = (long)&cus.C_W_ID - (long)&cus.C_ID;
rid = table_scan(CUSTOMER, LONG, 0, 0, EQ, &O_C_ID, start_id);
//return;
while(rid != -1){
cus_p = (struct customer *)get(CUSTOMER, rid);
if(cus_p->C_W_ID == W_ID &&
cus_p->C_D_ID == D_ID){
cus_p->C_BALANCE += sum_of_amout;
cus_p->C_DELIVERY_CNT++;
//printf("custmer rid : %d\n", rid);
break;
}
rid = table_scan(CUSTOMER, LONG, 0, 0, EQ, &O_C_ID, rid+1);
}
if(rid == -1)
printf("there is no record satisfy condition.\n");
printf("customer id : %d\n", rid);
printf("********* delivery process over! *********\n");
} |
8f75c8c5554e5653c180a18549c5c568d1a37a43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void initBore_select_gpu( float *values, const float *center,
const float *x0,
const float *Hl,
const float *ul,
const float *vl,
const float *Hr,
const float *ur,
const float *vr) {
values[0] = center[0] < *x0 ? *Hl : *Hr;
values[1] = center[0] < *x0 ? *ul : *ur;
values[2] = center[0] < *x0 ? *vl : *vr;
}
// CUDA kernel function
__global__ void op_cuda_initBore_select(
float *arg0,
const float *__restrict arg1,
const float *arg2,
const float *arg3,
const float *arg4,
const float *arg5,
const float *arg6,
const float *arg7,
const float *arg8,
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
initBore_select_gpu(arg0+n*4,
arg1+n*2,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7,
arg8);
}
}
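// note: the kernel above walks the set with a grid-stride loop, so its correctness does not
// depend on the launch configuration; the host stub below simply launches a fixed grid
// (nblocks = 200) and each thread strides through the set by blockDim.x*gridDim.x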
//host stub function
void op_par_loop_initBore_select(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7,
op_arg arg8){
float*arg2h = (float *)arg2.data;
float*arg3h = (float *)arg3.data;
float*arg4h = (float *)arg4.data;
float*arg5h = (float *)arg5.data;
float*arg6h = (float *)arg6.data;
float*arg7h = (float *)arg7.data;
float*arg8h = (float *)arg8.data;
int nargs = 9;
op_arg args[9];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
args[8] = arg8;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(20);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[20].name = name;
OP_kernels[20].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: initBore_select");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OP_consts_h + consts_bytes;
arg2.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg2.data)[d] = arg2h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg3.data = OP_consts_h + consts_bytes;
arg3.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg3.data)[d] = arg3h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg4.data = OP_consts_h + consts_bytes;
arg4.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg4.data)[d] = arg4h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg5.data = OP_consts_h + consts_bytes;
arg5.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg5.data)[d] = arg5h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg6.data = OP_consts_h + consts_bytes;
arg6.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg6.data)[d] = arg6h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg7.data = OP_consts_h + consts_bytes;
arg7.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg7.data)[d] = arg7h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg8.data = OP_consts_h + consts_bytes;
arg8.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg8.data)[d] = arg8h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
mvConstArraysToDevice(consts_bytes);
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_20
int nthread = OP_BLOCK_SIZE_20;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
hipLaunchKernelGGL(( op_cuda_initBore_select), dim3(nblocks),dim3(nthread), 0, 0,
(float *) arg0.data_d,
(float *) arg1.data_d,
(float *) arg2.data_d,
(float *) arg3.data_d,
(float *) arg4.data_d,
(float *) arg5.data_d,
(float *) arg6.data_d,
(float *) arg7.data_d,
(float *) arg8.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[20].time += wall_t2 - wall_t1;
OP_kernels[20].transfer += (float)set->size * arg0.size * 2.0f;
OP_kernels[20].transfer += (float)set->size * arg1.size;
}
| 8f75c8c5554e5653c180a18549c5c568d1a37a43.cu | //
// auto-generated by op2.py
//
//user function
__device__ void initBore_select_gpu( float *values, const float *center,
const float *x0,
const float *Hl,
const float *ul,
const float *vl,
const float *Hr,
const float *ur,
const float *vr) {
values[0] = center[0] < *x0 ? *Hl : *Hr;
values[1] = center[0] < *x0 ? *ul : *ur;
values[2] = center[0] < *x0 ? *vl : *vr;
}
// CUDA kernel function
__global__ void op_cuda_initBore_select(
float *arg0,
const float *__restrict arg1,
const float *arg2,
const float *arg3,
const float *arg4,
const float *arg5,
const float *arg6,
const float *arg7,
const float *arg8,
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
initBore_select_gpu(arg0+n*4,
arg1+n*2,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7,
arg8);
}
}
//host stub function
void op_par_loop_initBore_select(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7,
op_arg arg8){
float*arg2h = (float *)arg2.data;
float*arg3h = (float *)arg3.data;
float*arg4h = (float *)arg4.data;
float*arg5h = (float *)arg5.data;
float*arg6h = (float *)arg6.data;
float*arg7h = (float *)arg7.data;
float*arg8h = (float *)arg8.data;
int nargs = 9;
op_arg args[9];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
args[8] = arg8;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(20);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[20].name = name;
OP_kernels[20].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: initBore_select");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OP_consts_h + consts_bytes;
arg2.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg2.data)[d] = arg2h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg3.data = OP_consts_h + consts_bytes;
arg3.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg3.data)[d] = arg3h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg4.data = OP_consts_h + consts_bytes;
arg4.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg4.data)[d] = arg4h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg5.data = OP_consts_h + consts_bytes;
arg5.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg5.data)[d] = arg5h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg6.data = OP_consts_h + consts_bytes;
arg6.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg6.data)[d] = arg6h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg7.data = OP_consts_h + consts_bytes;
arg7.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg7.data)[d] = arg7h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg8.data = OP_consts_h + consts_bytes;
arg8.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg8.data)[d] = arg8h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
mvConstArraysToDevice(consts_bytes);
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_20
int nthread = OP_BLOCK_SIZE_20;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
op_cuda_initBore_select<<<nblocks,nthread>>>(
(float *) arg0.data_d,
(float *) arg1.data_d,
(float *) arg2.data_d,
(float *) arg3.data_d,
(float *) arg4.data_d,
(float *) arg5.data_d,
(float *) arg6.data_d,
(float *) arg7.data_d,
(float *) arg8.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[20].time += wall_t2 - wall_t1;
OP_kernels[20].transfer += (float)set->size * arg0.size * 2.0f;
OP_kernels[20].transfer += (float)set->size * arg1.size;
}
|
ffdb932b798a98863b849f02d9378939b9c76571.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "util.h"
__host__ __device__
double f(double x) {
return exp(cos(x))-2;
};
__host__ __device__
double fp(double x) {
return -sin(x) * exp(cos(x));
};
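// sanity check of fp() above, by the chain rule:
// f(x) = exp(cos(x)) - 2 => f'(x) = exp(cos(x)) * (-sin(x)) = -sin(x)*exp(cos(x))
// the Newton update applied below is then x <- x - f(x)/f'(x)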
// implements newton solve for
// f(x) = 0
// where
// f(x) = exp(cos(x)) - 2
void newton_host(int n, double *x) {
for(int i=0; i<n; ++i) {
auto x0 = x[i];
for(int iter=0; iter<5; ++iter) {
x0 -= f(x0)/fp(x0);
}
x[i] = x0;
}
}
// TODO : implement newton_device() kernel that performs the work in newton_host
// in parallel on the GPU
__global__
void newton_device(int n, double *x) {
auto index = threadIdx.x + blockDim.x*blockIdx.x;
if(index<n) {
auto x0 = x[index];
for(int iter=0; iter<5; ++iter) {
x0 -= f(x0)/fp(x0);
}
x[index] = x0;
}
}
int main(int argc, char** argv) {
size_t pow = read_arg(argc, argv, 1, 20);
size_t N = 1 << pow;
auto size_in_bytes = N * sizeof(double);
std::cout << "memory copy overlap test of length N = " << N
<< " : " << size_in_bytes/(1024.*1024.) << "MB"
<< std::endl;
hipInit(0);
double* xd = malloc_device<double>(N);
double* xh = malloc_host<double>(N, 1.5);
double* x = malloc_host<double>(N);
// compute kernel launch configuration
auto block_dim = 128;
auto grid_dim = (N+block_dim-1)/block_dim;
auto time_h2d = -get_time();
copy_to_device(xh, xd, N);
time_h2d += get_time();
hipDeviceSynchronize();
auto time_kernel = -get_time();
hipLaunchKernelGGL(( newton_device), dim3(grid_dim), dim3(block_dim), 0, 0, N, xd);
hipDeviceSynchronize();
time_kernel += get_time();
auto time_d2h = -get_time();
copy_to_host(xd, x, N);
time_d2h += get_time();
std::cout << "-------\ntimings\n-------" << std::endl;
std::cout << "H2D : " << time_h2d << std::endl;
std::cout << "D2H : " << time_d2h << std::endl;
std::cout << "kernel : " << time_kernel << std::endl;
// check for errors
auto errors = 0;
for(auto i=0; i<N; ++i) {
if(::fabs(f(x[i]))>1e-10) {
errors++;
}
}
if(errors>0) std::cout << "\n============ FAILED with " << errors << " errors" << std::endl;
else std::cout << "\n============ PASSED" << std::endl;
hipFree(xd);
free(xh);
free(x);
return 0;
}
| ffdb932b798a98863b849f02d9378939b9c76571.cu | #include <iostream>
#include <cuda.h>
#include "util.h"
__host__ __device__
double f(double x) {
return exp(cos(x))-2;
};
__host__ __device__
double fp(double x) {
return -sin(x) * exp(cos(x));
};
// implements newton solve for
// f(x) = 0
// where
// f(x) = exp(cos(x)) - 2
void newton_host(int n, double *x) {
for(int i=0; i<n; ++i) {
auto x0 = x[i];
for(int iter=0; iter<5; ++iter) {
x0 -= f(x0)/fp(x0);
}
x[i] = x0;
}
}
// TODO : implement newton_device() kernel that performs the work in newton_host
// in parallel on the GPU
__global__
void newton_device(int n, double *x) {
auto index = threadIdx.x + blockDim.x*blockIdx.x;
if(index<n) {
auto x0 = x[index];
for(int iter=0; iter<5; ++iter) {
x0 -= f(x0)/fp(x0);
}
x[index] = x0;
}
}
int main(int argc, char** argv) {
size_t pow = read_arg(argc, argv, 1, 20);
size_t N = 1 << pow;
auto size_in_bytes = N * sizeof(double);
std::cout << "memory copy overlap test of length N = " << N
<< " : " << size_in_bytes/(1024.*1024.) << "MB"
<< std::endl;
cuInit(0);
double* xd = malloc_device<double>(N);
double* xh = malloc_host<double>(N, 1.5);
double* x = malloc_host<double>(N);
// compute kernel launch configuration
auto block_dim = 128;
auto grid_dim = (N+block_dim-1)/block_dim;
auto time_h2d = -get_time();
copy_to_device(xh, xd, N);
time_h2d += get_time();
cudaThreadSynchronize();
auto time_kernel = -get_time();
newton_device<<<grid_dim, block_dim>>>(N, xd);
cudaThreadSynchronize();
time_kernel += get_time();
auto time_d2h = -get_time();
copy_to_host(xd, x, N);
time_d2h += get_time();
std::cout << "-------\ntimings\n-------" << std::endl;
std::cout << "H2D : " << time_h2d << std::endl;
std::cout << "D2H : " << time_d2h << std::endl;
std::cout << "kernel : " << time_kernel << std::endl;
// check for errors
auto errors = 0;
for(auto i=0; i<N; ++i) {
if(std::fabs(f(x[i]))>1e-10) {
errors++;
}
}
if(errors>0) std::cout << "\n============ FAILED with " << errors << " errors" << std::endl;
else std::cout << "\n============ PASSED" << std::endl;
cudaFree(xd);
free(xh);
free(x);
return 0;
}
|
9e7bee6763a42c1e6637d190e8fb69d554cc15c6.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by root on 2020/11/23.
//
#include "stdio.h"
#include "hip/hip_runtime.h"
#define N 256
int n_streams = 4;
__global__ void kernel_1() {
double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.2);
}
// printf("sum=%.4f in kernel_1\n", sum);
}
__global__ void kernel_2() {
double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.2);
}
// printf("sum=%.4f in kernel_2\n", sum);
}
__global__ void kernel_3() {
double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.2);
}
// printf("sum=%.4f in kernel_3\n", sum);
}
__global__ void kernel_4() {
double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.2);
}
// printf("sum=%.4f in kernel_4\n", sum);
}
int main() {
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1); // set max_connections to 1
printf("CUDA_DEVICE_MAX_CONNECTIONS: %s\n", getenv("CUDA_DEVICE_MAX_CONNECTIONS"));
hipStream_t *streams = (hipStream_t *) malloc(n_streams * sizeof(hipStream_t));
for (int i = 0; i < n_streams; i++) {
hipStreamCreate(&streams[i]);
}
float elapsed_time = 0.0f;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
dim3 block(1);
dim3 grid(1);
// hipEventRecord(start);
//    // Depth-first schedule 0.6253ms
// for (int i = 0; i < n_streams; i++) {
// hipLaunchKernelGGL(( kernel_1), dim3(grid), dim3(block), 0, streams[i], );
// hipLaunchKernelGGL(( kernel_2), dim3(grid), dim3(block), 0, streams[i], );
// hipLaunchKernelGGL(( kernel_3), dim3(grid), dim3(block), 0, streams[i], );
//// kernel_3<<<grid, block, 0>>>();
// hipLaunchKernelGGL(( kernel_4), dim3(grid), dim3(block), 0, streams[i], );
// }
// hipEventRecord(stop);
//
// hipEventSynchronize(stop);
// hipEventElapsedTime(&elapsed_time, start, stop);
// printf("time elapsed between start and stop: %.4f\n", elapsed_time);
hipEventRecord(start);
// Breadth-first schedule 0.2397ms
for (int i = 0; i < n_streams; i++) {
hipLaunchKernelGGL(( kernel_1), dim3(grid), dim3(block), 0, streams[i], );
}
for (int i = 0; i < n_streams; i++) {
hipLaunchKernelGGL(( kernel_2), dim3(grid), dim3(block), 0, streams[i], );
}
for (int i = 0; i < n_streams; i++) {
hipLaunchKernelGGL(( kernel_3), dim3(grid), dim3(block), 0, streams[i], );
}
for (int i = 0; i < n_streams; i++) {
hipLaunchKernelGGL(( kernel_4), dim3(grid), dim3(block), 0, streams[i], );
}
hipEventRecord(stop);
hipEventSynchronize(stop);
elapsed_time = 0.0f;
hipEventElapsedTime(&elapsed_time, start, stop);
printf("time elapsed between start and stop: %.4f", elapsed_time);
}
| 9e7bee6763a42c1e6637d190e8fb69d554cc15c6.cu | //
// Created by root on 2020/11/23.
//
#include "stdio.h"
#include "cuda_runtime.h"
#define N 256
int n_streams = 4;
__global__ void kernel_1() {
double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.2);
}
// printf("sum=%.4f in kernel_1\n", sum);
}
__global__ void kernel_2() {
double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.2);
}
// printf("sum=%.4f in kernel_2\n", sum);
}
__global__ void kernel_3() {
double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.2);
}
// printf("sum=%.4f in kernel_3\n", sum);
}
__global__ void kernel_4() {
double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.2);
}
// printf("sum=%.4f in kernel_4\n", sum);
}
int main() {
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1); // set max_connections to 1
printf("CUDA_DEVICE_MAX_CONNECTIONS: %s\n", getenv("CUDA_DEVICE_MAX_CONNECTIONS"));
cudaStream_t *streams = (cudaStream_t *) malloc(n_streams * sizeof(cudaStream_t));
for (int i = 0; i < n_streams; i++) {
cudaStreamCreate(&streams[i]);
}
float elapsed_time = 0.0f;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
dim3 block(1);
dim3 grid(1);
// cudaEventRecord(start);
//    // Depth-first schedule 0.6253ms
// for (int i = 0; i < n_streams; i++) {
// kernel_1<<<grid, block, 0, streams[i]>>>();
// kernel_2<<<grid, block, 0, streams[i]>>>();
// kernel_3<<<grid, block, 0, streams[i]>>>();
//// kernel_3<<<grid, block, 0>>>();
// kernel_4<<<grid, block, 0, streams[i]>>>();
// }
// cudaEventRecord(stop);
//
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&elapsed_time, start, stop);
// printf("time elapsed between start and stop: %.4f\n", elapsed_time);
cudaEventRecord(start);
// Breadth-first schedule 0.2397ms
for (int i = 0; i < n_streams; i++) {
kernel_1<<<grid, block, 0, streams[i]>>>();
}
for (int i = 0; i < n_streams; i++) {
kernel_2<<<grid, block, 0, streams[i]>>>();
}
for (int i = 0; i < n_streams; i++) {
kernel_3<<<grid, block, 0, streams[i]>>>();
}
for (int i = 0; i < n_streams; i++) {
kernel_4<<<grid, block, 0, streams[i]>>>();
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
elapsed_time = 0.0f;
cudaEventElapsedTime(&elapsed_time, start, stop);
printf("time elapsed between start and stop: %.4f", elapsed_time);
}
|
4cb700123dcdd355eea0491958da906d9f88496a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Simplified simulation of high-energy particle bombardment
 *
 * Parallel Computing (Computer Science degree)
 * 2017/2018
 *
 * (c) 2018 Arturo Gonzalez Escribano
 *
 * Modified by Group 04
*/
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include<cputils.h>
#include"kernel.cu"
#define PI 3.14159f
#define MAX_THREADS_PER_BLOCK 1024
/* Structure to store the data of a particle storm */
typedef struct {
int size;
int *posval;
} Storm;
/* AUXILIARY FUNCTIONS: Not used inside the timed section, leave them as they are */
/* DEBUG function: Print the state of the layer */
void debug_print(int layer_size, float *layer, int *posiciones, float *maximos, int num_storms ) {
int i,k;
if ( layer_size <= 35 ) {
/* Recorrer capa */
for( k=0; k<layer_size; k++ ) {
/* Escribir valor del punto */
printf("%10.4f |", layer[k] );
/* Calcular el numero de caracteres normalizado con el maximo a 60 */
int ticks = (int)( 60 * layer[k] / maximos[num_storms-1] );
/* Escribir todos los caracteres menos el ultimo */
for (i=0; i<ticks-1; i++ ) printf("o");
/* Para maximos locales escribir ultimo caracter especial */
if ( k>0 && k<layer_size-1 && layer[k] > layer[k-1] && layer[k] > layer[k+1] )
printf("x");
else
printf("o");
/* Si el punto es uno de los maximos especiales, annadir marca */
for (i=0; i<num_storms; i++)
if ( posiciones[i] == k ) printf(" M%d", i );
/* Fin de linea */
printf("\n");
}
}
}
/*
 * Function: Read a file with particle storm data
*/
Storm read_storm_file( char *fname ) {
FILE *fstorm = cp_abrir_fichero( fname );
if ( fstorm == NULL ) {
fprintf(stderr,"Error: Opening storm file %s\n", fname );
exit( EXIT_FAILURE );
}
Storm storm;
int ok = fscanf(fstorm, "%d", &(storm.size) );
if ( ok != 1 ) {
fprintf(stderr,"Error: Reading size of storm file %s\n", fname );
exit( EXIT_FAILURE );
}
storm.posval = (int *)malloc( sizeof(int) * storm.size * 2 );
if ( storm.posval == NULL ) {
fprintf(stderr,"Error: Allocating memory for storm file %s, with size %d\n", fname, storm.size );
exit( EXIT_FAILURE );
}
int elem;
for ( elem=0; elem<storm.size; elem++ ) {
ok = fscanf(fstorm, "%d %d\n",
&(storm.posval[elem*2]),
&(storm.posval[elem*2+1]) );
if ( ok != 2 ) {
fprintf(stderr,"Error: Reading element %d in storm file %s\n", elem, fname );
exit( EXIT_FAILURE );
}
}
fclose( fstorm );
return storm;
}
/*
 * MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,k;
/* 1.1. Leer argumentos */
if (argc<3) {
fprintf(stderr,"Usage: %s <size> <storm_1_file> [ <storm_i_file> ] ... \n", argv[0] );
exit( EXIT_FAILURE );
}
int layer_size = atoi( argv[1] );
int num_storms = argc-2;
Storm storms[ num_storms ];
/* 1.2. Leer datos de storms */
for( i=2; i<argc; i++ )
storms[i-2] = read_storm_file( argv[i] );
/* 1.3. Inicializar maximos a cero */
float maximos[ num_storms ];
int posiciones[ num_storms ];
for (i=0; i<num_storms; i++) {
maximos[i] = 0.0f;
posiciones[i] = 0;
}
/* 2. Inicia medida de tiempo */
hipSetDevice(0);
hipDeviceSynchronize();
double ttotal = cp_Wtime();
/*---------------------------------------------------------------------*/
/* COMIENZO: No optimizar/paralelizar el main por encima de este punto */
/*Indicamos la GPU (DEVICE) que vamos a utilizar*/
float *d_layer;
float *d_layerCopy;
int *d_pos;
float *d_max;
/* 3. Reservar memoria para las capas e inicializar a cero */
float *layer = (float *)malloc( sizeof(float) * layer_size );
float *layer_copy = (float *)malloc( sizeof(float) * layer_size );
if ( layer == NULL || layer_copy == NULL ) {
fprintf(stderr,"Error: Allocating the layer memory\n");
exit( EXIT_FAILURE );
}
/* Variable para controlar el error devuelto */
hipError_t error;
/* Reservamos la memoria de la GPU */
// layer
error = hipMalloc((void**) &d_layer, (float) layer_size*sizeof(float));
if (error != hipSuccess)
printf("Error CUDA: %s \n", hipGetErrorString(error));
// layer_copy
error = hipMalloc((void**) &d_layerCopy, (float) layer_size*sizeof(float));
if (error != hipSuccess)
printf("Error CUDA: %s \n", hipGetErrorString(error));
// valor del maximo
error = hipMalloc((void**) &d_max, (float) num_storms*sizeof(float));
if (error != hipSuccess)
printf("Error CUDA: %s \n", hipGetErrorString(error));
// posiciones del maximo
error = hipMalloc((void**) &d_pos, (int) num_storms*sizeof(int));
if (error != hipSuccess)
printf("Error CUDA: %s \n", hipGetErrorString(error));
for(k=0 ; k<layer_size ; k++) {
layer[k] = 0.0f;
layer_copy[k] = 0.0f;
}
/* 4. Fase de bombardeos */
for(i=0 ; i<num_storms ; i++) {
/* 4.1. Suma energia de impactos */
// copiamos layer a GPU
error = hipMemcpy(d_layer, layer, layer_size*sizeof(float), hipMemcpyHostToDevice);
if (error != hipSuccess)
printf("Error CUDA: %s \n", hipGetErrorString(error));
// copiamos layer_copy en GPU
error = hipMemcpy(d_layerCopy, layer_copy, layer_size*sizeof(float), hipMemcpyHostToDevice);
if (error != hipSuccess)
printf("Error CUDA: %s \n", hipGetErrorString(error));
        // tamaños de los bloques y grid
dim3 nThreads(MAX_THREADS_PER_BLOCK);
dim3 nBlocks((layer_size+nThreads.x-1)/nThreads.x);
// funcion actualiza
for( j=0; j<storms[i].size; j++ ) {
float energia = (float)storms[i].posval[j*2+1] / 1000;
int posicion = storms[i].posval[j*2];
hipLaunchKernelGGL(( gpuActualiza), dim3(nThreads), dim3(nBlocks), 0, 0, d_layer, posicion, energia);
}
/* 4.2. Relajacion entre tormentas de particulas */
/* 4.2.1. Copiar valores a capa auxiliar */
        /* Función del kernel copiarLayer */
hipLaunchKernelGGL(( gpuCopiarLayer), dim3(nThreads), dim3(nBlocks), 0, 0, d_layer, d_layerCopy);
/* 4.2.2. Actualizamos la capa sin contar los extremos */
// Atenuacion
// gpuAtenuacion<<<nThreads, nBlocks>>>(d_layer, d_layerCopy, layer_size);
for( k=1; k<layer_size-1; k++ ) layer[k] = ( layer_copy[k-1] + layer_copy[k] + layer_copy[k+1] ) / 3;
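        /* The gpuAtenuacion kernel referenced in the commented-out call above lives in
           kernel.cu and is not shown here. A minimal sketch of such a kernel, assuming the
           signature used in that call (illustration only, not the project's actual code):

               __global__ void gpuAtenuacion(float *layer, float *layer_copy, int layer_size) {
                   int k = blockIdx.x * blockDim.x + threadIdx.x;
                   if (k > 0 && k < layer_size - 1)
                       layer[k] = (layer_copy[k-1] + layer_copy[k] + layer_copy[k+1]) / 3;
               }

           It performs the same 3-point averaging as the CPU loop above. */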
/* 4.3. Localizamos maximo */
/* enviamos los datos de maximos al gpu */
error = hipMemcpy(d_max, maximos, num_storms*sizeof(float), hipMemcpyHostToDevice);
if (error != hipSuccess)
printf("Error CUDA: %s\n", hipGetErrorString(error));
error = hipMemcpy(d_pos, posiciones, num_storms*sizeof(int), hipMemcpyHostToDevice);
if (error != hipSuccess)
printf("Error CUDA: %s\n", hipGetErrorString(error));
/* Calculamos los maximos */
hipLaunchKernelGGL(( gpuMaximos), dim3(nThreads), dim3(nBlocks), 0, 0, d_layer, d_pos, d_max, layer_size, i);
/* traemos los datos al host de nuevo */
error = hipMemcpy(layer_copy, d_layerCopy, layer_size*sizeof(float), hipMemcpyDeviceToHost );
if (error != hipSuccess)
printf("Error CUDA: %s\n", hipGetErrorString(error));
error = hipMemcpy(layer, d_layer, layer_size*sizeof(float), hipMemcpyDeviceToHost );
if (error != hipSuccess)
printf("Error CUDA: %s\n", hipGetErrorString(error));
error = hipMemcpy(maximos, d_max, num_storms*sizeof(float), hipMemcpyDeviceToHost );
if (error != hipSuccess)
printf("Error CUDA: %s\n", hipGetErrorString(error));
error = hipMemcpy(posiciones, d_pos, num_storms*sizeof(int), hipMemcpyDeviceToHost );
if (error != hipSuccess)
printf("Error CUDA: %s\n", hipGetErrorString(error));
}
/* liberamos la memoria en el gpu */
error = hipFree(d_layer);
if (error != hipSuccess)
printf("Error CUDA: %s\n", hipGetErrorString(error));
error = hipFree(d_layerCopy);
if (error != hipSuccess)
printf("Error CUDA: %s\n", hipGetErrorString(error));
error = hipFree(d_max);
if (error != hipSuccess) printf("Error CUDA: %s\n", hipGetErrorString(error));
error = hipFree(d_pos);
if (error != hipSuccess)
printf("Error CUDA: %s\n", hipGetErrorString(error));
/*----------------------------------------------------------*/
/* FINAL: No optimizar/paralelizar por debajo de este punto */
/* 6. Final de medida de tiempo */
hipDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 7. DEBUG: Dibujar resultado (Solo para capas con hasta 35 puntos) */
#ifdef DEBUG
debug_print( layer_size, layer, posiciones, maximos, num_storms );
#endif
/* 8. Salida de resultados para tablon */
printf("\n");
/* 8.1. Tiempo total de la computacion */
printf("Time: %lf\n", ttotal );
/* 8.2. Escribir los maximos */
printf("Result:");
for (i=0; i<num_storms; i++)
printf(" %d %f", posiciones[i], maximos[i] );
printf("\n");
/* 9. Liberar recursos */
for( i=0; i<argc-2; i++ )
free( storms[i].posval );
/* 10. Final correcto */
return 0;
}
| 4cb700123dcdd355eea0491958da906d9f88496a.cu | /*
 * Simplified simulation of high-energy particle bombardment
 *
 * Parallel Computing (Computer Science degree)
 * 2017/2018
 *
 * (c) 2018 Arturo Gonzalez Escribano
 *
 * Modified by Group 04
*/
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include<cputils.h>
#include"kernel.cu"
#define PI 3.14159f
#define MAX_THREADS_PER_BLOCK 1024
/* Structure to store the data of a particle storm */
typedef struct {
int size;
int *posval;
} Storm;
/* AUXILIARY FUNCTIONS: Not used inside the timed section, leave them as they are */
/* DEBUG function: Print the state of the layer */
void debug_print(int layer_size, float *layer, int *posiciones, float *maximos, int num_storms ) {
int i,k;
if ( layer_size <= 35 ) {
/* Recorrer capa */
for( k=0; k<layer_size; k++ ) {
/* Escribir valor del punto */
printf("%10.4f |", layer[k] );
/* Calcular el numero de caracteres normalizado con el maximo a 60 */
int ticks = (int)( 60 * layer[k] / maximos[num_storms-1] );
/* Escribir todos los caracteres menos el ultimo */
for (i=0; i<ticks-1; i++ ) printf("o");
/* Para maximos locales escribir ultimo caracter especial */
if ( k>0 && k<layer_size-1 && layer[k] > layer[k-1] && layer[k] > layer[k+1] )
printf("x");
else
printf("o");
/* Si el punto es uno de los maximos especiales, annadir marca */
for (i=0; i<num_storms; i++)
if ( posiciones[i] == k ) printf(" M%d", i );
/* Fin de linea */
printf("\n");
}
}
}
/*
 * Function: Read a file with particle storm data
*/
Storm read_storm_file( char *fname ) {
FILE *fstorm = cp_abrir_fichero( fname );
if ( fstorm == NULL ) {
fprintf(stderr,"Error: Opening storm file %s\n", fname );
exit( EXIT_FAILURE );
}
Storm storm;
int ok = fscanf(fstorm, "%d", &(storm.size) );
if ( ok != 1 ) {
fprintf(stderr,"Error: Reading size of storm file %s\n", fname );
exit( EXIT_FAILURE );
}
storm.posval = (int *)malloc( sizeof(int) * storm.size * 2 );
if ( storm.posval == NULL ) {
fprintf(stderr,"Error: Allocating memory for storm file %s, with size %d\n", fname, storm.size );
exit( EXIT_FAILURE );
}
int elem;
for ( elem=0; elem<storm.size; elem++ ) {
ok = fscanf(fstorm, "%d %d\n",
&(storm.posval[elem*2]),
&(storm.posval[elem*2+1]) );
if ( ok != 2 ) {
fprintf(stderr,"Error: Reading element %d in storm file %s\n", elem, fname );
exit( EXIT_FAILURE );
}
}
fclose( fstorm );
return storm;
}
/*
 * MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,k;
/* 1.1. Leer argumentos */
if (argc<3) {
fprintf(stderr,"Usage: %s <size> <storm_1_file> [ <storm_i_file> ] ... \n", argv[0] );
exit( EXIT_FAILURE );
}
int layer_size = atoi( argv[1] );
int num_storms = argc-2;
Storm storms[ num_storms ];
/* 1.2. Leer datos de storms */
for( i=2; i<argc; i++ )
storms[i-2] = read_storm_file( argv[i] );
/* 1.3. Inicializar maximos a cero */
float maximos[ num_storms ];
int posiciones[ num_storms ];
for (i=0; i<num_storms; i++) {
maximos[i] = 0.0f;
posiciones[i] = 0;
}
/* 2. Inicia medida de tiempo */
cudaSetDevice(0);
cudaDeviceSynchronize();
double ttotal = cp_Wtime();
/*---------------------------------------------------------------------*/
/* COMIENZO: No optimizar/paralelizar el main por encima de este punto */
/*Indicamos la GPU (DEVICE) que vamos a utilizar*/
float *d_layer;
float *d_layerCopy;
int *d_pos;
float *d_max;
/* 3. Reservar memoria para las capas e inicializar a cero */
float *layer = (float *)malloc( sizeof(float) * layer_size );
float *layer_copy = (float *)malloc( sizeof(float) * layer_size );
if ( layer == NULL || layer_copy == NULL ) {
fprintf(stderr,"Error: Allocating the layer memory\n");
exit( EXIT_FAILURE );
}
/* Variable para controlar el error devuelto */
cudaError_t error;
/* Reservamos la memoria de la GPU */
// layer
error = cudaMalloc((void**) &d_layer, (float) layer_size*sizeof(float));
if (error != cudaSuccess)
printf("Error CUDA: %s \n", cudaGetErrorString(error));
// layer_copy
error = cudaMalloc((void**) &d_layerCopy, (float) layer_size*sizeof(float));
if (error != cudaSuccess)
printf("Error CUDA: %s \n", cudaGetErrorString(error));
// valor del maximo
error = cudaMalloc((void**) &d_max, (float) num_storms*sizeof(float));
if (error != cudaSuccess)
printf("Error CUDA: %s \n", cudaGetErrorString(error));
// posiciones del maximo
error = cudaMalloc((void**) &d_pos, (int) num_storms*sizeof(int));
if (error != cudaSuccess)
printf("Error CUDA: %s \n", cudaGetErrorString(error));
for(k=0 ; k<layer_size ; k++) {
layer[k] = 0.0f;
layer_copy[k] = 0.0f;
}
/* 4. Fase de bombardeos */
for(i=0 ; i<num_storms ; i++) {
/* 4.1. Suma energia de impactos */
// copiamos layer a GPU
error = cudaMemcpy(d_layer, layer, layer_size*sizeof(float), cudaMemcpyHostToDevice);
if (error != cudaSuccess)
printf("Error CUDA: %s \n", cudaGetErrorString(error));
// copiamos layer_copy en GPU
error = cudaMemcpy(d_layerCopy, layer_copy, layer_size*sizeof(float), cudaMemcpyHostToDevice);
if (error != cudaSuccess)
printf("Error CUDA: %s \n", cudaGetErrorString(error));
// tamaños de los bloques y grid
dim3 nThreads(MAX_THREADS_PER_BLOCK);
dim3 nBlocks((layer_size+nThreads.x-1)/nThreads.x);
// funcion actualiza
for( j=0; j<storms[i].size; j++ ) {
float energia = (float)storms[i].posval[j*2+1] / 1000;
int posicion = storms[i].posval[j*2];
gpuActualiza<<<nThreads, nBlocks>>>(d_layer, posicion, energia);
}
/* 4.2. Relajacion entre tormentas de particulas */
/* 4.2.1. Copiar valores a capa auxiliar */
/* Función del kernel copiarLayer */
gpuCopiarLayer<<<nThreads, nBlocks>>>(d_layer, d_layerCopy);
/* 4.2.2. Actualizamos la capa sin contar los extremos */
// Atenuacion
// gpuAtenuacion<<<nThreads, nBlocks>>>(d_layer, d_layerCopy, layer_size);
for( k=1; k<layer_size-1; k++ ) layer[k] = ( layer_copy[k-1] + layer_copy[k] + layer_copy[k+1] ) / 3;
/* 4.3. Localizamos maximo */
/* enviamos los datos de maximos al gpu */
error = cudaMemcpy(d_max, maximos, num_storms*sizeof(float), cudaMemcpyHostToDevice);
if (error != cudaSuccess)
printf("Error CUDA: %s\n", cudaGetErrorString(error));
error = cudaMemcpy(d_pos, posiciones, num_storms*sizeof(int), cudaMemcpyHostToDevice);
if (error != cudaSuccess)
printf("Error CUDA: %s\n", cudaGetErrorString(error));
/* Calculamos los maximos */
gpuMaximos<<<nThreads, nBlocks>>>(d_layer, d_pos, d_max, layer_size, i);
/* traemos los datos al host de nuevo */
error = cudaMemcpy(layer_copy, d_layerCopy, layer_size*sizeof(float), cudaMemcpyDeviceToHost );
if (error != cudaSuccess)
printf("Error CUDA: %s\n", cudaGetErrorString(error));
error = cudaMemcpy(layer, d_layer, layer_size*sizeof(float), cudaMemcpyDeviceToHost );
if (error != cudaSuccess)
printf("Error CUDA: %s\n", cudaGetErrorString(error));
error = cudaMemcpy(maximos, d_max, num_storms*sizeof(float), cudaMemcpyDeviceToHost );
if (error != cudaSuccess)
printf("Error CUDA: %s\n", cudaGetErrorString(error));
error = cudaMemcpy(posiciones, d_pos, num_storms*sizeof(int), cudaMemcpyDeviceToHost );
if (error != cudaSuccess)
printf("Error CUDA: %s\n", cudaGetErrorString(error));
}
/* liberamos la memoria en el gpu */
error = cudaFree(d_layer);
if (error != cudaSuccess)
printf("Error CUDA: %s\n", cudaGetErrorString(error));
error = cudaFree(d_layerCopy);
if (error != cudaSuccess)
printf("Error CUDA: %s\n", cudaGetErrorString(error));
error = cudaFree(d_max);
if (error != cudaSuccess) printf("Error CUDA: %s\n", cudaGetErrorString(error));
error = cudaFree(d_pos);
if (error != cudaSuccess)
printf("Error CUDA: %s\n", cudaGetErrorString(error));
/*----------------------------------------------------------*/
/* FINAL: No optimizar/paralelizar por debajo de este punto */
/* 6. Final de medida de tiempo */
cudaDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 7. DEBUG: Dibujar resultado (Solo para capas con hasta 35 puntos) */
#ifdef DEBUG
debug_print( layer_size, layer, posiciones, maximos, num_storms );
#endif
/* 8. Salida de resultados para tablon */
printf("\n");
/* 8.1. Tiempo total de la computacion */
printf("Time: %lf\n", ttotal );
/* 8.2. Escribir los maximos */
printf("Result:");
for (i=0; i<num_storms; i++)
printf(" %d %f", posiciones[i], maximos[i] );
printf("\n");
/* 9. Liberar recursos */
for( i=0; i<argc-2; i++ )
free( storms[i].posval );
/* 10. Final correcto */
return 0;
}
|
9a870dac4b830859ff4ee542f200b57ebfda9c7c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/loss_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void DetLossKernel(const Dtype* bottom_data, const Dtype* target_data,
Dtype* bottom_diff, const int width) {
const int index = (blockIdx.x * blockDim.x * blockDim.y) + (threadIdx.x * width) + threadIdx.y;
bottom_diff[index] = ((1 - target_data[index]) * tanhf(bottom_data[index]) * 0.1 +
target_data[index] * (tanhf(bottom_data[index]) - 1)) / (blockDim.x * blockDim.y) * 10;
}
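// Note on indexing: the launch below uses gridDim.x = channels and blockDim = (height, width),
// so "index" above enumerates a channel-major layout:
// blockIdx.x * height * width + threadIdx.x * width + threadIdx.y.
// This implicitly assumes height * width stays within the 1024 threads-per-block limit.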
template <typename Dtype>
void DetLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* target_data = bottom[1]->gpu_data();
const int channels = bottom[1]->channels();
const int width = bottom[1]->width();
const int height = bottom[1]->height();
// const int count = bottom[1]->count();
dim3 theradsPerBlock(height,width);
hipLaunchKernelGGL(( DetLossKernel), dim3(channels), dim3(theradsPerBlock), 0, 0,
bottom_data, target_data, bottom_diff, width);
// Dtype* loss = top[0]->mutable_gpu_data();
Dtype loss;
// hipMalloc((void**) &loss, sizeof(Dtype));
caffe_gpu_asum(channels*width*height, bottom_diff, &loss);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void DetLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype loss_weight = top[0]->cpu_diff()[0];
caffe_gpu_scal(bottom[0]->count(), loss_weight, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DetLossLayer);
} // namespace caffe
| 9a870dac4b830859ff4ee542f200b57ebfda9c7c.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/loss_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void DetLossKernel(const Dtype* bottom_data, const Dtype* target_data,
Dtype* bottom_diff, const int width) {
const int index = (blockIdx.x * blockDim.x * blockDim.y) + (threadIdx.x * width) + threadIdx.y;
bottom_diff[index] = ((1 - target_data[index]) * tanhf(bottom_data[index]) * 0.1 +
target_data[index] * (tanhf(bottom_data[index]) - 1)) / (blockDim.x * blockDim.y) * 10;
}
template <typename Dtype>
void DetLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* target_data = bottom[1]->gpu_data();
const int channels = bottom[1]->channels();
const int width = bottom[1]->width();
const int height = bottom[1]->height();
// const int count = bottom[1]->count();
dim3 theradsPerBlock(height,width);
DetLossKernel<<<channels, theradsPerBlock>>>
(bottom_data, target_data, bottom_diff, width);
// Dtype* loss = top[0]->mutable_gpu_data();
Dtype loss;
// cudaMalloc((void**) &loss, sizeof(Dtype));
caffe_gpu_asum(channels*width*height, bottom_diff, &loss);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void DetLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype loss_weight = top[0]->cpu_diff()[0];
caffe_gpu_scal(bottom[0]->count(), loss_weight, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DetLossLayer);
} // namespace caffe
|
b6ecf5a58368b5ac4b70c06e0819ef28e0d02e07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//================= Device matching functions =====================//
template <int size>
__device__ void InvertMatrix(float elem[size][size], float res[size][size])
{
int indx[size];
float b[size];
float vv[size];
for (int i=0;i<size;i++)
indx[i] = 0;
int imax = 0;
float d = 1.0;
for (int i=0;i<size;i++) { // find biggest element for each row
float big = 0.0;
for (int j=0;j<size;j++) {
float temp = fabs(elem[i][j]);
if (temp>big)
big = temp;
}
if (big>0.0)
vv[i] = 1.0/big;
else
vv[i] = 1e16;
}
for (int j=0;j<size;j++) {
for (int i=0;i<j;i++) { // i<j
float sum = elem[i][j]; // i<j (lower left)
for (int k=0;k<i;k++) // k<i<j
sum -= elem[i][k]*elem[k][j]; // i>k (upper right), k<j (lower left)
elem[i][j] = sum; // i<j (lower left)
}
float big = 0.0;
for (int i=j;i<size;i++) { // i>=j
float sum = elem[i][j]; // i>=j (upper right)
for (int k=0;k<j;k++) // k<j<=i
sum -= elem[i][k]*elem[k][j]; // i>k (upper right), k<j (lower left)
elem[i][j] = sum; // i>=j (upper right)
float dum = vv[i]*fabs(sum);
if (dum>=big) {
big = dum;
imax = i;
}
}
if (j!=imax) { // imax>j
for (int k=0;k<size;k++) {
float dum = elem[imax][k]; // upper right and lower left
elem[imax][k] = elem[j][k];
elem[j][k] = dum;
}
d = -d;
vv[imax] = vv[j];
}
indx[j] = imax;
if (elem[j][j]==0.0) // j==j (upper right)
elem[j][j] = 1e-16;
if (j!=(size-1)) {
float dum = 1.0/elem[j][j];
for (int i=j+1;i<size;i++) // i>j
elem[i][j] *= dum; // i>j (upper right)
}
}
for (int j=0;j<size;j++) {
for (int k=0;k<size;k++)
b[k] = 0.0;
b[j] = 1.0;
int ii = -1;
for (int i=0;i<size;i++) {
int ip = indx[i];
float sum = b[ip];
b[ip] = b[i];
if (ii!=-1)
for (int j=ii;j<i;j++)
sum -= elem[i][j]*b[j]; // i>j (upper right)
else if (sum!=0.0)
ii = i;
b[i] = sum;
}
for (int i=size-1;i>=0;i--) {
float sum = b[i];
for (int j=i+1;j<size;j++)
sum -= elem[i][j]*b[j]; // i<j (lower left)
b[i] = sum/elem[i][i]; // i==i (upper right)
}
for (int i=0;i<size;i++)
res[i][j] = b[i];
}
}
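// ComputeHomographies below builds, for each sample of 4 point correspondences
// (x1,y1) -> (x2,y2) drawn from randPts, the standard 8x8 DLT system that follows from
//     x2 = (h0*x1 + h1*y1 + h2) / (h6*x1 + h7*y1 + 1)
//     y2 = (h3*x1 + h4*y1 + h5) / (h6*x1 + h7*y1 + 1)
// i.e. two linear equations per correspondence in the unknowns h0..h7 (h8 fixed to 1).
// The system is solved by inverting the 8x8 matrix with InvertMatrix above and
// multiplying the inverse with the right-hand side b.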
__global__ void ComputeHomographies(float *coord, int *randPts, float *homo, int numPts)
{
float a[8][8], ia[8][8];
float b[8];
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int idx = blockDim.x*bx + tx;
const int numLoops = blockDim.x*gridDim.x;
for (int i=0;i<4;i++) {
int pt = randPts[i*numLoops+idx];
float x1 = coord[pt+0*numPts];
float y1 = coord[pt+1*numPts];
float x2 = coord[pt+2*numPts];
float y2 = coord[pt+3*numPts];
float *row1 = a[2*i+0];
row1[0] = x1;
row1[1] = y1;
row1[2] = 1.0;
row1[3] = row1[4] = row1[5] = 0.0;
row1[6] = -x2*x1;
row1[7] = -x2*y1;
float *row2 = a[2*i+1];
row2[0] = row2[1] = row2[2] = 0.0;
row2[3] = x1;
row2[4] = y1;
row2[5] = 1.0;
row2[6] = -y2*x1;
row2[7] = -y2*y1;
b[2*i+0] = x2;
b[2*i+1] = y2;
}
InvertMatrix<8>(a, ia);
__syncthreads();
for (int j=0;j<8;j++) {
float sum = 0.0f;
for (int i=0;i<8;i++)
sum += ia[j][i]*b[i];
homo[j*numLoops+idx] = sum;
}
__syncthreads();
} | b6ecf5a58368b5ac4b70c06e0819ef28e0d02e07.cu | #include "includes.h"
//================= Device matching functions =====================//
template <int size>
__device__ void InvertMatrix(float elem[size][size], float res[size][size])
{
int indx[size];
float b[size];
float vv[size];
for (int i=0;i<size;i++)
indx[i] = 0;
int imax = 0;
float d = 1.0;
for (int i=0;i<size;i++) { // find biggest element for each row
float big = 0.0;
for (int j=0;j<size;j++) {
float temp = fabs(elem[i][j]);
if (temp>big)
big = temp;
}
if (big>0.0)
vv[i] = 1.0/big;
else
vv[i] = 1e16;
}
for (int j=0;j<size;j++) {
for (int i=0;i<j;i++) { // i<j
float sum = elem[i][j]; // i<j (lower left)
for (int k=0;k<i;k++) // k<i<j
sum -= elem[i][k]*elem[k][j]; // i>k (upper right), k<j (lower left)
elem[i][j] = sum; // i<j (lower left)
}
float big = 0.0;
for (int i=j;i<size;i++) { // i>=j
float sum = elem[i][j]; // i>=j (upper right)
for (int k=0;k<j;k++) // k<j<=i
sum -= elem[i][k]*elem[k][j]; // i>k (upper right), k<j (lower left)
elem[i][j] = sum; // i>=j (upper right)
float dum = vv[i]*fabs(sum);
if (dum>=big) {
big = dum;
imax = i;
}
}
if (j!=imax) { // imax>j
for (int k=0;k<size;k++) {
float dum = elem[imax][k]; // upper right and lower left
elem[imax][k] = elem[j][k];
elem[j][k] = dum;
}
d = -d;
vv[imax] = vv[j];
}
indx[j] = imax;
if (elem[j][j]==0.0) // j==j (upper right)
elem[j][j] = 1e-16;
if (j!=(size-1)) {
float dum = 1.0/elem[j][j];
for (int i=j+1;i<size;i++) // i>j
elem[i][j] *= dum; // i>j (upper right)
}
}
for (int j=0;j<size;j++) {
for (int k=0;k<size;k++)
b[k] = 0.0;
b[j] = 1.0;
int ii = -1;
for (int i=0;i<size;i++) {
int ip = indx[i];
float sum = b[ip];
b[ip] = b[i];
if (ii!=-1)
for (int j=ii;j<i;j++)
sum -= elem[i][j]*b[j]; // i>j (upper right)
else if (sum!=0.0)
ii = i;
b[i] = sum;
}
for (int i=size-1;i>=0;i--) {
float sum = b[i];
for (int j=i+1;j<size;j++)
sum -= elem[i][j]*b[j]; // i<j (lower left)
b[i] = sum/elem[i][i]; // i==i (upper right)
}
for (int i=0;i<size;i++)
res[i][j] = b[i];
}
}
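// Each thread builds and solves one 8x8 direct-linear-transform system a*h = b
// from four point correspondences (the homography's ninth entry is implicitly
// fixed to 1), using the LU-based InvertMatrix above. `coord` holds four
// contiguous blocks of length numPts (x1, y1, x2, y2) and the eight solved
// parameters are written to `homo` strided by numLoops.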
__global__ void ComputeHomographies(float *coord, int *randPts, float *homo, int numPts)
{
float a[8][8], ia[8][8];
float b[8];
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int idx = blockDim.x*bx + tx;
const int numLoops = blockDim.x*gridDim.x;
for (int i=0;i<4;i++) {
int pt = randPts[i*numLoops+idx];
float x1 = coord[pt+0*numPts];
float y1 = coord[pt+1*numPts];
float x2 = coord[pt+2*numPts];
float y2 = coord[pt+3*numPts];
float *row1 = a[2*i+0];
row1[0] = x1;
row1[1] = y1;
row1[2] = 1.0;
row1[3] = row1[4] = row1[5] = 0.0;
row1[6] = -x2*x1;
row1[7] = -x2*y1;
float *row2 = a[2*i+1];
row2[0] = row2[1] = row2[2] = 0.0;
row2[3] = x1;
row2[4] = y1;
row2[5] = 1.0;
row2[6] = -y2*x1;
row2[7] = -y2*y1;
b[2*i+0] = x2;
b[2*i+1] = y2;
}
InvertMatrix<8>(a, ia);
__syncthreads();
for (int j=0;j<8;j++) {
float sum = 0.0f;
for (int i=0;i<8;i++)
sum += ia[j][i]*b[i];
homo[j*numLoops+idx] = sum;
}
__syncthreads();
} |
a14c1b54febc92a634a13437351927479533881f.hip | // !!! This is a file automatically generated by hipify!!!
%%cu
#include <iostream>
#include <time.h>
#include <ctime>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define N (512*512)
const int half = N/2;
__global__ void kernel_1 (int * dev_arr){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
dev_arr[idx] = idx;
}
}
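// Reverses dev_arr in place: each of the first N/2 threads swaps element idx
// with its mirror N-idx-1; the __shared__ buffer is used only as a per-thread
// temporary for the swap.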
__global__ void kernel_2 (int * dev_arr){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int th_idx = threadIdx.x;
__shared__ int temporary[512];
if (idx < half){
temporary[th_idx] = dev_arr[idx];
int swap_idx= N - idx - 1 ;
dev_arr[idx] = dev_arr[swap_idx];
dev_arr[swap_idx] = temporary[th_idx];
}
}
void fill_array(int * arr){
for(int i = 0; i<N ; i++){
arr[i] = i;
}
}
void reverse_array(int * arr){
for (int i = 0; i < half; i++){
int id = N - i - 1;
int temporary = arr[i];
arr[i] = arr[id];
arr[id] = temporary;
}
}
int main() {
//CPU
int cpu_arr[N];
clock_t start_cpu;
double time_cpu;
start_cpu = clock();
fill_array(cpu_arr);
reverse_array(cpu_arr);
time_cpu =(double)(clock() - start_cpu)/CLOCKS_PER_SEC;
printf("Time in CPU %f\n", time_cpu);
//GPU
int * host_arr = new int[N];
int* dev_arr;
float elapsedTime = 0.0f;
hipDeviceReset();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipEventSynchronize(start);
hipMalloc((void**)&dev_arr, N * sizeof (int));
hipLaunchKernelGGL(( kernel_1) , dim3(dim3(((N + 511) / 512), 1)), dim3(dim3(512, 1)) , 0, 0, dev_arr);
hipDeviceSynchronize();
hipLaunchKernelGGL(( kernel_2) , dim3(dim3(((N + 511) / 512), 1)), dim3(dim3(512, 1)) , 0, 0, dev_arr);
hipDeviceSynchronize();
hipMemcpy(host_arr, dev_arr, N * sizeof (int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("Time in GPU %f\n", elapsedTime/1000);
hipFree(dev_arr);
return 0;
}
| a14c1b54febc92a634a13437351927479533881f.cu | %%cu
#include <iostream>
#include <time.h>
#include <ctime>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define N (512*512)
const int half = N/2;
__global__ void kernel_1 (int * dev_arr){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
dev_arr[idx] = idx;
}
}
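// Reverses dev_arr in place: each of the first N/2 threads swaps element idx
// with its mirror N-idx-1; the __shared__ buffer is used only as a per-thread
// temporary for the swap.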
__global__ void kernel_2 (int * dev_arr){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int th_idx = threadIdx.x;
__shared__ int temporary[512];
if (idx < half){
temporary[th_idx] = dev_arr[idx];
int swap_idx= N - idx - 1 ;
dev_arr[idx] = dev_arr[swap_idx];
dev_arr[swap_idx] = temporary[th_idx];
}
}
void fill_array(int * arr){
for(int i = 0; i<N ; i++){
arr[i] = i;
}
}
void reverse_array(int * arr){
for (int i = 0; i < half; i++){
int id = N - i - 1;
int temporary = arr[i];
arr[i] = arr[id];
arr[id] = temporary;
}
}
int main() {
//CPU
int cpu_arr[N];
clock_t start_cpu;
double time_cpu;
start_cpu = clock();
fill_array(cpu_arr);
reverse_array(cpu_arr);
time_cpu =(double)(clock() - start_cpu)/CLOCKS_PER_SEC;
printf("Time in CPU %f\n", time_cpu);
//GPU
int * host_arr = new int[N];
int* dev_arr;
float elapsedTime = 0.0f;
cudaDeviceReset();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
cudaMalloc((void**)&dev_arr, N * sizeof (int));
kernel_1 <<<dim3(((N + 511) / 512), 1), dim3(512, 1) >>> (dev_arr);
cudaThreadSynchronize();
kernel_2 <<<dim3(((N + 511) / 512), 1), dim3(512, 1) >>> (dev_arr);
cudaThreadSynchronize();
cudaMemcpy(host_arr, dev_arr, N * sizeof (int), cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Time in GPU %f\n", elapsedTime/1000);
cudaFree(dev_arr);
return 0;
}
|
115efbf743a5ed29142bf114286f1011b1e55d26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2010 PPI FUDAN University
* CUDA-SURF v0.5
* Author: Max Lv
* Revision: 25
*/
// Texture source image
texture<float, 1, hipReadModeElementType> TexSrc;
//-------------------------------------------------------
//! Convert image to single channel 32F
IplImage *getGray(const IplImage *img)
{
// Check we have been supplied a non-null img pointer
IplImage *gray8, *gray32;
gray32 = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 );
if ( img->nChannels == 1 )
gray8 = (IplImage *) cvClone( img );
else
{
gray8 = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 1 );
cvCvtColor( img, gray8, CV_BGR2GRAY );
}
cvConvertScale( gray8, gray32, 1.0 / 255.0, 0);
cvReleaseImage( &gray8 );
return gray32;
}
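// The integral image is built in two passes: integralRowKernel computes an
// inclusive prefix sum along each row (one thread per row, reading the source
// through the 1D texture TexSrc), and integralColumnKernel then accumulates the
// row sums down each column, one thread per column.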
__global__ void integralRowKernel(float *i_data,
int width, int height)
{
int r = blockDim.x * blockIdx.x + threadIdx.x;
if (r >= height)
return;
float rs = 0.f;
for (int c = 0; c < width; c++)
{
rs += tex1Dfetch(TexSrc, r * width + c);
i_data[r * width + c] = rs;
}
}
__global__ void integralColumnKernel(float *i_data,
int width, int height)
{
int c = blockDim.x * blockIdx.x + threadIdx.x;
if (c >= width)
return;
float rs = i_data[c];
for (int r = 1; r < height; r++)
{
rs += i_data[r * width + c];
i_data[r * width + c] = rs;
}
}
//-------------------------------------------------------
//! Computes the integral image of image img. Assumes source image to be a
//! 32-bit floating point. Returns IplImage of 32-bit float form.
void Integral(IplImage *img, float *d_idata, float *d_data)
{
// set up variables for data access
int height = img->height;
int width = img->width;
float *data = (float *) img->imageData;
CUDA_SAFE_CALL(hipMemcpy(d_data, data, width * height * sizeof(float),
hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipBindTexture(0, TexSrc,
d_data, width * height * sizeof(float)));
hipLaunchKernelGGL(( integralRowKernel), dim3((height + 15) / 16), dim3(16) , 0, 0, d_idata, width, height);
hipLaunchKernelGGL(( integralColumnKernel), dim3((width + 7) / 8), dim3(8) , 0, 0, d_idata, width, height);
// release the gray image
CUDA_SAFE_CALL(hipUnbindTexture(TexSrc));
//CUDA_SAFE_CALL(hipFree(d_data));
}
| 115efbf743a5ed29142bf114286f1011b1e55d26.cu | /*
* Copyright 2010 PPI FUDAN University
* CUDA-SURF v0.5
* Author: Max Lv
* Revision: 25
*/
// Texture source image
texture<float, 1, cudaReadModeElementType> TexSrc;
//-------------------------------------------------------
//! Convert image to single channel 32F
IplImage *getGray(const IplImage *img)
{
// Check we have been supplied a non-null img pointer
IplImage *gray8, *gray32;
gray32 = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 );
if ( img->nChannels == 1 )
gray8 = (IplImage *) cvClone( img );
else
{
gray8 = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 1 );
cvCvtColor( img, gray8, CV_BGR2GRAY );
}
cvConvertScale( gray8, gray32, 1.0 / 255.0, 0);
cvReleaseImage( &gray8 );
return gray32;
}
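// The integral image is built in two passes: integralRowKernel computes an
// inclusive prefix sum along each row (one thread per row, reading the source
// through the 1D texture TexSrc), and integralColumnKernel then accumulates the
// row sums down each column, one thread per column.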
__global__ void integralRowKernel(float *i_data,
int width, int height)
{
int r = blockDim.x * blockIdx.x + threadIdx.x;
if (r >= height)
return;
float rs = 0.f;
for (int c = 0; c < width; c++)
{
rs += tex1Dfetch(TexSrc, r * width + c);
i_data[r * width + c] = rs;
}
}
__global__ void integralColumnKernel(float *i_data,
int width, int height)
{
int c = blockDim.x * blockIdx.x + threadIdx.x;
if (c >= width)
return;
float rs = i_data[c];
for (int r = 1; r < height; r++)
{
rs += i_data[r * width + c];
i_data[r * width + c] = rs;
}
}
//-------------------------------------------------------
//! Computes the integral image of image img. Assumes source image to be a
//! 32-bit floating point. Returns IplImage of 32-bit float form.
void Integral(IplImage *img, float *d_idata, float *d_data)
{
// set up variables for data access
int height = img->height;
int width = img->width;
float *data = (float *) img->imageData;
CUDA_SAFE_CALL(cudaMemcpy(d_data, data, width * height * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaBindTexture(0, TexSrc,
d_data, width * height * sizeof(float)));
integralRowKernel<<< (height + 15) / 16, 16 >>> (d_idata, width, height);
integralColumnKernel<<< (width + 7) / 8, 8 >>> (d_idata, width, height);
// release the gray image
CUDA_SAFE_CALL(cudaUnbindTexture(TexSrc));
//CUDA_SAFE_CALL(cudaFree(d_data));
}
|
c1d9c31ce156cedb082a327256c994e62f8ea239.hip | // !!! This is a file automatically generated by hipify!!!
#include "./common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
__device__ int d_N;
__device__ short int d_array[3][3][3];
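// d_N and d_array live in device global memory; the host writes and reads them
// by symbol (hipMemcpyToSymbol / hipMemcpyFromSymbol in main below) rather than
// through a pointer obtained from hipMalloc.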
__global__ void Kernel_add_one(){
int thid = threadIdx.x;
int blid = blockIdx.x;
if(thid < d_N && blid < d_N){
for (size_t k = 0; k < d_N; k++) {
d_array[blid][thid][k]++;
}
}
else{}
}//end-kernel
__global__ void Kernel_print_array(){
printf("d_N: %d\n",d_N );
printf("%s\n","device array" );
for (size_t i = 0; i < d_N; i++) {
for (size_t j = 0; j < d_N; j++) {
for (size_t k = 0; k < d_N; k++) {
printf("%d ",d_array[i][j][k] );
}
}
printf("\n");
}
}//end-kernel
int main(void)
{
int dev_num =0;
int N = 3;
short int array[3][3][3];
//----set up device START-----
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp,dev_num);
printf("Using Device %d:%s\n",dev_num,deviceProp.name);
hipSetDevice(dev_num);
//----set up device END-----
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
for (size_t k = 0; k < N; k++) {
array[i][j][k] = j;
}
}
}
printf("%s\n","before array" );
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
for (size_t k = 0; k < N; k++) {
printf("%d ",array[i][j][k] );
}
}
printf("\n");
}
// copy the host variable to the global
CHECK(hipMemcpyToSymbol(d_N, &N, sizeof(int)));
// CHECK(hipMemcpyToSymbol(d_array, &array[0][0][0], sizeof(short int) * (N * N * N)));
CHECK(hipMemcpyToSymbol(d_array, array, sizeof(short int) * (N * N * N)));
// invoke the kernel
hipLaunchKernelGGL(( Kernel_print_array), dim3(1), dim3(1) , 0, 0, );
hipDeviceSynchronize();
hipLaunchKernelGGL(( Kernel_add_one), dim3(N), dim3(N) , 0, 0, );
hipDeviceSynchronize();
hipLaunchKernelGGL(( Kernel_print_array), dim3(1), dim3(1) , 0, 0, );
// copy the global variable back to the host
// CHECK(hipMemcpyFromSymbol(&array[0][0][0], d_array, sizeof(short int) * (N * N * N)));
CHECK(hipMemcpyFromSymbol(array, d_array, sizeof(short int) * (N * N * N)));
printf("%s\n","after array" );
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
for (size_t k = 0; k < N; k++) {
printf("%d ",array[i][j][k] );
}
}
printf("\n");
}
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
| c1d9c31ce156cedb082a327256c994e62f8ea239.cu | #include "./common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
__device__ int d_N;
__device__ short int d_array[3][3][3];
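// d_N and d_array live in device global memory; the host writes and reads them
// by symbol (cudaMemcpyToSymbol / cudaMemcpyFromSymbol in main below) rather
// than through a pointer obtained from cudaMalloc.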
__global__ void Kernel_add_one(){
int thid = threadIdx.x;
int blid = blockIdx.x;
if(thid < d_N && blid < d_N){
for (size_t k = 0; k < d_N; k++) {
d_array[blid][thid][k]++;
}
}
else{}
}//end-kernel
__global__ void Kernel_print_array(){
printf("d_N: %d\n",d_N );
printf("%s\n","device array" );
for (size_t i = 0; i < d_N; i++) {
for (size_t j = 0; j < d_N; j++) {
for (size_t k = 0; k < d_N; k++) {
printf("%d ",d_array[i][j][k] );
}
}
printf("\n");
}
}//end-kernel
int main(void)
{
int dev_num =0;
int N = 3;
short int array[3][3][3];
//----set up device START-----
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,dev_num);
printf("Using Device %d:%s\n",dev_num,deviceProp.name);
cudaSetDevice(dev_num);
//----set up device END-----
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
for (size_t k = 0; k < N; k++) {
array[i][j][k] = j;
}
}
}
printf("%s\n","before array" );
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
for (size_t k = 0; k < N; k++) {
printf("%d ",array[i][j][k] );
}
}
printf("\n");
}
// copy the host variable to the global
CHECK(cudaMemcpyToSymbol(d_N, &N, sizeof(int)));
// CHECK(cudaMemcpyToSymbol(d_array, &array[0][0][0], sizeof(short int) * (N * N * N)));
CHECK(cudaMemcpyToSymbol(d_array, array, sizeof(short int) * (N * N * N)));
// invoke the kernel
Kernel_print_array<<<1, 1 >>>();
cudaDeviceSynchronize();
Kernel_add_one<<<N, N >>>();
cudaDeviceSynchronize();
Kernel_print_array<<<1, 1 >>>();
// copy the global variable back to the host
// CHECK(cudaMemcpyFromSymbol(&array[0][0][0], d_array, sizeof(short int) * (N * N * N)));
CHECK(cudaMemcpyFromSymbol(array, d_array, sizeof(short int) * (N * N * N)));
printf("%s\n","after array" );
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
for (size_t k = 0; k < N; k++) {
printf("%d ",array[i][j][k] );
}
}
printf("\n");
}
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
57f021205bb5ce4cf2bcd14b4a37f5abd0549123.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/cudf.h>
#include <bitmask/legacy/bit_mask.cuh>
#include <cudf/copying.hpp>
#include <cudf/groupby.hpp>
#include <cudf/legacy/bitmask.hpp>
#include <cudf/legacy/table.hpp>
#include <cudf/utilities/legacy/nvcategory_util.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <hash/concurrent_unordered_map.cuh>
#include <table/legacy/device_table.cuh>
#include <table/legacy/device_table_row_operators.cuh>
#include <utilities/column_utils.hpp>
#include <utilities/cuda_utils.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include "groupby_kernels.cuh"
#include "groupby/common/aggregation_requests.hpp"
#include "groupby/common/type_info.hpp"
#include "groupby/common/utils.hpp"
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/fill.h>
#include <type_traits>
#include <vector>
#include <utilities/integer_utils.hpp>
namespace cudf {
namespace groupby {
namespace hash {
namespace {
template <bool keys_have_nulls, bool values_have_nulls>
auto build_aggregation_map(table const& input_keys, table const& input_values,
device_table const& d_input_keys,
device_table const& d_input_values,
std::vector<operators> const& ops, Options options,
hipStream_t stream) {
gdf_size_type constexpr unused_key{std::numeric_limits<gdf_size_type>::max()};
gdf_size_type constexpr unused_value{
std::numeric_limits<gdf_size_type>::max()};
CUDF_EXPECTS(input_keys.num_rows() < unused_key,
"Groupby input size too large.");
// The exact output size is unknown a priori, therefore, use the input size as
// an upper bound.
gdf_size_type const output_size_estimate{input_keys.num_rows()};
cudf::table sparse_output_values{
output_size_estimate,
target_dtypes(column_dtypes(input_values), ops),
column_dtype_infos(input_values),
values_have_nulls,
false,
stream};
initialize_with_identity(sparse_output_values, ops, stream);
auto d_sparse_output_values =
device_table::create(sparse_output_values, stream);
rmm::device_vector<operators> d_ops(ops);
// If we ignore null keys, then nulls are not equivalent
bool const null_keys_are_equal{not options.ignore_null_keys};
bool const skip_key_rows_with_nulls{keys_have_nulls and
not null_keys_are_equal};
row_hasher<keys_have_nulls> hasher{d_input_keys};
row_equality_comparator<keys_have_nulls> rows_equal{
d_input_keys, d_input_keys, null_keys_are_equal};
using map_type =
concurrent_unordered_map<gdf_size_type, gdf_size_type, decltype(hasher),
decltype(rows_equal)>;
auto map = map_type::create(compute_hash_table_size(input_keys.num_rows()),
unused_key, unused_value, hasher, rows_equal);
// TODO: Explore optimal block size and work per thread.
cudf::util::cuda::grid_config_1d grid_params{input_keys.num_rows(), 256};
if (skip_key_rows_with_nulls) {
auto row_bitmask{cudf::row_bitmask(input_keys, stream)};
hipLaunchKernelGGL(( build_aggregation_map<true, values_have_nulls>)
, dim3(grid_params.num_blocks), dim3(grid_params.num_threads_per_block), 0,
stream, *map, d_input_keys, d_input_values,
*d_sparse_output_values, d_ops.data().get(),
row_bitmask.data().get());
} else {
hipLaunchKernelGGL(( build_aggregation_map<false, values_have_nulls>)
, dim3(grid_params.num_blocks), dim3(grid_params.num_threads_per_block), 0,
stream, *map, d_input_keys, d_input_values,
*d_sparse_output_values, d_ops.data().get(), nullptr);
}
CHECK_STREAM(stream);
return std::make_pair(std::move(map), sparse_output_values);
}
template <bool keys_have_nulls, bool values_have_nulls, typename Map>
auto extract_results(table const& input_keys, table const& input_values,
device_table const& d_input_keys,
table const& sparse_output_values, Map const& map,
hipStream_t stream) {
cudf::table output_keys{
cudf::allocate_like(
input_keys,
keys_have_nulls ? RETAIN : NEVER,
stream)};
cudf::table output_values{
cudf::allocate_like(
sparse_output_values,
values_have_nulls ? RETAIN : NEVER,
stream)};
auto d_sparse_output_values =
device_table::create(sparse_output_values, stream);
auto d_output_keys = device_table::create(output_keys, stream);
auto d_output_values = device_table::create(output_values, stream);
gdf_size_type* d_result_size{nullptr};
RMM_TRY(RMM_ALLOC(&d_result_size, sizeof(gdf_size_type), stream));
CUDA_TRY(hipMemsetAsync(d_result_size, 0, sizeof(gdf_size_type), stream));
cudf::util::cuda::grid_config_1d grid_params{input_keys.num_rows(), 256};
hipLaunchKernelGGL(( extract_groupby_result<keys_have_nulls, values_have_nulls>)
, dim3(grid_params.num_blocks), dim3(grid_params.num_threads_per_block), 0,
stream, map, d_input_keys, *d_output_keys, *d_sparse_output_values,
*d_output_values, d_result_size);
CHECK_STREAM(stream);
gdf_size_type result_size{-1};
CUDA_TRY(hipMemcpyAsync(&result_size, d_result_size, sizeof(gdf_size_type),
hipMemcpyDeviceToHost, stream));
// Update size and null count of output columns
auto update_column = [result_size](gdf_column* col) {
CUDF_EXPECTS(col != nullptr, "Attempt to update Null column.");
col->size = result_size;
set_null_count(*col);
return col;
};
std::transform(output_keys.begin(), output_keys.end(), output_keys.begin(),
update_column);
std::transform(output_values.begin(), output_values.end(),
output_values.begin(), update_column);
return std::make_pair(output_keys, output_values);
}
/**---------------------------------------------------------------------------*
* @brief Computes the groupby operation for a set of keys, values, and
* operators using a hash-based implementation.
*
* The algorithm has two primary steps:
* 1.) Build a hash map
* 2.) Extract the non-empty entries from the hash table
*
* 1.) The hash map is built by inserting every row `i` from the `keys` and
* `values` tables as a single (key,value) pair. When the pair is inserted, if
* the key was not already present in the map, then the corresponding value is
* simply copied to the output. If the key was already present in the map,
* then the inserted `values` row is aggregated with the existing row. This
* aggregation is done for every element `j` in the row by applying aggregation
* operation `j` between the new and existing element.
*
* This process yields a hash map and table holding the resulting aggregation
* rows. The aggregation output table is sparse, i.e., not every row is
* populated. This is because the size of the output is not known a priori, and
* so the output aggregation table is allocated to be as large as the input (the
* upper bound of the output size).
*
* 2.) The final result is materialized by extracting the non-empty keys from
* the hash map and the non-empty rows from the sparse output aggregation table.
* Every non-empty key and value row is appended to the output key and value
* tables.
*
* @tparam keys_have_nulls Indicates keys have one or more null values
* @tparam values_have_nulls Indicates values have one or more null values
* @param keys Table whose rows are used as keys of the groupby
* @param values Table whose rows are aggregated in the groupby
* @param ops Set of aggregation operations to perform for each element in a row
* in the values table
* @param options Options to control behavior of the groupby operation
* @param stream CUDA stream on which all memory allocations and kernels will be
* executed
* @return A pair of the output keys table and output values table
*---------------------------------------------------------------------------**/
template <bool keys_have_nulls, bool values_have_nulls>
auto compute_hash_groupby(cudf::table const& keys, cudf::table const& values,
std::vector<operators> const& ops, Options options,
hipStream_t stream) {
CUDF_EXPECTS(values.num_columns() == static_cast<gdf_size_type>(ops.size()),
"Size mismatch between number of value columns and number of "
"aggregations.");
// An "aggregation request" is the combination of a `gdf_column*` to a column
// of values, and an aggregation operation enum indicating the aggregation
// requested to be performed on the column
std::vector<AggRequestType> original_requests(values.num_columns());
std::transform(values.begin(), values.end(), ops.begin(),
original_requests.begin(),
[](gdf_column const* col, operators op) {
return std::make_pair(const_cast<gdf_column*>(col), op);
});
// Some aggregations are "compound", meaning they need to be satisfied via the
// composition of 1 or more "simple" aggregation requests. For example, MEAN
// is satisfied via the division of the SUM by the COUNT aggregation. We
// translate these compound requests into simple requests, and compute the
// groupby operation for these simple requests. Later, we translate the simple
// requests back to compound request results.
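// For example, a single {x, MEAN} request expands to the simple requests
// {x, SUM} and {x, COUNT}; compute_original_requests() below later combines
// the two (SUM / COUNT) to materialize the MEAN result.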
std::vector<SimpleAggRequestCounter> simple_agg_columns =
compound_to_simple(original_requests);
std::vector<gdf_column*> simple_values_columns;
std::vector<operators> simple_operators;
for (auto const& p : simple_agg_columns) {
const AggRequestType& agg_req_type = p.first;
simple_values_columns.push_back(
const_cast<gdf_column*>(agg_req_type.first));
simple_operators.push_back(agg_req_type.second);
}
cudf::table simple_values_table{simple_values_columns};
auto const d_input_keys = device_table::create(keys);
auto const d_input_values = device_table::create(simple_values_table);
// Step 1: Build hash map
auto result = build_aggregation_map<keys_have_nulls, values_have_nulls>(
keys, simple_values_table, *d_input_keys, *d_input_values,
simple_operators, options, stream);
auto const map{std::move(result.first)};
cudf::table sparse_output_values{result.second};
// Step 2: Extract non-empty entries
cudf::table output_keys;
cudf::table simple_output_values;
std::tie(output_keys, simple_output_values) =
extract_results<keys_have_nulls, values_have_nulls>(
keys, values, *d_input_keys, sparse_output_values, *map, stream);
// Delete intermediate results storage
sparse_output_values.destroy();
// If any of the original requests were compound, compute them from the
// results of simple aggregation requests
cudf::table final_output_values = compute_original_requests(
original_requests, simple_agg_columns, simple_output_values, stream);
return std::make_pair(output_keys, final_output_values);
}
/**---------------------------------------------------------------------------*
* @brief Returns appropriate callable instantiation of `compute_hash_groupby`
* based on presence of null values in keys and values.
*
* @param keys The groupby key columns
* @param values The groupby value columns
* @return Instantiated callable of compute_hash_groupby
*---------------------------------------------------------------------------**/
auto groupby_null_specialization(table const& keys, table const& values) {
if (cudf::has_nulls(keys)) {
if (cudf::has_nulls(values)) {
return compute_hash_groupby<true, true>;
} else {
return compute_hash_groupby<true, false>;
}
} else {
if (cudf::has_nulls(values)) {
return compute_hash_groupby<false, true>;
} else {
return compute_hash_groupby<false, false>;
}
}
}
} // namespace
namespace detail {
std::pair<cudf::table, cudf::table> groupby(cudf::table const& keys,
cudf::table const& values,
std::vector<operators> const& ops,
Options options,
hipStream_t stream = 0) {
CUDF_EXPECTS(keys.num_rows() == values.num_rows(),
"Size mismatch between number of rows in keys and values.");
verify_operators(values, ops);
// Empty inputs
if (keys.num_rows() == 0) {
return std::make_pair(
cudf::empty_like(keys),
cudf::table(0, target_dtypes(column_dtypes(values), ops),
column_dtype_infos(values)));
}
auto compute_groupby = groupby_null_specialization(keys, values);
cudf::table output_keys;
cudf::table output_values;
std::tie(output_keys, output_values) =
compute_groupby(keys, values, ops, options, stream);
update_nvcategories(keys, output_keys, values, output_values);
return std::make_pair(output_keys, output_values);
}
} // namespace detail
std::pair<cudf::table, cudf::table> groupby(cudf::table const& keys,
cudf::table const& values,
std::vector<operators> const& ops,
Options options) {
return detail::groupby(keys, values, ops, options);
}
} // namespace hash
} // namespace groupby
} // namespace cudf
| 57f021205bb5ce4cf2bcd14b4a37f5abd0549123.cu |
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/cudf.h>
#include <bitmask/legacy/bit_mask.cuh>
#include <cudf/copying.hpp>
#include <cudf/groupby.hpp>
#include <cudf/legacy/bitmask.hpp>
#include <cudf/legacy/table.hpp>
#include <cudf/utilities/legacy/nvcategory_util.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <hash/concurrent_unordered_map.cuh>
#include <table/legacy/device_table.cuh>
#include <table/legacy/device_table_row_operators.cuh>
#include <utilities/column_utils.hpp>
#include <utilities/cuda_utils.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include "groupby_kernels.cuh"
#include "groupby/common/aggregation_requests.hpp"
#include "groupby/common/type_info.hpp"
#include "groupby/common/utils.hpp"
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/fill.h>
#include <type_traits>
#include <vector>
#include <utilities/integer_utils.hpp>
namespace cudf {
namespace groupby {
namespace hash {
namespace {
template <bool keys_have_nulls, bool values_have_nulls>
auto build_aggregation_map(table const& input_keys, table const& input_values,
device_table const& d_input_keys,
device_table const& d_input_values,
std::vector<operators> const& ops, Options options,
cudaStream_t stream) {
gdf_size_type constexpr unused_key{std::numeric_limits<gdf_size_type>::max()};
gdf_size_type constexpr unused_value{
std::numeric_limits<gdf_size_type>::max()};
CUDF_EXPECTS(input_keys.num_rows() < unused_key,
"Groupby input size too large.");
// The exact output size is unknown a priori, therefore, use the input size as
// an upper bound.
gdf_size_type const output_size_estimate{input_keys.num_rows()};
cudf::table sparse_output_values{
output_size_estimate,
target_dtypes(column_dtypes(input_values), ops),
column_dtype_infos(input_values),
values_have_nulls,
false,
stream};
initialize_with_identity(sparse_output_values, ops, stream);
auto d_sparse_output_values =
device_table::create(sparse_output_values, stream);
rmm::device_vector<operators> d_ops(ops);
// If we ignore null keys, then nulls are not equivalent
bool const null_keys_are_equal{not options.ignore_null_keys};
bool const skip_key_rows_with_nulls{keys_have_nulls and
not null_keys_are_equal};
row_hasher<keys_have_nulls> hasher{d_input_keys};
row_equality_comparator<keys_have_nulls> rows_equal{
d_input_keys, d_input_keys, null_keys_are_equal};
using map_type =
concurrent_unordered_map<gdf_size_type, gdf_size_type, decltype(hasher),
decltype(rows_equal)>;
auto map = map_type::create(compute_hash_table_size(input_keys.num_rows()),
unused_key, unused_value, hasher, rows_equal);
// TODO: Explore optimal block size and work per thread.
cudf::util::cuda::grid_config_1d grid_params{input_keys.num_rows(), 256};
if (skip_key_rows_with_nulls) {
auto row_bitmask{cudf::row_bitmask(input_keys, stream)};
build_aggregation_map<true, values_have_nulls>
<<<grid_params.num_blocks, grid_params.num_threads_per_block, 0,
stream>>>(*map, d_input_keys, d_input_values,
*d_sparse_output_values, d_ops.data().get(),
row_bitmask.data().get());
} else {
build_aggregation_map<false, values_have_nulls>
<<<grid_params.num_blocks, grid_params.num_threads_per_block, 0,
stream>>>(*map, d_input_keys, d_input_values,
*d_sparse_output_values, d_ops.data().get(), nullptr);
}
CHECK_STREAM(stream);
return std::make_pair(std::move(map), sparse_output_values);
}
template <bool keys_have_nulls, bool values_have_nulls, typename Map>
auto extract_results(table const& input_keys, table const& input_values,
device_table const& d_input_keys,
table const& sparse_output_values, Map const& map,
cudaStream_t stream) {
cudf::table output_keys{
cudf::allocate_like(
input_keys,
keys_have_nulls ? RETAIN : NEVER,
stream)};
cudf::table output_values{
cudf::allocate_like(
sparse_output_values,
values_have_nulls ? RETAIN : NEVER,
stream)};
auto d_sparse_output_values =
device_table::create(sparse_output_values, stream);
auto d_output_keys = device_table::create(output_keys, stream);
auto d_output_values = device_table::create(output_values, stream);
gdf_size_type* d_result_size{nullptr};
RMM_TRY(RMM_ALLOC(&d_result_size, sizeof(gdf_size_type), stream));
CUDA_TRY(cudaMemsetAsync(d_result_size, 0, sizeof(gdf_size_type), stream));
cudf::util::cuda::grid_config_1d grid_params{input_keys.num_rows(), 256};
extract_groupby_result<keys_have_nulls, values_have_nulls>
<<<grid_params.num_blocks, grid_params.num_threads_per_block, 0,
stream>>>(map, d_input_keys, *d_output_keys, *d_sparse_output_values,
*d_output_values, d_result_size);
CHECK_STREAM(stream);
gdf_size_type result_size{-1};
CUDA_TRY(cudaMemcpyAsync(&result_size, d_result_size, sizeof(gdf_size_type),
cudaMemcpyDeviceToHost, stream));
// Update size and null count of output columns
auto update_column = [result_size](gdf_column* col) {
CUDF_EXPECTS(col != nullptr, "Attempt to update Null column.");
col->size = result_size;
set_null_count(*col);
return col;
};
std::transform(output_keys.begin(), output_keys.end(), output_keys.begin(),
update_column);
std::transform(output_values.begin(), output_values.end(),
output_values.begin(), update_column);
return std::make_pair(output_keys, output_values);
}
/**---------------------------------------------------------------------------*
* @brief Computes the groupby operation for a set of keys, values, and
* operators using a hash-based implementation.
*
* The algorithm has two primary steps:
* 1.) Build a hash map
* 2.) Extract the non-empty entries from the hash table
*
* 1.) The hash map is built by inserting every row `i` from the `keys` and
* `values` tables as a single (key,value) pair. When the pair is inserted, if
* the key was not already present in the map, then the corresponding value is
* simply copied to the output. If the key was already present in the map,
* then the inserted `values` row is aggregated with the existing row. This
* aggregation is done for every element `j` in the row by applying aggregation
* operation `j` between the new and existing element.
*
* This process yields a hash map and table holding the resulting aggregation
* rows. The aggregation output table is sparse, i.e., not every row is
* populated. This is because the size of the output is not known a priori, and
* so the output aggregation table is allocated to be as large as the input (the
* upper bound of the output size).
*
* 2.) The final result is materialized by extracting the non-empty keys from
* the hash map and the non-empty rows from the sparse output aggregation table.
* Every non-empty key and value row is appended to the output key and value
* tables.
*
* @tparam keys_have_nulls Indicates keys have one or more null values
* @tparam values_have_nulls Indicates values have one or more null values
* @param keys Table whose rows are used as keys of the groupby
* @param values Table whose rows are aggregated in the groupby
* @param ops Set of aggregation operations to perform for each element in a row
* in the values table
* @param options Options to control behavior of the groupby operation
* @param stream CUDA stream on which all memory allocations and kernels will be
* executed
* @return A pair of the output keys table and output values table
*---------------------------------------------------------------------------**/
template <bool keys_have_nulls, bool values_have_nulls>
auto compute_hash_groupby(cudf::table const& keys, cudf::table const& values,
std::vector<operators> const& ops, Options options,
cudaStream_t stream) {
CUDF_EXPECTS(values.num_columns() == static_cast<gdf_size_type>(ops.size()),
"Size mismatch between number of value columns and number of "
"aggregations.");
// An "aggregation request" is the combination of a `gdf_column*` to a column
// of values, and an aggregation operation enum indicating the aggregation
// requested to be performed on the column
std::vector<AggRequestType> original_requests(values.num_columns());
std::transform(values.begin(), values.end(), ops.begin(),
original_requests.begin(),
[](gdf_column const* col, operators op) {
return std::make_pair(const_cast<gdf_column*>(col), op);
});
// Some aggregations are "compound", meaning they need to be satisfied via the
// composition of 1 or more "simple" aggregation requests. For example, MEAN
// is satisfied via the division of the SUM by the COUNT aggregation. We
// translate these compound requests into simple requests, and compute the
// groupby operation for these simple requests. Later, we translate the simple
// requests back to compound request results.
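// For example, a single {x, MEAN} request expands to the simple requests
// {x, SUM} and {x, COUNT}; compute_original_requests() below later combines
// the two (SUM / COUNT) to materialize the MEAN result.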
std::vector<SimpleAggRequestCounter> simple_agg_columns =
compound_to_simple(original_requests);
std::vector<gdf_column*> simple_values_columns;
std::vector<operators> simple_operators;
for (auto const& p : simple_agg_columns) {
const AggRequestType& agg_req_type = p.first;
simple_values_columns.push_back(
const_cast<gdf_column*>(agg_req_type.first));
simple_operators.push_back(agg_req_type.second);
}
cudf::table simple_values_table{simple_values_columns};
auto const d_input_keys = device_table::create(keys);
auto const d_input_values = device_table::create(simple_values_table);
// Step 1: Build hash map
auto result = build_aggregation_map<keys_have_nulls, values_have_nulls>(
keys, simple_values_table, *d_input_keys, *d_input_values,
simple_operators, options, stream);
auto const map{std::move(result.first)};
cudf::table sparse_output_values{result.second};
// Step 2: Extract non-empty entries
cudf::table output_keys;
cudf::table simple_output_values;
std::tie(output_keys, simple_output_values) =
extract_results<keys_have_nulls, values_have_nulls>(
keys, values, *d_input_keys, sparse_output_values, *map, stream);
// Delete intermediate results storage
sparse_output_values.destroy();
// If any of the original requests were compound, compute them from the
// results of simple aggregation requests
cudf::table final_output_values = compute_original_requests(
original_requests, simple_agg_columns, simple_output_values, stream);
return std::make_pair(output_keys, final_output_values);
}
/**---------------------------------------------------------------------------*
* @brief Returns appropriate callable instantiation of `compute_hash_groupby`
* based on presence of null values in keys and values.
*
* @param keys The groupby key columns
* @param values The groupby value columns
* @return Instantiated callable of compute_hash_groupby
*---------------------------------------------------------------------------**/
auto groupby_null_specialization(table const& keys, table const& values) {
if (cudf::has_nulls(keys)) {
if (cudf::has_nulls(values)) {
return compute_hash_groupby<true, true>;
} else {
return compute_hash_groupby<true, false>;
}
} else {
if (cudf::has_nulls(values)) {
return compute_hash_groupby<false, true>;
} else {
return compute_hash_groupby<false, false>;
}
}
}
} // namespace
namespace detail {
std::pair<cudf::table, cudf::table> groupby(cudf::table const& keys,
cudf::table const& values,
std::vector<operators> const& ops,
Options options,
cudaStream_t stream = 0) {
CUDF_EXPECTS(keys.num_rows() == values.num_rows(),
"Size mismatch between number of rows in keys and values.");
verify_operators(values, ops);
// Empty inputs
if (keys.num_rows() == 0) {
return std::make_pair(
cudf::empty_like(keys),
cudf::table(0, target_dtypes(column_dtypes(values), ops),
column_dtype_infos(values)));
}
auto compute_groupby = groupby_null_specialization(keys, values);
cudf::table output_keys;
cudf::table output_values;
std::tie(output_keys, output_values) =
compute_groupby(keys, values, ops, options, stream);
update_nvcategories(keys, output_keys, values, output_values);
return std::make_pair(output_keys, output_values);
}
} // namespace detail
std::pair<cudf::table, cudf::table> groupby(cudf::table const& keys,
cudf::table const& values,
std::vector<operators> const& ops,
Options options) {
return detail::groupby(keys, values, ops, options);
}
} // namespace hash
} // namespace groupby
} // namespace cudf
|
5404ca9e7b1556de8130e9b393cc4a87a1980771.hip | // !!! This is a file automatically generated by hipify!!!
/* ************************************************************************** */
/* */
/* ::: :::::::: */
/* photon_mapping_pass.cu :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: talemari <[email protected]> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2017/05/29 12:16:47 by talemari #+# #+# */
/* Updated: 2017/06/08 16:58:55 by talemari ### ########.fr */
/* */
/* ************************************************************************** */
#include "rt.cuh"
#include "../../inc/cuda_call.cuh"
#include <hip/hip_runtime.h>
static void init_photon_group(t_raytracing_tools *r, size_t photon_count,
t_photon *init_photon_list);
static float get_total_intensity(t_light *lights);
void photon_mapping_pass(t_raytracing_tools *r)
{
t_photon *init_photon_list;
int photon_count;
photon_count = r->scene->photons_per_pass;
gpu_errchk(hipHostMalloc(&init_photon_list, sizeof(t_photon) *
photon_count));
init_photon_group(r, photon_count, init_photon_list);
shoot_photon_wrapper(r, photon_count, init_photon_list);
hipHostFree(init_photon_list);
}
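/*
** init_photon_group distributes the per-pass photon budget across the lights
** in proportion to each light's share of the total intensity; lights whose
** position is NaN are skipped here and excluded from the total below.
*/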
static void init_photon_group(t_raytracing_tools *r, size_t photon_count,
t_photon *init_photon_list)
{
t_light *l_ptr;
float total_intensity;
float ratio;
int i;
total_intensity = get_total_intensity(r->scene->lights);
l_ptr = r->scene->lights;
ratio = 0;
i = -1;
while (l_ptr)
{
if (v_isnan(l_ptr->pos))
{
l_ptr = l_ptr->next;
continue ;
}
ratio += photon_count * l_ptr->intensity / total_intensity;
while (++i < ratio)
{
init_photon_list[i].pos = l_ptr->pos;
init_photon_list[i].col = vec_to_col(l_ptr->col);
init_photon_list[i].n = v_new(NAN, NAN, NAN);
}
l_ptr = l_ptr->next;
}
}
static float get_total_intensity(t_light *lights)
{
t_light *l_ptr;
float total_intensity;
l_ptr = lights;
total_intensity = 0;
while (l_ptr)
{
if (v_isnan(l_ptr->pos))
{
l_ptr = l_ptr->next;
continue ;
}
total_intensity += l_ptr->intensity;
l_ptr = l_ptr->next;
}
return (total_intensity);
}
| 5404ca9e7b1556de8130e9b393cc4a87a1980771.cu | /* ************************************************************************** */
/* */
/* ::: :::::::: */
/* photon_mapping_pass.cu :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: talemari <[email protected]> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2017/05/29 12:16:47 by talemari #+# #+# */
/* Updated: 2017/06/08 16:58:55 by talemari ### ########.fr */
/* */
/* ************************************************************************** */
#include "rt.cuh"
#include "../../inc/cuda_call.cuh"
#include <cuda.h>
static void init_photon_group(t_raytracing_tools *r, size_t photon_count,
t_photon *init_photon_list);
static float get_total_intensity(t_light *lights);
void photon_mapping_pass(t_raytracing_tools *r)
{
t_photon *init_photon_list;
int photon_count;
photon_count = r->scene->photons_per_pass;
gpu_errchk(cudaMallocHost(&init_photon_list, sizeof(t_photon) *
photon_count));
init_photon_group(r, photon_count, init_photon_list);
shoot_photon_wrapper(r, photon_count, init_photon_list);
cudaFreeHost(init_photon_list);
}
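/*
** init_photon_group distributes the per-pass photon budget across the lights
** in proportion to each light's share of the total intensity; lights whose
** position is NaN are skipped here and excluded from the total below.
*/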
static void init_photon_group(t_raytracing_tools *r, size_t photon_count,
t_photon *init_photon_list)
{
t_light *l_ptr;
float total_intensity;
float ratio;
int i;
total_intensity = get_total_intensity(r->scene->lights);
l_ptr = r->scene->lights;
ratio = 0;
i = -1;
while (l_ptr)
{
if (v_isnan(l_ptr->pos))
{
l_ptr = l_ptr->next;
continue ;
}
ratio += photon_count * l_ptr->intensity / total_intensity;
while (++i < ratio)
{
init_photon_list[i].pos = l_ptr->pos;
init_photon_list[i].col = vec_to_col(l_ptr->col);
init_photon_list[i].n = v_new(NAN, NAN, NAN);
}
l_ptr = l_ptr->next;
}
}
static float get_total_intensity(t_light *lights)
{
t_light *l_ptr;
float total_intensity;
l_ptr = lights;
total_intensity = 0;
while (l_ptr)
{
if (v_isnan(l_ptr->pos))
{
l_ptr = l_ptr->next;
continue ;
}
total_intensity += l_ptr->intensity;
l_ptr = l_ptr->next;
}
return (total_intensity);
}
|
f1bfa927ad8b85d9bdf662a63497dd304c62c23d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 12-Jun-2012 19:13:14
//
// user function
__device__
#include "res_calc.h"
// CUDA kernel function
__global__ void op_cuda_res_calc(
double *ind_arg0,
double *ind_arg1,
double *ind_arg2,
int *ind_map,
short *arg_map,
double *arg8,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg9_l[1];
double arg10_l[1];
double arg11_l[1];
double arg12_l[1];
double *arg0_vec[4];
double *arg1_vec[4];
double *arg2_vec[4] = {
arg9_l,
arg10_l,
arg11_l,
arg12_l
};
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ double *ind_arg0_s;
__shared__ double *ind_arg1_s;
__shared__ double *ind_arg2_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return;
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*3];
ind_arg1_size = ind_arg_sizes[1+blockId*3];
ind_arg2_size = ind_arg_sizes[2+blockId*3];
ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*3];
ind_arg1_map = &ind_map[4*set_size] + ind_arg_offs[1+blockId*3];
ind_arg2_map = &ind_map[8*set_size] + ind_arg_offs[2+blockId*3];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2);
ind_arg1_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*1);
ind_arg2_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*1; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%1+ind_arg1_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ZERO_double;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<1; d++)
arg9_l[d] = ZERO_double;
for (int d=0; d<1; d++)
arg10_l[d] = ZERO_double;
for (int d=0; d<1; d++)
arg11_l[d] = ZERO_double;
for (int d=0; d<1; d++)
arg12_l[d] = ZERO_double;
arg0_vec[0] = ind_arg0_s+arg_map[0*set_size+n+offset_b]*2;
arg0_vec[1] = ind_arg0_s+arg_map[1*set_size+n+offset_b]*2;
arg0_vec[2] = ind_arg0_s+arg_map[2*set_size+n+offset_b]*2;
arg0_vec[3] = ind_arg0_s+arg_map[3*set_size+n+offset_b]*2;
arg1_vec[0] = ind_arg1_s+arg_map[4*set_size+n+offset_b]*1;
arg1_vec[1] = ind_arg1_s+arg_map[5*set_size+n+offset_b]*1;
arg1_vec[2] = ind_arg1_s+arg_map[6*set_size+n+offset_b]*1;
arg1_vec[3] = ind_arg1_s+arg_map[7*set_size+n+offset_b]*1;
// user-supplied kernel call
res_calc( arg0_vec,
arg1_vec,
arg8+(n+offset_b),
arg2_vec);
col2 = colors[n+offset_b];
}
// store local variables
int arg9_map;
int arg10_map;
int arg11_map;
int arg12_map;
if (col2>=0) {
arg9_map = arg_map[8*set_size+n+offset_b];
arg10_map = arg_map[9*set_size+n+offset_b];
arg11_map = arg_map[10*set_size+n+offset_b];
arg12_map = arg_map[11*set_size+n+offset_b];
}
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<1; d++)
ind_arg2_s[d+arg9_map*1] += arg9_l[d];
for (int d=0; d<1; d++)
ind_arg2_s[d+arg10_map*1] += arg10_l[d];
for (int d=0; d<1; d++)
ind_arg2_s[d+arg11_map*1] += arg11_l[d];
for (int d=0; d<1; d++)
ind_arg2_s[d+arg12_map*1] += arg12_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2[n%1+ind_arg2_map[n/1]*1] += ind_arg2_s[n];
}
// host stub function
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg4,
op_arg arg8,
op_arg arg9 ){
int nargs = 13;
op_arg args[13];
arg0.idx = 0;
args[0] = arg0;
for (int v = 1; v < 4; v++) {
args[0 + v] = op_arg_dat(arg0.dat, v, arg0.map, 2, "double", OP_READ);
}
arg4.idx = 0;
args[4] = arg4;
for (int v = 1; v < 4; v++) {
args[4 + v] = op_arg_dat(arg4.dat, v, arg4.map, 1, "double", OP_READ);
}
args[8] = arg8;
arg9.idx = 0;
args[9] = arg9;
for (int v = 1; v < 4; v++) {
args[9 + v] = op_arg_dat(arg9.dat, v, arg9.map, 1, "double", OP_INC);
}
int ninds = 3;
int inds[13] = {0,0,0,0,1,1,1,1,-1,2,2,2,2};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc\n");
}
// get plan
#ifdef OP_PART_SIZE_0
int part_size = OP_PART_SIZE_0;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges(set, nargs, args);
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers_core(&cpu_t1, &wall_t1);
if (set->size >0) {
int op2_stride = set->size + set->exec_size + set->nonexec_size;
op_decl_const_char(1, "int", sizeof(int), (char *)&op2_stride, "op2_stride");
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
if (col==Plan->ncolors_core) op_mpi_wait_all(nargs,args);
#ifdef OP_BLOCK_SIZE_0
int nthread = OP_BLOCK_SIZE_0;
#else
int nthread = OP_block_size;
#endif
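// The launch keeps gridDim.x at or below 65535; colours that need more blocks
// spill into gridDim.y, and the kernel reconstructs the linear block id as
// blockIdx.x + blockIdx.y*gridDim.x.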
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
int nshared = Plan->nsharedCol[col];
hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread),nshared, 0,
(double *)arg0.data_d,
(double *)arg4.data_d,
(double *)arg9.data_d,
Plan->ind_map,
Plan->loc_map,
(double *)arg8.data_d,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set_size);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("op_cuda_res_calc execution failed\n");
}
block_offset += Plan->ncolblk[col];
}
op_timing_realloc(0);
OP_kernels[0].transfer += Plan->transfer;
OP_kernels[0].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit(nargs, args);
// update kernel record
op_timers_core(&cpu_t2, &wall_t2);
op_timing_realloc(0);
OP_kernels[0].name = name;
OP_kernels[0].count += 1;
OP_kernels[0].time += wall_t2 - wall_t1;
}
| f1bfa927ad8b85d9bdf662a63497dd304c62c23d.cu | //
// auto-generated by op2.m on 12-Jun-2012 19:13:14
//
// user function
__device__
#include "res_calc.h"
// CUDA kernel function
__global__ void op_cuda_res_calc(
double *ind_arg0,
double *ind_arg1,
double *ind_arg2,
int *ind_map,
short *arg_map,
double *arg8,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg9_l[1];
double arg10_l[1];
double arg11_l[1];
double arg12_l[1];
double *arg0_vec[4];
double *arg1_vec[4];
double *arg2_vec[4] = {
arg9_l,
arg10_l,
arg11_l,
arg12_l
};
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ double *ind_arg0_s;
__shared__ double *ind_arg1_s;
__shared__ double *ind_arg2_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return;
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*3];
ind_arg1_size = ind_arg_sizes[1+blockId*3];
ind_arg2_size = ind_arg_sizes[2+blockId*3];
ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*3];
ind_arg1_map = &ind_map[4*set_size] + ind_arg_offs[1+blockId*3];
ind_arg2_map = &ind_map[8*set_size] + ind_arg_offs[2+blockId*3];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2);
ind_arg1_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*1);
ind_arg2_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*1; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%1+ind_arg1_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ZERO_double;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<1; d++)
arg9_l[d] = ZERO_double;
for (int d=0; d<1; d++)
arg10_l[d] = ZERO_double;
for (int d=0; d<1; d++)
arg11_l[d] = ZERO_double;
for (int d=0; d<1; d++)
arg12_l[d] = ZERO_double;
arg0_vec[0] = ind_arg0_s+arg_map[0*set_size+n+offset_b]*2;
arg0_vec[1] = ind_arg0_s+arg_map[1*set_size+n+offset_b]*2;
arg0_vec[2] = ind_arg0_s+arg_map[2*set_size+n+offset_b]*2;
arg0_vec[3] = ind_arg0_s+arg_map[3*set_size+n+offset_b]*2;
arg1_vec[0] = ind_arg1_s+arg_map[4*set_size+n+offset_b]*1;
arg1_vec[1] = ind_arg1_s+arg_map[5*set_size+n+offset_b]*1;
arg1_vec[2] = ind_arg1_s+arg_map[6*set_size+n+offset_b]*1;
arg1_vec[3] = ind_arg1_s+arg_map[7*set_size+n+offset_b]*1;
// user-supplied kernel call
res_calc( arg0_vec,
arg1_vec,
arg8+(n+offset_b),
arg2_vec);
col2 = colors[n+offset_b];
}
// store local variables
int arg9_map;
int arg10_map;
int arg11_map;
int arg12_map;
if (col2>=0) {
arg9_map = arg_map[8*set_size+n+offset_b];
arg10_map = arg_map[9*set_size+n+offset_b];
arg11_map = arg_map[10*set_size+n+offset_b];
arg12_map = arg_map[11*set_size+n+offset_b];
}
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<1; d++)
ind_arg2_s[d+arg9_map*1] += arg9_l[d];
for (int d=0; d<1; d++)
ind_arg2_s[d+arg10_map*1] += arg10_l[d];
for (int d=0; d<1; d++)
ind_arg2_s[d+arg11_map*1] += arg11_l[d];
for (int d=0; d<1; d++)
ind_arg2_s[d+arg12_map*1] += arg12_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2[n%1+ind_arg2_map[n/1]*1] += ind_arg2_s[n];
}
// host stub function
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg4,
op_arg arg8,
op_arg arg9 ){
int nargs = 13;
op_arg args[13];
arg0.idx = 0;
args[0] = arg0;
for (int v = 1; v < 4; v++) {
args[0 + v] = op_arg_dat(arg0.dat, v, arg0.map, 2, "double", OP_READ);
}
arg4.idx = 0;
args[4] = arg4;
for (int v = 1; v < 4; v++) {
args[4 + v] = op_arg_dat(arg4.dat, v, arg4.map, 1, "double", OP_READ);
}
args[8] = arg8;
arg9.idx = 0;
args[9] = arg9;
for (int v = 1; v < 4; v++) {
args[9 + v] = op_arg_dat(arg9.dat, v, arg9.map, 1, "double", OP_INC);
}
int ninds = 3;
int inds[13] = {0,0,0,0,1,1,1,1,-1,2,2,2,2};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc\n");
}
// get plan
#ifdef OP_PART_SIZE_0
int part_size = OP_PART_SIZE_0;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges(set, nargs, args);
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers_core(&cpu_t1, &wall_t1);
if (set->size >0) {
int op2_stride = set->size + set->exec_size + set->nonexec_size;
op_decl_const_char(1, "int", sizeof(int), (char *)&op2_stride, "op2_stride");
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
if (col==Plan->ncolors_core) op_mpi_wait_all(nargs,args);
#ifdef OP_BLOCK_SIZE_0
int nthread = OP_BLOCK_SIZE_0;
#else
int nthread = OP_block_size;
#endif
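// The launch keeps gridDim.x at or below 65535; colours that need more blocks
// spill into gridDim.y, and the kernel reconstructs the linear block id as
// blockIdx.x + blockIdx.y*gridDim.x.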
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
int nshared = Plan->nsharedCol[col];
op_cuda_res_calc<<<nblocks,nthread,nshared>>>(
(double *)arg0.data_d,
(double *)arg4.data_d,
(double *)arg9.data_d,
Plan->ind_map,
Plan->loc_map,
(double *)arg8.data_d,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set_size);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckMsg("op_cuda_res_calc execution failed\n");
}
block_offset += Plan->ncolblk[col];
}
op_timing_realloc(0);
OP_kernels[0].transfer += Plan->transfer;
OP_kernels[0].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit(nargs, args);
// update kernel record
op_timers_core(&cpu_t2, &wall_t2);
op_timing_realloc(0);
OP_kernels[0].name = name;
OP_kernels[0].count += 1;
OP_kernels[0].time += wall_t2 - wall_t1;
}
|
f8ee84b3e3757acc040d9bc55e4d6460ccbe07b6.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at {
namespace native {
const char asinh_name[] = "asinh";
void asinh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
static const auto asinh_string = jiterator_stringify(
template <typename T> T asinh(T a) { return std::asinh(a); });
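// with the jiterator enabled, the complex-path kernel is compiled from this string
// at runtime for the dispatched dtype; the #else branch below is the precompiled
// gpu_kernel fallback used when runtime compilation is unavailable.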
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asinh_name", [&]() {
jitted_gpu_kernel<
/*name=*/asinh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, asinh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asinh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::asinh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"asinh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asinh(a);
});
});
}
}
REGISTER_DISPATCH(asinh_stub, &asinh_kernel_cuda);
} // namespace native
} // namespace at
| f8ee84b3e3757acc040d9bc55e4d6460ccbe07b6.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at {
namespace native {
const char asinh_name[] = "asinh";
void asinh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
static const auto asinh_string = jiterator_stringify(
template <typename T> T asinh(T a) { return std::asinh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asinh_name", [&]() {
jitted_gpu_kernel<
/*name=*/asinh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, asinh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asinh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::asinh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"asinh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asinh(a);
});
});
}
}
REGISTER_DISPATCH(asinh_stub, &asinh_kernel_cuda);
} // namespace native
} // namespace at
|
20c8cbb3792837813d174d9cff000d50e0608b93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include "cuda_draw.h"
namespace cudraw {
/******************************************************************************
Internal Functions >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
******************************************************************************/
namespace {
__device__ unsigned char __cudraw__d_font[128][8] = {
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0000 (nul)
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0001
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0002
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0003
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0004
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0005
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0006
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0007
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0008
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0009
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+000A
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+000B
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+000C
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+000D
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+000E
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+000F
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0010
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0011
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0012
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0013
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0014
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0015
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0016
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0017
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0018
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0019
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+001A
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+001B
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+001C
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+001D
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+001E
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+001F
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0020 (space)
{ 0x18, 0x3C, 0x3C, 0x18, 0x18, 0x00, 0x18, 0x00}, // U+0021 (!)
{ 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0022 (")
{ 0x36, 0x36, 0x7F, 0x36, 0x7F, 0x36, 0x36, 0x00}, // U+0023 (#)
{ 0x0C, 0x3E, 0x03, 0x1E, 0x30, 0x1F, 0x0C, 0x00}, // U+0024 ($)
{ 0x00, 0x63, 0x33, 0x18, 0x0C, 0x66, 0x63, 0x00}, // U+0025 (%)
{ 0x1C, 0x36, 0x1C, 0x6E, 0x3B, 0x33, 0x6E, 0x00}, // U+0026 (&)
{ 0x06, 0x06, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0027 (')
{ 0x18, 0x0C, 0x06, 0x06, 0x06, 0x0C, 0x18, 0x00}, // U+0028 (()
{ 0x06, 0x0C, 0x18, 0x18, 0x18, 0x0C, 0x06, 0x00}, // U+0029 ())
{ 0x00, 0x66, 0x3C, 0xFF, 0x3C, 0x66, 0x00, 0x00}, // U+002A (*)
{ 0x00, 0x0C, 0x0C, 0x3F, 0x0C, 0x0C, 0x00, 0x00}, // U+002B (+)
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0C, 0x0C, 0x06}, // U+002C (,)
{ 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x00}, // U+002D (-)
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0C, 0x0C, 0x00}, // U+002E (.)
{ 0x60, 0x30, 0x18, 0x0C, 0x06, 0x03, 0x01, 0x00}, // U+002F (/)
{ 0x3E, 0x63, 0x73, 0x7B, 0x6F, 0x67, 0x3E, 0x00}, // U+0030 (0)
{ 0x0C, 0x0E, 0x0C, 0x0C, 0x0C, 0x0C, 0x3F, 0x00}, // U+0031 (1)
{ 0x1E, 0x33, 0x30, 0x1C, 0x06, 0x33, 0x3F, 0x00}, // U+0032 (2)
{ 0x1E, 0x33, 0x30, 0x1C, 0x30, 0x33, 0x1E, 0x00}, // U+0033 (3)
{ 0x38, 0x3C, 0x36, 0x33, 0x7F, 0x30, 0x78, 0x00}, // U+0034 (4)
{ 0x3F, 0x03, 0x1F, 0x30, 0x30, 0x33, 0x1E, 0x00}, // U+0035 (5)
{ 0x1C, 0x06, 0x03, 0x1F, 0x33, 0x33, 0x1E, 0x00}, // U+0036 (6)
{ 0x3F, 0x33, 0x30, 0x18, 0x0C, 0x0C, 0x0C, 0x00}, // U+0037 (7)
{ 0x1E, 0x33, 0x33, 0x1E, 0x33, 0x33, 0x1E, 0x00}, // U+0038 (8)
{ 0x1E, 0x33, 0x33, 0x3E, 0x30, 0x18, 0x0E, 0x00}, // U+0039 (9)
{ 0x00, 0x0C, 0x0C, 0x00, 0x00, 0x0C, 0x0C, 0x00}, // U+003A (:)
{ 0x00, 0x0C, 0x0C, 0x00, 0x00, 0x0C, 0x0C, 0x06}, // U+003B (;)
{ 0x18, 0x0C, 0x06, 0x03, 0x06, 0x0C, 0x18, 0x00}, // U+003C (<)
{ 0x00, 0x00, 0x3F, 0x00, 0x00, 0x3F, 0x00, 0x00}, // U+003D (=)
{ 0x06, 0x0C, 0x18, 0x30, 0x18, 0x0C, 0x06, 0x00}, // U+003E (>)
{ 0x1E, 0x33, 0x30, 0x18, 0x0C, 0x00, 0x0C, 0x00}, // U+003F (?)
{ 0x3E, 0x63, 0x7B, 0x7B, 0x7B, 0x03, 0x1E, 0x00}, // U+0040 (@)
{ 0x0C, 0x1E, 0x33, 0x33, 0x3F, 0x33, 0x33, 0x00}, // U+0041 (A)
{ 0x3F, 0x66, 0x66, 0x3E, 0x66, 0x66, 0x3F, 0x00}, // U+0042 (B)
{ 0x3C, 0x66, 0x03, 0x03, 0x03, 0x66, 0x3C, 0x00}, // U+0043 (C)
{ 0x1F, 0x36, 0x66, 0x66, 0x66, 0x36, 0x1F, 0x00}, // U+0044 (D)
{ 0x7F, 0x46, 0x16, 0x1E, 0x16, 0x46, 0x7F, 0x00}, // U+0045 (E)
{ 0x7F, 0x46, 0x16, 0x1E, 0x16, 0x06, 0x0F, 0x00}, // U+0046 (F)
{ 0x3C, 0x66, 0x03, 0x03, 0x73, 0x66, 0x7C, 0x00}, // U+0047 (G)
{ 0x33, 0x33, 0x33, 0x3F, 0x33, 0x33, 0x33, 0x00}, // U+0048 (H)
{ 0x1E, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x1E, 0x00}, // U+0049 (I)
{ 0x78, 0x30, 0x30, 0x30, 0x33, 0x33, 0x1E, 0x00}, // U+004A (J)
{ 0x67, 0x66, 0x36, 0x1E, 0x36, 0x66, 0x67, 0x00}, // U+004B (K)
{ 0x0F, 0x06, 0x06, 0x06, 0x46, 0x66, 0x7F, 0x00}, // U+004C (L)
{ 0x63, 0x77, 0x7F, 0x7F, 0x6B, 0x63, 0x63, 0x00}, // U+004D (M)
{ 0x63, 0x67, 0x6F, 0x7B, 0x73, 0x63, 0x63, 0x00}, // U+004E (N)
{ 0x1C, 0x36, 0x63, 0x63, 0x63, 0x36, 0x1C, 0x00}, // U+004F (O)
{ 0x3F, 0x66, 0x66, 0x3E, 0x06, 0x06, 0x0F, 0x00}, // U+0050 (P)
{ 0x1E, 0x33, 0x33, 0x33, 0x3B, 0x1E, 0x38, 0x00}, // U+0051 (Q)
{ 0x3F, 0x66, 0x66, 0x3E, 0x36, 0x66, 0x67, 0x00}, // U+0052 (R)
{ 0x1E, 0x33, 0x07, 0x0E, 0x38, 0x33, 0x1E, 0x00}, // U+0053 (S)
{ 0x3F, 0x2D, 0x0C, 0x0C, 0x0C, 0x0C, 0x1E, 0x00}, // U+0054 (T)
{ 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x3F, 0x00}, // U+0055 (U)
{ 0x33, 0x33, 0x33, 0x33, 0x33, 0x1E, 0x0C, 0x00}, // U+0056 (V)
{ 0x63, 0x63, 0x63, 0x6B, 0x7F, 0x77, 0x63, 0x00}, // U+0057 (W)
{ 0x63, 0x63, 0x36, 0x1C, 0x1C, 0x36, 0x63, 0x00}, // U+0058 (X)
{ 0x33, 0x33, 0x33, 0x1E, 0x0C, 0x0C, 0x1E, 0x00}, // U+0059 (Y)
{ 0x7F, 0x63, 0x31, 0x18, 0x4C, 0x66, 0x7F, 0x00}, // U+005A (Z)
{ 0x1E, 0x06, 0x06, 0x06, 0x06, 0x06, 0x1E, 0x00}, // U+005B ([)
{ 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x40, 0x00}, // U+005C (\)
{ 0x1E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1E, 0x00}, // U+005D (])
{ 0x08, 0x1C, 0x36, 0x63, 0x00, 0x00, 0x00, 0x00}, // U+005E (^)
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF}, // U+005F (_)
{ 0x0C, 0x0C, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0060 (`)
{ 0x00, 0x00, 0x1E, 0x30, 0x3E, 0x33, 0x6E, 0x00}, // U+0061 (a)
{ 0x07, 0x06, 0x06, 0x3E, 0x66, 0x66, 0x3B, 0x00}, // U+0062 (b)
{ 0x00, 0x00, 0x1E, 0x33, 0x03, 0x33, 0x1E, 0x00}, // U+0063 (c)
{ 0x38, 0x30, 0x30, 0x3e, 0x33, 0x33, 0x6E, 0x00}, // U+0064 (d)
{ 0x00, 0x00, 0x1E, 0x33, 0x3f, 0x03, 0x1E, 0x00}, // U+0065 (e)
{ 0x1C, 0x36, 0x06, 0x0f, 0x06, 0x06, 0x0F, 0x00}, // U+0066 (f)
{ 0x00, 0x00, 0x6E, 0x33, 0x33, 0x3E, 0x30, 0x1F}, // U+0067 (g)
{ 0x07, 0x06, 0x36, 0x6E, 0x66, 0x66, 0x67, 0x00}, // U+0068 (h)
{ 0x0C, 0x00, 0x0E, 0x0C, 0x0C, 0x0C, 0x1E, 0x00}, // U+0069 (i)
{ 0x30, 0x00, 0x30, 0x30, 0x30, 0x33, 0x33, 0x1E}, // U+006A (j)
{ 0x07, 0x06, 0x66, 0x36, 0x1E, 0x36, 0x67, 0x00}, // U+006B (k)
{ 0x0E, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x1E, 0x00}, // U+006C (l)
{ 0x00, 0x00, 0x33, 0x7F, 0x7F, 0x6B, 0x63, 0x00}, // U+006D (m)
{ 0x00, 0x00, 0x1F, 0x33, 0x33, 0x33, 0x33, 0x00}, // U+006E (n)
{ 0x00, 0x00, 0x1E, 0x33, 0x33, 0x33, 0x1E, 0x00}, // U+006F (o)
{ 0x00, 0x00, 0x3B, 0x66, 0x66, 0x3E, 0x06, 0x0F}, // U+0070 (p)
{ 0x00, 0x00, 0x6E, 0x33, 0x33, 0x3E, 0x30, 0x78}, // U+0071 (q)
{ 0x00, 0x00, 0x3B, 0x6E, 0x66, 0x06, 0x0F, 0x00}, // U+0072 (r)
{ 0x00, 0x00, 0x3E, 0x03, 0x1E, 0x30, 0x1F, 0x00}, // U+0073 (s)
{ 0x08, 0x0C, 0x3E, 0x0C, 0x0C, 0x2C, 0x18, 0x00}, // U+0074 (t)
{ 0x00, 0x00, 0x33, 0x33, 0x33, 0x33, 0x6E, 0x00}, // U+0075 (u)
{ 0x00, 0x00, 0x33, 0x33, 0x33, 0x1E, 0x0C, 0x00}, // U+0076 (v)
{ 0x00, 0x00, 0x63, 0x6B, 0x7F, 0x7F, 0x36, 0x00}, // U+0077 (w)
{ 0x00, 0x00, 0x63, 0x36, 0x1C, 0x36, 0x63, 0x00}, // U+0078 (x)
{ 0x00, 0x00, 0x33, 0x33, 0x33, 0x3E, 0x30, 0x1F}, // U+0079 (y)
{ 0x00, 0x00, 0x3F, 0x19, 0x0C, 0x26, 0x3F, 0x00}, // U+007A (z)
{ 0x38, 0x0C, 0x0C, 0x07, 0x0C, 0x0C, 0x38, 0x00}, // U+007B ({)
{ 0x18, 0x18, 0x18, 0x00, 0x18, 0x18, 0x18, 0x00}, // U+007C (|)
{ 0x07, 0x0C, 0x0C, 0x38, 0x0C, 0x0C, 0x07, 0x00}, // U+007D (})
{ 0x6E, 0x3B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+007E (~)
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} // U+007F
};
// return distance
// type 1: point outside segment near second point (x1, y1)
// type 2: point outside segment near first point (x0, y0)
// type 3: point inside the segment
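// worked example: for the segment (0,0)-(10,0) and the point (12,3), AB.BE = 20 > 0,
// so the result is type 1 with distance sqrt(2*2 + 3*3) = sqrt(13) to (10,0).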
__device__ float minDistanceToLineSegment(
int x0, int y0, int x1, int y1, int x, int y, int& type) {
// vector AB
float AB_x = x1 - x0;
float AB_y = y1 - y0;
// vector BE
float BE_x = x - x1;
float BE_y = y - y1;
// vector AE
float AE_x = x - x0;
float AE_y = y - y0;
// Calculating the dot product
float AB_BE = AB_x * BE_x + AB_y * BE_y;
float AB_AE = AB_x * AE_x + AB_y * AE_y;
// Minimum distance from
// point E to the line segment
float res = 0;
type = 0;
if (AB_BE > 0) {
// Finding the magnitude
res = sqrtf((x - x1) * (x - x1) + (y - y1) * (y - y1));
type = 1;
} else if (AB_AE < 0) {
res = sqrtf((x - x0) * (x - x0) + (y - y0) * (y - y0));
type = 2;
} else {
// Finding the perpendicular distance
float mod = sqrt(AB_x * AB_x + AB_y * AB_y);
res = abs(AB_x * AE_y - AB_y * AE_x) / mod;
type = 3;
}
return res;
}
// Thread per block: 1D - 512
// Blocks per grid: 2D - height, (width + 511) / 512
template<class T>
__global__ void checkPixel(
size_t width, size_t height, uint8_t* d_img, T shape, AABB aabb) {
size_t i = blockIdx.x;
size_t j = blockIdx.y * 512 + threadIdx.x;
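// i is the image row (one block row per scanline) and j the column within it; the
// AABB test below limits the shading work to the shape's bounding box.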
if (i < height && j < width && aabb.inside(j, i)) {
size_t index_single = i * width + j;
Color prev_color(d_img[3 * index_single],
d_img[3 * index_single + 1],
d_img[3 * index_single + 2]);
auto color = shape.checkPixel(j, i, prev_color);
d_img[3 * index_single] = color.r;
d_img[3 * index_single + 1] = color.g;
d_img[3 * index_single + 2] = color.b;
}
}
}
/******************************************************************************
Internal Functions <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
******************************************************************************/
/******************************************************************************
Utilities >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
******************************************************************************/
uint8_t* allocateImage(size_t width, size_t height) {
uint8_t* d_res = nullptr;
hipMalloc(&d_res, width * height * 3);
return d_res;
}
uint8_t* uploadImage(size_t width, size_t height, uint8_t* img) {
uint8_t* d_res = allocateImage(width, height);
CHECK(d_res);
hipMemcpy(d_res, img, width * height * 3, hipMemcpyHostToDevice);
return d_res;
}
void uploadImage(size_t width, size_t height, uint8_t* img, uint8_t* d_img) {
CHECK(img);
CHECK(d_img);
hipMemcpy(d_img, img, width * height * 3, hipMemcpyHostToDevice);
}
void downloadImage(size_t width, size_t height, uint8_t* d_img, uint8_t* img) {
hipMemcpy(img, d_img, width * height * 3, hipMemcpyDeviceToHost);
}
void copyImage(uint8_t* d_dst, uint8_t* d_src, size_t size) {
hipMemcpy(d_dst, d_src, size, hipMemcpyDeviceToDevice);
}
void freeImage(uint8_t* d_img) {
hipFree(d_img);
}
/******************************************************************************
Utilities <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
******************************************************************************/
/******************************************************************************
API >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
******************************************************************************/
__device__ Color Line::checkPixel(int x, int y, Color prev_color) {
int type = 0;
float dist = minDistanceToLineSegment(x0, y0, x1, y1, x, y, type);
if (type == 3 && dist <= width / 2.f) {
float frac = dist - floorf(dist);
if (dist >= width / 2.f - 1.f) {
Color c = prev_color;
c.overlay(color, frac);
return c;
} else {
return color;
}
} else {
// TODO: add cap types
return prev_color;
}
}
__device__ __host__ AABB Line::getAABB() const {
AABB res(x0 < x1 ? x0 : x1,
y0 < y1 ? y0 : y1,
x0 < x1 ? x1 : x0,
y0 < y1 ? y1 : y0);
res.inflate(width);
return res;
}
__device__ Color String::checkPixel(int in_x, int in_y, Color prev_color) {
int normal_in_x = (float)(in_x - x) / font_size * 8.f + x;
int normal_in_y = (float)(in_y - y) / font_size * 8.f + y;
int index = (normal_in_x - x) / 8;
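// normal_in_x/normal_in_y rescale the pixel into 8x8 font units, index selects the
// character in d_buffer, and the bit (local_y, local_x) of its glyph bitmap below
// decides whether this pixel belongs to the character.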
if (index >= 0 && index < length && normal_in_y >= y && normal_in_y < y + 8) {
char c = d_buffer[index];
int local_x = (normal_in_x - x) % 8;
int local_y = normal_in_y - y;
if (c >= 0 && c < 128) {
bool has_pixel = (__cudraw__d_font[c][local_y] >> local_x) & 1;
if (has_pixel) {
return color;
}
}
}
return prev_color;
}
__device__ __host__ AABB String::getAABB() const {
AABB res(x, y, x + font_size * (float)length, y + font_size);
return res;
}
__device__ Color Rect::checkPixel(int x, int y, Color prev_color) {
// TODO: we can optimize this
int half_w = ceilf(width / 2.f);
int x_min = min(x0, x1);
int x_max = max(x0, x1);
int y_min = min(y0, y1);
int y_max = max(y0, y1);
if (x >= x_min - half_w && x <= x_max + half_w &&
y >= y_min - half_w && y <= y_max + half_w) {
Color c = prev_color;
if (fill && x >= x_min && x <= x_max && y >= y_min && y <= y_max) {
c.overlay(fill_color, fill_alpha);
}
int dist_min_x = min(abs(x - x0), abs(x - x1));
int dist_min_y = min(abs(y - y0), abs(y - y1));
int dist_min = min(dist_min_x, dist_min_y);
float dist = width / 2.f + 1.f - (float)dist_min;
if (dist >= 1) {
c = color;
} else if (dist >= 0) {
float frac = dist - floorf(dist);
c.overlay(color, frac);
}
return c;
}
return prev_color;
}
__device__ __host__ AABB Rect::getAABB() const {
AABB res(x0 < x1 ? x0 : x1,
y0 < y1 ? y0 : y1,
x0 < x1 ? x1 : x0,
y0 < y1 ? y1 : y0);
res.inflate(width);
return res;
}
template<class T>
void scanPixel(size_t width, size_t height, uint8_t* d_img, const T& shape) {
CHECK(d_img);
AABB aabb = shape.getAABB();
dim3 gridDims(height, (width + 511) / 512);
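// e.g. a 1920x1080 image gives gridDims(1080, 4): one block row per scanline and
// ceil(1920/512) = 4 blocks of 512 threads across it.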
hipLaunchKernelGGL(( checkPixel), dim3(gridDims), dim3(512), 0, 0, width, height, d_img, shape, aabb);
}
// Explicit template instantiation
template void scanPixel<Line>(size_t, size_t, uint8_t*, const Line&);
template void scanPixel<String>(size_t, size_t, uint8_t*, const String&);
template void scanPixel<Rect>(size_t, size_t, uint8_t*, const Rect&);
/******************************************************************************
API <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
******************************************************************************/
}
| 20c8cbb3792837813d174d9cff000d50e0608b93.cu |
#include <stdio.h>
#include <assert.h>
#include "cuda_draw.h"
namespace cudraw {
/******************************************************************************
Internal Functions >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
******************************************************************************/
namespace {
__device__ unsigned char __cudraw__d_font[128][8] = {
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0000 (nul)
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0001
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0002
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0003
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0004
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0005
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0006
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0007
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0008
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0009
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+000A
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+000B
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+000C
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+000D
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+000E
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+000F
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0010
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0011
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0012
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0013
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0014
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0015
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0016
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0017
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0018
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0019
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+001A
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+001B
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+001C
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+001D
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+001E
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+001F
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0020 (space)
{ 0x18, 0x3C, 0x3C, 0x18, 0x18, 0x00, 0x18, 0x00}, // U+0021 (!)
{ 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0022 (")
{ 0x36, 0x36, 0x7F, 0x36, 0x7F, 0x36, 0x36, 0x00}, // U+0023 (#)
{ 0x0C, 0x3E, 0x03, 0x1E, 0x30, 0x1F, 0x0C, 0x00}, // U+0024 ($)
{ 0x00, 0x63, 0x33, 0x18, 0x0C, 0x66, 0x63, 0x00}, // U+0025 (%)
{ 0x1C, 0x36, 0x1C, 0x6E, 0x3B, 0x33, 0x6E, 0x00}, // U+0026 (&)
{ 0x06, 0x06, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0027 (')
{ 0x18, 0x0C, 0x06, 0x06, 0x06, 0x0C, 0x18, 0x00}, // U+0028 (()
{ 0x06, 0x0C, 0x18, 0x18, 0x18, 0x0C, 0x06, 0x00}, // U+0029 ())
{ 0x00, 0x66, 0x3C, 0xFF, 0x3C, 0x66, 0x00, 0x00}, // U+002A (*)
{ 0x00, 0x0C, 0x0C, 0x3F, 0x0C, 0x0C, 0x00, 0x00}, // U+002B (+)
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0C, 0x0C, 0x06}, // U+002C (,)
{ 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x00}, // U+002D (-)
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0C, 0x0C, 0x00}, // U+002E (.)
{ 0x60, 0x30, 0x18, 0x0C, 0x06, 0x03, 0x01, 0x00}, // U+002F (/)
{ 0x3E, 0x63, 0x73, 0x7B, 0x6F, 0x67, 0x3E, 0x00}, // U+0030 (0)
{ 0x0C, 0x0E, 0x0C, 0x0C, 0x0C, 0x0C, 0x3F, 0x00}, // U+0031 (1)
{ 0x1E, 0x33, 0x30, 0x1C, 0x06, 0x33, 0x3F, 0x00}, // U+0032 (2)
{ 0x1E, 0x33, 0x30, 0x1C, 0x30, 0x33, 0x1E, 0x00}, // U+0033 (3)
{ 0x38, 0x3C, 0x36, 0x33, 0x7F, 0x30, 0x78, 0x00}, // U+0034 (4)
{ 0x3F, 0x03, 0x1F, 0x30, 0x30, 0x33, 0x1E, 0x00}, // U+0035 (5)
{ 0x1C, 0x06, 0x03, 0x1F, 0x33, 0x33, 0x1E, 0x00}, // U+0036 (6)
{ 0x3F, 0x33, 0x30, 0x18, 0x0C, 0x0C, 0x0C, 0x00}, // U+0037 (7)
{ 0x1E, 0x33, 0x33, 0x1E, 0x33, 0x33, 0x1E, 0x00}, // U+0038 (8)
{ 0x1E, 0x33, 0x33, 0x3E, 0x30, 0x18, 0x0E, 0x00}, // U+0039 (9)
{ 0x00, 0x0C, 0x0C, 0x00, 0x00, 0x0C, 0x0C, 0x00}, // U+003A (:)
{ 0x00, 0x0C, 0x0C, 0x00, 0x00, 0x0C, 0x0C, 0x06}, // U+003B (;)
{ 0x18, 0x0C, 0x06, 0x03, 0x06, 0x0C, 0x18, 0x00}, // U+003C (<)
{ 0x00, 0x00, 0x3F, 0x00, 0x00, 0x3F, 0x00, 0x00}, // U+003D (=)
{ 0x06, 0x0C, 0x18, 0x30, 0x18, 0x0C, 0x06, 0x00}, // U+003E (>)
{ 0x1E, 0x33, 0x30, 0x18, 0x0C, 0x00, 0x0C, 0x00}, // U+003F (?)
{ 0x3E, 0x63, 0x7B, 0x7B, 0x7B, 0x03, 0x1E, 0x00}, // U+0040 (@)
{ 0x0C, 0x1E, 0x33, 0x33, 0x3F, 0x33, 0x33, 0x00}, // U+0041 (A)
{ 0x3F, 0x66, 0x66, 0x3E, 0x66, 0x66, 0x3F, 0x00}, // U+0042 (B)
{ 0x3C, 0x66, 0x03, 0x03, 0x03, 0x66, 0x3C, 0x00}, // U+0043 (C)
{ 0x1F, 0x36, 0x66, 0x66, 0x66, 0x36, 0x1F, 0x00}, // U+0044 (D)
{ 0x7F, 0x46, 0x16, 0x1E, 0x16, 0x46, 0x7F, 0x00}, // U+0045 (E)
{ 0x7F, 0x46, 0x16, 0x1E, 0x16, 0x06, 0x0F, 0x00}, // U+0046 (F)
{ 0x3C, 0x66, 0x03, 0x03, 0x73, 0x66, 0x7C, 0x00}, // U+0047 (G)
{ 0x33, 0x33, 0x33, 0x3F, 0x33, 0x33, 0x33, 0x00}, // U+0048 (H)
{ 0x1E, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x1E, 0x00}, // U+0049 (I)
{ 0x78, 0x30, 0x30, 0x30, 0x33, 0x33, 0x1E, 0x00}, // U+004A (J)
{ 0x67, 0x66, 0x36, 0x1E, 0x36, 0x66, 0x67, 0x00}, // U+004B (K)
{ 0x0F, 0x06, 0x06, 0x06, 0x46, 0x66, 0x7F, 0x00}, // U+004C (L)
{ 0x63, 0x77, 0x7F, 0x7F, 0x6B, 0x63, 0x63, 0x00}, // U+004D (M)
{ 0x63, 0x67, 0x6F, 0x7B, 0x73, 0x63, 0x63, 0x00}, // U+004E (N)
{ 0x1C, 0x36, 0x63, 0x63, 0x63, 0x36, 0x1C, 0x00}, // U+004F (O)
{ 0x3F, 0x66, 0x66, 0x3E, 0x06, 0x06, 0x0F, 0x00}, // U+0050 (P)
{ 0x1E, 0x33, 0x33, 0x33, 0x3B, 0x1E, 0x38, 0x00}, // U+0051 (Q)
{ 0x3F, 0x66, 0x66, 0x3E, 0x36, 0x66, 0x67, 0x00}, // U+0052 (R)
{ 0x1E, 0x33, 0x07, 0x0E, 0x38, 0x33, 0x1E, 0x00}, // U+0053 (S)
{ 0x3F, 0x2D, 0x0C, 0x0C, 0x0C, 0x0C, 0x1E, 0x00}, // U+0054 (T)
{ 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x3F, 0x00}, // U+0055 (U)
{ 0x33, 0x33, 0x33, 0x33, 0x33, 0x1E, 0x0C, 0x00}, // U+0056 (V)
{ 0x63, 0x63, 0x63, 0x6B, 0x7F, 0x77, 0x63, 0x00}, // U+0057 (W)
{ 0x63, 0x63, 0x36, 0x1C, 0x1C, 0x36, 0x63, 0x00}, // U+0058 (X)
{ 0x33, 0x33, 0x33, 0x1E, 0x0C, 0x0C, 0x1E, 0x00}, // U+0059 (Y)
{ 0x7F, 0x63, 0x31, 0x18, 0x4C, 0x66, 0x7F, 0x00}, // U+005A (Z)
{ 0x1E, 0x06, 0x06, 0x06, 0x06, 0x06, 0x1E, 0x00}, // U+005B ([)
{ 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x40, 0x00}, // U+005C (\)
{ 0x1E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1E, 0x00}, // U+005D (])
{ 0x08, 0x1C, 0x36, 0x63, 0x00, 0x00, 0x00, 0x00}, // U+005E (^)
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF}, // U+005F (_)
{ 0x0C, 0x0C, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+0060 (`)
{ 0x00, 0x00, 0x1E, 0x30, 0x3E, 0x33, 0x6E, 0x00}, // U+0061 (a)
{ 0x07, 0x06, 0x06, 0x3E, 0x66, 0x66, 0x3B, 0x00}, // U+0062 (b)
{ 0x00, 0x00, 0x1E, 0x33, 0x03, 0x33, 0x1E, 0x00}, // U+0063 (c)
{ 0x38, 0x30, 0x30, 0x3e, 0x33, 0x33, 0x6E, 0x00}, // U+0064 (d)
{ 0x00, 0x00, 0x1E, 0x33, 0x3f, 0x03, 0x1E, 0x00}, // U+0065 (e)
{ 0x1C, 0x36, 0x06, 0x0f, 0x06, 0x06, 0x0F, 0x00}, // U+0066 (f)
{ 0x00, 0x00, 0x6E, 0x33, 0x33, 0x3E, 0x30, 0x1F}, // U+0067 (g)
{ 0x07, 0x06, 0x36, 0x6E, 0x66, 0x66, 0x67, 0x00}, // U+0068 (h)
{ 0x0C, 0x00, 0x0E, 0x0C, 0x0C, 0x0C, 0x1E, 0x00}, // U+0069 (i)
{ 0x30, 0x00, 0x30, 0x30, 0x30, 0x33, 0x33, 0x1E}, // U+006A (j)
{ 0x07, 0x06, 0x66, 0x36, 0x1E, 0x36, 0x67, 0x00}, // U+006B (k)
{ 0x0E, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x1E, 0x00}, // U+006C (l)
{ 0x00, 0x00, 0x33, 0x7F, 0x7F, 0x6B, 0x63, 0x00}, // U+006D (m)
{ 0x00, 0x00, 0x1F, 0x33, 0x33, 0x33, 0x33, 0x00}, // U+006E (n)
{ 0x00, 0x00, 0x1E, 0x33, 0x33, 0x33, 0x1E, 0x00}, // U+006F (o)
{ 0x00, 0x00, 0x3B, 0x66, 0x66, 0x3E, 0x06, 0x0F}, // U+0070 (p)
{ 0x00, 0x00, 0x6E, 0x33, 0x33, 0x3E, 0x30, 0x78}, // U+0071 (q)
{ 0x00, 0x00, 0x3B, 0x6E, 0x66, 0x06, 0x0F, 0x00}, // U+0072 (r)
{ 0x00, 0x00, 0x3E, 0x03, 0x1E, 0x30, 0x1F, 0x00}, // U+0073 (s)
{ 0x08, 0x0C, 0x3E, 0x0C, 0x0C, 0x2C, 0x18, 0x00}, // U+0074 (t)
{ 0x00, 0x00, 0x33, 0x33, 0x33, 0x33, 0x6E, 0x00}, // U+0075 (u)
{ 0x00, 0x00, 0x33, 0x33, 0x33, 0x1E, 0x0C, 0x00}, // U+0076 (v)
{ 0x00, 0x00, 0x63, 0x6B, 0x7F, 0x7F, 0x36, 0x00}, // U+0077 (w)
{ 0x00, 0x00, 0x63, 0x36, 0x1C, 0x36, 0x63, 0x00}, // U+0078 (x)
{ 0x00, 0x00, 0x33, 0x33, 0x33, 0x3E, 0x30, 0x1F}, // U+0079 (y)
{ 0x00, 0x00, 0x3F, 0x19, 0x0C, 0x26, 0x3F, 0x00}, // U+007A (z)
{ 0x38, 0x0C, 0x0C, 0x07, 0x0C, 0x0C, 0x38, 0x00}, // U+007B ({)
{ 0x18, 0x18, 0x18, 0x00, 0x18, 0x18, 0x18, 0x00}, // U+007C (|)
{ 0x07, 0x0C, 0x0C, 0x38, 0x0C, 0x0C, 0x07, 0x00}, // U+007D (})
{ 0x6E, 0x3B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // U+007E (~)
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} // U+007F
};
// return distance
// type 1: point outside segment near second point (x1, y1)
// type 2: point outside segment near first point (x0, y0)
// type 3: point inside the segment
__device__ float minDistanceToLineSegment(
int x0, int y0, int x1, int y1, int x, int y, int& type) {
// vector AB
float AB_x = x1 - x0;
float AB_y = y1 - y0;
// vector BE
float BE_x = x - x1;
float BE_y = y - y1;
// vector AE
float AE_x = x - x0;
float AE_y = y - y0;
// Calculating the dot product
float AB_BE = AB_x * BE_x + AB_y * BE_y;
float AB_AE = AB_x * AE_x + AB_y * AE_y;
// Minimum distance from
// point E to the line segment
float res = 0;
type = 0;
if (AB_BE > 0) {
// Finding the magnitude
res = sqrtf((x - x1) * (x - x1) + (y - y1) * (y - y1));
type = 1;
} else if (AB_AE < 0) {
res = sqrtf((x - x0) * (x - x0) + (y - y0) * (y - y0));
type = 2;
} else {
// Finding the perpendicular distance
float mod = sqrt(AB_x * AB_x + AB_y * AB_y);
res = abs(AB_x * AE_y - AB_y * AE_x) / mod;
type = 3;
}
return res;
}
// Thread per block: 1D - 512
// Blocks per grid: 2D - height, (width + 511) / 512
template<class T>
__global__ void checkPixel(
size_t width, size_t height, uint8_t* d_img, T shape, AABB aabb) {
size_t i = blockIdx.x;
size_t j = blockIdx.y * 512 + threadIdx.x;
if (i < height && j < width && aabb.inside(j, i)) {
size_t index_single = i * width + j;
Color prev_color(d_img[3 * index_single],
d_img[3 * index_single + 1],
d_img[3 * index_single + 2]);
auto color = shape.checkPixel(j, i, prev_color);
d_img[3 * index_single] = color.r;
d_img[3 * index_single + 1] = color.g;
d_img[3 * index_single + 2] = color.b;
}
}
}
/******************************************************************************
Internal Functions <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
******************************************************************************/
/******************************************************************************
Utilities >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
******************************************************************************/
uint8_t* allocateImage(size_t width, size_t height) {
uint8_t* d_res = nullptr;
cudaMalloc(&d_res, width * height * 3);
return d_res;
}
uint8_t* uploadImage(size_t width, size_t height, uint8_t* img) {
uint8_t* d_res = allocateImage(width, height);
CHECK(d_res);
cudaMemcpy(d_res, img, width * height * 3, cudaMemcpyHostToDevice);
return d_res;
}
void uploadImage(size_t width, size_t height, uint8_t* img, uint8_t* d_img) {
CHECK(img);
CHECK(d_img);
cudaMemcpy(d_img, img, width * height * 3, cudaMemcpyHostToDevice);
}
void downloadImage(size_t width, size_t height, uint8_t* d_img, uint8_t* img) {
cudaMemcpy(img, d_img, width * height * 3, cudaMemcpyDeviceToHost);
}
void copyImage(uint8_t* d_dst, uint8_t* d_src, size_t size) {
cudaMemcpy(d_dst, d_src, size, cudaMemcpyDeviceToDevice);
}
void freeImage(uint8_t* d_img) {
cudaFree(d_img);
}
/******************************************************************************
Utilities <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
******************************************************************************/
/******************************************************************************
API >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
******************************************************************************/
__device__ Color Line::checkPixel(int x, int y, Color prev_color) {
int type = 0;
float dist = minDistanceToLineSegment(x0, y0, x1, y1, x, y, type);
if (type == 3 && dist <= width / 2.f) {
float frac = dist - floorf(dist);
if (dist >= width / 2.f - 1.f) {
Color c = prev_color;
c.overlay(color, frac);
return c;
} else {
return color;
}
} else {
// TODO: add cap types
return prev_color;
}
}
__device__ __host__ AABB Line::getAABB() const {
AABB res(x0 < x1 ? x0 : x1,
y0 < y1 ? y0 : y1,
x0 < x1 ? x1 : x0,
y0 < y1 ? y1 : y0);
res.inflate(width);
return res;
}
__device__ Color String::checkPixel(int in_x, int in_y, Color prev_color) {
int normal_in_x = (float)(in_x - x) / font_size * 8.f + x;
int normal_in_y = (float)(in_y - y) / font_size * 8.f + y;
int index = (normal_in_x - x) / 8;
if (index >= 0 && index < length && normal_in_y >= y && normal_in_y < y + 8) {
char c = d_buffer[index];
int local_x = (normal_in_x - x) % 8;
int local_y = normal_in_y - y;
if (c >= 0 && c < 128) {
bool has_pixel = (__cudraw__d_font[c][local_y] >> local_x) & 1;
if (has_pixel) {
return color;
}
}
}
return prev_color;
}
__device__ __host__ AABB String::getAABB() const {
AABB res(x, y, x + font_size * (float)length, y + font_size);
return res;
}
__device__ Color Rect::checkPixel(int x, int y, Color prev_color) {
// TODO: we can optimize this
int half_w = ceilf(width / 2.f);
int x_min = min(x0, x1);
int x_max = max(x0, x1);
int y_min = min(y0, y1);
int y_max = max(y0, y1);
if (x >= x_min - half_w && x <= x_max + half_w &&
y >= y_min - half_w && y <= y_max + half_w) {
Color c = prev_color;
if (fill && x >= x_min && x <= x_max && y >= y_min && y <= y_max) {
c.overlay(fill_color, fill_alpha);
}
int dist_min_x = min(abs(x - x0), abs(x - x1));
int dist_min_y = min(abs(y - y0), abs(y - y1));
int dist_min = min(dist_min_x, dist_min_y);
float dist = width / 2.f + 1.f - (float)dist_min;
if (dist >= 1) {
c = color;
} else if (dist >= 0) {
float frac = dist - floorf(dist);
c.overlay(color, frac);
}
return c;
}
return prev_color;
}
__device__ __host__ AABB Rect::getAABB() const {
AABB res(x0 < x1 ? x0 : x1,
y0 < y1 ? y0 : y1,
x0 < x1 ? x1 : x0,
y0 < y1 ? y1 : y0);
res.inflate(width);
return res;
}
template<class T>
void scanPixel(size_t width, size_t height, uint8_t* d_img, const T& shape) {
CHECK(d_img);
AABB aabb = shape.getAABB();
dim3 gridDims(height, (width + 511) / 512);
checkPixel<<<gridDims, 512>>>(width, height, d_img, shape, aabb);
}
// Explicit template instantiation
template void scanPixel<Line>(size_t, size_t, uint8_t*, const Line&);
template void scanPixel<String>(size_t, size_t, uint8_t*, const String&);
template void scanPixel<Rect>(size_t, size_t, uint8_t*, const Rect&);
/******************************************************************************
API <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
******************************************************************************/
}
|
2a4b51b369f7ed8af2dc627f7210874412ee0a51.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by xiaolu.lin on 2019/10/25.
//
#include <hip/hip_runtime.h>
#include <gtest/gtest.h>
#include "gtest_cuda.h"
__global__ void test(){
int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
printf("%d : __%d__\n", threadId, __LINE__);
}
CUDA_SINGLE_TEST(test);
CUDA_SIMPLE_TEST(test);
CUDA_TEST(test, 32, 2);
__global__ void death_test(){
assert(0);
}
CUDA_DEATH_TEST(death_test, 1, 1);
__global__ void part_death_test(){
int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
assert(threadId > 16);
}
CUDA_DEATH_TEST(part_death_test, 32, 1);
CUDA_DEATH_TEST(part_death_test, 32, 2);
| 2a4b51b369f7ed8af2dc627f7210874412ee0a51.cu | //
// Created by xiaolu.lin on 2019/10/25.
//
#include <cuda_runtime.h>
#include <gtest/gtest.h>
#include "gtest_cuda.h"
__global__ void test(){
int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
printf("%d : __%d__\n", threadId, __LINE__);
}
CUDA_SINGLE_TEST(test);
CUDA_SIMPLE_TEST(test);
CUDA_TEST(test, 32, 2);
__global__ void death_test(){
assert(0);
}
CUDA_DEATH_TEST(death_test, 1, 1);
__global__ void part_death_test(){
int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
assert(threadId > 16);
}
CUDA_DEATH_TEST(part_death_test, 32, 1);
CUDA_DEATH_TEST(part_death_test, 32, 2);
|
04082139a64dad9e067a339264118e3cc75d0875.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 64
#define TPB 32 // threads per block
__device__ float scale(int i, int n){
return static_cast<float>(i)/(n-1);
}
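// scale() spreads the thread indices evenly over [0,1]: with len = N = 64,
// i = 0 maps to 0.0 and i = 63 maps to 1.0, so each thread measures |x - 0.5|.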
// Compute the distance between 2 points in a line
__device__ float distance(float x1, float x2){
return sqrt((x2-x1)*(x2-x1));
}
__global__ void distanceKernel(float *d_out, float ref, int len){
const int i= blockIdx.x*blockDim.x + threadIdx.x;
const float x = scale(i, len);
d_out[i] = distance(x, ref);
// printf("i=%2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i]);
}
int main(){
float *d_out = 0;
const float ref = 0.5f;
hipMalloc(&d_out, N*sizeof(float));
// block num: N/TPB
hipLaunchKernelGGL(( distanceKernel), dim3(N/TPB), dim3(TPB), 0, 0, d_out, ref, N);
hipFree(d_out);
return 0;
} | 04082139a64dad9e067a339264118e3cc75d0875.cu | #include <stdio.h>
#define N 64
#define TPB 32 // threads per block
__device__ float scale(int i, int n){
return static_cast<float>(i)/(n-1);
}
// Compute the distance between 2 points in a line
__device__ float distance(float x1, float x2){
return sqrt((x2-x1)*(x2-x1));
}
__global__ void distanceKernel(float *d_out, float ref, int len){
const int i= blockIdx.x*blockDim.x + threadIdx.x;
const float x = scale(i, len);
d_out[i] = distance(x, ref);
// printf("i=%2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i]);
}
int main(){
float *d_out = 0;
const float ref = 0.5f;
cudaMalloc(&d_out, N*sizeof(float));
// block num: N/TPB
distanceKernel<<<N/TPB, TPB>>>(d_out, ref, N);
cudaFree(d_out);
return 0;
} |
860e65e6a1e8ff517acde3dbc47718c1b0a08ee0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//seqRuntime.cu
#include <iostream>
using namespace std;
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
__global__ void fillKernel(int *a, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
// if(tid < n) // Removing this comparision introduces an out-of-bounds error
a[tid] = tid;
}
void fill(int* d_a, int n)
{
int nThreadsPerBlock= 512;
int nBlocks= n/nThreadsPerBlock + ((n%nThreadsPerBlock)?1:0);
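// ceiling division: for N = 50000 and 512 threads/block this launches 98 blocks,
// i.e. 50176 threads, so the last 176 threads write past the end of the array
// because the bounds check in fillKernel is commented out.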
hipLaunchKernelGGL(( fillKernel) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0, d_a, n);
}
int main()
{
const int N=50000;
// task 1: create the array
thrust::device_vector<int> a(N);
// task 2: fill the array using the runtime
fill(thrust::raw_pointer_cast(&a[0]),N);
// task 3: calculate the sum of the array
int sumA= thrust::reduce(a.begin(),a.end(), 0);
// task 4: calculate the sum of 0 .. N-1
int sumCheck=0;
for(int i=0; i < N; i++) sumCheck += i;
// task 5: check the results agree
if(sumA == sumCheck) cout << "Test Succeeded!" << endl;
else { cerr << "Test FAILED!" << endl; return(1);}
return(0);
}
| 860e65e6a1e8ff517acde3dbc47718c1b0a08ee0.cu | //seqRuntime.cu
#include <iostream>
using namespace std;
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
__global__ void fillKernel(int *a, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
// if(tid < n) // Removing this comparison introduces an out-of-bounds error
a[tid] = tid;
}
void fill(int* d_a, int n)
{
int nThreadsPerBlock= 512;
int nBlocks= n/nThreadsPerBlock + ((n%nThreadsPerBlock)?1:0);
fillKernel <<< nBlocks, nThreadsPerBlock >>> (d_a, n);
}
int main()
{
const int N=50000;
// task 1: create the array
thrust::device_vector<int> a(N);
// task 2: fill the array using the runtime
fill(thrust::raw_pointer_cast(&a[0]),N);
// task 3: calculate the sum of the array
int sumA= thrust::reduce(a.begin(),a.end(), 0);
// task 4: calculate the sum of 0 .. N-1
int sumCheck=0;
for(int i=0; i < N; i++) sumCheck += i;
// task 5: check the results agree
if(sumA == sumCheck) cout << "Test Succeeded!" << endl;
else { cerr << "Test FAILED!" << endl; return(1);}
return(0);
}
|
99a85b0d171517c6e95e32b4f39c92ef455baf68.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "hip/hip_runtime.h"
#include "common/book.h"
#include "common/cpu_bitmap.h"
#define DIM 1024
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
struct Sphere {
float r,b,g;
float radius;
float x,y,z;
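// hit(): a ray is cast along the z axis through pixel (ox, oy); if it falls inside
// this sphere's silhouette, the method returns the z of the surface point and
// writes a 0..1 shading factor (the surface normal's z component) through n.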
__device__ float hit( float ox, float oy, float *n ) {
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf( radius*radius - dx*dx - dy*dy );
*n = dz / sqrtf( radius * radius );
return dz + z;
}
return -INF;
}
};
#define SPHERES 20
__global__ void kernel( Sphere *s, unsigned char *ptr ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;
float maxz = -INF;
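// depth test: keep the colour of the sphere whose surface point has the largest z
// on this pixel's ray, i.e. the sphere closest to the viewer.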
for(int i=0; i<SPHERES; i++) {
float n;
float t = s[i].hit( ox, oy, &n );
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
Sphere *s;
};
int main( void ) {
DataBlock data;
// capture the start time
hipEvent_t start, stop;
HANDLE_ERROR( hipEventCreate( &start ) );
HANDLE_ERROR( hipEventCreate( &stop ) );
HANDLE_ERROR( hipEventRecord( start, 0 ) );
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
Sphere *s;
// allocate memory on the GPU for the output bitmap
HANDLE_ERROR( hipMalloc( (void**)&dev_bitmap,
bitmap.image_size() ) );
// allocate memory for the Sphere dataset
HANDLE_ERROR( hipMalloc( (void**)&s,
sizeof(Sphere) * SPHERES ) );
// allocate temp memory, initialize it, copy to
// memory on the GPU, then free our temp memory
Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
for (int i=0; i<SPHERES; i++) {
temp_s[i].r = rnd( 1.0f );
temp_s[i].g = rnd( 1.0f );
temp_s[i].b = rnd( 1.0f );
temp_s[i].x = rnd( 1000.0f ) - 500;
temp_s[i].y = rnd( 1000.0f ) - 500;
temp_s[i].z = rnd( 1000.0f ) - 500;
temp_s[i].radius = rnd( 100.0f ) + 20;
}
HANDLE_ERROR( hipMemcpy( s, temp_s,
sizeof(Sphere) * SPHERES,
hipMemcpyHostToDevice ) );
free( temp_s );
// generate a bitmap from our sphere data
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
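// DIM/16 = 64, so a 64x64 grid of 16x16-thread blocks covers the 1024x1024 bitmap
// with exactly one thread per pixel.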
hipLaunchKernelGGL(( kernel), dim3(grids),dim3(threads), 0, 0, s, dev_bitmap );
// copy our bitmap back from the GPU for display
HANDLE_ERROR( hipMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
hipMemcpyDeviceToHost ) );
// get stop time, and display the timing results
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime,
start, stop ) );
//printf( "Time to generate: %3.1f ms\n", elapsedTime );
printf( "%3.1f\n", elapsedTime );
HANDLE_ERROR( hipEventDestroy( start ) );
HANDLE_ERROR( hipEventDestroy( stop ) );
HANDLE_ERROR( hipFree( dev_bitmap ) );
HANDLE_ERROR( hipFree( s ) );
// display
//bitmap.display_and_exit();
}
| 99a85b0d171517c6e95e32b4f39c92ef455baf68.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "cuda.h"
#include "common/book.h"
#include "common/cpu_bitmap.h"
#define DIM 1024
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
struct Sphere {
float r,b,g;
float radius;
float x,y,z;
__device__ float hit( float ox, float oy, float *n ) {
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf( radius*radius - dx*dx - dy*dy );
*n = dz / sqrtf( radius * radius );
return dz + z;
}
return -INF;
}
};
#define SPHERES 20
__global__ void kernel( Sphere *s, unsigned char *ptr ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;
float maxz = -INF;
for(int i=0; i<SPHERES; i++) {
float n;
float t = s[i].hit( ox, oy, &n );
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
Sphere *s;
};
int main( void ) {
DataBlock data;
// capture the start time
cudaEvent_t start, stop;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
Sphere *s;
// allocate memory on the GPU for the output bitmap
HANDLE_ERROR( cudaMalloc( (void**)&dev_bitmap,
bitmap.image_size() ) );
// allocate memory for the Sphere dataset
HANDLE_ERROR( cudaMalloc( (void**)&s,
sizeof(Sphere) * SPHERES ) );
// allocate temp memory, initialize it, copy to
// memory on the GPU, then free our temp memory
Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
for (int i=0; i<SPHERES; i++) {
temp_s[i].r = rnd( 1.0f );
temp_s[i].g = rnd( 1.0f );
temp_s[i].b = rnd( 1.0f );
temp_s[i].x = rnd( 1000.0f ) - 500;
temp_s[i].y = rnd( 1000.0f ) - 500;
temp_s[i].z = rnd( 1000.0f ) - 500;
temp_s[i].radius = rnd( 100.0f ) + 20;
}
HANDLE_ERROR( cudaMemcpy( s, temp_s,
sizeof(Sphere) * SPHERES,
cudaMemcpyHostToDevice ) );
free( temp_s );
// generate a bitmap from our sphere data
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
kernel<<<grids,threads>>>( s, dev_bitmap );
// copy our bitmap back from the GPU for display
HANDLE_ERROR( cudaMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
cudaMemcpyDeviceToHost ) );
// get stop time, and display the timing results
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime,
start, stop ) );
//printf( "Time to generate: %3.1f ms\n", elapsedTime );
printf( "%3.1f\n", elapsedTime );
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
HANDLE_ERROR( cudaFree( dev_bitmap ) );
HANDLE_ERROR( cudaFree( s ) );
// display
//bitmap.display_and_exit();
}
|
1499cb4d3ae23fe55d67a286a3db7c923fbcfa5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cudaComputeYGradient(int* y_gradient, unsigned char* channel, int image_width, int image_height, int chunk_size_per_thread) {
int y_kernel[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } };
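// note: in the sum below the first index of y_kernel varies with the horizontal
// offset and the second with the row, so the stencil actually applied is the
// transpose of the declared kernel: weights (1,2,1) down the left column and
// (-1,-2,-1) down the right column of the 3x3 neighbourhood starting at i-1.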
int index = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = index * chunk_size_per_thread; i < (index + 1) * chunk_size_per_thread - 1; i++) {
if (i + 2 * image_width + 1 < image_width * image_height) {
if (i == 0 && blockIdx.x == 0 && blockIdx.x == 0) {
continue;
} else {
y_gradient[i] =
y_kernel[0][0] * channel[i - 1] +
y_kernel[1][0] * channel[i] +
y_kernel[2][0] * channel[i + 1] +
y_kernel[0][1] * channel[i + image_width - 1] +
y_kernel[1][1] * channel[i + image_width] +
y_kernel[2][1] * channel[i + image_width + 1] +
y_kernel[0][2] * channel[i + 2 * image_width - 1] +
y_kernel[1][2] * channel[i + 2 * image_width] +
y_kernel[2][2] * channel[i + 2 * image_width + 1];
}
}
}
return;
} | 1499cb4d3ae23fe55d67a286a3db7c923fbcfa5e.cu | #include "includes.h"
__global__ void cudaComputeYGradient(int* y_gradient, unsigned char* channel, int image_width, int image_height, int chunk_size_per_thread) {
int y_kernel[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } };
int index = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = index * chunk_size_per_thread; i < (index + 1) * chunk_size_per_thread - 1; i++) {
if (i + 2 * image_width + 1 < image_width * image_height) {
if (i == 0 && blockIdx.x == 0 && blockIdx.x == 0) {
continue;
} else {
y_gradient[i] =
y_kernel[0][0] * channel[i - 1] +
y_kernel[1][0] * channel[i] +
y_kernel[2][0] * channel[i + 1] +
y_kernel[0][1] * channel[i + image_width - 1] +
y_kernel[1][1] * channel[i + image_width] +
y_kernel[2][1] * channel[i + image_width + 1] +
y_kernel[0][2] * channel[i + 2 * image_width - 1] +
y_kernel[1][2] * channel[i + 2 * image_width] +
y_kernel[2][2] * channel[i + 2 * image_width + 1];
}
}
}
return;
} |
34bfd754bd6ecc1041a50bb715410be095821a4c.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef PADDLE_WITH_CUDA
#include <xxhash.h>
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <unordered_map>
#include "glog/logging.h"
#include "paddle/phi/backends/context_pool.h"
#include "paddle/phi/backends/dynload/cudnn.h"
#include "paddle/phi/backends/gpu/cuda/cudnn_desc.h"
#include "paddle/phi/common/backend.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/impl/conv_cudnn_impl.h"
#include "paddle/utils/optional.h"
namespace phi {
namespace fusion {
namespace {
// TODO(wilber): Add a LRU strategy.
class CudnnConvDescManager {
public:
static CudnnConvDescManager* Instance() {
static CudnnConvDescManager global;
return &global;
}
struct CudnnCacheInfo {
phi::backends::gpu::TensorDescriptor* x_desc{nullptr};
phi::backends::gpu::FilterDescriptor* w_desc{nullptr};
phi::backends::gpu::TensorDescriptor* b_desc{nullptr};
phi::backends::gpu::TensorDescriptor* o_desc{nullptr};
phi::backends::gpu::ConvolutionDescriptor* conv_desc{nullptr};
phi::backends::gpu::ActivationDescriptor* act_desc{nullptr};
size_t workspace_size;
cudnnConvolutionFwdAlgo_t algo;
std::vector<int> paddings;
std::vector<int> dilations;
std::vector<int> input_pad;
std::vector<int> new_input_shape_vec;
bool is_sys_pad;
// TODO(wilber): The destruction of cudnn descriptor depends on the
// phi::dynload::cudnn singleton, but when the process exits, the singleton
// destruction order cannot be determined.
// After testing, it is found that the phi::dynload::cudnn related singleton
// on Windows is destructed first, causing the descriptor to be destructed
// and failed, while the descriptor on Linux is destructed first, and the
// phi::dynload::cudnn singleton is destructed later, so that it is correct.
// To circumvent this problem, we rely entirely on freeing resources when
// the process exits.
// ~CudnnCacheInfo() {
// if (x_desc) delete x_desc;
// if (w_desc) delete w_desc;
// if (b_desc) delete b_desc;
// if (o_desc) delete o_desc;
// if (conv_desc) delete conv_desc;
// if (act_desc) delete act_desc;
// }
};
CudnnCacheInfo* GetCudnnCacheInfo(
const std::vector<int>& input_dims,
const std::vector<int>& filter_dims,
const std::vector<int>& bias_dims,
const std::vector<int>& output_dims,
const std::vector<int>& paddings,
const std::vector<int>& strides,
const std::vector<int>& dilations,
phi::DataType input_dtype,
int groups,
cudnnDataType_t dtype,
cudnnTensorFormat_t format,
const std::function<void(cudnnConvolutionFwdAlgo_t*,
size_t*,
cudnnTensorDescriptor_t,
cudnnFilterDescriptor_t,
cudnnTensorDescriptor_t,
cudnnConvolutionDescriptor_t)>& search_func,
const std::string& act,
double value_max = std::numeric_limits<double>::max()) {
// std::hash takes about 5us, xxhash can optimize to 2.5us.
XXH64_state_t* const state = XXH64_createState();
if (state == nullptr) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash create state failed, maybe a environment error."));
}
XXH64_hash_t const seed = 0;
if (XXH64_reset(state, seed) == XXH_ERROR) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash reset state failed, maybe a environment error."));
}
XXH64_update(state, input_dims.data(), input_dims.size() * sizeof(int));
XXH64_update(state, filter_dims.data(), filter_dims.size() * sizeof(int));
XXH64_update(state, bias_dims.data(), bias_dims.size() * sizeof(int));
// XXH64_update(state, output_dims.data(), output_dims.size() *
// sizeof(int));
XXH64_update(state, paddings.data(), paddings.size() * sizeof(int));
XXH64_update(state, strides.data(), strides.size() * sizeof(int));
XXH64_update(state, dilations.data(), dilations.size() * sizeof(int));
XXH64_update(state, &input_dtype, sizeof(int));
XXH64_update(state, &groups, sizeof(int));
XXH64_update(state, &dtype, sizeof(int));
XXH64_update(state, &format, sizeof(int));
XXH64_update(state, act.data(), act.length() * sizeof(char));
// XXH64_update(state, &value_max, sizeof(double));
XXH64_hash_t hash_key = XXH64_digest(state);
XXH64_freeState(state);
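// hash_key identifies this convolution configuration; the descriptor setup and the
// algorithm search below run only on a cache miss, later calls reuse the entry.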
std::lock_guard<std::mutex> lock(cache_mutex_);
if (!cudnn_conv_cache_.count(hash_key)) {
cudnn_conv_cache_[hash_key] = CudnnCacheInfo();
cudnn_conv_cache_[hash_key].x_desc =
GetTensorDescInfo(input_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].w_desc =
GetFilterDescInfo(filter_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].o_desc =
GetTensorDescInfo(output_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].b_desc =
GetTensorDescInfo(bias_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].conv_desc =
GetConvDescInfo(paddings, strides, dilations, groups, dtype);
cudnn_conv_cache_[hash_key].act_desc =
GetActivationDescInfo(act, value_max);
size_t workspace_size;
cudnnConvolutionFwdAlgo_t algo;
search_func(&algo,
&workspace_size,
cudnn_conv_cache_[hash_key].x_desc->desc(),
cudnn_conv_cache_[hash_key].w_desc->desc(),
cudnn_conv_cache_[hash_key].o_desc->desc(),
cudnn_conv_cache_[hash_key].conv_desc->desc());
cudnn_conv_cache_[hash_key].workspace_size = workspace_size;
cudnn_conv_cache_[hash_key].algo = algo;
}
return &cudnn_conv_cache_.at(hash_key);
}
struct ConvAttrCacheInfo {
std::vector<int> paddings;
std::vector<int> dilations;
std::vector<int> input_pad;
std::vector<int> new_input_shape_vec;
bool is_sys_pad;
};
ConvAttrCacheInfo* GetConvAttr(const std::vector<int>& paddings_t,
const std::vector<int>& dilations_t,
const std::string& padding_algorithm,
const std::vector<int>& input_dims,
const std::vector<int>& filter_dims,
const std::vector<int>& strides,
cudnnTensorFormat_t format) {
XXH64_state_t* const state = XXH64_createState();
if (state == nullptr) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash create state failed, maybe a environment error."));
}
XXH64_hash_t const seed = 0;
if (XXH64_reset(state, seed) == XXH_ERROR) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash create state failed, maybe a environment error."));
}
XXH64_update(state, paddings_t.data(), paddings_t.size() * sizeof(int));
XXH64_update(state, dilations_t.data(), dilations_t.size() * sizeof(int));
XXH64_update(state, input_dims.data(), input_dims.size() * sizeof(int));
XXH64_update(state, filter_dims.data(), filter_dims.size() * sizeof(int));
XXH64_update(state, strides.data(), strides.size() * sizeof(int));
XXH64_update(state, &format, sizeof(int));
XXH64_update(state,
padding_algorithm.data(),
padding_algorithm.length() * sizeof(char));
XXH64_hash_t hash_key = XXH64_digest(state);
XXH64_freeState(state);
std::lock_guard<std::mutex> lock(attr_mutex_);
if (!conv_attr_cache_.count(hash_key)) {
ConvAttrCacheInfo cache;
auto paddings = paddings_t;
auto dilations = dilations_t;
std::vector<int> in_data_dims(input_dims.size() - 2);
std::vector<int> ksize(filter_dims.size() - 2);
if (format == CUDNN_TENSOR_NHWC) {
for (size_t i = 1; i < input_dims.size() - 1; ++i) {
in_data_dims[i - 1] = input_dims[i];
}
for (size_t i = 1; i < filter_dims.size() - 1; ++i) {
ksize[i - 1] = filter_dims[i];
}
} else {
for (size_t i = 2; i < input_dims.size(); ++i) {
in_data_dims[i - 2] = input_dims[i];
}
for (size_t i = 2; i < filter_dims.size(); ++i) {
ksize[i - 2] = filter_dims[i];
}
}
phi::UpdatePaddingAndDilation(&paddings,
&dilations,
padding_algorithm,
make_ddim(in_data_dims),
strides,
ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim);
std::vector<int> padding_common(data_dim, 0);
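      // cuDNN only supports symmetric padding, so for asymmetric padding the
      // input is pre-padded (input_pad / new_input_shape_vec) and only the
      // symmetric part is kept in padding_common.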
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = input_dims[0];
if (format == CUDNN_TENSOR_NCHW) {
new_input_shape_vec[1] = input_dims[1];
} else {
new_input_shape_vec[data_dim + 1] = input_dims[data_dim + 1];
}
std::vector<int> input_pad(input_dims.size() * 2, 0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
if (format == CUDNN_TENSOR_NCHW) {
new_input_shape_vec[i + 2] = input_dims[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] = input_dims[i + 1] + padding_diff[i];
}
if (format == CUDNN_TENSOR_NCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i];
}
}
cache.is_sys_pad = false;
cache.input_pad = input_pad;
cache.new_input_shape_vec = new_input_shape_vec;
} else {
cache.is_sys_pad = true;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
cache.dilations = dilations;
cache.paddings = padding_common;
conv_attr_cache_[hash_key] = cache;
}
return &conv_attr_cache_.at(hash_key);
}
private:
phi::backends::gpu::TensorDescriptor* GetTensorDescInfo(
const std::vector<int>& input_dims,
phi::DataType input_dtype,
cudnnTensorFormat_t input_format) {
auto* desc = new phi::backends::gpu::TensorDescriptor();
desc->set(
input_dims, input_format, backends::gpu::ToCudnnDataType(input_dtype));
return desc;
}
phi::backends::gpu::FilterDescriptor* GetFilterDescInfo(
const std::vector<int>& input_dims,
phi::DataType input_dtype,
cudnnTensorFormat_t input_format) {
auto* desc = new phi::backends::gpu::FilterDescriptor();
desc->set(
input_dims, input_format, backends::gpu::ToCudnnDataType(input_dtype));
return desc;
}
phi::backends::gpu::ConvolutionDescriptor* GetConvDescInfo(
const std::vector<int>& paddings,
const std::vector<int>& strides,
const std::vector<int>& dilations,
int groups,
cudnnDataType_t dtype) {
auto* desc = new phi::backends::gpu::ConvolutionDescriptor();
desc->set(
dtype, paddings, strides, dilations, phi::AllowTF32Cudnn(), groups);
return desc;
}
phi::backends::gpu::ActivationDescriptor* GetActivationDescInfo(
const std::string& act,
double value_max = std::numeric_limits<double>::max()) {
auto* desc = new phi::backends::gpu::ActivationDescriptor();
cudnnActivationMode_t mode;
double relu_ceiling = 0.0;
if (act == "identity") {
mode = CUDNN_ACTIVATION_IDENTITY;
} else if (act == "relu") {
mode = CUDNN_ACTIVATION_RELU;
} else if (act == "relu6") {
relu_ceiling = 6.0;
mode = CUDNN_ACTIVATION_CLIPPED_RELU;
} else if (act == "sigmoid") {
mode = CUDNN_ACTIVATION_SIGMOID;
} else if (act == "relux") {
relu_ceiling = value_max;
mode = CUDNN_ACTIVATION_CLIPPED_RELU;
} else if (act == "tanh") {
mode = CUDNN_ACTIVATION_TANH;
} else {
PADDLE_THROW(phi::errors::Unimplemented(
"Unknown CUDNN activation string: %s.", act));
}
desc->set(mode, relu_ceiling);
return desc;
}
std::mutex cache_mutex_;
std::unordered_map<size_t, CudnnCacheInfo> cudnn_conv_cache_;
std::mutex attr_mutex_;
std::unordered_map<size_t, ConvAttrCacheInfo> conv_attr_cache_;
};
} // namespace
template <typename T, typename Context>
void ConvFusionKernel(const Context& ctx,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& bias,
const paddle::optional<DenseTensor>& residual,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
const std::vector<int>& dilations_t,
int groups,
const std::string& data_format,
const std::string& activation,
bool exhaustive_search,
const std::vector<int>& channels,
int user_workspace_size,
DenseTensor* output,
std::vector<DenseTensor*> outs) {
auto handle = ctx.cudnn_handle();
ctx.template Alloc<T>(output);
auto workspace_handle = ctx.cudnn_workspace_handle();
exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search;
bool deterministic = FLAGS_cudnn_deterministic;
PADDLE_ENFORCE_EQ(exhaustive_search && deterministic,
false,
phi::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
size_t workspace_size_limit = 0;
if (FLAGS_conv_workspace_size_limit > 0 || user_workspace_size > 0) {
int64_t max_user_size =
::min(static_cast<int64_t>(FLAGS_conv_workspace_size_limit),
static_cast<int64_t>(user_workspace_size));
workspace_size_limit = max_user_size * 1024 * 1024;
}
auto dtype = phi::backends::gpu::CudnnDataType<T>::type;
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// Choose NHWC or NCHW by data_format attr.
auto compute_format = channel_last ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW;
VLOG(3) << "Compute ConvFusionOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == CUDNN_TENSOR_NHWC ? "NHWC" : "NCHW");
auto* conv_attr_cache = CudnnConvDescManager::Instance()->GetConvAttr(
paddings_t,
dilations_t,
padding_algorithm,
phi::vectorize<int>(input.dims()),
phi::vectorize<int>(filter.dims()),
strides,
compute_format);
DenseTensor transformed_input;
const int input_rank = input.dims().size();
auto unsys_pad_process = [&](const std::vector<int>& new_input_shape_vec,
const std::vector<int>& input_pad) {
DDim new_input_shape(make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
ctx.template Alloc<T>(&transformed_input);
T pad_value(0.0);
switch (input_rank) {
case 4: {
funcs::PadFunction<Context, T, 4>(
ctx, input_pad, input, pad_value, &transformed_input);
} break;
case 5: {
funcs::PadFunction<Context, T, 5>(
ctx, input_pad, input, pad_value, &transformed_input);
} break;
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
};
if (conv_attr_cache->is_sys_pad) {
transformed_input.ShareDataWith(input);
} else {
unsys_pad_process(conv_attr_cache->new_input_shape_vec,
conv_attr_cache->input_pad);
}
std::vector<int> b_dims(input_rank, 1);
if (compute_format == CUDNN_TENSOR_NCHW) {
auto bias_rank = bias.dims().size();
if (input_rank == bias_rank) {
b_dims[1] = static_cast<int>(bias.dims()[1]);
} else {
b_dims[1] = static_cast<int>(bias.dims()[0]);
}
} else {
b_dims[input_rank - 1] = static_cast<int>(bias.dims()[0]);
}
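  // search_func picks the forward convolution algorithm, either from cuDNN's
  // heuristics (the default) or by exhaustive benchmarking, and reports the
  // workspace size that algorithm requires.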
auto search_func = [&](cudnnConvolutionFwdAlgo_t* cudnn_algo,
size_t* wks_bytes,
cudnnTensorDescriptor_t x_desc,
cudnnFilterDescriptor_t w_desc,
cudnnTensorDescriptor_t o_desc,
cudnnConvolutionDescriptor_t cudnn_conv_desc) {
if (!exhaustive_search) {
#if CUDNN_VERSION >= 8000
int perf_count;
int best_algo_idx = 0;
size_t tmp_size = 0;
std::unique_ptr<cudnnConvolutionFwdAlgoPerf_t[]> perf_results(
new cudnnConvolutionFwdAlgoPerf_t[phi::kNUM_CUDNN_FWD_ALGS]);
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardAlgorithm_v7(
handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
phi::kNUM_CUDNN_FWD_ALGS,
&perf_count,
perf_results.get()));
*cudnn_algo = (perf_results.get())[best_algo_idx].algo;
#else
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardAlgorithm(
handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit,
cudnn_algo));
#endif
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardWorkspaceSize(handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
*cudnn_algo,
wks_bytes));
} else {
std::array<cudnnConvolutionFwdAlgoPerf_t, phi::kNUM_CUDNN_FWD_ALGS>
fwd_perf_stat;
int returned_algo_count;
auto cudnn_find_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnFindConvolutionForwardAlgorithmEx(
handle,
x_desc,
transformed_input.data(),
w_desc,
filter.data(),
cudnn_conv_desc,
o_desc,
output->data(),
phi::kNUM_CUDNN_FWD_ALGS,
&returned_algo_count,
fwd_perf_stat.data(),
cudnn_workspace,
workspace_size_limit));
};
workspace_handle.RunFuncSync(cudnn_find_func, workspace_size_limit);
*cudnn_algo = fwd_perf_stat[0].algo;
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardWorkspaceSize(
handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
fwd_perf_stat[0].algo,
wks_bytes));
}
};
auto cudnn_cache_info = CudnnConvDescManager::Instance()->GetCudnnCacheInfo(
phi::vectorize<int>(transformed_input.dims()),
phi::vectorize<int>(filter.dims()),
b_dims,
phi::vectorize<int>(output->dims()),
conv_attr_cache->paddings,
strides,
conv_attr_cache->dilations,
transformed_input.dtype(),
groups,
phi::backends::gpu::CudnnDataType<T>::type,
compute_format,
search_func,
activation);
auto x_desc = cudnn_cache_info->x_desc->desc();
auto w_desc = cudnn_cache_info->w_desc->desc();
auto b_desc = cudnn_cache_info->b_desc->desc();
auto o_desc = cudnn_cache_info->o_desc->desc();
auto cudnn_conv_desc = cudnn_cache_info->conv_desc->desc();
auto act_desc = cudnn_cache_info->act_desc->desc();
auto algo = cudnn_cache_info->algo;
auto workspace_size = cudnn_cache_info->workspace_size;
if ((activation == "identity") && (!residual.get_ptr())) {
// Only the CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM algo is
// enabled with CUDNN_ACTIVATION_IDENTITY in cuDNN lib.
    // But testing showed it can be slower in some cases, so fall back to
    // cudnnConvolutionForward followed by cudnnAddTensor.
// ------------- cudnn conv forward and bias add ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnConvolutionForward(handle,
&alpha,
x_desc,
transformed_input.data(),
w_desc,
filter.data(),
cudnn_conv_desc,
algo,
cudnn_workspace,
workspace_size,
&beta,
o_desc,
output->data()));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnAddTensor(
handle, &alpha, b_desc, bias.data(), &alpha, o_desc, output->data()));
} else {
// Only the CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM algo is
// enabled with CUDNN_ACTIVATION_IDENTITY.
if (activation == "identity") {
algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
}
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = residual.get_ptr() ? 1.0f : 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnConvolutionBiasActivationForward(
handle,
&alpha,
x_desc,
transformed_input.data(),
w_desc,
filter.data(),
cudnn_conv_desc,
algo,
cudnn_workspace,
workspace_size,
&beta,
o_desc,
residual.get_ptr() ? residual->data() : output->data(),
b_desc,
bias.data(),
act_desc,
o_desc,
output->data()));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
}
if (!channels.empty()) {
if (transformed_input.dims()[0] == 1 &&
compute_format == CUDNN_TENSOR_NCHW) {
// share data with Output
phi::DenseTensor t;
t.ShareDataWith(*output);
auto y_dims = output->dims();
t.Resize({y_dims[1], y_dims[2], y_dims[3]});
int s = 0;
for (size_t i = 0; i < channels.size(); ++i) {
int e = s + channels[i];
outs[i]->ShareDataWith(t.Slice(s, e));
outs[i]->Resize(
{transformed_input.dims()[0], channels[i], y_dims[2], y_dims[3]});
s = e;
}
} else {
      // TODO(qingiqng): do the copy when the batch size is larger than 1
PADDLE_THROW(phi::errors::Unimplemented(
"Input with batch size greater than 1 is unsupported. The received "
"batch size is %d, Input's shape is [%s].",
transformed_input.dims()[0],
transformed_input.dims()));
}
}
}
} // namespace fusion
} // namespace phi
PD_REGISTER_KERNEL(conv2d_fusion, // cuda_only
GPUDNN,
ALL_LAYOUT,
phi::fusion::ConvFusionKernel,
float,
double,
phi::dtype::float16) {}
#endif
| 34bfd754bd6ecc1041a50bb715410be095821a4c.cu | // Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef PADDLE_WITH_CUDA
#include <xxhash.h>
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <unordered_map>
#include "glog/logging.h"
#include "paddle/phi/backends/context_pool.h"
#include "paddle/phi/backends/dynload/cudnn.h"
#include "paddle/phi/backends/gpu/cuda/cudnn_desc.h"
#include "paddle/phi/common/backend.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/impl/conv_cudnn_impl.h"
#include "paddle/utils/optional.h"
namespace phi {
namespace fusion {
namespace {
// TODO(wilber): Add a LRU strategy.
class CudnnConvDescManager {
public:
static CudnnConvDescManager* Instance() {
static CudnnConvDescManager global;
return &global;
}
struct CudnnCacheInfo {
phi::backends::gpu::TensorDescriptor* x_desc{nullptr};
phi::backends::gpu::FilterDescriptor* w_desc{nullptr};
phi::backends::gpu::TensorDescriptor* b_desc{nullptr};
phi::backends::gpu::TensorDescriptor* o_desc{nullptr};
phi::backends::gpu::ConvolutionDescriptor* conv_desc{nullptr};
phi::backends::gpu::ActivationDescriptor* act_desc{nullptr};
size_t workspace_size;
cudnnConvolutionFwdAlgo_t algo;
std::vector<int> paddings;
std::vector<int> dilations;
std::vector<int> input_pad;
std::vector<int> new_input_shape_vec;
bool is_sys_pad;
// TODO(wilber): The destruction of cudnn descriptor depends on the
// phi::dynload::cudnn singleton, but when the process exits, the singleton
// destruction order cannot be determined.
// After testing, it is found that the phi::dynload::cudnn related singleton
// on Windows is destructed first, causing the descriptor to be destructed
// and failed, while the descriptor on Linux is destructed first, and the
// phi::dynload::cudnn singleton is destructed later, so that it is correct.
// To circumvent this problem, we rely entirely on freeing resources when
// the process exits.
// ~CudnnCacheInfo() {
// if (x_desc) delete x_desc;
// if (w_desc) delete w_desc;
// if (b_desc) delete b_desc;
// if (o_desc) delete o_desc;
// if (conv_desc) delete conv_desc;
// if (act_desc) delete act_desc;
// }
};
CudnnCacheInfo* GetCudnnCacheInfo(
const std::vector<int>& input_dims,
const std::vector<int>& filter_dims,
const std::vector<int>& bias_dims,
const std::vector<int>& output_dims,
const std::vector<int>& paddings,
const std::vector<int>& strides,
const std::vector<int>& dilations,
phi::DataType input_dtype,
int groups,
cudnnDataType_t dtype,
cudnnTensorFormat_t format,
const std::function<void(cudnnConvolutionFwdAlgo_t*,
size_t*,
cudnnTensorDescriptor_t,
cudnnFilterDescriptor_t,
cudnnTensorDescriptor_t,
cudnnConvolutionDescriptor_t)>& search_func,
const std::string& act,
double value_max = std::numeric_limits<double>::max()) {
// std::hash takes about 5us, xxhash can optimize to 2.5us.
XXH64_state_t* const state = XXH64_createState();
if (state == nullptr) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash create state failed, maybe a environment error."));
}
XXH64_hash_t const seed = 0;
if (XXH64_reset(state, seed) == XXH_ERROR) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash reset state failed, maybe a environment error."));
}
XXH64_update(state, input_dims.data(), input_dims.size() * sizeof(int));
XXH64_update(state, filter_dims.data(), filter_dims.size() * sizeof(int));
XXH64_update(state, bias_dims.data(), bias_dims.size() * sizeof(int));
// XXH64_update(state, output_dims.data(), output_dims.size() *
// sizeof(int));
XXH64_update(state, paddings.data(), paddings.size() * sizeof(int));
XXH64_update(state, strides.data(), strides.size() * sizeof(int));
XXH64_update(state, dilations.data(), dilations.size() * sizeof(int));
XXH64_update(state, &input_dtype, sizeof(int));
XXH64_update(state, &groups, sizeof(int));
XXH64_update(state, &dtype, sizeof(int));
XXH64_update(state, &format, sizeof(int));
XXH64_update(state, act.data(), act.length() * sizeof(char));
// XXH64_update(state, &value_max, sizeof(double));
XXH64_hash_t hash_key = XXH64_digest(state);
XXH64_freeState(state);
std::lock_guard<std::mutex> lock(cache_mutex_);
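    // Descriptors and the chosen algorithm are cached per hash key, so later
    // calls with the same shapes/attributes skip descriptor creation and the
    // algorithm search entirely.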
if (!cudnn_conv_cache_.count(hash_key)) {
cudnn_conv_cache_[hash_key] = CudnnCacheInfo();
cudnn_conv_cache_[hash_key].x_desc =
GetTensorDescInfo(input_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].w_desc =
GetFilterDescInfo(filter_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].o_desc =
GetTensorDescInfo(output_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].b_desc =
GetTensorDescInfo(bias_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].conv_desc =
GetConvDescInfo(paddings, strides, dilations, groups, dtype);
cudnn_conv_cache_[hash_key].act_desc =
GetActivationDescInfo(act, value_max);
size_t workspace_size;
cudnnConvolutionFwdAlgo_t algo;
search_func(&algo,
&workspace_size,
cudnn_conv_cache_[hash_key].x_desc->desc(),
cudnn_conv_cache_[hash_key].w_desc->desc(),
cudnn_conv_cache_[hash_key].o_desc->desc(),
cudnn_conv_cache_[hash_key].conv_desc->desc());
cudnn_conv_cache_[hash_key].workspace_size = workspace_size;
cudnn_conv_cache_[hash_key].algo = algo;
}
return &cudnn_conv_cache_.at(hash_key);
}
struct ConvAttrCacheInfo {
std::vector<int> paddings;
std::vector<int> dilations;
std::vector<int> input_pad;
std::vector<int> new_input_shape_vec;
bool is_sys_pad;
};
ConvAttrCacheInfo* GetConvAttr(const std::vector<int>& paddings_t,
const std::vector<int>& dilations_t,
const std::string& padding_algorithm,
const std::vector<int>& input_dims,
const std::vector<int>& filter_dims,
const std::vector<int>& strides,
cudnnTensorFormat_t format) {
XXH64_state_t* const state = XXH64_createState();
if (state == nullptr) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash create state failed, maybe a environment error."));
}
XXH64_hash_t const seed = 0;
if (XXH64_reset(state, seed) == XXH_ERROR) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash create state failed, maybe a environment error."));
}
XXH64_update(state, paddings_t.data(), paddings_t.size() * sizeof(int));
XXH64_update(state, dilations_t.data(), dilations_t.size() * sizeof(int));
XXH64_update(state, input_dims.data(), input_dims.size() * sizeof(int));
XXH64_update(state, filter_dims.data(), filter_dims.size() * sizeof(int));
XXH64_update(state, strides.data(), strides.size() * sizeof(int));
XXH64_update(state, &format, sizeof(int));
XXH64_update(state,
padding_algorithm.data(),
padding_algorithm.length() * sizeof(char));
XXH64_hash_t hash_key = XXH64_digest(state);
XXH64_freeState(state);
std::lock_guard<std::mutex> lock(attr_mutex_);
if (!conv_attr_cache_.count(hash_key)) {
ConvAttrCacheInfo cache;
auto paddings = paddings_t;
auto dilations = dilations_t;
std::vector<int> in_data_dims(input_dims.size() - 2);
std::vector<int> ksize(filter_dims.size() - 2);
if (format == CUDNN_TENSOR_NHWC) {
for (size_t i = 1; i < input_dims.size() - 1; ++i) {
in_data_dims[i - 1] = input_dims[i];
}
for (size_t i = 1; i < filter_dims.size() - 1; ++i) {
ksize[i - 1] = filter_dims[i];
}
} else {
for (size_t i = 2; i < input_dims.size(); ++i) {
in_data_dims[i - 2] = input_dims[i];
}
for (size_t i = 2; i < filter_dims.size(); ++i) {
ksize[i - 2] = filter_dims[i];
}
}
phi::UpdatePaddingAndDilation(&paddings,
&dilations,
padding_algorithm,
make_ddim(in_data_dims),
strides,
ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim);
std::vector<int> padding_common(data_dim, 0);
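      // cuDNN only supports symmetric padding, so for asymmetric padding the
      // input is pre-padded (input_pad / new_input_shape_vec) and only the
      // symmetric part is kept in padding_common.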
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = input_dims[0];
if (format == CUDNN_TENSOR_NCHW) {
new_input_shape_vec[1] = input_dims[1];
} else {
new_input_shape_vec[data_dim + 1] = input_dims[data_dim + 1];
}
std::vector<int> input_pad(input_dims.size() * 2, 0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
if (format == CUDNN_TENSOR_NCHW) {
new_input_shape_vec[i + 2] = input_dims[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] = input_dims[i + 1] + padding_diff[i];
}
if (format == CUDNN_TENSOR_NCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i];
}
}
cache.is_sys_pad = false;
cache.input_pad = input_pad;
cache.new_input_shape_vec = new_input_shape_vec;
} else {
cache.is_sys_pad = true;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
cache.dilations = dilations;
cache.paddings = padding_common;
conv_attr_cache_[hash_key] = cache;
}
return &conv_attr_cache_.at(hash_key);
}
private:
phi::backends::gpu::TensorDescriptor* GetTensorDescInfo(
const std::vector<int>& input_dims,
phi::DataType input_dtype,
cudnnTensorFormat_t input_format) {
auto* desc = new phi::backends::gpu::TensorDescriptor();
desc->set(
input_dims, input_format, backends::gpu::ToCudnnDataType(input_dtype));
return desc;
}
phi::backends::gpu::FilterDescriptor* GetFilterDescInfo(
const std::vector<int>& input_dims,
phi::DataType input_dtype,
cudnnTensorFormat_t input_format) {
auto* desc = new phi::backends::gpu::FilterDescriptor();
desc->set(
input_dims, input_format, backends::gpu::ToCudnnDataType(input_dtype));
return desc;
}
phi::backends::gpu::ConvolutionDescriptor* GetConvDescInfo(
const std::vector<int>& paddings,
const std::vector<int>& strides,
const std::vector<int>& dilations,
int groups,
cudnnDataType_t dtype) {
auto* desc = new phi::backends::gpu::ConvolutionDescriptor();
desc->set(
dtype, paddings, strides, dilations, phi::AllowTF32Cudnn(), groups);
return desc;
}
phi::backends::gpu::ActivationDescriptor* GetActivationDescInfo(
const std::string& act,
double value_max = std::numeric_limits<double>::max()) {
auto* desc = new phi::backends::gpu::ActivationDescriptor();
cudnnActivationMode_t mode;
double relu_ceiling = 0.0;
if (act == "identity") {
mode = CUDNN_ACTIVATION_IDENTITY;
} else if (act == "relu") {
mode = CUDNN_ACTIVATION_RELU;
} else if (act == "relu6") {
relu_ceiling = 6.0;
mode = CUDNN_ACTIVATION_CLIPPED_RELU;
} else if (act == "sigmoid") {
mode = CUDNN_ACTIVATION_SIGMOID;
} else if (act == "relux") {
relu_ceiling = value_max;
mode = CUDNN_ACTIVATION_CLIPPED_RELU;
} else if (act == "tanh") {
mode = CUDNN_ACTIVATION_TANH;
} else {
PADDLE_THROW(phi::errors::Unimplemented(
"Unknown CUDNN activation string: %s.", act));
}
desc->set(mode, relu_ceiling);
return desc;
}
std::mutex cache_mutex_;
std::unordered_map<size_t, CudnnCacheInfo> cudnn_conv_cache_;
std::mutex attr_mutex_;
std::unordered_map<size_t, ConvAttrCacheInfo> conv_attr_cache_;
};
} // namespace
template <typename T, typename Context>
void ConvFusionKernel(const Context& ctx,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& bias,
const paddle::optional<DenseTensor>& residual,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
const std::vector<int>& dilations_t,
int groups,
const std::string& data_format,
const std::string& activation,
bool exhaustive_search,
const std::vector<int>& channels,
int user_workspace_size,
DenseTensor* output,
std::vector<DenseTensor*> outs) {
auto handle = ctx.cudnn_handle();
ctx.template Alloc<T>(output);
auto workspace_handle = ctx.cudnn_workspace_handle();
exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search;
bool deterministic = FLAGS_cudnn_deterministic;
PADDLE_ENFORCE_EQ(exhaustive_search && deterministic,
false,
phi::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
size_t workspace_size_limit = 0;
if (FLAGS_conv_workspace_size_limit > 0 || user_workspace_size > 0) {
int64_t max_user_size =
std::min(static_cast<int64_t>(FLAGS_conv_workspace_size_limit),
static_cast<int64_t>(user_workspace_size));
workspace_size_limit = max_user_size * 1024 * 1024;
}
auto dtype = phi::backends::gpu::CudnnDataType<T>::type;
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// Choose NHWC or NCHW by data_format attr.
auto compute_format = channel_last ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW;
VLOG(3) << "Compute ConvFusionOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == CUDNN_TENSOR_NHWC ? "NHWC" : "NCHW");
auto* conv_attr_cache = CudnnConvDescManager::Instance()->GetConvAttr(
paddings_t,
dilations_t,
padding_algorithm,
phi::vectorize<int>(input.dims()),
phi::vectorize<int>(filter.dims()),
strides,
compute_format);
DenseTensor transformed_input;
const int input_rank = input.dims().size();
auto unsys_pad_process = [&](const std::vector<int>& new_input_shape_vec,
const std::vector<int>& input_pad) {
DDim new_input_shape(make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
ctx.template Alloc<T>(&transformed_input);
T pad_value(0.0);
switch (input_rank) {
case 4: {
funcs::PadFunction<Context, T, 4>(
ctx, input_pad, input, pad_value, &transformed_input);
} break;
case 5: {
funcs::PadFunction<Context, T, 5>(
ctx, input_pad, input, pad_value, &transformed_input);
} break;
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
};
if (conv_attr_cache->is_sys_pad) {
transformed_input.ShareDataWith(input);
} else {
unsys_pad_process(conv_attr_cache->new_input_shape_vec,
conv_attr_cache->input_pad);
}
std::vector<int> b_dims(input_rank, 1);
if (compute_format == CUDNN_TENSOR_NCHW) {
auto bias_rank = bias.dims().size();
if (input_rank == bias_rank) {
b_dims[1] = static_cast<int>(bias.dims()[1]);
} else {
b_dims[1] = static_cast<int>(bias.dims()[0]);
}
} else {
b_dims[input_rank - 1] = static_cast<int>(bias.dims()[0]);
}
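  // search_func picks the forward convolution algorithm, either from cuDNN's
  // heuristics (the default) or by exhaustive benchmarking, and reports the
  // workspace size that algorithm requires.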
auto search_func = [&](cudnnConvolutionFwdAlgo_t* cudnn_algo,
size_t* wks_bytes,
cudnnTensorDescriptor_t x_desc,
cudnnFilterDescriptor_t w_desc,
cudnnTensorDescriptor_t o_desc,
cudnnConvolutionDescriptor_t cudnn_conv_desc) {
if (!exhaustive_search) {
#if CUDNN_VERSION >= 8000
int perf_count;
int best_algo_idx = 0;
size_t tmp_size = 0;
std::unique_ptr<cudnnConvolutionFwdAlgoPerf_t[]> perf_results(
new cudnnConvolutionFwdAlgoPerf_t[phi::kNUM_CUDNN_FWD_ALGS]);
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardAlgorithm_v7(
handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
phi::kNUM_CUDNN_FWD_ALGS,
&perf_count,
perf_results.get()));
*cudnn_algo = (perf_results.get())[best_algo_idx].algo;
#else
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardAlgorithm(
handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit,
cudnn_algo));
#endif
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardWorkspaceSize(handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
*cudnn_algo,
wks_bytes));
} else {
std::array<cudnnConvolutionFwdAlgoPerf_t, phi::kNUM_CUDNN_FWD_ALGS>
fwd_perf_stat;
int returned_algo_count;
auto cudnn_find_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnFindConvolutionForwardAlgorithmEx(
handle,
x_desc,
transformed_input.data(),
w_desc,
filter.data(),
cudnn_conv_desc,
o_desc,
output->data(),
phi::kNUM_CUDNN_FWD_ALGS,
&returned_algo_count,
fwd_perf_stat.data(),
cudnn_workspace,
workspace_size_limit));
};
workspace_handle.RunFuncSync(cudnn_find_func, workspace_size_limit);
*cudnn_algo = fwd_perf_stat[0].algo;
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardWorkspaceSize(
handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
fwd_perf_stat[0].algo,
wks_bytes));
}
};
auto cudnn_cache_info = CudnnConvDescManager::Instance()->GetCudnnCacheInfo(
phi::vectorize<int>(transformed_input.dims()),
phi::vectorize<int>(filter.dims()),
b_dims,
phi::vectorize<int>(output->dims()),
conv_attr_cache->paddings,
strides,
conv_attr_cache->dilations,
transformed_input.dtype(),
groups,
phi::backends::gpu::CudnnDataType<T>::type,
compute_format,
search_func,
activation);
auto x_desc = cudnn_cache_info->x_desc->desc();
auto w_desc = cudnn_cache_info->w_desc->desc();
auto b_desc = cudnn_cache_info->b_desc->desc();
auto o_desc = cudnn_cache_info->o_desc->desc();
auto cudnn_conv_desc = cudnn_cache_info->conv_desc->desc();
auto act_desc = cudnn_cache_info->act_desc->desc();
auto algo = cudnn_cache_info->algo;
auto workspace_size = cudnn_cache_info->workspace_size;
if ((activation == "identity") && (!residual.get_ptr())) {
// Only the CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM algo is
// enabled with CUDNN_ACTIVATION_IDENTITY in cuDNN lib.
    // But testing showed it can be slower in some cases, so fall back to
    // cudnnConvolutionForward followed by cudnnAddTensor.
// ------------- cudnn conv forward and bias add ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnConvolutionForward(handle,
&alpha,
x_desc,
transformed_input.data(),
w_desc,
filter.data(),
cudnn_conv_desc,
algo,
cudnn_workspace,
workspace_size,
&beta,
o_desc,
output->data()));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnAddTensor(
handle, &alpha, b_desc, bias.data(), &alpha, o_desc, output->data()));
} else {
// Only the CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM algo is
// enabled with CUDNN_ACTIVATION_IDENTITY.
if (activation == "identity") {
algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
}
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = residual.get_ptr() ? 1.0f : 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnConvolutionBiasActivationForward(
handle,
&alpha,
x_desc,
transformed_input.data(),
w_desc,
filter.data(),
cudnn_conv_desc,
algo,
cudnn_workspace,
workspace_size,
&beta,
o_desc,
residual.get_ptr() ? residual->data() : output->data(),
b_desc,
bias.data(),
act_desc,
o_desc,
output->data()));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
}
if (!channels.empty()) {
if (transformed_input.dims()[0] == 1 &&
compute_format == CUDNN_TENSOR_NCHW) {
// share data with Output
phi::DenseTensor t;
t.ShareDataWith(*output);
auto y_dims = output->dims();
t.Resize({y_dims[1], y_dims[2], y_dims[3]});
int s = 0;
for (size_t i = 0; i < channels.size(); ++i) {
int e = s + channels[i];
outs[i]->ShareDataWith(t.Slice(s, e));
outs[i]->Resize(
{transformed_input.dims()[0], channels[i], y_dims[2], y_dims[3]});
s = e;
}
} else {
      // TODO(qingiqng): do the copy when the batch size is larger than 1
PADDLE_THROW(phi::errors::Unimplemented(
"Input with batch size greater than 1 is unsupported. The received "
"batch size is %d, Input's shape is [%s].",
transformed_input.dims()[0],
transformed_input.dims()));
}
}
}
} // namespace fusion
} // namespace phi
PD_REGISTER_KERNEL(conv2d_fusion, // cuda_only
GPUDNN,
ALL_LAYOUT,
phi::fusion::ConvFusionKernel,
float,
double,
phi::dtype::float16) {}
#endif
|
6c0132f901619d812c93ec0d877b47575fa5b667.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "hipfft.h"
#include "fft.h"
#include "utils.h"
extern "C"
{
#include "timer.h"
#include "cuda_fft.h"
}
extern "C"
void cuda_fft(complex_t *x, complex_t *x_f, int n, int repetitions, float* time)
{
reset_timer();
double DDD = get_time();
hipfftHandle plan;
hipfftResult err = hipfftPlan1d(&plan, n, HIPFFT_Z2Z, 1);
if (err != HIPFFT_SUCCESS){
fprintf(stderr, "CUFFT error: Execution failed, error code is %d\n", err);
exit(-1);
}
printf("Time to create cuFFT plan: %lf\n", get_time()-DDD);
//hipEvent_t start, stop;
//hipEventCreate(&start);
//hipEventCreate(&stop);
//hipEventRecord(start);
complex_t *cufft_x_f = (complex_t *)malloc(n*sizeof(complex_t));
complex_t *d_x, *d_x_f;
hipMalloc((void**)&d_x, n*sizeof(complex_t));
hipMemcpy(d_x, x, n*sizeof(complex_t),hipMemcpyHostToDevice);
hipMalloc((void**)&d_x_f, n*sizeof(complex_t));
for(int i = 0; i < repetitions; i++){
err = hipfftExecZ2Z(plan, (hipfftDoubleComplex *)d_x, (hipfftDoubleComplex *)d_x_f, HIPFFT_FORWARD);
if (err != HIPFFT_SUCCESS){
fprintf(stderr, "CUFFT error: Execution failed, error code is %d\n", err);
exit(-1);
}
}
hipMemcpy(cufft_x_f, d_x_f, n*sizeof(complex_t), hipMemcpyDeviceToHost);
hipFree(d_x);
hipFree(d_x_f);
//hipEventRecord(stop);
//hipEventSynchronize(stop);
//hipEventElapsedTime(time, start, stop);
//
//hipEventDestroy(start);
//hipEventDestroy(stop);
printf("Time to run cuFFT : %f\n", get_time());
real_t CUFFT_ERROR =0;
for(int i=0; i< n ; i++){
CUFFT_ERROR += cabs(cufft_x_f[i]/n- x_f[i]);
}
printf("ERROR of CUFFT is %lg\n", CUFFT_ERROR);
hipfftDestroy(plan);
free(cufft_x_f);
}
extern "C"
void cufft_plan_create(unsigned int* plan, int B, int loops)
{
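    // The caller's unsigned int is used as storage for the FFT handle: the
    // pointer is reinterpreted and a batched 1-D double-complex plan of length
    // B with `loops` transforms is created in place. This assumes the handle
    // type is no larger than unsigned int.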
//hipFree(0);
hipfftHandle *plan1 = (hipfftHandle*)plan;
hipfftResult err;
err = hipfftPlan1d(plan1, B, HIPFFT_Z2Z, loops);
if (err != HIPFFT_SUCCESS){
fprintf(stderr, "CUFFT error: Plan creation failed");
exit(-1);
}
}
| 6c0132f901619d812c93ec0d877b47575fa5b667.cu | #include <cuda.h>
#include "cufft.h"
#include "fft.h"
#include "utils.h"
extern "C"
{
#include "timer.h"
#include "cuda_fft.h"
}
extern "C"
void cuda_fft(complex_t *x, complex_t *x_f, int n, int repetitions, float* time)
{
reset_timer();
double DDD = get_time();
cufftHandle plan;
cufftResult err = cufftPlan1d(&plan, n, CUFFT_Z2Z, 1);
if (err != CUFFT_SUCCESS){
fprintf(stderr, "CUFFT error: Execution failed, error code is %d\n", err);
exit(-1);
}
printf("Time to create cuFFT plan: %lf\n", get_time()-DDD);
//cudaEvent_t start, stop;
//cudaEventCreate(&start);
//cudaEventCreate(&stop);
//cudaEventRecord(start);
complex_t *cufft_x_f = (complex_t *)malloc(n*sizeof(complex_t));
complex_t *d_x, *d_x_f;
cudaMalloc((void**)&d_x, n*sizeof(complex_t));
cudaMemcpy(d_x, x, n*sizeof(complex_t),cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_x_f, n*sizeof(complex_t));
for(int i = 0; i < repetitions; i++){
err = cufftExecZ2Z(plan, (cufftDoubleComplex *)d_x, (cufftDoubleComplex *)d_x_f, CUFFT_FORWARD);
if (err != CUFFT_SUCCESS){
fprintf(stderr, "CUFFT error: Execution failed, error code is %d\n", err);
exit(-1);
}
}
cudaMemcpy(cufft_x_f, d_x_f, n*sizeof(complex_t), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_x_f);
//cudaEventRecord(stop);
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(time, start, stop);
//
//cudaEventDestroy(start);
//cudaEventDestroy(stop);
printf("Time to run cuFFT : %f\n", get_time());
real_t CUFFT_ERROR =0;
for(int i=0; i< n ; i++){
CUFFT_ERROR += cabs(cufft_x_f[i]/n- x_f[i]);
}
printf("ERROR of CUFFT is %lg\n", CUFFT_ERROR);
cufftDestroy(plan);
free(cufft_x_f);
}
extern "C"
void cufft_plan_create(unsigned int* plan, int B, int loops)
{
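    // The caller's unsigned int is used as storage for the FFT handle: the
    // pointer is reinterpreted and a batched 1-D double-complex plan of length
    // B with `loops` transforms is created in place. This assumes the handle
    // type is no larger than unsigned int.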
//cudaFree(0);
cufftHandle *plan1 = (cufftHandle*)plan;
cufftResult err;
err = cufftPlan1d(plan1, B, CUFFT_Z2Z, loops);
if (err != CUFFT_SUCCESS){
fprintf(stderr, "CUFFT error: Plan creation failed");
exit(-1);
}
}
|
c25f4ef29e09bc7648221518397f144d823d8087.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Author: Hamed Hassani Saadi
April 2017
Email: [email protected]
to run on monk/mon54:
module load opencv/2.4.9
nvcc HDR_GPU2.cu -o HDR_GPU2.o -L /opt/sharcnet/opencv/2.4.9/lib -lopencv_core -lopencv_imgproc -lopencv_highgui -O3 -arch=sm_20 -Xcompiler -Wall -Xcompiler -Wextra -m64 -I /opt/sharcnet/cuda/7.5.18/include/ -I /opt/sharcnet/opencv/2.4.9/include
./HDR_GPU2.o input_file.png
for profiling:
nvprof ./HDR_GPU2.o input_file.png
or
nvprof --print-gpu-trace ./HDR_GPU2.o input_file.png
or
nvprof --print-gpu-trace --metrics achieved_occupancy ./HDR_GPU2.o input_file2.png
*/
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <vector>
#include <stdio.h>
#include <algorithm>
#include <cassert>
#include <string.h>
#include <iostream>
#include <thrust/extrema.h>
#include <sys/time.h>
#include <time.h>
#include "utils.h"
#define NUMBINS 1024
#define BLOCKSIZE 1024
/*
+++++++++++++++++++++timevalu_subtract Function+++++++++++++++++++++
*/
int timeval_subtract (double *result, struct timeval *x, struct timeval *y) {
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
/*
+++++++++++++++++++++LoadImage Function+++++++++++++++++++++
*/
void loadImageHDR(const std::string &filename, float **imagePtr, size_t *numRows, size_t *numCols){
cv::Mat originImg = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR | CV_LOAD_IMAGE_ANYDEPTH);
cv::Mat image;
if(originImg.type() != CV_32FC3){
originImg.convertTo(image,CV_32FC3);
} else{
image = originImg;
}
if (image.empty()){
std::cerr << "Couldn't open file: " << filename << std::endl;
exit(1);
}
if (image.channels() != 3){
std::cerr << "Image must be color!" << std::endl;
exit(1);
}
if (!image.isContinuous()){
std::cerr << "Image isn't continuous!" << std::endl;
exit(1);
}
*imagePtr = new float[image.rows * image.cols * image.channels()];
float *cvPtr = image.ptr<float>(0);
for (int i = 0; i < image.rows * image.cols * image.channels(); ++i)
(*imagePtr)[i] = cvPtr[i];
*numRows = image.rows;
*numCols = image.cols;
}
/*
+++++++++++++++++++++saveImage Function+++++++++++++++++++++
*/
void saveImageHDR(const float* const image, const size_t numRows, const size_t numCols, const std::string &output_file){
int sizes[2];
sizes[0] = (int)numRows;
sizes[1] = (int)numCols;
cv::Mat imageHDR(2, sizes, CV_32FC3, (void *)image);
imageHDR = imageHDR * 255;
cv::imwrite(output_file.c_str(), imageHDR);
}
/*
+++++++++++++++++++++compareImages Function+++++++++++++++++++++
*/
void compareImages(std::string reference_filename, std::string test_filename)
{
cv::Mat reference = cv::imread(reference_filename, -1);
cv::Mat test = cv::imread(test_filename, -1);
cv::Mat diff = abs(reference - test);
cv::Mat diffSingleChannel = diff.reshape(1, 0); //convert to 1 channel, same # rows
double minVal, maxVal;
cv::minMaxLoc(diffSingleChannel, &minVal, &maxVal, NULL, NULL); //NULL because we don't care about location
//now perform transform so that we bump values to the full range
// diffSingleChannel = (diffSingleChannel - minVal) * (255. / (maxVal - minVal));
// diff = diffSingleChannel.reshape(reference.channels(), 0);
// cv::imwrite("differenceImage.png", diff);
//OK, now we can start comparing values...
unsigned char *referencePtr = reference.ptr<unsigned char>(0);
unsigned char *testPtr = test.ptr<unsigned char>(0);
//checkResultsEps(referencePtr, testPtr, reference.rows * reference.cols * reference.channels(), 4.0, 4.0);
//checkResultsAutodesk(referencePtr, testPtr, reference.rows * reference.cols * reference.channels(), 0.0, 0);
checkResultsExact(referencePtr, testPtr, reference.rows * reference.cols * reference.channels(), 50);
std::cout << "Images are equal 100%." << std::endl;
return;
}
/*
+++++++++++++++++++++rgb_to_xyY Function+++++++++++++++++++++
*/
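// Converts linear RGB to CIE xyY: the 3x3 constants are the sRGB -> XYZ matrix,
// x and y are the chromaticity coordinates, and log_Y stores log10(delta + Y)
// so that zero-luminance pixels do not produce -inf.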
void rgb_to_xyY(float* red, float* green, float* blue, float* x_, float* y_, float* log_Y_, float delta, size_t numRows, size_t numCols){
float r, g, b;
float X,Y,Z, L;
for (size_t i=0; i<numRows; i++) {
for (size_t j=0; j<numCols; j++){
r = red[numCols*i+j];
g = green[numCols*i+j];
b = blue[numCols*i+j];
X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f );
Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f );
Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f );
L = X + Y + Z;
(x_)[numCols*i+j] = X / L;
(y_)[numCols*i+j] = Y / L;
(log_Y_)[numCols*i+j] = log10f( delta + Y );
}
}
}
/*
+++++++++++++++++++GPU rgb_to_xyY Function+++++++++++++++++++
*/
__global__ void rgb_to_xyY_gpu(float* d_r, float* d_g, float* d_b, float* d_x, float* d_y, float* d_log_Y, float delta, int num_pixels_y, int num_pixels_x){
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny ){
float r = d_r[ image_index_1d ];
float g = d_g[ image_index_1d ];
float b = d_b[ image_index_1d ];
float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f );
float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f );
float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f );
float L = X + Y + Z;
float x = X / L;
float y = Y / L;
float log_Y = log10f( delta + Y );
d_x[ image_index_1d ] = x;
d_y[ image_index_1d ] = y;
d_log_Y[ image_index_1d ] = log_Y;
}
}
/*
+++++++++++++++++++++histogram_and_prefixsum Function+++++++++++++++++++++
*/
void histogram_and_prefixsum(float *luminance, unsigned int *cdf, size_t numRows, size_t numCols, size_t numBins, float *luminance_min, float *luminance_max){
float logLumMin = luminance[0];
float logLumMax = luminance[0];
//Step 1
//first we find the minimum and maximum across the entire image
for (size_t i = 1; i < numCols * numRows; ++i) {
logLumMin = ::min(luminance[i], logLumMin);
logLumMax = ::max(luminance[i], logLumMax);
}
//Step 2
float logLumRange = logLumMax - logLumMin;
*luminance_min = logLumMin;
*luminance_max = logLumMax;
//Step 3
//next we use the now known range to compute
//a histogram of numBins bins
unsigned int *histo = new unsigned int[numBins];
for (size_t i = 0; i < numBins; ++i) histo[i] = 0;
for (size_t i = 0; i < numCols * numRows; ++i) {
unsigned int bin = ::min(static_cast<unsigned int>(numBins - 1),
static_cast<unsigned int>((luminance[i] - logLumMin) / logLumRange * numBins));
histo[bin]++;
}
//Step 4
//finally we perform and exclusive scan (prefix sum)
//on the histogram to get the cumulative distribution
(cdf)[0] = 0;
for (size_t i = 1; i < numBins; ++i) {
(cdf)[i] = (cdf)[i - 1] + histo[i - 1];
}
delete[] histo;
}
/*
+++++++++++++++++++GPU histogram_and_prefixsum Function+++++++++++++++++++
*/
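// Shared-memory min/max reduction: the first blockDim.x floats hold the running
// minima and the next blockDim.x hold the running maxima; threads beyond
// nblocks fill their slots with 1e37 / 1e-37 padding values.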
__global__ void shmem_reduce_kernel_minmax(float * d_out_min, float * d_out_max, float * d_in_min, float * d_in_max, int nblocks){
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
if (tid < nblocks){
sdata[tid] = d_in_min[myId];
sdata[tid+blockDim.x] = d_in_max[myId];
}
else {
sdata[tid] = 1E+37;
sdata[tid+blockDim.x] = 1E-37;
}
__syncthreads(); // make sure entire block is loaded!
for (unsigned int s = blockDim.x / 2; s > 0; s /= 2){
if (tid < s){
sdata[tid] = min(sdata[tid],sdata[tid+s]);
sdata[tid+blockDim.x] = max(sdata[tid+blockDim.x],sdata[tid+s+blockDim.x]);
}
__syncthreads(); // make sure all adds at one stage are done!
}
if (tid == 0){
d_out_min[blockIdx.x] = sdata[0];
d_out_max[blockIdx.x] = sdata[0+blockDim.x];
}
}
void reduce_min_max(float * d_log_min, float * d_log_max, float* d_in, float * d_intermin, float * d_intermax, size_t numRows, size_t numCols){
const int maxThreadsPerBlock = BLOCKSIZE;
int threads = maxThreadsPerBlock; // launch one thread for each block in prev step
int blocks = numRows*numCols / maxThreadsPerBlock;
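    // Two-stage reduction: each 1024-pixel block is reduced to one partial
    // min/max pair, then a single 128-thread block reduces the partials. This
    // assumes the pixel count is a multiple of BLOCKSIZE and that the first
    // stage produces at most 128 partial results.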
hipLaunchKernelGGL(( shmem_reduce_kernel_minmax), dim3(blocks), dim3(threads), 2 * threads * sizeof(float), 0, d_intermin, d_intermax, d_in, d_in, threads);
threads = blocks; // launch one thread for each block in prev step
blocks = 1;
hipLaunchKernelGGL(( shmem_reduce_kernel_minmax), dim3(blocks), dim3(128), 2 * 128 * sizeof(float), 0, d_log_min, d_log_max, d_intermin, d_intermax, threads);
}
__global__ void simple_histo(unsigned int *d_bins, float * d_log_min, float * d_log_max, float* d_in, size_t numBins, size_t numRows, size_t numCols)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId >= numRows*numCols)
return;
if (myId < numBins)
d_bins[myId] = 0;
__syncthreads();
float myBinF = (d_in[myId] - d_log_min[0]) / (d_log_max[0] - d_log_min[0]);
    // clamp so the maximum luminance value does not index one past the last bin
    int myBin = min(static_cast<int>(myBinF * numBins), static_cast<int>(numBins) - 1);
atomicAdd(&(d_bins[myBin]), 1);
}
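// Work-efficient (Blelloch) exclusive prefix sum over the histogram, run by a
// single block: an up-sweep builds partial sums in shared memory, the last
// element is cleared, and a down-sweep distributes the sums. Each thread
// handles two elements, so n must equal 2 * blockDim.x and be a power of two.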
__global__ void scan(unsigned int *g_odata, unsigned int *g_idata, int n){
extern __shared__ unsigned int temp[]; // allocated on invocation
int thid = threadIdx.x;
int offset = 1;
temp[2*thid] = g_idata[2*thid]; // load input into shared memory
temp[2*thid+1] = g_idata[2*thid+1];
for (int d = n>>1; d > 0; d >>= 1){ // build sum in place up the tree
__syncthreads();
if (thid < d){
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element
for (int d = 1; d < n; d *= 2){ // traverse down tree & build scan
offset >>= 1;
__syncthreads();
if (thid < d){
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
unsigned int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[2*thid] = temp[2*thid]; // write results to device memory
g_odata[2*thid+1] = temp[2*thid+1];
}
void histogram_and_prefixsum_gpu(float* d_logLuminance, unsigned int * d_cdf, float * d_log_min, float * d_log_max, float * d_intermin, float * d_intermax, unsigned int * d_hist, size_t numRows, size_t numCols, size_t numBins){
reduce_min_max(d_log_min, d_log_max, d_logLuminance, d_intermin, d_intermax, numRows, numCols);
hipDeviceSynchronize();
hipLaunchKernelGGL(( simple_histo), dim3(numRows*numCols/BLOCKSIZE),dim3(BLOCKSIZE), 0, 0, d_hist, d_log_min, d_log_max, d_logLuminance, numBins, numRows, numCols);
hipDeviceSynchronize();
hipLaunchKernelGGL(( scan), dim3(1),dim3(numBins/2),numBins*sizeof(unsigned int), 0, d_cdf,d_hist,numBins);
hipDeviceSynchronize();
}
/*
+++++++++++++++++++++normalize_cdf Function+++++++++++++++++++++
*/
void normalize_cdf(unsigned int* input_cdf, float* output_cdf, size_t n){
const float normalization_constant = 1.f / input_cdf[n - 1];
float tmp;
for (size_t i=0; i<n; i++) {
tmp = input_cdf[i]*normalization_constant;
(output_cdf)[i] = tmp;
}
}
/*
+++++++++++++++++++GPU normalize_cdf Function+++++++++++++++++++
*/
__global__ void normalize_cdf_gpu(unsigned int* d_input_cdf, float* d_output_cdf, int n){
const float normalization_constant = 1.f / d_input_cdf[n - 1];
int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x;
if ( global_index_1d < n )
{
unsigned int input_value = d_input_cdf[ global_index_1d ];
float output_value = input_value * normalization_constant;
d_output_cdf[ global_index_1d ] = output_value;
}
}
/*
+++++++++++++++++++++tonemap Function+++++++++++++++++++++
*/
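// Tone mapping: each pixel's log-luminance is mapped to a bin, the normalized
// CDF value for that bin becomes the new luminance Y (histogram equalization),
// and the pixel is converted back from xyY through XYZ to RGB with the inverse
// of the matrix used in rgb_to_xyY.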
void tonemap(float* x, float* y, float* log_Y, float* ncdf, float* r_new, float* g_new, float* b_new, float min_log_Y, float max_log_Y, size_t num_bins, size_t numRows, size_t numCols){
float log_Y_range = max_log_Y - min_log_Y;
float x_, y_, log_Y_;
unsigned int bin_index;
float X_new, Y_new, Z_new;
for (size_t i=0; i<numRows; i++) {
for (size_t j=0; j<numCols; j++) {
x_ = x[numCols*i+j];
y_ = y[numCols*i+j];
log_Y_ = log_Y[numCols*i+j];
bin_index = min((int)num_bins - 1, int((num_bins * (log_Y_ - min_log_Y)) / log_Y_range));
Y_new = ncdf[bin_index];
X_new = x_ * ( Y_new / y_ );
Z_new = (1 - x_ - y_) * (Y_new / y_);
(r_new)[numCols*i+j] = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f );
(g_new)[numCols*i+j] = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f );
(b_new)[numCols*i+j] = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f );
}
}
}
/*
+++++++++++++++++++GPU tonemap Function+++++++++++++++++++
*/
__global__ void tonemap_gpu(float* d_x, float* d_y, float* d_log_Y, float* d_cdf_norm, float* d_r_new, float* d_g_new, float* d_b_new, float* d_log_min, float* d_log_max, int num_bins, int num_pixels_y, int num_pixels_x){
float log_Y_range = d_log_max[0] - d_log_min[0];
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny )
{
float x = d_x[ image_index_1d ];
float y = d_y[ image_index_1d ];
float log_Y = d_log_Y[ image_index_1d ];
int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - d_log_min[0] ) ) / log_Y_range ) );
float Y_new = d_cdf_norm[ bin_index ];
float X_new = x * ( Y_new / y );
float Z_new = ( 1 - x - y ) * ( Y_new / y );
float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f );
float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f );
float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f );
d_r_new[ image_index_1d ] = r_new;
d_g_new[ image_index_1d ] = g_new;
d_b_new[ image_index_1d ] = b_new;
}
}
/*
+++++++++++++++++++++CPU_histeq Function+++++++++++++++++++++
*/
void CPU_histeq(float * red, float * green, float * blue, float * r_new, float * g_new, float * b_new, float * x, float * y, float * luminance, unsigned int * cdf, float * ncdf, size_t numRows, size_t numCols, size_t numBins){
////////////////////////////////converting RGB to xyY
rgb_to_xyY(red, green, blue, x, y, luminance, 0.0001f, numRows, numCols);
//calculating histogram and CDF
float luminance_min, luminance_max;
histogram_and_prefixsum(luminance, cdf, numRows, numCols, numBins, &luminance_min, &luminance_max);
//normalizing CDF
normalize_cdf(cdf, ncdf, numBins);
//tone-mapping
tonemap(x, y, luminance, ncdf, r_new, g_new, b_new, luminance_min, luminance_max, numBins, numRows, numCols);
}
/*
+++++++++++++++++++++GPU_histeq Function+++++++++++++++++++++
*/
void GPU_histeq(float * d_red, float * d_green, float * d_blue, float * d_r_new, float * d_g_new, float * d_b_new, float * d_x, float * d_y, float * d_luminance, unsigned int * d_cdf, float * d_ncdf, float * d_log_min, float * d_log_max, float * d_intermin, float * d_intermax, unsigned int * d_hist, size_t numRows, size_t numCols, size_t numBins){
//convert from RGB space to chrominance/luminance space xyY
dim3 blockSize(32, 16, 1);
dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x, (numRows + blockSize.y - 1) / blockSize.y, 1);
hipLaunchKernelGGL(( rgb_to_xyY_gpu), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_green, d_blue, d_x, d_y, d_luminance, 0.0001f, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
histogram_and_prefixsum_gpu(d_luminance, d_cdf, d_log_min, d_log_max, d_intermin, d_intermax, d_hist, numRows, numCols, numBins);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( normalize_cdf_gpu), dim3((numBins + 192 - 1) / 192), dim3(192), 0, 0, d_cdf, d_ncdf, numBins);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
dim3 blockSize1(32, 16, 1);
dim3 gridSize1( (numCols + blockSize.x - 1) / blockSize.x, (numRows + blockSize.y - 1) / blockSize.y );
hipLaunchKernelGGL(( tonemap_gpu), dim3(gridSize1), dim3(blockSize1), 0, 0, d_x, d_y, d_luminance, d_ncdf, d_r_new, d_g_new, d_b_new, d_log_min, d_log_max, numBins, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
/*
+++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++Main Function+++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++
*/
int main(int argc, const char * argv[]) {
std::string input_file;
std::string output_file_cpu;
std::string output_file_gpu;
if(argc == 2){
input_file = std::string(argv[1]);
}
else{
input_file = "input_file.png";
}
std::size_t idx = input_file.find_last_of("/");
if (idx == std::string::npos) {
output_file_cpu = "cpu_" + input_file;
output_file_gpu = "gpu_" + input_file;
}
else{
output_file_cpu = "cpu_" + input_file.substr(idx+1,input_file.size()-idx);
output_file_gpu = "gpu_" + input_file.substr(idx+1,input_file.size()-idx);
}
struct timeval td0, td1;
double t_cpu, t_gpu;
double t_copyin, t_copyout;
    ////////////////////////////////loading image
float *imgPtr;
size_t numRows, numCols;
loadImageHDR(input_file, &imgPtr, &numRows, &numCols);
////////////////////////////////separating RGB channels
size_t numPixels = numRows * numCols;
float *red = new float[numPixels];
float *green = new float[numPixels];
float *blue = new float[numPixels];
for (size_t i = 0; i < numPixels; ++i) {
blue[i] = imgPtr[3 * i + 0];
green[i] = imgPtr[3 * i + 1];
red[i] = imgPtr[3 * i + 2];
}
delete[] imgPtr;
/*
//////////////////////
///////////////////CPU
//////////////////////
*/
//image histogram equalization on CPU
float * r_new = new float[numPixels];
float * g_new = new float[numPixels];
float * b_new = new float[numPixels];
float * x = new float[numPixels];
float * y = new float[numPixels];
float * luminance = new float[numPixels];
size_t numBins = NUMBINS;
unsigned int * cdf = new unsigned int[numBins];
float * ncdf = new float[numBins];
gettimeofday (&td0, NULL);
CPU_histeq(red, green, blue, r_new, g_new, b_new, x, y, luminance, cdf, ncdf, numRows, numCols, numBins);
gettimeofday (&td1, NULL);
timeval_subtract (&t_cpu, &td1, &td0);
delete[] x;
delete[] y;
delete[] luminance;
delete[] cdf;
delete[] ncdf;
/*
//////////////////////
///////////////////GPU
//////////////////////
*/
//make sure the context initializes ok
checkCudaErrors(hipFree(0));
float * d_red, * d_green, * d_blue;
float * d_x, * d_y, * d_luminance;
unsigned int * d_cdf;
float * d_log_min, * d_log_max;
float * d_ncdf;
float * d_r_new, * d_g_new, * d_b_new;
float * d_intermin, * d_intermax;
unsigned int * d_hist;
checkCudaErrors(hipMalloc(&d_red, sizeof(float)*numPixels));
checkCudaErrors(hipMalloc(&d_green, sizeof(float)*numPixels));
checkCudaErrors(hipMalloc(&d_blue, sizeof(float)*numPixels));
checkCudaErrors(hipMalloc(&d_x, sizeof(float)*numPixels));
checkCudaErrors(hipMalloc(&d_y, sizeof(float)*numPixels));
checkCudaErrors(hipMalloc(&d_luminance, sizeof(float)*numPixels));
checkCudaErrors(hipMalloc(&d_log_min, sizeof(float)));
checkCudaErrors(hipMalloc(&d_log_max, sizeof(float)));
checkCudaErrors(hipMalloc(&d_cdf, sizeof(unsigned int)*numBins));
checkCudaErrors(hipMalloc(&d_ncdf, sizeof(float)*numBins));
checkCudaErrors(hipMalloc(&d_r_new, sizeof(float)*numPixels));
checkCudaErrors(hipMalloc(&d_g_new, sizeof(float)*numPixels));
checkCudaErrors(hipMalloc(&d_b_new, sizeof(float)*numPixels));
checkCudaErrors(hipMalloc(&d_intermin, sizeof(float)*numRows*numCols));
checkCudaErrors(hipMalloc(&d_intermax, sizeof(float)*numRows*numCols));
checkCudaErrors(hipMalloc(&d_hist, sizeof(unsigned int)*numBins));
gettimeofday (&td0, NULL);
checkCudaErrors(hipMemcpy(d_red, red, sizeof(float)*numPixels, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_green, green, sizeof(float)*numPixels, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_blue, blue, sizeof(float)*numPixels, hipMemcpyHostToDevice));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
gettimeofday (&td1, NULL);
timeval_subtract (&t_copyin, &td1, &td0);
gettimeofday (&td0, NULL);
GPU_histeq(d_red, d_green, d_blue, d_r_new, d_g_new, d_b_new, d_x, d_y, d_luminance, d_cdf, d_ncdf, d_log_min, d_log_max, d_intermin, d_intermax, d_hist, numRows, numCols, numBins);
gettimeofday (&td1, NULL);
timeval_subtract (&t_gpu, &td1, &td0);
float * h_red = new float[numPixels];
float * h_green = new float[numPixels];
float * h_blue = new float[numPixels];
gettimeofday (&td0, NULL);
checkCudaErrors(hipMemcpy(h_red, d_r_new, sizeof(float) * numPixels, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_green, d_g_new, sizeof(float) * numPixels, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_blue, d_b_new, sizeof(float) * numPixels, hipMemcpyDeviceToHost));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
gettimeofday (&td1, NULL);
timeval_subtract (&t_copyout, &td1, &td0);
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_x));
checkCudaErrors(hipFree(d_y));
checkCudaErrors(hipFree(d_luminance));
checkCudaErrors(hipFree(d_log_min));
checkCudaErrors(hipFree(d_log_max));
checkCudaErrors(hipFree(d_cdf));
checkCudaErrors(hipFree(d_ncdf));
checkCudaErrors(hipFree(d_r_new));
checkCudaErrors(hipFree(d_g_new));
checkCudaErrors(hipFree(d_b_new));
checkCudaErrors(hipFree(d_intermin));
checkCudaErrors(hipFree(d_intermax));
checkCudaErrors(hipFree(d_hist));
//recombine the image channels
float *imageHDR = new float[numPixels * 3];
for (size_t i = 0; i < numPixels; ++i) {
imageHDR[3 * i + 0] = b_new[i];
imageHDR[3 * i + 1] = g_new[i];
imageHDR[3 * i + 2] = r_new[i];
}
//saving image
saveImageHDR(imageHDR, numRows, numCols, output_file_cpu);
for (size_t i = 0; i < numPixels; ++i) {
imageHDR[3 * i + 0] = h_blue[i];
imageHDR[3 * i + 1] = h_green[i];
imageHDR[3 * i + 2] = h_red[i];
}
//saving image
saveImageHDR(imageHDR, numRows, numCols, output_file_gpu);
printf("CPU runtime: %f ms\n",t_cpu);
printf("GPU runtime: %f ms\n",t_gpu);
printf("Copying data into the GPU: %f ms\n", t_copyin);
printf("Copying data form the GPU: %f ms\n", t_copyout);
printf("GPU runtime + data transfer: %f ms\n", t_copyin+t_gpu+t_copyout);
printf("Image dimension: %dx%d = %d pixels\n", (int)numRows, (int)numCols, (int)numPixels);
compareImages(output_file_cpu, output_file_gpu);
delete[] red;
delete[] green;
delete[] blue;
delete[] r_new;
delete[] g_new;
delete[] b_new;
delete[] imageHDR;
delete[] h_red;
delete[] h_green;
delete[] h_blue;
} | c25f4ef29e09bc7648221518397f144d823d8087.cu | /*
Author: Hamed Hassani Saadi
April 2017
Email: [email protected]
to run on monk/mon54:
module load opencv/2.4.9
nvcc HDR_GPU2.cu -o HDR_GPU2.o -L /opt/sharcnet/opencv/2.4.9/lib -lopencv_core -lopencv_imgproc -lopencv_highgui -O3 -arch=sm_20 -Xcompiler -Wall -Xcompiler -Wextra -m64 -I /opt/sharcnet/cuda/7.5.18/include/ -I /opt/sharcnet/opencv/2.4.9/include
./HDR_GPU2.o input_file.png
for profiling:
nvprof ./HDR_GPU2.o input_file.png
or
nvprof --print-gpu-trace ./HDR_GPU2.o input_file.png
or
nvprof --print-gpu-trace --metrics achieved_occupancy ./HDR_GPU2.o input_file2.png
*/
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <vector>
#include <stdio.h>
#include <algorithm>
#include <cassert>
#include <string.h>
#include <iostream>
#include <thrust/extrema.h>
#include <sys/time.h>
#include <time.h>
#include "utils.h"
#define NUMBINS 1024
#define BLOCKSIZE 1024
/*
+++++++++++++++++++++timevalu_subtract Function+++++++++++++++++++++
*/
int timeval_subtract (double *result, struct timeval *x, struct timeval *y) {
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
/*
+++++++++++++++++++++LoadImage Function+++++++++++++++++++++
*/
void loadImageHDR(const std::string &filename, float **imagePtr, size_t *numRows, size_t *numCols){
cv::Mat originImg = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR | CV_LOAD_IMAGE_ANYDEPTH);
cv::Mat image;
if(originImg.type() != CV_32FC3){
originImg.convertTo(image,CV_32FC3);
} else{
image = originImg;
}
if (image.empty()){
std::cerr << "Couldn't open file: " << filename << std::endl;
exit(1);
}
if (image.channels() != 3){
std::cerr << "Image must be color!" << std::endl;
exit(1);
}
if (!image.isContinuous()){
std::cerr << "Image isn't continuous!" << std::endl;
exit(1);
}
*imagePtr = new float[image.rows * image.cols * image.channels()];
float *cvPtr = image.ptr<float>(0);
for (int i = 0; i < image.rows * image.cols * image.channels(); ++i)
(*imagePtr)[i] = cvPtr[i];
*numRows = image.rows;
*numCols = image.cols;
}
/*
+++++++++++++++++++++saveImage Function+++++++++++++++++++++
*/
void saveImageHDR(const float* const image, const size_t numRows, const size_t numCols, const std::string &output_file){
int sizes[2];
sizes[0] = (int)numRows;
sizes[1] = (int)numCols;
cv::Mat imageHDR(2, sizes, CV_32FC3, (void *)image);
imageHDR = imageHDR * 255;
cv::imwrite(output_file.c_str(), imageHDR);
}
/*
+++++++++++++++++++++compareImages Function+++++++++++++++++++++
*/
void compareImages(std::string reference_filename, std::string test_filename)
{
cv::Mat reference = cv::imread(reference_filename, -1);
cv::Mat test = cv::imread(test_filename, -1);
cv::Mat diff = abs(reference - test);
cv::Mat diffSingleChannel = diff.reshape(1, 0); //convert to 1 channel, same # rows
double minVal, maxVal;
cv::minMaxLoc(diffSingleChannel, &minVal, &maxVal, NULL, NULL); //NULL because we don't care about location
//now perform transform so that we bump values to the full range
// diffSingleChannel = (diffSingleChannel - minVal) * (255. / (maxVal - minVal));
// diff = diffSingleChannel.reshape(reference.channels(), 0);
// cv::imwrite("differenceImage.png", diff);
//OK, now we can start comparing values...
unsigned char *referencePtr = reference.ptr<unsigned char>(0);
unsigned char *testPtr = test.ptr<unsigned char>(0);
//checkResultsEps(referencePtr, testPtr, reference.rows * reference.cols * reference.channels(), 4.0, 4.0);
//checkResultsAutodesk(referencePtr, testPtr, reference.rows * reference.cols * reference.channels(), 0.0, 0);
checkResultsExact(referencePtr, testPtr, reference.rows * reference.cols * reference.channels(), 50);
std::cout << "Images are equal 100%." << std::endl;
return;
}
/*
+++++++++++++++++++++rgb_to_xyY Function+++++++++++++++++++++
*/
void rgb_to_xyY(float* red, float* green, float* blue, float* x_, float* y_, float* log_Y_, float delta, size_t numRows, size_t numCols){
float r, g, b;
float X,Y,Z, L;
for (size_t i=0; i<numRows; i++) {
for (size_t j=0; j<numCols; j++){
r = red[numCols*i+j];
g = green[numCols*i+j];
b = blue[numCols*i+j];
X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f );
Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f );
Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f );
L = X + Y + Z;
(x_)[numCols*i+j] = X / L;
(y_)[numCols*i+j] = Y / L;
(log_Y_)[numCols*i+j] = log10f( delta + Y );
}
}
}
/*
+++++++++++++++++++GPU rgb_to_xyY Function+++++++++++++++++++
*/
__global__ void rgb_to_xyY_gpu(float* d_r, float* d_g, float* d_b, float* d_x, float* d_y, float* d_log_Y, float delta, int num_pixels_y, int num_pixels_x){
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny ){
float r = d_r[ image_index_1d ];
float g = d_g[ image_index_1d ];
float b = d_b[ image_index_1d ];
float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f );
float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f );
float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f );
float L = X + Y + Z;
float x = X / L;
float y = Y / L;
float log_Y = log10f( delta + Y );
d_x[ image_index_1d ] = x;
d_y[ image_index_1d ] = y;
d_log_Y[ image_index_1d ] = log_Y;
}
}
/*
+++++++++++++++++++++histogram_and_prefixsum Function+++++++++++++++++++++
*/
void histogram_and_prefixsum(float *luminance, unsigned int *cdf, size_t numRows, size_t numCols, size_t numBins, float *luminance_min, float *luminance_max){
float logLumMin = luminance[0];
float logLumMax = luminance[0];
//Step 1
//first we find the minimum and maximum across the entire image
for (size_t i = 1; i < numCols * numRows; ++i) {
logLumMin = std::min(luminance[i], logLumMin);
logLumMax = std::max(luminance[i], logLumMax);
}
//Step 2
float logLumRange = logLumMax - logLumMin;
*luminance_min = logLumMin;
*luminance_max = logLumMax;
//Step 3
//next we use the now known range to compute
//a histogram of numBins bins
unsigned int *histo = new unsigned int[numBins];
for (size_t i = 0; i < numBins; ++i) histo[i] = 0;
for (size_t i = 0; i < numCols * numRows; ++i) {
unsigned int bin = std::min(static_cast<unsigned int>(numBins - 1),
static_cast<unsigned int>((luminance[i] - logLumMin) / logLumRange * numBins));
histo[bin]++;
}
//Step 4
//finally we perform an exclusive scan (prefix sum)
//on the histogram to get the cumulative distribution
(cdf)[0] = 0;
for (size_t i = 1; i < numBins; ++i) {
(cdf)[i] = (cdf)[i - 1] + histo[i - 1];
}
delete[] histo;
}
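// Worked example (illustration only): with logLumMin = -2, logLumMax = 2 and numBins = 1024,
// a luminance of 0 falls in bin (0 - (-2)) / 4 * 1024 = 512, and values at logLumMax are
// clamped into the last bin (numBins - 1 = 1023) by the std::min above.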
/*
+++++++++++++++++++GPU histogram_and_prefixsum Function+++++++++++++++++++
*/
__global__ void shmem_reduce_kernel_minmax(float * d_out_min, float * d_out_max, float * d_in_min, float * d_in_max, int nblocks){
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
if (tid < nblocks){
sdata[tid] = d_in_min[myId];
sdata[tid+blockDim.x] = d_in_max[myId];
}
else {
sdata[tid] = 1E+37;
sdata[tid+blockDim.x] = 1E-37;
}
__syncthreads(); // make sure entire block is loaded!
for (unsigned int s = blockDim.x / 2; s > 0; s /= 2){
if (tid < s){
sdata[tid] = min(sdata[tid],sdata[tid+s]);
sdata[tid+blockDim.x] = max(sdata[tid+blockDim.x],sdata[tid+s+blockDim.x]);
}
__syncthreads(); // make sure all adds at one stage are done!
}
if (tid == 0){
d_out_min[blockIdx.x] = sdata[0];
d_out_max[blockIdx.x] = sdata[0+blockDim.x];
}
}
void reduce_min_max(float * d_log_min, float * d_log_max, float* d_in, float * d_intermin, float * d_intermax, size_t numRows, size_t numCols){
const int maxThreadsPerBlock = BLOCKSIZE;
int threads = maxThreadsPerBlock; // launch one thread for each block in prev step
int blocks = numRows*numCols / maxThreadsPerBlock;
shmem_reduce_kernel_minmax<<<blocks, threads, 2 * threads * sizeof(float)>>>(d_intermin, d_intermax, d_in, d_in, threads);
threads = blocks; // launch one thread for each block in prev step
blocks = 1;
shmem_reduce_kernel_minmax<<<blocks, 128, 2 * 128 * sizeof(float)>>>(d_log_min, d_log_max, d_intermin, d_intermax, threads);
}
__global__ void simple_histo(unsigned int *d_bins, float * d_log_min, float * d_log_max, float* d_in, size_t numBins, size_t numRows, size_t numCols)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId >= numRows*numCols)
return;
if (myId < numBins)
d_bins[myId] = 0;
__syncthreads();
float myBinF = (d_in[myId] - d_log_min[0]) / (d_log_max[0] - d_log_min[0]);
int myBin = myBinF * numBins;
if (myBin > (int)numBins - 1) myBin = (int)numBins - 1; // clamp the maximum value into the last bin, mirroring the std::min in the CPU path
atomicAdd(&(d_bins[myBin]), 1);
}
__global__ void scan(unsigned int *g_odata, unsigned int *g_idata, int n){
extern __shared__ unsigned int temp[]; // allocated on invocation
int thid = threadIdx.x;
int offset = 1;
temp[2*thid] = g_idata[2*thid]; // load input into shared memory
temp[2*thid+1] = g_idata[2*thid+1];
for (int d = n>>1; d > 0; d >>= 1){ // build sum in place up the tree
__syncthreads();
if (thid < d){
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element
for (int d = 1; d < n; d *= 2){ // traverse down tree & build scan
offset >>= 1;
__syncthreads();
if (thid < d){
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
unsigned int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[2*thid] = temp[2*thid]; // write results to device memory
g_odata[2*thid+1] = temp[2*thid+1];
}
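// Example (illustration only): an exclusive scan of {3, 1, 7, 0, 4, 1, 6, 3} yields
// {0, 3, 4, 11, 11, 15, 16, 22}; element i holds the sum of all elements before i,
// which is how histogram_and_prefixsum_gpu below turns the histogram into a CDF.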
void histogram_and_prefixsum_gpu(float* d_logLuminance, unsigned int * d_cdf, float * d_log_min, float * d_log_max, float * d_intermin, float * d_intermax, unsigned int * d_hist, size_t numRows, size_t numCols, size_t numBins){
reduce_min_max(d_log_min, d_log_max, d_logLuminance, d_intermin, d_intermax, numRows, numCols);
cudaDeviceSynchronize();
simple_histo<<<numRows*numCols/BLOCKSIZE,BLOCKSIZE>>>(d_hist, d_log_min, d_log_max, d_logLuminance, numBins, numRows, numCols);
cudaDeviceSynchronize();
scan<<<1,numBins/2,numBins*sizeof(unsigned int)>>>(d_cdf,d_hist,numBins);
cudaDeviceSynchronize();
}
/*
+++++++++++++++++++++normalize_cdf Function+++++++++++++++++++++
*/
void normalize_cdf(unsigned int* input_cdf, float* output_cdf, size_t n){
const float normalization_constant = 1.f / input_cdf[n - 1];
float tmp;
for (size_t i=0; i<n; i++) {
tmp = input_cdf[i]*normalization_constant;
(output_cdf)[i] = tmp;
}
}
/*
+++++++++++++++++++GPU normalize_cdf Function+++++++++++++++++++
*/
__global__ void normalize_cdf_gpu(unsigned int* d_input_cdf, float* d_output_cdf, int n){
const float normalization_constant = 1.f / d_input_cdf[n - 1];
int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x;
if ( global_index_1d < n )
{
unsigned int input_value = d_input_cdf[ global_index_1d ];
float output_value = input_value * normalization_constant;
d_output_cdf[ global_index_1d ] = output_value;
}
}
/*
+++++++++++++++++++++tonemap Function+++++++++++++++++++++
*/
void tonemap(float* x, float* y, float* log_Y, float* ncdf, float* r_new, float* g_new, float* b_new, float min_log_Y, float max_log_Y, size_t num_bins, size_t numRows, size_t numCols){
float log_Y_range = max_log_Y - min_log_Y;
float x_, y_, log_Y_;
unsigned int bin_index;
float X_new, Y_new, Z_new;
for (size_t i=0; i<numRows; i++) {
for (size_t j=0; j<numCols; j++) {
x_ = x[numCols*i+j];
y_ = y[numCols*i+j];
log_Y_ = log_Y[numCols*i+j];
bin_index = min((int)num_bins - 1, int((num_bins * (log_Y_ - min_log_Y)) / log_Y_range));
Y_new = ncdf[bin_index];
X_new = x_ * ( Y_new / y_ );
Z_new = (1 - x_ - y_) * (Y_new / y_);
(r_new)[numCols*i+j] = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f );
(g_new)[numCols*i+j] = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f );
(b_new)[numCols*i+j] = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f );
}
}
}
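// Note: the 3x3 coefficients used in tonemap/tonemap_gpu (3.2406f, -1.5372f, -0.4986f, ...)
// appear to be the standard linear XYZ -> sRGB (D65) matrix, i.e. the inverse of the
// RGB -> XYZ matrix applied in rgb_to_xyY above.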
/*
+++++++++++++++++++GPU tonemap Function+++++++++++++++++++
*/
__global__ void tonemap_gpu(float* d_x, float* d_y, float* d_log_Y, float* d_cdf_norm, float* d_r_new, float* d_g_new, float* d_b_new, float* d_log_min, float* d_log_max, int num_bins, int num_pixels_y, int num_pixels_x){
float log_Y_range = d_log_max[0] - d_log_min[0];
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny )
{
float x = d_x[ image_index_1d ];
float y = d_y[ image_index_1d ];
float log_Y = d_log_Y[ image_index_1d ];
int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - d_log_min[0] ) ) / log_Y_range ) );
float Y_new = d_cdf_norm[ bin_index ];
float X_new = x * ( Y_new / y );
float Z_new = ( 1 - x - y ) * ( Y_new / y );
float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f );
float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f );
float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f );
d_r_new[ image_index_1d ] = r_new;
d_g_new[ image_index_1d ] = g_new;
d_b_new[ image_index_1d ] = b_new;
}
}
/*
+++++++++++++++++++++CPU_histeq Function+++++++++++++++++++++
*/
void CPU_histeq(float * red, float * green, float * blue, float * r_new, float * g_new, float * b_new, float * x, float * y, float * luminance, unsigned int * cdf, float * ncdf, size_t numRows, size_t numCols, size_t numBins){
////////////////////////////////converting RGB to xyY
rgb_to_xyY(red, green, blue, x, y, luminance, 0.0001f, numRows, numCols);
//calculating histogram and CDF
float luminance_min, luminance_max;
histogram_and_prefixsum(luminance, cdf, numRows, numCols, numBins, &luminance_min, &luminance_max);
//normalizing CDF
normalize_cdf(cdf, ncdf, numBins);
//tone-mapping
tonemap(x, y, luminance, ncdf, r_new, g_new, b_new, luminance_min, luminance_max, numBins, numRows, numCols);
}
/*
+++++++++++++++++++++GPU_histeq Function+++++++++++++++++++++
*/
void GPU_histeq(float * d_red, float * d_green, float * d_blue, float * d_r_new, float * d_g_new, float * d_b_new, float * d_x, float * d_y, float * d_luminance, unsigned int * d_cdf, float * d_ncdf, float * d_log_min, float * d_log_max, float * d_intermin, float * d_intermax, unsigned int * d_hist, size_t numRows, size_t numCols, size_t numBins){
//convert from RGB space to chrominance/luminance space xyY
dim3 blockSize(32, 16, 1);
dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x, (numRows + blockSize.y - 1) / blockSize.y, 1);
rgb_to_xyY_gpu<<<gridSize, blockSize>>>(d_red, d_green, d_blue, d_x, d_y, d_luminance, 0.0001f, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
histogram_and_prefixsum_gpu(d_luminance, d_cdf, d_log_min, d_log_max, d_intermin, d_intermax, d_hist, numRows, numCols, numBins);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
normalize_cdf_gpu<<<(numBins + 192 - 1) / 192, 192>>>(d_cdf, d_ncdf, numBins);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
dim3 blockSize1(32, 16, 1);
dim3 gridSize1( (numCols + blockSize.x - 1) / blockSize.x, (numRows + blockSize.y - 1) / blockSize.y );
tonemap_gpu<<<gridSize1, blockSize1>>>(d_x, d_y, d_luminance, d_ncdf, d_r_new, d_g_new, d_b_new, d_log_min, d_log_max, numBins, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
/*
+++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++Main Function+++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++
*/
int main(int argc, const char * argv[]) {
std::string input_file;
std::string output_file_cpu;
std::string output_file_gpu;
if(argc == 2){
input_file = std::string(argv[1]);
}
else{
input_file = "input_file.png";
}
std::size_t idx = input_file.find_last_of("/");
if (idx == std::string::npos) {
output_file_cpu = "cpu_" + input_file;
output_file_gpu = "gpu_" + input_file;
}
else{
output_file_cpu = "cpu_" + input_file.substr(idx+1,input_file.size()-idx);
output_file_gpu = "gpu_" + input_file.substr(idx+1,input_file.size()-idx);
}
struct timeval td0, td1;
double t_cpu, t_gpu;
double t_copyin, t_copyout;
////////////////////////////////loading image
float *imgPtr;
size_t numRows, numCols;
loadImageHDR(input_file, &imgPtr, &numRows, &numCols);
////////////////////////////////separating RGB channels
size_t numPixels = numRows * numCols;
float *red = new float[numPixels];
float *green = new float[numPixels];
float *blue = new float[numPixels];
for (size_t i = 0; i < numPixels; ++i) {
blue[i] = imgPtr[3 * i + 0];
green[i] = imgPtr[3 * i + 1];
red[i] = imgPtr[3 * i + 2];
}
delete[] imgPtr;
/*
//////////////////////
///////////////////CPU
//////////////////////
*/
//image histogram equalization on CPU
float * r_new = new float[numPixels];
float * g_new = new float[numPixels];
float * b_new = new float[numPixels];
float * x = new float[numPixels];
float * y = new float[numPixels];
float * luminance = new float[numPixels];
size_t numBins = NUMBINS;
unsigned int * cdf = new unsigned int[numBins];
float * ncdf = new float[numBins];
gettimeofday (&td0, NULL);
CPU_histeq(red, green, blue, r_new, g_new, b_new, x, y, luminance, cdf, ncdf, numRows, numCols, numBins);
gettimeofday (&td1, NULL);
timeval_subtract (&t_cpu, &td1, &td0);
delete[] x;
delete[] y;
delete[] luminance;
delete[] cdf;
delete[] ncdf;
/*
//////////////////////
///////////////////GPU
//////////////////////
*/
//make sure the context initializes ok
checkCudaErrors(cudaFree(0));
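// (any runtime API call, even cudaFree(0), forces lazy context creation here so it is not charged to the timed sections below)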
float * d_red, * d_green, * d_blue;
float * d_x, * d_y, * d_luminance;
unsigned int * d_cdf;
float * d_log_min, * d_log_max;
float * d_ncdf;
float * d_r_new, * d_g_new, * d_b_new;
float * d_intermin, * d_intermax;
unsigned int * d_hist;
checkCudaErrors(cudaMalloc(&d_red, sizeof(float)*numPixels));
checkCudaErrors(cudaMalloc(&d_green, sizeof(float)*numPixels));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(float)*numPixels));
checkCudaErrors(cudaMalloc(&d_x, sizeof(float)*numPixels));
checkCudaErrors(cudaMalloc(&d_y, sizeof(float)*numPixels));
checkCudaErrors(cudaMalloc(&d_luminance, sizeof(float)*numPixels));
checkCudaErrors(cudaMalloc(&d_log_min, sizeof(float)));
checkCudaErrors(cudaMalloc(&d_log_max, sizeof(float)));
checkCudaErrors(cudaMalloc(&d_cdf, sizeof(unsigned int)*numBins));
checkCudaErrors(cudaMalloc(&d_ncdf, sizeof(float)*numBins));
checkCudaErrors(cudaMalloc(&d_r_new, sizeof(float)*numPixels));
checkCudaErrors(cudaMalloc(&d_g_new, sizeof(float)*numPixels));
checkCudaErrors(cudaMalloc(&d_b_new, sizeof(float)*numPixels));
checkCudaErrors(cudaMalloc(&d_intermin, sizeof(float)*numRows*numCols));
checkCudaErrors(cudaMalloc(&d_intermax, sizeof(float)*numRows*numCols));
checkCudaErrors(cudaMalloc(&d_hist, sizeof(unsigned int)*numBins));
gettimeofday (&td0, NULL);
checkCudaErrors(cudaMemcpy(d_red, red, sizeof(float)*numPixels, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_green, green, sizeof(float)*numPixels, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_blue, blue, sizeof(float)*numPixels, cudaMemcpyHostToDevice));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gettimeofday (&td1, NULL);
timeval_subtract (&t_copyin, &td1, &td0);
gettimeofday (&td0, NULL);
GPU_histeq(d_red, d_green, d_blue, d_r_new, d_g_new, d_b_new, d_x, d_y, d_luminance, d_cdf, d_ncdf, d_log_min, d_log_max, d_intermin, d_intermax, d_hist, numRows, numCols, numBins);
gettimeofday (&td1, NULL);
timeval_subtract (&t_gpu, &td1, &td0);
float * h_red = new float[numPixels];
float * h_green = new float[numPixels];
float * h_blue = new float[numPixels];
gettimeofday (&td0, NULL);
checkCudaErrors(cudaMemcpy(h_red, d_r_new, sizeof(float) * numPixels, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_green, d_g_new, sizeof(float) * numPixels, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_blue, d_b_new, sizeof(float) * numPixels, cudaMemcpyDeviceToHost));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gettimeofday (&td1, NULL);
timeval_subtract (&t_copyout, &td1, &td0);
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_x));
checkCudaErrors(cudaFree(d_y));
checkCudaErrors(cudaFree(d_luminance));
checkCudaErrors(cudaFree(d_log_min));
checkCudaErrors(cudaFree(d_log_max));
checkCudaErrors(cudaFree(d_cdf));
checkCudaErrors(cudaFree(d_ncdf));
checkCudaErrors(cudaFree(d_r_new));
checkCudaErrors(cudaFree(d_g_new));
checkCudaErrors(cudaFree(d_b_new));
checkCudaErrors(cudaFree(d_intermin));
checkCudaErrors(cudaFree(d_intermax));
checkCudaErrors(cudaFree(d_hist));
//recombine the image channels
float *imageHDR = new float[numPixels * 3];
for (size_t i = 0; i < numPixels; ++i) {
imageHDR[3 * i + 0] = b_new[i];
imageHDR[3 * i + 1] = g_new[i];
imageHDR[3 * i + 2] = r_new[i];
}
//saving image
saveImageHDR(imageHDR, numRows, numCols, output_file_cpu);
for (size_t i = 0; i < numPixels; ++i) {
imageHDR[3 * i + 0] = h_blue[i];
imageHDR[3 * i + 1] = h_green[i];
imageHDR[3 * i + 2] = h_red[i];
}
//saving image
saveImageHDR(imageHDR, numRows, numCols, output_file_gpu);
printf("CPU runtime: %f ms\n",t_cpu);
printf("GPU runtime: %f ms\n",t_gpu);
printf("Copying data into the GPU: %f ms\n", t_copyin);
printf("Copying data form the GPU: %f ms\n", t_copyout);
printf("GPU runtime + data transfer: %f ms\n", t_copyin+t_gpu+t_copyout);
printf("Image dimension: %dx%d = %d pixels\n", (int)numRows, (int)numCols, (int)numPixels);
compareImages(output_file_cpu, output_file_gpu);
delete[] red;
delete[] green;
delete[] blue;
delete[] r_new;
delete[] g_new;
delete[] b_new;
delete[] imageHDR;
delete[] h_red;
delete[] h_green;
delete[] h_blue;
} |
6f6e930a0fce164742a42476ab847e8d8152ebe2.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include<opencv2/opencv.hpp>
#include <iostream>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include "GpuTimer.h"
#include<time.h>
using namespace cv;
using namespace std;
//In OpenCV the image is read in BGR format, that is for each pixel, the Blue, Green, then Red components are read from the image file.
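// For example, in a packed 8-bit 3-channel image, pixel i occupies bytes
// data[3*i + 0] = B, data[3*i + 1] = G, data[3*i + 2] = R, which is the indexing used below.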
// Serial implementation for running on CPU using a single thread.
void rgbaToGrayscaleCpu(unsigned char* rgbImage, unsigned char* grayImage, int numRows, int numCols, int Channels)
{
//@@ Insert your code here
for (int i = 0; i < numRows; i++) {
for (int j = 0; j < numCols; j++) {
int grayOffset = numCols*i + j;
int rgbOffset = grayOffset * 3;
unsigned char b = rgbImage[rgbOffset];
unsigned char g = rgbImage[rgbOffset + 1];
unsigned char r = rgbImage[rgbOffset + 2];
grayImage[grayOffset] = 0.21f*r + 0.71f*g + 0.07f*b;
}
}
}
// we have 3 channels corresponding to B, G, and R components of each pixel
// The input image is encoded as unsigned characters [0, 255]
__global__ void colorToGrayscaleConversion(unsigned char * Pout, unsigned char * Pin, int width, int height, int numChannels)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < width && y < height) {
int grayOffset = y*width + x;
int rgbOffset = grayOffset * 3;
unsigned char b = Pin[rgbOffset];
unsigned char g = Pin[rgbOffset + 1];
unsigned char r = Pin[rgbOffset + 2];
Pout[grayOffset] = 0.21f*r + 0.71f*g + 0.07f*b;
}
}
int main(void)
{
hipError_t err = hipSuccess;
//Read the image using OpenCV
Mat image; //Create matrix to read image
image = imread("Scene.jpeg", IMREAD_COLOR);
if (image.empty()) {
printf("Cannot read image file %s", "Scene.jpeg");
exit(1);
}
int imageChannels = 3;
int imageWidth = image.cols;
int imageHeight = image.rows;
//Allocate the host image vectors
unsigned char *h_rgbImage = (unsigned char *)malloc(sizeof(unsigned char)*imageWidth*imageHeight*imageChannels);
unsigned char *h_grayImage = (unsigned char *)malloc(sizeof(unsigned char)*imageWidth*imageHeight);
unsigned char *h_grayImage_CPU = (unsigned char *)malloc(sizeof(unsigned char)*imageWidth*imageHeight);
h_rgbImage = image.data; //The data member of a Mat object returns the pointer to the first row, first column of the image.
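// Note: the assignment above points h_rgbImage at OpenCV's internal buffer, so the block
// malloc'd for h_rgbImage a few lines earlier is no longer reachable (a small memory leak).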
//try image.ptr()
//Allocate memory on the device for the rgb image and the grayscale image and record the needed time
unsigned char *d_rgbImage = NULL;
unsigned char *d_grayImage = NULL;
float imageSize = imageHeight * imageWidth * imageChannels;
GpuTimer timer;
timer.Start();
//@@ Insert Your code Here to allocate memory on the device for color and gray images
err = hipMalloc((void **)&d_rgbImage, imageSize);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device rgbImage (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_grayImage, imageSize / 3);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device grayImage (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
timer.Stop();
printf("Time to allocate memory on the device is: %f msecs.\n", timer.Elapsed());
//Copy the rgb image from the host to the device and record the needed time
GpuTimer timer1;
timer1.Start();
//@@ Insert your code here to Copy the rgb image from the host to the device
hipMemcpy(d_rgbImage, h_rgbImage, imageSize, hipMemcpyHostToDevice);
timer1.Stop();
printf("Time to copy the RGB image from the host to the device is: %f msecs.\n", timer1.Elapsed());
//Do the Processing on the GPU
//Kernel Execution Configuration Parameters
dim3 dimBlock(16, 16, 1);
//@@ Insert Your code Here for grid dimensions
dim3 gridDim((imageWidth - 1) / 16 + 1, (imageHeight - 1) / 16 + 1, 1);
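// The (n - 1)/16 + 1 form rounds up; e.g. a hypothetical 1000x800 image gives gridDim = (63, 50),
// and the extra threads in the final blocks are masked off by the kernel's bounds check.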
//Invoke the colorToGrayscaleConversion kernel and record the needed time for its execution
//GpuTimer timer;
GpuTimer timer2;
timer2.Start();
//@@ Insert your code here for kernel invocation
colorToGrayscaleConversion << < gridDim, dimBlock >> > (d_grayImage, d_rgbImage, imageWidth, imageHeight, imageChannels);
timer2.Stop();
printf("Implemented CUDA code ran in: %f msecs.\n", timer2.Elapsed());
//Copy resulting gray image from device to host and record the needed time
GpuTimer timer3;
timer3.Start();
//@@ Insert your code here to Copy resulting gray image from device to host
float imageSize_gray = imageHeight * imageWidth;
hipMemcpy(h_grayImage, d_grayImage, imageSize_gray, hipMemcpyDeviceToHost);
timer3.Stop();
printf("Time to copy the Gray image from the device to the host is: %f msecs.\n", timer3.Elapsed());
//Do the Processing on the CPU
clock_t begin = clock();
//@@ Insert your code here to call the cpu function for colortograyscale conversion on the CPU
rgbaToGrayscaleCpu(h_rgbImage, h_grayImage_CPU, imageWidth, imageHeight, imageChannels);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC * 1000;
printf("Implemented CPU code ran in: %f msecs.\n", time_spent);
//Postprocess and Display the resulting images using OpenCV
Mat Image1(imageHeight, imageWidth, CV_8UC1, h_grayImage); //grayscale image mat object
Mat Image2(imageHeight, imageWidth, CV_8UC1, h_grayImage_CPU); //grayscale image mat object
namedWindow("CPUImage", WINDOW_NORMAL); //Create window to display the image
namedWindow("GPUImage", WINDOW_NORMAL);
imshow("GPUImage", Image1);
imshow("CPUImage", Image2); //Display the image in the window
waitKey(0); //Wait till you press a key
//Free host memory
image.release();
Image1.release();
Image2.release();
free(h_grayImage);
free(h_grayImage_CPU);
//Free device memory
//@@ Insert your code here to free device memory
hipFree(d_grayImage);
hipFree(d_rgbImage);
return 0;
} | 6f6e930a0fce164742a42476ab847e8d8152ebe2.cu | #include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include<opencv2/opencv.hpp>
#include <iostream>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "GpuTimer.h"
#include<time.h>
using namespace cv;
using namespace std;
//In OpenCV the image is read in BGR format, that is for each pixel, the Blue, Green, then Red components are read from the image file.
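// For example, in a packed 8-bit 3-channel image, pixel i occupies bytes
// data[3*i + 0] = B, data[3*i + 1] = G, data[3*i + 2] = R, which is the indexing used below.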
// Serial implementation for running on CPU using a single thread.
void rgbaToGrayscaleCpu(unsigned char* rgbImage, unsigned char* grayImage, int numRows, int numCols, int Channels)
{
//@@ Insert your code here
for (int i = 0; i < numRows; i++) {
for (int j = 0; j < numCols; j++) {
int grayOffset = numCols*i + j;
int rgbOffset = grayOffset * 3;
unsigned char b = rgbImage[rgbOffset];
unsigned char g = rgbImage[rgbOffset + 1];
unsigned char r = rgbImage[rgbOffset + 2];
grayImage[grayOffset] = 0.21f*r + 0.71f*g + 0.07f*b;
}
}
}
// we have 3 channels corresponding to B, G, and R components of each pixel
// The input image is encoded as unsigned characters [0, 255]
__global__ void colorToGrayscaleConversion(unsigned char * Pout, unsigned char * Pin, int width, int height, int numChannels)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < width && y < height) {
int grayOffset = y*width + x;
int rgbOffset = grayOffset * 3;
unsigned char b = Pin[rgbOffset];
unsigned char g = Pin[rgbOffset + 1];
unsigned char r = Pin[rgbOffset + 2];
Pout[grayOffset] = 0.21f*r + 0.71f*g + 0.07f*b;
}
}
int main(void)
{
cudaError_t err = cudaSuccess;
//Read the image using OpenCV
Mat image; //Create matrix to read image
image = imread("Scene.jpeg", IMREAD_COLOR);
if (image.empty()) {
printf("Cannot read image file %s", "Scene.jpeg");
exit(1);
}
int imageChannels = 3;
int imageWidth = image.cols;
int imageHeight = image.rows;
//Allocate the host image vectors
unsigned char *h_rgbImage = (unsigned char *)malloc(sizeof(unsigned char)*imageWidth*imageHeight*imageChannels);
unsigned char *h_grayImage = (unsigned char *)malloc(sizeof(unsigned char)*imageWidth*imageHeight);
unsigned char *h_grayImage_CPU = (unsigned char *)malloc(sizeof(unsigned char)*imageWidth*imageHeight);
h_rgbImage = image.data; //The data member of a Mat object returns the pointer to the first row, first column of the image.
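// Note: the assignment above points h_rgbImage at OpenCV's internal buffer, so the block
// malloc'd for h_rgbImage a few lines earlier is no longer reachable (a small memory leak).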
//try image.ptr()
//Allocate memory on the device for the rgb image and the grayscale image and record the needed time
unsigned char *d_rgbImage = NULL;
unsigned char *d_grayImage = NULL;
float imageSize = imageHeight * imageWidth * imageChannels;
GpuTimer timer;
timer.Start();
//@@ Insert Your code Here to allocate memory on the device for color and gray images
err = cudaMalloc((void **)&d_rgbImage, imageSize);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device rgbImage (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_grayImage, imageSize / 3);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device grayImage (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
timer.Stop();
printf("Time to allocate memory on the device is: %f msecs.\n", timer.Elapsed());
//Copy the rgb image from the host to the device and record the needed time
GpuTimer timer1;
timer1.Start();
//@@ Insert your code here to Copy the rgb image from the host to the device
cudaMemcpy(d_rgbImage, h_rgbImage, imageSize, cudaMemcpyHostToDevice);
timer1.Stop();
printf("Time to copy the RGB image from the host to the device is: %f msecs.\n", timer1.Elapsed());
//Do the Processing on the GPU
//Kernel Execution Configuration Parameters
dim3 dimBlock(16, 16, 1);
//@@ Insert Your code Here for grid dimensions
dim3 gridDim((imageWidth - 1) / 16 + 1, (imageHeight - 1) / 16 + 1, 1);
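// The (n - 1)/16 + 1 form rounds up; e.g. a hypothetical 1000x800 image gives gridDim = (63, 50),
// and the extra threads in the final blocks are masked off by the kernel's bounds check.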
//Invoke the colorToGrayscaleConversion kernel and record the needed time for its execution
//GpuTimer timer;
GpuTimer timer2;
timer2.Start();
//@@ Insert your code here for kernel invocation
colorToGrayscaleConversion << < gridDim, dimBlock >> > (d_grayImage, d_rgbImage, imageWidth, imageHeight, imageChannels);
timer2.Stop();
printf("Implemented CUDA code ran in: %f msecs.\n", timer2.Elapsed());
//Copy resulting gray image from device to host and record the needed time
GpuTimer timer3;
timer3.Start();
//@@ Insert your code here to Copy resulting gray image from device to host
float imageSize_gray = imageHeight * imageWidth;
cudaMemcpy(h_grayImage, d_grayImage, imageSize_gray, cudaMemcpyDeviceToHost);
timer3.Stop();
printf("Time to copy the Gray image from the device to the host is: %f msecs.\n", timer3.Elapsed());
//Do the Processing on the CPU
clock_t begin = clock();
//@@ Insert your code here to call the cpu function for colortograyscale conversion on the CPU
rgbaToGrayscaleCpu(h_rgbImage, h_grayImage_CPU, imageWidth, imageHeight, imageChannels);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC * 1000;
printf("Implemented CPU code ran in: %f msecs.\n", time_spent);
//Postprocess and Display the resulting images using OpenCV
Mat Image1(imageHeight, imageWidth, CV_8UC1, h_grayImage); //grayscale image mat object
Mat Image2(imageHeight, imageWidth, CV_8UC1, h_grayImage_CPU); //grayscale image mat object
namedWindow("CPUImage", WINDOW_NORMAL); //Create window to display the image
namedWindow("GPUImage", WINDOW_NORMAL);
imshow("GPUImage", Image1);
imshow("CPUImage", Image2); //Display the image in the window
waitKey(0); //Wait till you press a key
//Free host memory
image.release();
Image1.release();
Image2.release();
free(h_grayImage);
free(h_grayImage_CPU);
//Free device memory
//@@ Insert your code here to free device memory
cudaFree(d_grayImage);
cudaFree(d_rgbImage);
return 0;
} |
51a7a481760c24857cf848d09b0d887c0cd95839.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 64
#define TPB 32
__device__ float scale(int i, int n) {
return ((float)i / (n - 1));
}
__device__ float distance(float x1, float x2) {
return sqrt((x2 - x1) * (x2 - x1));
}
__global__ void distanceKernel(float* d_out, float ref, int len) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const float x = scale(i, len);
d_out[i] = distance(x, ref);
printf("i = %2d: dist from %f to ref is %f\n", i, x, d_out[i]);
}
int main() {
const float ref = 0.5;
float* d_out = NULL;
hipMalloc(&d_out, N * sizeof(float));
hipLaunchKernelGGL(( distanceKernel), dim3(N/TPB), dim3(TPB), 0, 0, d_out, ref, N);
hipFree(d_out);
return 0;
}
| 51a7a481760c24857cf848d09b0d887c0cd95839.cu | #include <stdio.h>
#define N 64
#define TPB 32
__device__ float scale(int i, int n) {
return ((float)i / (n - 1));
}
__device__ float distance(float x1, float x2) {
return sqrt((x2 - x1) * (x2 - x1));
}
__global__ void distanceKernel(float* d_out, float ref, int len) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const float x = scale(i, len);
d_out[i] = distance(x, ref);
printf("i = %2d: dist from %f to ref is %f\n", i, x, d_out[i]);
}
int main() {
const float ref = 0.5;
float* d_out = NULL;
cudaMalloc(&d_out, N * sizeof(float));
distanceKernel<<<N/TPB, TPB>>>(d_out, ref, N);
cudaFree(d_out);
return 0;
}
|
151c2e96ab52fa439b2468231c7f23efa59bdb82.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/cudart_utils.h>
#include <gtest/gtest.h>
#include <raft/linalg/transpose.h>
#include <test_utils.h>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/datasets/make_regression.hpp>
#include <cuml/ensemble/randomforest.hpp>
#include <metrics/scores.cuh>
namespace ML {
using namespace MLCommon;
struct RfInputs {
int n_rows;
int n_cols;
int n_trees;
float max_features;
float max_samples;
int max_depth;
int max_leaves;
bool bootstrap;
bool bootstrap_features;
int n_bins;
int split_algo;
int min_samples_leaf;
int min_samples_split;
float min_impurity_decrease;
int n_streams;
CRITERION split_criterion;
float min_expected_acc;
};
template <typename T>
class RFBatchedRegTest : public ::testing::TestWithParam<RfInputs> {
protected:
void basicTest() {
params = ::testing::TestWithParam<RfInputs>::GetParam();
RF_params rf_params;
rf_params = set_rf_params(
params.max_depth, params.max_leaves, params.max_features, params.n_bins,
params.split_algo, params.min_samples_leaf, params.min_samples_split,
params.min_impurity_decrease, params.bootstrap_features, params.bootstrap,
params.n_trees, params.max_samples, 0, params.split_criterion, false,
params.n_streams, true, 128);
CUDA_CHECK(hipStreamCreate(&stream));
handle.reset(new raft::handle_t(rf_params.n_streams));
handle->set_stream(stream);
auto allocator = handle->get_device_allocator();
int data_len = params.n_rows * params.n_cols;
data = (T *)allocator->allocate(data_len * sizeof(T), stream);
data_row_major = (T *)allocator->allocate(data_len * sizeof(T), stream);
labels = (T *)allocator->allocate(params.n_rows * sizeof(T), stream);
predicted_labels =
(T *)allocator->allocate(params.n_rows * sizeof(T), stream);
Datasets::make_regression(*handle, data_row_major, labels, params.n_rows,
params.n_cols, params.n_cols, nullptr, 1, 0.0f,
-1, 0.0, 0.0f, false, 3536699ULL);
hipblasHandle_t cublas_h = handle->get_cublas_handle();
raft::linalg::transpose(*handle, data_row_major, data, params.n_cols,
params.n_rows, stream);
// Training part
forest = new typename ML::RandomForestMetaData<T, T>;
null_trees_ptr(forest);
fit(*handle, forest, data, params.n_rows, params.n_cols, labels, rf_params);
// predict function expects row major lay out of data, so we need to
// transpose the data first
predict(*handle, forest, data_row_major, params.n_rows, params.n_cols,
predicted_labels);
accuracy = Score::r2_score(predicted_labels, labels, params.n_rows, stream);
}
void SetUp() override { basicTest(); }
void TearDown() override {
auto allocator = handle->get_device_allocator();
allocator->deallocate(data, params.n_rows * params.n_cols * sizeof(T),
stream);
allocator->deallocate(data_row_major,
params.n_rows * params.n_cols * sizeof(T), stream);
allocator->deallocate(labels, params.n_rows * sizeof(T), stream);
allocator->deallocate(predicted_labels, params.n_rows * sizeof(T), stream);
delete forest;
handle.reset();
}
protected:
std::shared_ptr<raft::handle_t> handle;
hipStream_t stream;
RfInputs params;
RandomForestMetaData<T, T> *forest;
float accuracy = -1.0f; // overridden in each test SetUp and TearDown
T *data, *data_row_major;
T *labels, *predicted_labels;
};
//-------------------------------------------------------------------------------------------------------------------------------------
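// Note: each initializer row below fills RfInputs in declaration order:
// {n_rows, n_cols, n_trees, max_features, max_samples, max_depth, max_leaves, bootstrap,
//  bootstrap_features, n_bins, split_algo, min_samples_leaf, min_samples_split,
//  min_impurity_decrease, n_streams, split_criterion, min_expected_acc}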
const std::vector<RfInputs> inputs = {
// Small datasets to repro corner cases as in #3107 (test for crash)
{100, 29, 1, 1.0f, 1.0f, 2, -1, false, false, 16, SPLIT_ALGO::GLOBAL_QUANTILE,
2, 2, 0.0, 2, CRITERION::MAE, -10.0},
{100, 57, 2, 1.0f, 1.0f, 2, -1, false, false, 16, SPLIT_ALGO::GLOBAL_QUANTILE,
2, 2, 0.0, 2, CRITERION::MAE, -10.0},
{101, 57, 2, 1.0f, 1.0f, 2, -1, false, false, 13, SPLIT_ALGO::GLOBAL_QUANTILE,
2, 2, 0.0, 2, CRITERION::MSE, -10.0},
{100, 1, 2, 1.0f, 1.0f, 2, -1, false, false, 13, SPLIT_ALGO::GLOBAL_QUANTILE,
2, 2, 0.0, 2, CRITERION::MAE, -10.0},
// Larger datasets for accuracy
{1000, 10, 10, 1.0f, 1.0f, 12, -1, true, false, 10,
SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::MAE, 0.7f},
{2000, 20, 20, 1.0f, 0.6f, 13, -1, true, false, 10,
SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::MSE, 0.68f}};
typedef RFBatchedRegTest<float> RFBatchedRegTestF;
TEST_P(RFBatchedRegTestF, Fit) { ASSERT_GT(accuracy, params.min_expected_acc); }
INSTANTIATE_TEST_CASE_P(RFBatchedRegTests, RFBatchedRegTestF,
::testing::ValuesIn(inputs));
typedef RFBatchedRegTest<double> RFBatchedRegTestD;
TEST_P(RFBatchedRegTestD, Fit) { ASSERT_GT(accuracy, params.min_expected_acc); }
INSTANTIATE_TEST_CASE_P(RFBatchedRegTests, RFBatchedRegTestD,
::testing::ValuesIn(inputs));
} // end namespace ML
| 151c2e96ab52fa439b2468231c7f23efa59bdb82.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/cudart_utils.h>
#include <gtest/gtest.h>
#include <raft/linalg/transpose.h>
#include <test_utils.h>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/datasets/make_regression.hpp>
#include <cuml/ensemble/randomforest.hpp>
#include <metrics/scores.cuh>
namespace ML {
using namespace MLCommon;
struct RfInputs {
int n_rows;
int n_cols;
int n_trees;
float max_features;
float max_samples;
int max_depth;
int max_leaves;
bool bootstrap;
bool bootstrap_features;
int n_bins;
int split_algo;
int min_samples_leaf;
int min_samples_split;
float min_impurity_decrease;
int n_streams;
CRITERION split_criterion;
float min_expected_acc;
};
template <typename T>
class RFBatchedRegTest : public ::testing::TestWithParam<RfInputs> {
protected:
void basicTest() {
params = ::testing::TestWithParam<RfInputs>::GetParam();
RF_params rf_params;
rf_params = set_rf_params(
params.max_depth, params.max_leaves, params.max_features, params.n_bins,
params.split_algo, params.min_samples_leaf, params.min_samples_split,
params.min_impurity_decrease, params.bootstrap_features, params.bootstrap,
params.n_trees, params.max_samples, 0, params.split_criterion, false,
params.n_streams, true, 128);
CUDA_CHECK(cudaStreamCreate(&stream));
handle.reset(new raft::handle_t(rf_params.n_streams));
handle->set_stream(stream);
auto allocator = handle->get_device_allocator();
int data_len = params.n_rows * params.n_cols;
data = (T *)allocator->allocate(data_len * sizeof(T), stream);
data_row_major = (T *)allocator->allocate(data_len * sizeof(T), stream);
labels = (T *)allocator->allocate(params.n_rows * sizeof(T), stream);
predicted_labels =
(T *)allocator->allocate(params.n_rows * sizeof(T), stream);
Datasets::make_regression(*handle, data_row_major, labels, params.n_rows,
params.n_cols, params.n_cols, nullptr, 1, 0.0f,
-1, 0.0, 0.0f, false, 3536699ULL);
cublasHandle_t cublas_h = handle->get_cublas_handle();
raft::linalg::transpose(*handle, data_row_major, data, params.n_cols,
params.n_rows, stream);
// Training part
forest = new typename ML::RandomForestMetaData<T, T>;
null_trees_ptr(forest);
fit(*handle, forest, data, params.n_rows, params.n_cols, labels, rf_params);
// predict function expects row major lay out of data, so we need to
// transpose the data first
predict(*handle, forest, data_row_major, params.n_rows, params.n_cols,
predicted_labels);
accuracy = Score::r2_score(predicted_labels, labels, params.n_rows, stream);
}
void SetUp() override { basicTest(); }
void TearDown() override {
auto allocator = handle->get_device_allocator();
allocator->deallocate(data, params.n_rows * params.n_cols * sizeof(T),
stream);
allocator->deallocate(data_row_major,
params.n_rows * params.n_cols * sizeof(T), stream);
allocator->deallocate(labels, params.n_rows * sizeof(T), stream);
allocator->deallocate(predicted_labels, params.n_rows * sizeof(T), stream);
delete forest;
handle.reset();
}
protected:
std::shared_ptr<raft::handle_t> handle;
cudaStream_t stream;
RfInputs params;
RandomForestMetaData<T, T> *forest;
float accuracy = -1.0f; // overridden in each test SetUp and TearDown
T *data, *data_row_major;
T *labels, *predicted_labels;
};
//-------------------------------------------------------------------------------------------------------------------------------------
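// Note: each initializer row below fills RfInputs in declaration order:
// {n_rows, n_cols, n_trees, max_features, max_samples, max_depth, max_leaves, bootstrap,
//  bootstrap_features, n_bins, split_algo, min_samples_leaf, min_samples_split,
//  min_impurity_decrease, n_streams, split_criterion, min_expected_acc}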
const std::vector<RfInputs> inputs = {
// Small datasets to repro corner cases as in #3107 (test for crash)
{100, 29, 1, 1.0f, 1.0f, 2, -1, false, false, 16, SPLIT_ALGO::GLOBAL_QUANTILE,
2, 2, 0.0, 2, CRITERION::MAE, -10.0},
{100, 57, 2, 1.0f, 1.0f, 2, -1, false, false, 16, SPLIT_ALGO::GLOBAL_QUANTILE,
2, 2, 0.0, 2, CRITERION::MAE, -10.0},
{101, 57, 2, 1.0f, 1.0f, 2, -1, false, false, 13, SPLIT_ALGO::GLOBAL_QUANTILE,
2, 2, 0.0, 2, CRITERION::MSE, -10.0},
{100, 1, 2, 1.0f, 1.0f, 2, -1, false, false, 13, SPLIT_ALGO::GLOBAL_QUANTILE,
2, 2, 0.0, 2, CRITERION::MAE, -10.0},
// Larger datasets for accuracy
{1000, 10, 10, 1.0f, 1.0f, 12, -1, true, false, 10,
SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::MAE, 0.7f},
{2000, 20, 20, 1.0f, 0.6f, 13, -1, true, false, 10,
SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::MSE, 0.68f}};
typedef RFBatchedRegTest<float> RFBatchedRegTestF;
TEST_P(RFBatchedRegTestF, Fit) { ASSERT_GT(accuracy, params.min_expected_acc); }
INSTANTIATE_TEST_CASE_P(RFBatchedRegTests, RFBatchedRegTestF,
::testing::ValuesIn(inputs));
typedef RFBatchedRegTest<double> RFBatchedRegTestD;
TEST_P(RFBatchedRegTestD, Fit) { ASSERT_GT(accuracy, params.min_expected_acc); }
INSTANTIATE_TEST_CASE_P(RFBatchedRegTests, RFBatchedRegTestD,
::testing::ValuesIn(inputs));
} // end namespace ML
|
790924e5cf44b4be8c3e4c1ae5be01dd558aaac7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Author: W. Evan Durno
* Written April 2016
* All rights reserved
*/
#include <stdio.h>
#include <float.h>
////////////////////////////////////////////////////////////////////////////////////// external facing headers
extern "C" {
// GPU-accelerated multivariate regression with a factor model and floor student-t marginals via gaussian copula
// all matrices are stored in column-major order
// y : n X p matrix, counts data to be regressed
// x : n X q matrix, design matrix
// t : tn X K matrix, parameters, each column is a different parameter
// specifically, the order of parameters in one t column is beta_{q X p} , lsig_{p} , lnu_{p} , l_{p X m} , lpsi_{p} , always column-major order
// m : the number of factors
// p is assumed > 32, otherwise use of this software is not motivated
// returns out : K-length vector of sample likelihoods
void log_lik_cu ( float *y , float *x , float *t , int n , int p , int q , int tn , int K , int m , int n_iter , int seed , float *out ) ;
} // end extern "C"
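// Illustrative sketch only (hypothetical helper, not part of the original interface):
// the packing described above implies tn == q*p + 3*p + p*m, with the pieces of the
// k-th parameter column of t sitting at the offsets below.
#if 0
static void sketch_param_layout ( const float *t , int k , int p , int q , int m , int tn )
{
	const float *col  = t + (size_t) k * tn ; // start of the k-th parameter column
	const float *beta = col ;                 // q X p regression coefficients
	const float *lsig = beta + q * p ;        // p log scale parameters
	const float *lnu  = lsig + p ;            // p log degrees-of-freedom
	const float *l    = lnu + p ;             // p X m factor loadings
	const float *lpsi = l + p * m ;           // p log specific variances
	(void) lpsi ;
}
#endif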
////////////////////////////////////////////////////////////////////////////////////// local headers
// stores device locations of major parameter components for the first parameter t
// later elements can be accessed via column shifts
struct params
{
float *beta ;
float *lsig ;
float *lnu ;
float *l ;
float *lpsi ;
};
// Extracts parameter elements from a single parameter vector
void construct_params ( float *t , struct params *a , int p , int q , int m ) ;
// NOT USED
// constructs I + L' inv(Psi) L
// out : m X m X K matrix
// threads : m X m X K
__global__ void f2_make_inner_sig ( float *l , float *psi , float *out , int p , int m , int K , int tn ) ;
// constructs F_M( y )
// mode : in { 1 , 2 , 3 } , indicating different parts of the argument domain
// plus : an amount to add to y, either 0 or 1
// out : n X p X K matrix
// threads : n X p X K
__global__ void f3_marginal_cdf ( float *y , float *x , float *beta , float *lsig , float *lnu , float *out , int n , int p , int q , int K , int tn , int mode , float plus ) ;
// constructs diag( L L' + Psi )
// out : p X K matrix
// threads : p X K
__global__ void f4_diag_sig ( float *l , float *psi , float *out , int p , int m , int K , int tn ) ;
// NOT USED
// constructs log det( L L' + Psi )
// out : K-length vector of matrix determinants
__global__ void f5_ldet ( float *lpsi , float **ri , float *out , int m , int p , int K , int tn ) ;
// NOT USED
// constructs inv( I + L' inv(Psi) L )
// in : product of f2
// w : 4 X m X m X K matrix, working space
// out : m X m X K matrix in out , but also m X m X K matrix in r
// threads : K
__global__ void f6_inv_inner_sig ( float *f2 , float *out , int m , int K , float *w , float **r ) ;
// constructs log f_M(y)
// in : f3( y ) , f3( y+1 )
// out : n X p X K matrix
// threads : n X p X K
__global__ void f7_lmarginal_pdf ( float *f3 , float *f31 , float *out , int n , int p , int K ) ;
// NOT USED
// constructs inv(Psi) - inv(Psi) L inv( I + L' inv(Psi) L ) L' inv(Psi) = inv(Sigma)
// in : product of f6
// out : p X p X K matrix
// threads : p X p X K
__global__ void f8_inv_sig ( float *f6 , float *l , float *lpsi , float *out , int p , int m , int K , int tn ) ;
// constructs Sum_j log f_M( y_{ij} )
// in : product of f7
// out : n X K matrix
// threads n X K
__global__ void f9_sum_lpdfs ( float *f7 , float *out , int n , int p , int K ) ;
// constructs F_N^{-1}( F_M( y+1 ) )
// in : f3( y ) and product of f4
// out : n X p X K matrix
// threads : n X p X K
__global__ void f10_F_N_inv ( float *f3 , float *f4 , float *out , int n , int p , int K ) ;
// constructs log f_{N_p} ( f10 )
// out : n X K matrix
// threads : n X K
__global__ void f11_lpmnorm ( float *f10 , float *f101 , float *l , float *lpsi , float *out , int n , int p , int m , int K , size_t seed , int n_iter , float *w ) ;
// constructs log_lik( t ; y , x )
// out : K-length vector
// threads : K
__global__ void f12_sum_log_likes ( float *f11 , float *f9 , float *out , int n , int K ) ;
////////////////////////////////////////////////////////////////////////////////////// host implementations
void log_lik_cu ( float *y , float *x , float *t , int n , int p , int q , int tn , int K , int m , int n_iter , int seed , float *out )
{
size_t n_cores = 32 ;
if( n_cores * 65536 < max( p*p*K , n*p*K ) )
n_cores = max( p*p*K , n*p*K )/65536 + 1 ;
if( n_cores >= 1024 )
n_cores = 1023 ;
if( n_cores * 65536 < max( p*p*K , n*p*K ) )
fprintf( stderr , "CUDA WARNING: insufficient threads!\n" ) ;
hipError_t status = hipSuccess ;
// initialize streams
// s[0] : NOT USED
// s[1] : NOT USED
// s[2] : f7 - f9, f7 requires s[7:12]
// s[3] : f10(y), f10 requires s[7:9] as well as f4 via event[6]
// s[4] : f4 - f10(y+1), f10 requires s[10:12]
// s[5] : f11 - f12, f11 requires s[0], s[1], s[3], s[4], and f12 requires s[2]
// s[6] : NOT USED
// s[7] : f3(y,1)
// s[8] : f3(y,2)
// s[9] : f3(y,3)
// s[10] : f3(y+1,1)
// s[11] : f3(y+1,2)
// s[12] : f3(y+1,3)
hipStream_t stream[13] ;
int i ;
for( i = 0 ; i < 13 && status == hipSuccess ; i++ )
status = hipStreamCreate( stream + i ) ;
// initialize events
// event[i] marks the completion of stream[i]
// except event[6] which marks the completion of f4 in s[4]
hipEvent_t event[13] ;
for( i = 0 ; i < 13 && status == hipSuccess ; i++ )
status = hipEventCreate( event + i ) ;
// represent device memory
float *d_y = NULL ;
float *d_x = NULL ;
float *d_t = NULL ;
float *d_f3 = NULL ;
float *d_f31 = NULL ;
float *d_f4 = NULL ;
float **d_ri = NULL ;
float *d_f7 = NULL ;
float *d_f9 = NULL ;
float *d_f10 = NULL ;
float *d_f101 = NULL ;
float *d_f11 = NULL ;
float *d_f11w = NULL ;
float *d_f12 = NULL ;
// allocate device memory
if( status == hipSuccess )
status = hipMalloc( &d_y , n * p * sizeof(float) ) ;
if( status == hipSuccess )
status = hipMalloc( &d_x , n * q * sizeof(float) ) ;
if( status == hipSuccess )
status = hipMalloc( &d_t , tn * K * sizeof(float) ) ;
if( status == hipSuccess )
status = hipMalloc( &d_f3 , n * p * K * sizeof(float) ) ;
if( status == hipSuccess )
status = hipMalloc( &d_f31 , n * p * K * sizeof(float) ) ;
if( status == hipSuccess )
status = hipMalloc( &d_f4 , p * K * sizeof(float) ) ;
if( status == hipSuccess )
status = hipMalloc( &d_ri , K * sizeof(float*) ) ;
if( status == hipSuccess )
status = hipMalloc( &d_f7 , n * p * K * sizeof(float) ) ;
if( status == hipSuccess )
status = hipMalloc( &d_f9 , n * K * sizeof(float) ) ;
if( status == hipSuccess )
status = hipMalloc( &d_f10 , n * p * K * sizeof(float) ) ;
if( status == hipSuccess )
status = hipMalloc( &d_f101 , n * p * K * sizeof(float) ) ;
if( status == hipSuccess )
status = hipMalloc( &d_f11 , n * K * sizeof(float) ) ;
if( status == hipSuccess )
status = hipMalloc( &d_f11w , m * n * K * sizeof(float) ) ;
if( status == hipSuccess )
status = hipMalloc( &d_f12 , K * sizeof(float) ) ;
// populate device memory
if( status == hipSuccess )
status = hipMemcpy( d_y , y , n * p * sizeof(float) , hipMemcpyHostToDevice ) ;
if( status == hipSuccess )
status = hipMemcpy( d_x , x , n * q * sizeof(float) , hipMemcpyHostToDevice ) ;
if( status == hipSuccess )
status = hipMemcpy( d_t , t , tn * K * sizeof(float) , hipMemcpyHostToDevice ) ;
size_t free_mem , total_mem ;
if( status == hipSuccess )
status = hipMemGetInfo( &free_mem, &total_mem ) ;
if( status == hipSuccess )
fprintf( stderr , "Device free mem: %lu, total mem: %lu, remaining: %f%%\n" , free_mem , total_mem , ((float) free_mem)/((float) total_mem) ) ;
// identify parameter elements
struct params a ;
construct_params ( d_t , &a , p , q , m ) ;
// count threads
size_t t3 = n * p * K ;
size_t t4 = p * K ;
size_t t7 = n * p * K ;
size_t t9 = n * K ;
size_t t10 = n * p * K ;
size_t t11 = n * K ;
size_t t12 = K ;
///////////////// run and schedule kernels
// stream[7:9]
if( status == hipSuccess )
hipLaunchKernelGGL(( f3_marginal_cdf) , dim3(t3 / n_cores + 1) , dim3(n_cores) , 0 , stream[7] , d_y , d_x , a.beta , a.lsig , a.lnu , d_f3 , n , p , q , K , tn , 1 , 0.0f ) ;
if( status == hipSuccess )
hipLaunchKernelGGL(( f3_marginal_cdf) , dim3(t3 / n_cores + 1) , dim3(n_cores) , 0 , stream[8] , d_y , d_x , a.beta , a.lsig , a.lnu , d_f3 , n , p , q , K , tn , 2 , 0.0f ) ;
if( status == hipSuccess )
hipLaunchKernelGGL(( f3_marginal_cdf) , dim3(t3 / n_cores + 1) , dim3(n_cores) , 0 , stream[9] , d_y , d_x , a.beta , a.lsig , a.lnu , d_f3 , n , p , q , K , tn , 3 , 0.0f ) ;
if( status == hipSuccess )
status = hipEventRecord( event[7] , stream[7] ) ;
if( status == hipSuccess )
status = hipEventRecord( event[8] , stream[8] ) ;
if( status == hipSuccess )
status = hipEventRecord( event[9] , stream[9] ) ;
// stream[10:12]
if( status == hipSuccess )
hipLaunchKernelGGL(( f3_marginal_cdf) , dim3(t3 / n_cores + 1) , dim3(n_cores) , 0 , stream[10] , d_y , d_x , a.beta , a.lsig , a.lnu , d_f31 , n , p , q , K , tn , 1 , 1.0f ) ;
if( status == hipSuccess )
hipLaunchKernelGGL(( f3_marginal_cdf) , dim3(t3 / n_cores + 1) , dim3(n_cores) , 0 , stream[11] , d_y , d_x , a.beta , a.lsig , a.lnu , d_f31 , n , p , q , K , tn , 2 , 1.0f ) ;
if( status == hipSuccess )
hipLaunchKernelGGL(( f3_marginal_cdf) , dim3(t3 / n_cores + 1) , dim3(n_cores) , 0 , stream[12] , d_y , d_x , a.beta , a.lsig , a.lnu , d_f31 , n , p , q , K , tn , 3 , 1.0f ) ;
if( status == hipSuccess )
status = hipEventRecord( event[10] , stream[10] ) ;
if( status == hipSuccess )
status = hipEventRecord( event[11] , stream[11] ) ;
if( status == hipSuccess )
status = hipEventRecord( event[12] , stream[12] ) ;
// stream[2]
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[2] , event[7] , 0 ) ;
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[2] , event[8] , 0 ) ;
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[2] , event[9] , 0 ) ;
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[2] , event[10] , 0 ) ;
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[2] , event[11] , 0 ) ;
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[2] , event[12] , 0 ) ;
if( status == hipSuccess )
hipLaunchKernelGGL(( f7_lmarginal_pdf) , dim3(t7 / n_cores + 1) , dim3(n_cores) , 0 , stream[2] , d_f3 , d_f31 , d_f7 , n , p , K ) ;
if( status == hipSuccess )
hipLaunchKernelGGL(( f9_sum_lpdfs) , dim3(t9 / n_cores + 1) , dim3(n_cores) , 0, 0, d_f7 , d_f9 , n , p , K ) ;
if( status == hipSuccess )
status = hipEventRecord( event[2] , stream[2] ) ;
// stream[4]
if( status == hipSuccess )
hipLaunchKernelGGL(( f4_diag_sig) , dim3(t4 / n_cores + 1) , dim3(n_cores) , 0 , stream[4] , a.l , a.lpsi , d_f4 , p , m , K , tn ) ;
if( status == hipSuccess )
status = hipEventRecord( event[6] , stream[4] ) ;
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[4] , event[10] , 0 ) ;
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[4] , event[11] , 0 ) ;
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[4] , event[12] , 0 ) ;
if( status == hipSuccess )
hipLaunchKernelGGL(( f10_F_N_inv) , dim3(t10 / n_cores + 1) , dim3(n_cores) , 0 , stream[4] , d_f31 , d_f4 , d_f101 , n , p , K ) ;
if( status == hipSuccess )
status = hipEventRecord( event[4] , stream[4] ) ;
// stream[3]
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[3] , event[6] , 0 ) ;
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[3] , event[7] , 0 ) ;
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[3] , event[8] , 0 ) ;
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[3] , event[9] , 0 ) ;
if( status == hipSuccess )
hipLaunchKernelGGL(( f10_F_N_inv) , dim3(t10 / n_cores + 1) , dim3(n_cores) , 0 , stream[3] , d_f3 , d_f4 , d_f10 , n , p , K ) ;
if( status == hipSuccess )
status = hipEventRecord( event[3] , stream[3] ) ;
// stream[5]
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[5] , event[3] , 0 ) ;
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[5] , event[4] , 0 ) ;
if( status == hipSuccess )
hipLaunchKernelGGL(( f11_lpmnorm) , dim3(t11 / n_cores + 1) , dim3(n_cores) , 0 , stream[5] , d_f10 , d_f101 , a.l , a.lpsi , d_f11 , n , p , m , K , seed , n_iter , d_f11w ) ;
if( status == hipSuccess )
status = hipStreamWaitEvent( stream[5] , event[2] , 0 ) ;
if( status == hipSuccess )
hipLaunchKernelGGL(( f12_sum_log_likes) , dim3(t12 / n_cores + 1) , dim3(n_cores) , 0, 0, d_f11 , d_f9 , d_f12 , n , K ) ;
///////////////// finished running and scheduling kernels
// extract data from device
if( status == hipSuccess )
status = hipMemcpy( out , d_f12 , K * sizeof(float) , hipMemcpyDeviceToHost ) ;
// delete streams
for( i = 0 ; i < 13 && status == hipSuccess ; i++ )
status = hipStreamDestroy( stream[i] ) ;
// delete events
for( i = 0 ; i < 13 && status == hipSuccess ; i++ )
status = hipEventDestroy( event[i] ) ;
// check for errors
if( status != hipSuccess )
fprintf( stderr , "CUDA ERROR: %s\n" , hipGetErrorString(status) ) ;
// free device memory
hipFree( d_y ) ;
hipFree( d_x ) ;
hipFree( d_t ) ;
hipFree( d_f3 ) ;
hipFree( d_f31 ) ;
hipFree( d_f4 ) ;
hipFree( d_ri ) ;
hipFree( d_f7 ) ;
hipFree( d_f9 ) ;
hipFree( d_f10 ) ;
hipFree( d_f101 ) ;
hipFree( d_f11 ) ;
hipFree( d_f12 ) ;
hipFree( d_f11w ) ;
return ;
}
void construct_params ( float *t , struct params *a , int p , int q , int m )
{
a->beta = t ;
a->lsig = t + p*q ;
a->lnu = t + p*(q + 1) ;
a->l = t + p*(q + 2) ;
a->lpsi = t + p*(q + 2 + m) ;
}
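// Layout note for construct_params above: each column of t packs beta (p*q), lsig (p), lnu (p),
// l (p*m) and lpsi (p) back to back, so tn = p*(q + m + 3). E.g. with (hypothetical) p=2, q=3, m=1,
// tn = 14 and the offsets are beta -> t[0..5], lsig -> t[6..7], lnu -> t[8..9], l -> t[10..11], lpsi -> t[12..13].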
////////////////////////////////////////////////////////////////////////////////////// device implementations
__global__ void f2_make_inner_sig ( float *l , float *psi , float *out , int p , int m , int K , int tn )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= m * m * K )
return ;
int k = i / (m*m) ;
int col = (i - k*m*m) / m ;
int row = i - k*m*m - col*m ;
out += i ;
*out = 0.0f ;
for( i = 0 ; i < p ; i++ )
*out += expf( -psi[ k*tn + i ]) * l[ k*tn + p*row + i ] * l[ k*tn + p*col + i ] ;
if( row == col )
*out += 1.0f ;
}
__device__ double beta( float a , float b )
{
return exp( lgammaf(a) + lgamma(b) - lgamma(a+b) ) ;
}
__device__ double beta_sr ( float x , float a , float b , float i )
{
return expf( (a+i)*logf(x) + b*logf(1-x) - logf(a+i) + lgammaf(a+i+b) - lgammaf(a+i) - lgammaf(b) ) ;
}
__device__ void pt_beta_reductions ( float *x , float nu )
{
int flag = ( (*x)*(*x) > nu )? 1 : 0 ;
float y = ( flag )? nu/( (*x)*(*x) + nu) : 1.0f - 1.0f/( 1.0f + ((*x)/nu)*(*x) ) ;
float a = ( flag )? 0.5f*nu : 0.5f ;
float b = ( flag )? 0.5f : 0.5f*nu ;
float out = 0.0f ;
int i = 0 ;
for( i = 0 ; i < 20 ; i++ )
{
out += beta_sr( y , a , b , (float) i ) ;
}
out = ( flag )? 1.0f - 0.5f*out : 1.0f - 0.5f*(1.0f - out) ;
out = ( *x < 0.0 )? 1.0f - out : out ;
*x = out ;
}
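// Note: the series above evaluates the Student-t CDF through the regularized incomplete beta
// function, P(T <= x) = 1 - 0.5 * I_{nu/(nu+x*x)}(nu/2, 1/2) for x >= 0 (mirrored for x < 0),
// truncated at 20 terms of beta_sr.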
__device__ void pt_alg395 ( float *x , float nu )
{
float t = (*x) * (*x) ;
float y = t/nu ;
float b = 1.0f + y ;
y = ( y > 1e-6f )? logf(b) : y ;
float a = nu - 0.5 ;
b = 48.0f * a * a ;
y = a * y ;
y = (((((-0.4f*y-3.3f)*y - 24.0f)*y - 85.5f)/(0.8f*y*y + 100.0f + b) + y + 3.0f)/b + 1.0f)*sqrtf(y) ;
y = normcdff( y ) ;
*x = ( *x > 0.0f )? y : 1.0f - y ;
}
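// Note: the name and structure suggest this follows ACM Algorithm 395 (Hill, 1970), a normal-based
// approximation to the Student-t CDF; callers in f3_marginal_cdf only route here when
// nu > 200 or ( tmp*tmp < nu and nu > 4 ).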
__device__ void pt_normal ( float *x )
{
*x = normcdff( *x ) ;
}
__global__ void f3_marginal_cdf ( float *y , float *x , float *beta , float *lsig , float *lnu , float *out , int n , int p , int q , int K , int tn , int mode , float plus )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= n * p * K )
return ;
int k = i / (p*n) ;
int col = (i - k*p*n) / n ; // dimension
int row = i - k*p*n - col*n ; // sample
if( mode == 1 )
{
if( expf( lnu[ tn*k + col ] ) <= 1e5f ) // must satisfy to proceed: nu > 1e5f
return ;
y += n*col + row ;
out += i ;
if( *y + plus <= 0.0f ) // if y is zero, all forms of f3 return 0.0f. We do this under the 'mode ==' test to avoid write collisions.
{
*out = 0.0f ;
return ;
}
float sig = expf( lsig[ tn*k + col ] ) ;
float mu = 0.0f ;
for( i = 0 ; i < q ; i++ )
mu += x[ i*n + row ] * beta[ k*tn + col*q + i ] ;
*out = logf((*y) + plus)/sig - mu ;
pt_normal( out ) ;
}
if( mode == 2 )
{
float nu = expf( lnu[ tn*k + col ] ) ;
if( nu > 1e5f ) // must satisfy to proceed: nu <= 1e5f
return ;
y += n*col + row ;
float sig = expf( lsig[ tn*k + col ] ) ;
float mu = 0.0f ;
int j ;
for( j = 0 ; j < q ; j++ )
mu += x[ j*n + row ] * beta[ k*tn + col*q + j ] ;
float tmp = logf((*y) + plus)/sig - mu ;
if( ! ( nu > 200.0f || ( tmp*tmp < nu && nu > 4.0f ) ) )
return ;
out += i ;
if( *y + plus <= 0.0f )
{
*out = 0.0f ;
return ;
}
*out = tmp ;
pt_alg395 ( out , nu ) ;
}
if( mode == 3 )
{
float nu = expf( lnu[ tn*k + col ] ) ;
if( nu > 1e5f )
return ;
y += n*col + row ;
float sig = expf( lsig[ tn*k + col ] ) ;
float mu = 0.0f ;
int j ;
for( j = 0 ; j < q ; j++ )
mu += x[ j*n + row ] * beta[ k*tn + col*q + j ] ;
float tmp = logf((*y) + plus)/sig - mu ;
if( nu > 200.0f || ( tmp*tmp < nu && nu > 4.0f ) )
return ;
out += i ;
if( *y + plus <= 0.0f )
{
*out = 0.0f ;
return ;
}
*out = tmp ;
pt_beta_reductions ( out , nu ) ;
}
}
__global__ void f4_diag_sig ( float *l , float *psi , float *out , int p , int m , int K , int tn )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= p * K )
return ;
int k = i / p ;
int row = i - k*p ; // dimension
out += i ; // row-th row in the k-th vector
*out = expf( psi[ k*tn + row ] ) ;
for( i = 0 ; i < m ; i++ )
*out += l[ k*tn + i*p + row ] * l[ k*tn + i*p + row ] ;
}
__global__ void f5_ldet ( float *lpsi , float **ri , float *out , int m , int p , int K , int tn )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= K )
return ;
int k = i ;
float *invR = ri[i] ;
out += i ;
*out = 0.0f ;
for( i = 0 ; i < p ; i++ )
*out += lpsi[ k*tn + i ] ; // returns on log scale
for( i = 0 ; i < m ; i++ )
*out -= logf( fabs(invR[ i*m + i ]) ) ; // algebraically correct and numerically stable (avoids squaring)
}
__device__ void copyFloats ( float *dest , float *src , int n )
{
float *fin = dest + n ;
while( dest < fin )
{
*dest = *src ;
dest++ ;
src++ ;
}
}
// single-threaded matrix product
// utilizes a single cuda-core, meant for mass products
// x : nXn matrix
// y : nXn matrix
// w : nXn matrix, working space
// returns in w
__device__ void mat_prod_serial ( float *x , float *y , int n , float *w )
{
float *fin = x + n*n ;
float *t1 = NULL ;
float *t2 = NULL ;
int i , j ;
for( j = 0 ; j < n ; j++ )
{
for( i = 0 ; i < n ; i++ )
{
t1 = x + i ; // i-th row of x
t2 = y + j*n ; // j-th column of y
*w = 0.0f ;
while( t1 < fin )
{
*w += (*t1) * (*t2) ;
t1 += n ;
t2 ++ ;
}
w++ ;
}
}
}
// Householder reflection matrix
// Creates the (nx-nv+1)-th Householder reflection matrix for QR decomposition
// x : output matrix, nx X nx
// v : vector of length nv
// requires nv <= nx
__device__ void construct_reflection_matrix ( float *x , float *v , int nx , int nv )
{
int N = nx - nv ;
int i , j ;
for( i = 0 ; i < nx ; i++ )
{
for( j = 0 ; j < nx ; j++ )
{
if( i == j )
x[ i + nx*i ] = 1.0f ;
else
x[ i + nx*j ] = 0.0f ;
if( i >= N && j >= N ) // less 2 X v's outer product
{
x[ i + nx*j ] -= 2.0f * v[ i-N ] * v[ j-N ] ;
}
}
}
}
// invert upper-triangular r_{n X n} into ri with back fitting
__device__ void invert_upper_serial ( float *r , int n , float *ri )
{
int i , j ; // row and column of ri respectively
int k ;
for( j = 0 ; j < n ; j++ )
{
for( i = n-1 ; i >= 0 ; i-- )
{
if( i > j ) // lower quadrant
ri[ i + j*n ] = 0.0f ;
else if( i == j )
ri[ i + j*n ] = 1.0f / r[ i + j*n ] ;
else // i < j
{
ri[ i + j*n ] = 0.0f ;
for( k = j ; k > i ; k-- )
ri[ i + j*n ] -= r[ i + k*n ] * ri[ k + j*n ] ;
ri[ i + j*n ] /= r[ i + i*n ] ;
}
}
}
}
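// Worked example of the back substitution above (column-major, n = 2): for R = [ 2 3 ; 0 4 ] the
// loop gives ri[0,0] = 1/2, ri[1,1] = 1/4 and ri[0,1] = -( 3 * 1/4 ) / 2 = -0.375,
// i.e. R^{-1} = [ 0.5 -0.375 ; 0 0.25 ], so that R * R^{-1} = I.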
__global__ void f6_inv_inner_sig ( float *f2 , float *out , int m , int K , float *w , float **ri )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= K )
return ;
out += i*m*m ;
if( m == 1 )
{
*out = 1.0f / (*out) ;
return ;
}
int work_n = 4*m*m ; // total working space size per thread
float *qt = w + i * work_n ; // transpose of Q matrix; product of QR decomposition
float *qi = w + i*work_n + m*m ; // Q_i working space for QR decomp
ri[i] = w + i*work_n + 2*m*m ; // space for R^{-1} after QR decomp
float *w2 = w + i*work_n + 3*m*m ; // secondary working space
float *v = w2 ; // different name for same space, stands in for a vector. Optimizer should remove at compile
float mag = -1.0f ; // magnitude
// copy f1 to out
f2 += i*m*m ;
copyFloats ( out , f2 , m*m ) ;
// set qt to identity
int ii , j ;
for( ii = 0 ; ii < m ; ii++ )
{
for( j = 0 ; j < m ; j++ )
{
if( ii == j )
qt[ ii + j*m ] = 1.0f ;
else
qt[ ii + j*m ] = 0.0f ;
}
}
// QR decomposition via Householder reflections
for( ii = 0 ; ii < m ; ii++ )
{
// calc rotation vector
for( j = 0 ; j < m - ii ; j++ )
v[j] = out[ ii + j + ii*m ] ;
mag = 0.0f ;
for( j = 0 ; j < m - ii ; j++ )
mag += v[j]*v[j] ;
mag = sqrtf(mag) ;
v[0] += copysignf( mag , v[0] ) ;
mag = 0.0f ;
for( j = 0 ; j < m - ii ; j++ )
mag += v[j]*v[j] ;
mag = sqrtf( mag ) ;
for( j = 0 ; j < m - ii ; j++ )
v[j] /= mag ;
construct_reflection_matrix( qi , v , m , m-ii ) ;
// update m = Qi m
mat_prod_serial ( qi , out , m , w2 ) ; // optional todo : force zeros below i-th entry of i-th column
copyFloats ( out , w2 , m*m ) ; // m stores r
// update qt = qi qt
mat_prod_serial ( qi , qt , m , w2 ) ;
copyFloats ( qt , w2 , m*m ) ;
}
// Backfit R to I, producing a numerically stable inverse
invert_upper_serial ( out , m , ri[i] ) ;
// Matrix product R^{-1} Q^T produces the inverse of m
mat_prod_serial ( ri[i] , qt , m , out ) ;
}
__global__ void f7_lmarginal_pdf ( float *f3 , float *f31 , float *out , int n , int p , int K )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= n*p*K )
return ;
int k = i / (n*p) ;
int col = (i - k*n*p) / n ;
int row = i - k*n*p - col*n ;
out += i ; // k-th matrix, row-th row, col-th col
*out = fabsf( f31[ k*n*p + col*n + row ] - f3[ k*n*p + col*n + row ] ) ; // I expect round-offs in the tail
*out = ( *out <= 0.0f ) ? sqrtf(FLT_MIN) : *out ; // just in case
*out = logf(*out) ;
}
__global__ void f8_inv_sig ( float *f6 , float *l , float *lpsi , float *out , int p , int m , int K , int tn )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= p*p*K )
return ;
int k = i / (p*p) ;
int col = (i - k*p*p) / p ;
int row = i - k*p*p - col*p ;
out += i ; // k-th matrix, col-th col, row-th row
int j ;
*out = 0.0f ;
for( i = 0 ; i < m ; i++ )
{
for( j = 0 ; j < m ; j++ )
*out += l[ k*tn + j*p + row ] * f6[ k*m*m + j*m + i ] * l[ k*tn + i*p + col ] ;
}
*out *= expf( -lpsi[k*tn + row] - lpsi[k*tn + col] ) ;
*out = ( row == col ) ? expf( -lpsi[ k*tn + row ] ) - (*out) : -(*out) ;
}
__global__ void f9_sum_lpdfs ( float *f7 , float *out , int n , int p , int K )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= n*K )
return ;
int k = i / n ;
int row = i - k*n ;
out += i ; // k-th vector, row-th row
*out = 0.0f ;
for( i = 0 ; i < p ; i++ )
*out += f7[ k*n*p + i*n + row ] ;
}
__global__ void f10_F_N_inv ( float *f3 , float *f4 , float *out , int n , int p , int K )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= n * p * K )
return ;
int k = i / (n*p) ;
int col = (i - k*n*p) / n ;
int row = i - k*n*p - col*n ;
out += i ; // k-th matrix, col-th col, row-th row
if( f3[ k*n*p + col*n + row ] <= 0.0f )
*out = -FLT_MAX ;
else if( f3[ k*n*p + col*n + row ] >= 1.0f )
*out = FLT_MAX ;
else
*out = sqrtf( f4[ k*p + col ] ) * normcdfinvf( f3[ k*n*p + col*n + row ] ) ;
}
// Add log scale values and return on log scale
// Used to avoid under/over-flows
// returns log( a + b )
__device__ float logSum ( float a , float b )
{
if( a < b ) // exponents can't be allowed to get too large
{
float t = a ;
a = b ;
b = t ;
}
return a + logf( 1.0f + expf( b - a ) ) ;
}
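// e.g. logSum( -1000.0f , -1001.0f ) = -1000 + logf( 1 + expf(-1) ) ~= -999.687 ,
// whereas forming expf(-1000) + expf(-1001) directly would underflow to 0.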
// Subtract log scale values from one another
// returns log( a - b )
__device__ float logSubtract( float a , float b )
{
return a + logf( 1.0f - expf( b - a ) ) ;
}
// a mod b
__device__ size_t mod( size_t a , size_t b )
{
return a - b*(a/b) ;
}
__device__ float generate_unif_lcg( size_t *seed )
{
*seed = mod( 1103515245*(*seed) + 12345 , 2147483648 ) ;
return ((float) ((*seed)) + 10000) / 2147503648.0f ; // = 2147483648 + 20000 ; do not return 0 or 1
}
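// Note: (1103515245, 12345) mod 2^31 are the textbook ANSI-C-style rand() LCG constants; the
// +10000 shift and the 2147503648 divisor keep the result strictly inside (0,1) so that
// normcdfinvf() in f11_lpmnorm never receives 0 or 1.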
__device__ float truncate ( float x )
{
return copysignf( floorf( fabsf(x) ) , x ) ;
}
// Returns log( normal_cdf(x) )
__device__ float log_norm_cdf ( float x )
{
// Catch boundary cases
if( x >= 1e37 )
return 0.0f ;
if( x <= -1e37 )
return -1.0f/0.0f ;
// Use pre-written CUDA software when possible
if( x <= 5.657f && x >= -5.657f ) // sqrt(32)
return logf(normcdff(x)) ;
// else : I borrowed this from R's pnorm.c, references were not given for the algorithm content
float p[6] = {
0.21589853405795699f ,
0.1274011611602473639f ,
0.022235277870649807f ,
0.001421619193227893466f ,
2.9112874951168792e-5f ,
0.02307344176494017303f };
float q[5] = {
1.28426009614491121f ,
0.468238212480865118f ,
0.0659881378689285515f ,
0.00378239633202758244f ,
7.29751555083966205e-5f };
float xsq = 1/(x*x) ;
float xnum = p[5]*xsq ;
float xden = xsq ;
int i ;
for( i = 0 ; i < 4 ; i++ )
{
xnum = ( xnum + p[i] ) * xsq ;
xden = ( xden + q[i] ) * xsq ;
}
float tmp = xsq * ( xnum + p[4] ) / ( xden + q[4] ) ;
tmp = ( 0.3989422804014327 - tmp ) / fabsf(x) ;
// xsq = truncate( x * 16.0f ) / 16.0f ; // modified from original code
xsq = truncate( x * 64.0f ) / 64.0f ;
float del = (x - xsq) * (x + xsq) ;
if( x < 0.0f )
return ( -xsq * xsq * 0.5f ) + ( -del * 0.5f ) + logf(tmp) ;
return logf( 1.0f - expf( -xsq * xsq * 0.5f )*expf( -del * 0.5f ) * tmp ) ;
}
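// Note: for |x| > sqrt(32) the branch above evaluates log( phi(x)/|x| * r(1/x^2) ) with a Cody-style
// rational correction r (the same constants appear in R's pnorm), i.e. the Mills-ratio expansion
// Phi(-x) ~ phi(x)/x * ( 1 - 1/x^2 + 3/x^4 - ... ) for large x, computed in log space to avoid underflow.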
__global__ void f11_lpmnorm ( float *f10 , float *f101 , float *l , float *lpsi , float *out , int n , int p , int m , int K , size_t seed , int n_iter , float *w )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= n * K )
return ;
int k = i / n ;
int row = i - k*n ;
out += i ; // k-th vector, row-th row
w += m*i ;
size_t local_seed = seed + i ;
float lo, hi, mu , tmp , swp ;
float sqMin = sqrt(FLT_MIN) ;
*out = FLT_MIN ;
int j , kk ;
for( i = 0 ; i < n_iter ; i++ ) // MC-integral, E[ P( N in [a,b] | F ) ] = P( N in [a,b] )
{
for( kk = 0 ; kk < m ; kk++ )
w[kk] = normcdfinvf( generate_unif_lcg( &local_seed ) ) ; // generate a standard normal
tmp = 0.0f ;
for( j = 0 ; j < p ; j++ )
{
mu = 0.0f ;
for( kk = 0 ; kk < m ; kk++ )
mu += l[ k*p*m + kk*p + j ] * w[kk] ;
lo = (f10[ k*n*p + j*n + row ] - mu) * fmaxf( sqMin , expf( -lpsi[ k*p + j ] ) );
hi = (f101[ k*n*p + j*n + row ] - mu) * fmaxf( sqMin , expf( -lpsi[ k*p + j ] ) ) ;
if( hi < lo ) // possible via roundoffs
{
swp = lo ; // use as temp variable
lo = hi ;
hi = swp ;
}
if( 0.0f < lo ) // both positive likely results in an underflow
{
swp = lo ;
lo = -hi ;
hi = -swp ;
}
swp = logSubtract( log_norm_cdf(hi) , log_norm_cdf(lo) ) ;
if( swp != swp ) // round-off !
tmp += -FLT_MAX/1000.0f ;
else
tmp += swp ;
}
if( i == 1 )
*out = tmp ;
else
*out = logSum( *out , tmp ) ;
}
*out -= logf( (float) n_iter ) ;
}
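// Note on the estimator above: conditional on a drawn m-vector of factors w the coordinates are
// independent, each contributing Phi(hi_j) - Phi(lo_j); the per-draw products and the average over
// n_iter draws, P ~= (1/n_iter) * sum_i prod_j [ Phi(hi_j) - Phi(lo_j) ], are kept on the log scale
// via logSubtract / logSum to avoid underflow.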
__global__ void f12_sum_log_likes ( float *f11 , float *f9 , float *out , int n , int K )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= K )
return ;
out += i ; // i-th entry
*out = 0.0f ;
int j ;
for( j = 0 ; j < n ; j++ )
{
*out += f11[ i*n + j ] + f9[ i*n + j ] ;
}
}
| 790924e5cf44b4be8c3e4c1ae5be01dd558aaac7.cu |
/*
* Author: W. Evan Durno
* Written April 2016
* All rights reserved
*/
#include <stdio.h>
#include <float.h>
////////////////////////////////////////////////////////////////////////////////////// external facing headers
extern "C" {
// GPU-accelerated multivariate regression with a factor model and floor student-t marginals via gaussian copula
// all matrices are stores in column-major order
// y : n X p matrix, counts data to be regressed
// x : n X q matrix, design matrix
// t : tn X K matrix, parameters, each column is a different parameter
// specifically, the order of parameters in one t column is beta_{q X p} , lsig_{p} , lnu_{p} , l_{p X m} , lpsi_{p} , always column-major order
// m : the number of factors
// p is assumed > 32, otherwise use of this software is not motivated
// returns out : K-length vector of sample likelihoods
void log_lik_cu ( float *y , float *x , float *t , int n , int p , int q , int tn , int K , int m , int n_iter , int seed , float *out ) ;
} // end extern "C"
////////////////////////////////////////////////////////////////////////////////////// local headers
// stores device locations of major parameter components for the first parameter t
// later elements can be accessed via column shifts
struct params
{
float *beta ;
float *lsig ;
float *lnu ;
float *l ;
float *lpsi ;
};
// Extracts parameters elements from a single parameter vector
void construct_params ( float *t , struct params *a , int p , int q , int m ) ;
// NOT USED
// constructs I + L' inv(Psi) L
// out : m X m X K matrix
// threads : m X m X K
__global__ void f2_make_inner_sig ( float *l , float *psi , float *out , int p , int m , int K , int tn ) ;
// constructs F_M( y )
// mode : in { 1 , 2 , 3 } , indicating different parts of the argument domain
// plus : an amount to add to y, either 0 or 1
// out : n X p X K matrix
// threads : n X p X K
__global__ void f3_marginal_cdf ( float *y , float *x , float *beta , float *lsig , float *lnu , float *out , int n , int p , int q , int K , int tn , int mode , float plus ) ;
// constructs diag( L L' + Psi )
// out : p X K matrix
// threads : p X K
__global__ void f4_diag_sig ( float *l , float *psi , float *out , int p , int m , int K , int tn ) ;
// NOT USED
// constructs log det( L L' + Psi )
// out : K-length vector of matrix determinants
__global__ void f5_ldet ( float *lpsi , float **ri , float *out , int m , int p , int K , int tn ) ;
// NOT_USD
// constructs inv( I + L' inv(Psi) L )
// in : product of f2
// w : 4 X m X m X K matrix, working space
// out : m X m X K matrix in out , but also m X m X K matrix in r
// threads : K
__global__ void f6_inv_inner_sig ( float *f2 , float *out , int m , int K , float *w , float **r ) ;
// constructs log f_M(y)
// in : f3( y ) , f3( y+1 )
// out : n X p X K matrix
// threads : n X p X K
__global__ void f7_lmarginal_pdf ( float *f3 , float *f31 , float *out , int n , int p , int K ) ;
// NOT USED
// constructs inv(Psi) - inv(Psi) L inv( I + L' inv(Psi) L ) L' inv(Psi) = inv(Sigma)
// in : product of f6
// out : p X p X K matrix
// threads : p X p X K
__global__ void f8_inv_sig ( float *f6 , float *l , float *lpsi , float *out , int p , int m , int K , int tn ) ;
// constructs Sum_j log f_M( y_{ij} )
// in : product of f7
// out : n X K matrix
// threads n X K
__global__ void f9_sum_lpdfs ( float *f7 , float *out , int n , int p , int K ) ;
// constructs F_N^{-1}( F_M( y+1 ) )
// in : f3( y ) and product of f4
// out : n X p X K matrix
// threads : n X p X K
__global__ void f10_F_N_inv ( float *f3 , float *f4 , float *out , int n , int p , int K ) ;
// constructs log f_{N_p} ( f10 )
// out : n X K matrix
// threads : n X K
__global__ void f11_lpmnorm ( float *f10 , float *f101 , float *l , float *lpsi , float *out , int n , int p , int m , int K , size_t seed , int n_iter , float *w ) ;
// constructs log_lik( t ; y , x )
// out : K-length vector
// threads : K
__global__ void f12_sum_log_likes ( float *f11 , float *f9 , float *out , int n , int K ) ;
////////////////////////////////////////////////////////////////////////////////////// host implementations
void log_lik_cu ( float *y , float *x , float *t , int n , int p , int q , int tn , int K , int m , int n_iter , int seed , float *out )
{
size_t n_cores = 32 ;
if( n_cores * 65536 < max( p*p*K , n*p*K ) )
n_cores = max( p*p*K , n*p*K )/65536 + 1 ;
if( n_cores >= 1024 )
n_cores = 1023 ;
if( n_cores * 65536 < max( p*p*K , n*p*K ) )
fprintf( stderr , "CUDA WARNING: insufficient threads!\n" ) ;
cudaError_t status = cudaSuccess ;
// initialize streams
// s[0] : NOT USED
// s[1] : NOT USED
// s[2] : f7 - f9, f7 requires s[7:12]
// s[3] : f10(y), f10 requires s[7:9] as well as f4 via event[6]
// s[4] : f4 - f10(y+1), f10 requires s[10:12]
// s[5] : f11 - f12, f11 requires s[0], s[1], s[3], s[4], and f12 requires s[2]
// s[6] : NOT USED
// s[7] : f3(y,1)
// s[8] : f3(y,2)
// s[9] : f3(y,3)
// s[10] : f3(y+1,1)
// s[11] : f3(y+1,2)
// s[12] : f3(y+1,3)
cudaStream_t stream[13] ;
int i ;
for( i = 0 ; i < 13 && status == cudaSuccess ; i++ )
status = cudaStreamCreate( stream + i ) ;
// initialize events
// event[i] marks the completion of stream[i]
// except event[6] which marks the completion of f4 in s[4]
cudaEvent_t event[13] ;
for( i = 0 ; i < 13 && status == cudaSuccess ; i++ )
status = cudaEventCreate( event + i ) ;
// represent device memory
float *d_y = NULL ;
float *d_x = NULL ;
float *d_t = NULL ;
float *d_f3 = NULL ;
float *d_f31 = NULL ;
float *d_f4 = NULL ;
float **d_ri = NULL ;
float *d_f7 = NULL ;
float *d_f9 = NULL ;
float *d_f10 = NULL ;
float *d_f101 = NULL ;
float *d_f11 = NULL ;
float *d_f11w = NULL ;
float *d_f12 = NULL ;
// allocate device memory
if( status == cudaSuccess )
status = cudaMalloc( &d_y , n * p * sizeof(float) ) ;
if( status == cudaSuccess )
status = cudaMalloc( &d_x , n * q * sizeof(float) ) ;
if( status == cudaSuccess )
status = cudaMalloc( &d_t , tn * K * sizeof(float) ) ;
if( status == cudaSuccess )
status = cudaMalloc( &d_f3 , n * p * K * sizeof(float) ) ;
if( status == cudaSuccess )
status = cudaMalloc( &d_f31 , n * p * K * sizeof(float) ) ;
if( status == cudaSuccess )
status = cudaMalloc( &d_f4 , p * K * sizeof(float) ) ;
if( status == cudaSuccess )
status = cudaMalloc( &d_ri , K * sizeof(float*) ) ;
if( status == cudaSuccess )
status = cudaMalloc( &d_f7 , n * p * K * sizeof(float) ) ;
if( status == cudaSuccess )
status = cudaMalloc( &d_f9 , n * K * sizeof(float) ) ;
if( status == cudaSuccess )
status = cudaMalloc( &d_f10 , n * p * K * sizeof(float) ) ;
if( status == cudaSuccess )
status = cudaMalloc( &d_f101 , n * p * K * sizeof(float) ) ;
if( status == cudaSuccess )
status = cudaMalloc( &d_f11 , n * K * sizeof(float) ) ;
if( status == cudaSuccess )
status = cudaMalloc( &d_f11w , m * n * K * sizeof(float) ) ;
if( status == cudaSuccess )
status = cudaMalloc( &d_f12 , K * sizeof(float) ) ;
// populate device memory
if( status == cudaSuccess )
status = cudaMemcpy( d_y , y , n * p * sizeof(float) , cudaMemcpyHostToDevice ) ;
if( status == cudaSuccess )
status = cudaMemcpy( d_x , x , n * q * sizeof(float) , cudaMemcpyHostToDevice ) ;
if( status == cudaSuccess )
status = cudaMemcpy( d_t , t , tn * K * sizeof(float) , cudaMemcpyHostToDevice ) ;
size_t free_mem , total_mem ;
if( status == cudaSuccess )
status = cudaMemGetInfo( &free_mem, &total_mem ) ;
if( status == cudaSuccess )
fprintf( stderr , "Device free mem: %lu, total mem: %lu, remaining: %f%%\n" , free_mem , total_mem , ((float) free_mem)/((float) total_mem) ) ;
// identify parameter elements
struct params a ;
construct_params ( d_t , &a , p , q , m ) ;
// count threads
size_t t3 = n * p * K ;
size_t t4 = p * K ;
size_t t7 = n * p * K ;
size_t t9 = n * K ;
size_t t10 = n * p * K ;
size_t t11 = n * K ;
size_t t12 = K ;
///////////////// run and schedule kernels
// stream[7:9]
if( status == cudaSuccess )
f3_marginal_cdf <<< t3 / n_cores + 1 , n_cores , 0 , stream[7] >>> ( d_y , d_x , a.beta , a.lsig , a.lnu , d_f3 , n , p , q , K , tn , 1 , 0.0f ) ;
if( status == cudaSuccess )
f3_marginal_cdf <<< t3 / n_cores + 1 , n_cores , 0 , stream[8] >>> ( d_y , d_x , a.beta , a.lsig , a.lnu , d_f3 , n , p , q , K , tn , 2 , 0.0f ) ;
if( status == cudaSuccess )
f3_marginal_cdf <<< t3 / n_cores + 1 , n_cores , 0 , stream[9] >>> ( d_y , d_x , a.beta , a.lsig , a.lnu , d_f3 , n , p , q , K , tn , 3 , 0.0f ) ;
if( status == cudaSuccess )
status = cudaEventRecord( event[7] , stream[7] ) ;
if( status == cudaSuccess )
status = cudaEventRecord( event[8] , stream[8] ) ;
if( status == cudaSuccess )
status = cudaEventRecord( event[9] , stream[9] ) ;
// stream[10:12]
if( status == cudaSuccess )
f3_marginal_cdf <<< t3 / n_cores + 1 , n_cores , 0 , stream[10] >>> ( d_y , d_x , a.beta , a.lsig , a.lnu , d_f31 , n , p , q , K , tn , 1 , 1.0f ) ;
if( status == cudaSuccess )
f3_marginal_cdf <<< t3 / n_cores + 1 , n_cores , 0 , stream[11] >>> ( d_y , d_x , a.beta , a.lsig , a.lnu , d_f31 , n , p , q , K , tn , 2 , 1.0f ) ;
if( status == cudaSuccess )
f3_marginal_cdf <<< t3 / n_cores + 1 , n_cores , 0 , stream[12] >>> ( d_y , d_x , a.beta , a.lsig , a.lnu , d_f31 , n , p , q , K , tn , 3 , 1.0f ) ;
if( status == cudaSuccess )
status = cudaEventRecord( event[10] , stream[10] ) ;
if( status == cudaSuccess )
status = cudaEventRecord( event[11] , stream[11] ) ;
if( status == cudaSuccess )
status = cudaEventRecord( event[12] , stream[12] ) ;
// stream[2]
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[2] , event[7] , 0 ) ;
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[2] , event[8] , 0 ) ;
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[2] , event[9] , 0 ) ;
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[2] , event[10] , 0 ) ;
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[2] , event[11] , 0 ) ;
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[2] , event[12] , 0 ) ;
if( status == cudaSuccess )
f7_lmarginal_pdf <<< t7 / n_cores + 1 , n_cores , 0 , stream[2] >>> ( d_f3 , d_f31 , d_f7 , n , p , K ) ;
if( status == cudaSuccess )
f9_sum_lpdfs <<< t9 / n_cores + 1 , n_cores >>> ( d_f7 , d_f9 , n , p , K ) ;
if( status == cudaSuccess )
status = cudaEventRecord( event[2] , stream[2] ) ;
// stream[4]
if( status == cudaSuccess )
f4_diag_sig <<< t4 / n_cores + 1 , n_cores , 0 , stream[4] >>> ( a.l , a.lpsi , d_f4 , p , m , K , tn ) ;
if( status == cudaSuccess )
status = cudaEventRecord( event[6] , stream[4] ) ;
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[4] , event[10] , 0 ) ;
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[4] , event[11] , 0 ) ;
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[4] , event[12] , 0 ) ;
if( status == cudaSuccess )
f10_F_N_inv <<< t10 / n_cores + 1 , n_cores , 0 , stream[4] >>> ( d_f31 , d_f4 , d_f101 , n , p , K ) ;
if( status == cudaSuccess )
status = cudaEventRecord( event[4] , stream[4] ) ;
// stream[3]
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[3] , event[6] , 0 ) ;
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[3] , event[7] , 0 ) ;
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[3] , event[8] , 0 ) ;
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[3] , event[9] , 0 ) ;
if( status == cudaSuccess )
f10_F_N_inv <<< t10 / n_cores + 1 , n_cores , 0 , stream[3] >>> ( d_f3 , d_f4 , d_f10 , n , p , K ) ;
if( status == cudaSuccess )
status = cudaEventRecord( event[3] , stream[3] ) ;
// stream[5]
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[5] , event[3] , 0 ) ;
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[5] , event[4] , 0 ) ;
if( status == cudaSuccess )
f11_lpmnorm <<< t11 / n_cores + 1 , n_cores , 0 , stream[5] >>> ( d_f10 , d_f101 , a.l , a.lpsi , d_f11 , n , p , m , K , seed , n_iter , d_f11w ) ;
if( status == cudaSuccess )
status = cudaStreamWaitEvent( stream[5] , event[2] , 0 ) ;
if( status == cudaSuccess )
f12_sum_log_likes <<< t12 / n_cores + 1 , n_cores >>> ( d_f11 , d_f9 , d_f12 , n , K ) ;
///////////////// finished running and scheduling kernels
// extract data from device
if( status == cudaSuccess )
status = cudaMemcpy( out , d_f12 , K * sizeof(float) , cudaMemcpyDeviceToHost ) ;
// delete streams
for( i = 0 ; i < 13 && status == cudaSuccess ; i++ )
status = cudaStreamDestroy( stream[i] ) ;
// delete events
for( i = 0 ; i < 13 && status == cudaSuccess ; i++ )
status = cudaEventDestroy( event[i] ) ;
// check for errors
if( status != cudaSuccess )
fprintf( stderr , "CUDA ERROR: %s\n" , cudaGetErrorString(status) ) ;
// free device memory
cudaFree( d_y ) ;
cudaFree( d_x ) ;
cudaFree( d_t ) ;
cudaFree( d_f3 ) ;
cudaFree( d_f31 ) ;
cudaFree( d_f4 ) ;
cudaFree( d_ri ) ;
cudaFree( d_f7 ) ;
cudaFree( d_f9 ) ;
cudaFree( d_f10 ) ;
cudaFree( d_f101 ) ;
cudaFree( d_f11 ) ;
cudaFree( d_f12 ) ;
cudaFree( d_f11w ) ;
return ;
}
void construct_params ( float *t , struct params *a , int p , int q , int m )
{
a->beta = t ;
a->lsig = t + p*q ;
a->lnu = t + p*(q + 1) ;
a->l = t + p*(q + 2) ;
a->lpsi = t + p*(q + 2 + m) ;
}
////////////////////////////////////////////////////////////////////////////////////// device implementations
__global__ void f2_make_inner_sig ( float *l , float *psi , float *out , int p , int m , int K , int tn )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= m * m * K )
return ;
int k = i / (m*m) ;
int col = (i - k*m*m) / m ;
int row = i - k*m*m - col*m ;
out += i ;
*out = 0.0f ;
for( i = 0 ; i < p ; i++ )
*out += expf( -psi[ k*tn + i ]) * l[ k*tn + p*row + i ] * l[ k*tn + p*col + i ] ;
if( row == col )
*out += 1.0f ;
}
__device__ double beta( float a , float b )
{
return exp( lgammaf(a) + lgamma(b) - lgamma(a+b) ) ;
}
__device__ double beta_sr ( float x , float a , float b , float i )
{
return expf( (a+i)*logf(x) + b*logf(1-x) - logf(a+i) + lgammaf(a+i+b) - lgammaf(a+i) - lgammaf(b) ) ;
}
__device__ void pt_beta_reductions ( float *x , float nu )
{
int flag = ( (*x)*(*x) > nu )? 1 : 0 ;
float y = ( flag )? nu/( (*x)*(*x) + nu) : 1.0f - 1.0f/( 1.0f + ((*x)/nu)*(*x) ) ;
float a = ( flag )? 0.5f*nu : 0.5f ;
float b = ( flag )? 0.5f : 0.5f*nu ;
float out = 0.0f ;
int i = 0 ;
for( i = 0 ; i < 20 ; i++ )
{
out += beta_sr( y , a , b , (float) i ) ;
}
out = ( flag )? 1.0f - 0.5f*out : 1.0f - 0.5f*(1.0f - out) ;
out = ( *x < 0.0 )? 1.0f - out : out ;
*x = out ;
}
__device__ void pt_alg395 ( float *x , float nu )
{
float t = (*x) * (*x) ;
float y = t/nu ;
float b = 1.0f + y ;
y = ( y > 1e-6f )? logf(b) : y ;
float a = nu - 0.5 ;
b = 48.0f * a * a ;
y = a * y ;
y = (((((-0.4f*y-3.3f)*y - 24.0f)*y - 85.5f)/(0.8f*y*y + 100.0f + b) + y + 3.0f)/b + 1.0f)*sqrtf(y) ;
y = normcdff( y ) ;
*x = ( *x > 0.0f )? y : 1.0f - y ;
}
__device__ void pt_normal ( float *x )
{
*x = normcdff( *x ) ;
}
__global__ void f3_marginal_cdf ( float *y , float *x , float *beta , float *lsig , float *lnu , float *out , int n , int p , int q , int K , int tn , int mode , float plus )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= n * p * K )
return ;
int k = i / (p*n) ;
int col = (i - k*p*n) / n ; // dimension
int row = i - k*p*n - col*n ; // sample
if( mode == 1 )
{
if( expf( lnu[ tn*k + col ] ) <= 1e5f ) // must satisfy to proceed: nu > 1e5f
return ;
y += n*col + row ;
out += i ;
if( *y + plus <= 0.0f ) // if y is zero, all forms of f3 return 0.0f. We do this under the 'mode ==' test to avoid write collisions.
{
*out = 0.0f ;
return ;
}
float sig = expf( lsig[ tn*k + col ] ) ;
float mu = 0.0f ;
for( i = 0 ; i < q ; i++ )
mu += x[ i*n + row ] * beta[ k*tn + col*q + i ] ;
*out = logf((*y) + plus)/sig - mu ;
pt_normal( out ) ;
}
if( mode == 2 )
{
float nu = expf( lnu[ tn*k + col ] ) ;
if( nu > 1e5f ) // must satisfy to proceed: nu <= 1e5f
return ;
y += n*col + row ;
float sig = expf( lsig[ tn*k + col ] ) ;
float mu = 0.0f ;
int j ;
for( j = 0 ; j < q ; j++ )
mu += x[ j*n + row ] * beta[ k*tn + col*q + j ] ;
float tmp = logf((*y) + plus)/sig - mu ;
if( ! ( nu > 200.0f || ( tmp*tmp < nu && nu > 4.0f ) ) )
return ;
out += i ;
if( *y + plus <= 0.0f )
{
*out = 0.0f ;
return ;
}
*out = tmp ;
pt_alg395 ( out , nu ) ;
}
if( mode == 3 )
{
float nu = expf( lnu[ tn*k + col ] ) ;
if( nu > 1e5f )
return ;
y += n*col + row ;
float sig = expf( lsig[ tn*k + col ] ) ;
float mu = 0.0f ;
int j ;
for( j = 0 ; j < q ; j++ )
mu += x[ j*n + row ] * beta[ k*tn + col*q + j ] ;
float tmp = logf((*y) + plus)/sig - mu ;
if( nu > 200.0f || ( tmp*tmp < nu && nu > 4.0f ) )
return ;
out += i ;
if( *y + plus <= 0.0f )
{
*out = 0.0f ;
return ;
}
*out = tmp ;
pt_beta_reductions ( out , nu ) ;
}
}
__global__ void f4_diag_sig ( float *l , float *psi , float *out , int p , int m , int K , int tn )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= p * K )
return ;
int k = i / p ;
int row = i - k*p ; // dimension
out += i ; // row-th row in the k-th vector
*out = expf( psi[ k*tn + row ] ) ;
for( i = 0 ; i < m ; i++ )
*out += l[ k*tn + i*p + row ] * l[ k*tn + i*p + row ] ;
}
__global__ void f5_ldet ( float *lpsi , float **ri , float *out , int m , int p , int K , int tn )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= K )
return ;
int k = i ;
float *invR = ri[i] ;
out += i ;
*out = 0.0f ;
for( i = 0 ; i < p ; i++ )
*out += lpsi[ k*tn + i ] ; // returns on log scale
for( i = 0 ; i < m ; i++ )
*out -= logf( fabs(invR[ i*m + i ]) ) ; // algebraically correct and numerically stable (avoids squaring)
}
__device__ void copyFloats ( float *dest , float *src , int n )
{
float *fin = dest + n ;
while( dest < fin )
{
*dest = *src ;
dest++ ;
src++ ;
}
}
// single-threaded matrix product
// utilizes a single cuda-core, meant for mass products
// x : nXn matrix
// y : nXn matrix
// w : nXn matrix, working space
// returns in w
__device__ void mat_prod_serial ( float *x , float *y , int n , float *w )
{
float *fin = x + n*n ;
float *t1 = NULL ;
float *t2 = NULL ;
int i , j ;
for( j = 0 ; j < n ; j++ )
{
for( i = 0 ; i < n ; i++ )
{
t1 = x + i ; // i-th row of x
t2 = y + j*n ; // j-th column of y
*w = 0.0f ;
while( t1 < fin )
{
*w += (*t1) * (*t2) ;
t1 += n ;
t2 ++ ;
}
w++ ;
}
}
}
// Householder reflection matrix
// Creates the (nx-nv+1)-th Householder reflection matrix for QR decomposition
// x : output matrix, nx X nx
// v : vector of length nv
// requires nv <= nx
__device__ void construct_reflection_matrix ( float *x , float *v , int nx , int nv )
{
int N = nx - nv ;
int i , j ;
for( i = 0 ; i < nx ; i++ )
{
for( j = 0 ; j < nx ; j++ )
{
if( i == j )
x[ i + nx*i ] = 1.0f ;
else
x[ i + nx*j ] = 0.0f ;
if( i >= N && j >= N ) // less 2 X v's outer product
{
x[ i + nx*j ] -= 2.0f * v[ i-N ] * v[ j-N ] ;
}
}
}
}
// invert upper-triangular r_{n X n} into ri with back fitting
__device__ void invert_upper_serial ( float *r , int n , float *ri )
{
int i , j ; // row and column of ri respectively
int k ;
for( j = 0 ; j < n ; j++ )
{
for( i = n-1 ; i >= 0 ; i-- )
{
if( i > j ) // lower quadrant
ri[ i + j*n ] = 0.0f ;
else if( i == j )
ri[ i + j*n ] = 1.0f / r[ i + j*n ] ;
else // i < j
{
ri[ i + j*n ] = 0.0f ;
for( k = j ; k > i ; k-- )
ri[ i + j*n ] -= r[ i + k*n ] * ri[ k + j*n ] ;
ri[ i + j*n ] /= r[ i + i*n ] ;
}
}
}
}
__global__ void f6_inv_inner_sig ( float *f2 , float *out , int m , int K , float *w , float **ri )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= K )
return ;
out += i*m*m ;
if( m == 1 )
{
*out = 1.0f / (*out) ;
return ;
}
int work_n = 4*m*m ; // total working space size per thread
float *qt = w + i * work_n ; // transpose of Q matrix; product of QR decomposition
float *qi = w + i*work_n + m*m ; // Q_i working space for QR decomp
ri[i] = w + i*work_n + 2*m*m ; // space for R^{-1} after QR decomp
float *w2 = w + i*work_n + 3*m*m ; // secondary working space
float *v = w2 ; // different name for same space, stands in for a vector. Optimizer should remove at compile
float mag = -1.0f ; // magnitude
// copy f1 to out
f2 += i*m*m ;
copyFloats ( out , f2 , m*m ) ;
// set qt to identity
int ii , j ;
for( ii = 0 ; ii < m ; ii++ )
{
for( j = 0 ; j < m ; j++ )
{
if( ii == j )
qt[ ii + j*m ] = 1.0f ;
else
qt[ ii + j*m ] = 0.0f ;
}
}
// QR decomposition via Householder reflections
for( ii = 0 ; ii < m ; ii++ )
{
// calc rotation vector
for( j = 0 ; j < m - ii ; j++ )
v[j] = out[ ii + j + ii*m ] ;
mag = 0.0f ;
for( j = 0 ; j < m - ii ; j++ )
mag += v[j]*v[j] ;
mag = sqrtf(mag) ;
v[0] += copysignf( mag , v[0] ) ;
mag = 0.0f ;
for( j = 0 ; j < m - ii ; j++ )
mag += v[j]*v[j] ;
mag = sqrtf( mag ) ;
for( j = 0 ; j < m - ii ; j++ )
v[j] /= mag ;
construct_reflection_matrix( qi , v , m , m-ii ) ;
// update m = Qi m
mat_prod_serial ( qi , out , m , w2 ) ; // optional todo : force zeros below i-th entry of i-th column
copyFloats ( out , w2 , m*m ) ; // m stores r
// update qt = qi qt
mat_prod_serial ( qi , qt , m , w2 ) ;
copyFloats ( qt , w2 , m*m ) ;
}
// Backfit R to I, producing a numerically stable inverse
invert_upper_serial ( out , m , ri[i] ) ;
// Matrix product R^{-1} Q^T produces the inverse of m
mat_prod_serial ( ri[i] , qt , m , out ) ;
}
__global__ void f7_lmarginal_pdf ( float *f3 , float *f31 , float *out , int n , int p , int K )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= n*p*K )
return ;
int k = i / (n*p) ;
int col = (i - k*n*p) / n ;
int row = i - k*n*p - col*n ;
out += i ; // k-th matrix, row-th row, col-th col
*out = fabsf( f31[ k*n*p + col*n + row ] - f3[ k*n*p + col*n + row ] ) ; // I expect round-offs in the tail
*out = ( *out <= 0.0f ) ? sqrtf(FLT_MIN) : *out ; // just in case
*out = logf(*out) ;
}
__global__ void f8_inv_sig ( float *f6 , float *l , float *lpsi , float *out , int p , int m , int K , int tn )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= p*p*K )
return ;
int k = i / (p*p) ;
int col = (i - k*p*p) / p ;
int row = i - k*p*p - col*p ;
out += i ; // k-th matrix, col-th col, row-th row
int j ;
*out = 0.0f ;
for( i = 0 ; i < m ; i++ )
{
for( j = 0 ; j < m ; j++ )
*out += l[ k*tn + j*p + row ] * f6[ k*m*m + j*m + i ] * l[ k*tn + i*p + col ] ;
}
*out *= expf( -lpsi[k*tn + row] - lpsi[k*tn + col] ) ;
*out = ( row == col ) ? expf( -lpsi[ k*tn + row ] ) - (*out) : -(*out) ;
}
__global__ void f9_sum_lpdfs ( float *f7 , float *out , int n , int p , int K )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= n*K )
return ;
int k = i / n ;
int row = i - k*n ;
out += i ; // k-th vector, row-th row
*out = 0.0f ;
for( i = 0 ; i < p ; i++ )
*out += f7[ k*n*p + i*n + row ] ;
}
__global__ void f10_F_N_inv ( float *f3 , float *f4 , float *out , int n , int p , int K )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= n * p * K )
return ;
int k = i / (n*p) ;
int col = (i - k*n*p) / n ;
int row = i - k*n*p - col*n ;
out += i ; // k-th matrix, col-th col, row-th row
if( f3[ k*n*p + col*n + row ] <= 0.0f )
*out = -FLT_MAX ;
else if( f3[ k*n*p + col*n + row ] >= 1.0f )
*out = FLT_MAX ;
else
*out = sqrtf( f4[ k*p + col ] ) * normcdfinvf( f3[ k*n*p + col*n + row ] ) ;
}
// Add log scale values and return on log scale
// Used to avoid under/over-flows
// returns log( a + b )
__device__ float logSum ( float a , float b )
{
if( a < b ) // exponents can't be allowed to get too large
{
float t = a ;
a = b ;
b = t ;
}
return a + logf( 1.0f + expf( b - a ) ) ;
}
// Subtract log scale values from one another
// returns log( a - b )
__device__ float logSubtract( float a , float b )
{
return a + logf( 1.0f - expf( b - a ) ) ;
}
// a mod b
__device__ size_t mod( size_t a , size_t b )
{
return a - b*(a/b) ;
}
__device__ float generate_unif_lcg( size_t *seed )
{
*seed = mod( 1103515245*(*seed) + 12345 , 2147483648 ) ;
return ((float) ((*seed)) + 10000) / 2147503648.0f ; // = 2147483648 + 20000 ; do not return 0 or 1
}
__device__ float truncate ( float x )
{
return copysignf( floorf( fabsf(x) ) , x ) ;
}
// Returns log( normal_cdf(x) )
__device__ float log_norm_cdf ( float x )
{
// Catch boundary cases
if( x >= 1e37 )
return 0.0f ;
if( x <= -1e37 )
return -1.0f/0.0f ;
// Use pre-written CUDA software when possible
if( x <= 5.657f && x >= -5.657f ) // sqrt(32)
return logf(normcdff(x)) ;
// else : I borrowed this from R's pnorm.c, references were not given for the algorithm content
float p[6] = {
0.21589853405795699f ,
0.1274011611602473639f ,
0.022235277870649807f ,
0.001421619193227893466f ,
2.9112874951168792e-5f ,
0.02307344176494017303f };
float q[5] = {
1.28426009614491121f ,
0.468238212480865118f ,
0.0659881378689285515f ,
0.00378239633202758244f ,
7.29751555083966205e-5f };
float xsq = 1/(x*x) ;
float xnum = p[5]*xsq ;
float xden = xsq ;
int i ;
for( i = 0 ; i < 4 ; i++ )
{
xnum = ( xnum + p[i] ) * xsq ;
xden = ( xden + q[i] ) * xsq ;
}
float tmp = xsq * ( xnum + p[4] ) / ( xden + q[4] ) ;
tmp = ( 0.3989422804014327 - tmp ) / fabsf(x) ;
// xsq = truncate( x * 16.0f ) / 16.0f ; // modified from original code
xsq = truncate( x * 64.0f ) / 64.0f ;
float del = (x - xsq) * (x + xsq) ;
if( x < 0.0f )
return ( -xsq * xsq * 0.5f ) + ( -del * 0.5f ) + logf(tmp) ;
return logf( 1.0f - expf( -xsq * xsq * 0.5f )*expf( -del * 0.5f ) * tmp ) ;
}
__global__ void f11_lpmnorm ( float *f10 , float *f101 , float *l , float *lpsi , float *out , int n , int p , int m , int K , size_t seed , int n_iter , float *w )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= n * K )
return ;
int k = i / n ;
int row = i - k*n ;
out += i ; // k-th vector, row-th row
w += m*i ;
size_t local_seed = seed + i ;
float lo, hi, mu , tmp , swp ;
float sqMin = sqrt(FLT_MIN) ;
*out = FLT_MIN ;
int j , kk ;
for( i = 0 ; i < n_iter ; i++ ) // MC-integral, E[ P( N in [a,b] | F ) ] = P( N in [a,b] )
{
for( kk = 0 ; kk < m ; kk++ )
w[kk] = normcdfinvf( generate_unif_lcg( &local_seed ) ) ; // generate a standard normal
tmp = 0.0f ;
for( j = 0 ; j < p ; j++ )
{
mu = 0.0f ;
for( kk = 0 ; kk < m ; kk++ )
mu += l[ k*p*m + kk*p + j ] * w[kk] ;
lo = (f10[ k*n*p + j*n + row ] - mu) * fmaxf( sqMin , expf( -lpsi[ k*p + j ] ) );
hi = (f101[ k*n*p + j*n + row ] - mu) * fmaxf( sqMin , expf( -lpsi[ k*p + j ] ) ) ;
if( hi < lo ) // possible via roundoffs
{
swp = lo ; // use as temp variable
lo = hi ;
hi = swp ;
}
if( 0.0f < lo ) // both positive likely results in an underflow
{
swp = lo ;
lo = -hi ;
hi = -swp ;
}
swp = logSubtract( log_norm_cdf(hi) , log_norm_cdf(lo) ) ;
if( swp != swp ) // round-off !
tmp += -FLT_MAX/1000.0f ;
else
tmp += swp ;
}
if( i == 1 )
*out = tmp ;
else
*out = logSum( *out , tmp ) ;
}
*out -= logf( (float) n_iter ) ;
}
__global__ void f12_sum_log_likes ( float *f11 , float *f9 , float *out , int n , int K )
{
int i = blockIdx.x * blockDim.x + threadIdx.x ;
if( i >= K )
return ;
out += i ; // i-th entry
*out = 0.0f ;
int j ;
for( j = 0 ; j < n ; j++ )
{
*out += f11[ i*n + j ] + f9[ i*n + j ] ;
}
}
|
b9bfb692496dd97d7751faf68b0ebdab59c6f3b8.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Tingxing Dong
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_d
#include "gemv_template_kernel_batched_hip.cuh"
#include "gemv_config/gemvn_param.h"
#include "gemv_config/gemvt_param.h"
#define version(s,v) s ## _V_ ## v
/***************************************************************************//**
Purpose
-------
DGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha DOUBLE PRECISION
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array A of DIMENSION ( ldda, n ) on the GPU
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta DOUBLE PRECISION
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemv_batched
*******************************************************************************/
extern "C" void
magmablas_dgemv_batched(
magma_trans_t trans, magma_int_t m, magma_int_t n,
double alpha,
magmaDouble_ptr dA_array[], magma_int_t ldda,
magmaDouble_ptr dx_array[], magma_int_t incx,
double beta,
magmaDouble_ptr dy_array[], magma_int_t incy,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( trans == MagmaNoTrans ) {
if (max(m, n) <= 96) { // small size
if (m < n) { // Fat or square matrix
if ( m <= 16)
{
gemvn_template_batched<double, version(N, 72)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 32)
{
gemvn_template_batched<double, version(N, 100)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 64)
{
gemvn_template_batched<double, version(N, 122)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
} else { // Tall or square matrix
if ( n <= 16)
{
gemvn_template_batched<double, version(N, 128)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( n <= 64)
{
gemvn_template_batched<double, version(N, 132)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
else { // big size
if (m < n) { // Fat matrix
if (m <= 8)
{
gemvn_template_batched<double, version(N, 79)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 16)
{
gemvn_template_batched<double, version(N, 70)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 32)
{
gemvn_template_batched<double, version(N, 104)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 32)
{
gemvn_template_batched<double, version(N, 124)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else { // (m > n) Tall matrix
if (m <= 256)
{
gemvn_template_batched<double, version(N, 137)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 140)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}// big size
}
else {
if (max(m, n) <= 96) { // small size
if (m <= 16)
{
gemvc_template_batched<double, version(T, 42)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<double, version(T, 47)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
} else { // big size
if (m <= n) { // Fat or square matrix
if (m <= 64)
{
gemvc_template_batched<double, version(T, 47)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<double, version(T, 91)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
} else { // (m > n) Tall matrix
if (n <= 64)
{
gemvc_template_batched<double, version(T, 90)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<double, version(T, 91)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
}
}
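/*
Illustrative call sketch (hypothetical sizes; not part of the MAGMA source). Assuming dA_array,
dx_array and dy_array already hold batchCount device pointers to column-major m x n matrices and
to the x / y vectors, a no-transpose batched GEMV is
magmablas_dgemv_batched( MagmaNoTrans, m, n,
alpha, dA_array, ldda,
dx_array, 1,
beta, dy_array, 1,
batchCount, queue );
which computes y_i := alpha * A_i * x_i + beta * y_i for i = 0, ..., batchCount-1.
*/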
| b9bfb692496dd97d7751faf68b0ebdab59c6f3b8.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Tingxing Dong
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_d
#include "gemv_template_kernel_batched.cuh"
#include "gemv_config/gemvn_param.h"
#include "gemv_config/gemvt_param.h"
#define version(s,v) s ## _V_ ## v
/***************************************************************************//**
Purpose
-------
DGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha DOUBLE PRECISION
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array A of DIMENSION ( ldda, n ) on the GPU
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta DOUBLE PRECISION
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemv_batched
*******************************************************************************/
extern "C" void
magmablas_dgemv_batched(
magma_trans_t trans, magma_int_t m, magma_int_t n,
double alpha,
magmaDouble_ptr dA_array[], magma_int_t ldda,
magmaDouble_ptr dx_array[], magma_int_t incx,
double beta,
magmaDouble_ptr dy_array[], magma_int_t incy,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( trans == MagmaNoTrans ) {
if (max(m, n) <= 96) { // small size
if (m < n) { // Fat or square matrix
if ( m <= 16)
{
gemvn_template_batched<double, version(N, 72)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 32)
{
gemvn_template_batched<double, version(N, 100)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 64)
{
gemvn_template_batched<double, version(N, 122)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
} else { // Tall or square matrix
if ( n <= 16)
{
gemvn_template_batched<double, version(N, 128)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( n <= 64)
{
gemvn_template_batched<double, version(N, 132)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
else { // big size
if (m < n) { // Fat matrix
if (m <= 8)
{
gemvn_template_batched<double, version(N, 79)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 16)
{
gemvn_template_batched<double, version(N, 70)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 32)
{
gemvn_template_batched<double, version(N, 104)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 32)
{
gemvn_template_batched<double, version(N, 124)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else { // (m > n) Tall matrix
if (m <= 256)
{
gemvn_template_batched<double, version(N, 137)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 140)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}// big size
}
else {
if (max(m, n) <= 96) { // small size
if (m <= 16)
{
gemvc_template_batched<double, version(T, 42)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<double, version(T, 47)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
} else { // big size
if (m <= n) { // Fat or square matrix
if (m <= 64)
{
gemvc_template_batched<double, version(T, 47)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<double, version(T, 91)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
} else { // (m > n) Tall matrix
if (n <= 64)
{
gemvc_template_batched<double, version(T, 90)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<double, version(T, 91)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
}
}
|
b23b8b0dce04309df8305d10296e29592b13ff8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THH.h>
#include <THHGeneral.h>
#define CUDA_NUM_THREADS 512
#define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
#ifdef __cplusplus
extern "C" {
#endif
__global__ void kernel_ChannelNorm_updateOutput(const int n, const float* input1, const long4 input1_size, const long4 input1_stride, float* output, const long4 output_size, const long4 output_stride, int norm_deg) {
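    // One thread per output element (b, y, x): computes the L2 norm of input1
    // across the channel dimension. Note that norm_deg is not used by this kernel.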
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = DIM0(output_size);
int dim_c = DIM1(output_size);
int dim_h = DIM2(output_size);
int dim_w = DIM3(output_size);
int dim_chw = dim_c * dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int i1dim_c = DIM1(input1_size);
int i1dim_h = DIM2(input1_size);
int i1dim_w = DIM3(input1_size);
int i1dim_chw = i1dim_c * i1dim_h * i1dim_w;
int i1dim_hw = i1dim_h * i1dim_w;
float result = 0.0;
for (int c = 0; c < i1dim_c; ++c) {
int i1Index = b * i1dim_chw + c * i1dim_hw + y * i1dim_w + x;
float val = input1[i1Index];
result += val * val;
}
result = sqrt(result);
output[index] = result;
}
__global__ void kernel_ChannelNorm_backward_input1(const int n, const float* input1, const long4 input1_size, const long4 input1_stride,
const float* output, const long4 output_size, const long4 output_stride, const float* gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
float* gradInput, const long4 gradInput_size, const long4 gradInput_stride, int norm_deg) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
float val = 0.0;
int dim_b = DIM0(gradInput_size);
int dim_c = DIM1(gradInput_size);
int dim_h = DIM2(gradInput_size);
int dim_w = DIM3(gradInput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int outIndex = b * dim_hw + y * dim_w + x;
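    // Gradient of the L2 norm per channel: d||x||/dx_c = x_c / ||x||;
    // the 1e-9 term guards against division by zero where the norm vanishes.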
val = gradOutput[outIndex] * input1[index] / (output[outIndex]+1e-9);
gradInput[index] = val;
}
void ChannelNorm_kernel_forward(THCState* state, THCudaTensor* input1, THCudaTensor* output, int norm_deg) {
int n = 0;
const long4 input1_size = make_long4(input1->size[0], input1->size[1], input1->size[2], input1->size[3]);
const long4 input1_stride = make_long4(input1->stride[0], input1->stride[1], input1->stride[2], input1->stride[3]);
const long4 output_size = make_long4(output->size[0], output->size[1], output->size[2], output->size[3]);
const long4 output_stride = make_long4(output->stride[0], output->stride[1], output->stride[2], output->stride[3]);
n = THCudaTensor_nElement(state, output);
hipLaunchKernelGGL(( kernel_ChannelNorm_updateOutput), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
n, THCudaTensor_data(state, input1), input1_size, input1_stride, THCudaTensor_data(state, output), output_size, output_stride,
norm_deg);
THCudaCheck(hipGetLastError());
}
void ChannelNorm_kernel_backward(THCState* state, THCudaTensor* input1, THCudaTensor* output, THCudaTensor* gradOutput, THCudaTensor* gradInput1, int norm_deg) {
int n = 0;
const long4 input1_size = make_long4(input1->size[0], input1->size[1], input1->size[2], input1->size[3]);
const long4 input1_stride = make_long4(input1->stride[0], input1->stride[1], input1->stride[2], input1->stride[3]);
const long4 output_size = make_long4(output->size[0], output->size[1], output->size[2], output->size[3]);
const long4 output_stride = make_long4(output->stride[0], output->stride[1], output->stride[2], output->stride[3]);
const long4 gradOutput_size = make_long4(gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]);
const long4 gradOutput_stride = make_long4(gradOutput->stride[0], gradOutput->stride[1], gradOutput->stride[2], gradOutput->stride[3]);
const long4 gradInput1_size = make_long4(gradInput1->size[0], gradInput1->size[1], gradInput1->size[2], gradInput1->size[3]);
const long4 gradInput1_stride = make_long4(gradInput1->stride[0], gradInput1->stride[1], gradInput1->stride[2], gradInput1->stride[3]);
n = THCudaTensor_nElement(state, gradInput1);
hipLaunchKernelGGL(( kernel_ChannelNorm_backward_input1), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
n, THCudaTensor_data(state, input1), input1_size, input1_stride, THCudaTensor_data(state, output), output_size, output_stride,
THCudaTensor_data(state, gradOutput), gradOutput_size, gradOutput_stride, THCudaTensor_data(state, gradInput1), gradInput1_size, gradInput1_stride,
norm_deg
);
THCudaCheck(hipGetLastError());
}
#ifdef __cplusplus
}
#endif | b23b8b0dce04309df8305d10296e29592b13ff8d.cu | #include <THC.h>
#include <THCGeneral.h>
#define CUDA_NUM_THREADS 512
#define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
#ifdef __cplusplus
extern "C" {
#endif
__global__ void kernel_ChannelNorm_updateOutput(const int n, const float* input1, const long4 input1_size, const long4 input1_stride, float* output, const long4 output_size, const long4 output_stride, int norm_deg) {
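    // One thread per output element (b, y, x): computes the L2 norm of input1
    // across the channel dimension. Note that norm_deg is not used by this kernel.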
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = DIM0(output_size);
int dim_c = DIM1(output_size);
int dim_h = DIM2(output_size);
int dim_w = DIM3(output_size);
int dim_chw = dim_c * dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int i1dim_c = DIM1(input1_size);
int i1dim_h = DIM2(input1_size);
int i1dim_w = DIM3(input1_size);
int i1dim_chw = i1dim_c * i1dim_h * i1dim_w;
int i1dim_hw = i1dim_h * i1dim_w;
float result = 0.0;
for (int c = 0; c < i1dim_c; ++c) {
int i1Index = b * i1dim_chw + c * i1dim_hw + y * i1dim_w + x;
float val = input1[i1Index];
result += val * val;
}
result = sqrt(result);
output[index] = result;
}
__global__ void kernel_ChannelNorm_backward_input1(const int n, const float* input1, const long4 input1_size, const long4 input1_stride,
const float* output, const long4 output_size, const long4 output_stride, const float* gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
float* gradInput, const long4 gradInput_size, const long4 gradInput_stride, int norm_deg) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
float val = 0.0;
int dim_b = DIM0(gradInput_size);
int dim_c = DIM1(gradInput_size);
int dim_h = DIM2(gradInput_size);
int dim_w = DIM3(gradInput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int outIndex = b * dim_hw + y * dim_w + x;
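    // Gradient of the L2 norm per channel: d||x||/dx_c = x_c / ||x||;
    // the 1e-9 term guards against division by zero where the norm vanishes.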
val = gradOutput[outIndex] * input1[index] / (output[outIndex]+1e-9);
gradInput[index] = val;
}
void ChannelNorm_kernel_forward(THCState* state, THCudaTensor* input1, THCudaTensor* output, int norm_deg) {
int n = 0;
const long4 input1_size = make_long4(input1->size[0], input1->size[1], input1->size[2], input1->size[3]);
const long4 input1_stride = make_long4(input1->stride[0], input1->stride[1], input1->stride[2], input1->stride[3]);
const long4 output_size = make_long4(output->size[0], output->size[1], output->size[2], output->size[3]);
const long4 output_stride = make_long4(output->stride[0], output->stride[1], output->stride[2], output->stride[3]);
n = THCudaTensor_nElement(state, output);
kernel_ChannelNorm_updateOutput<<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>(
n, THCudaTensor_data(state, input1), input1_size, input1_stride, THCudaTensor_data(state, output), output_size, output_stride,
norm_deg);
THCudaCheck(cudaGetLastError());
}
void ChannelNorm_kernel_backward(THCState* state, THCudaTensor* input1, THCudaTensor* output, THCudaTensor* gradOutput, THCudaTensor* gradInput1, int norm_deg) {
int n = 0;
const long4 input1_size = make_long4(input1->size[0], input1->size[1], input1->size[2], input1->size[3]);
const long4 input1_stride = make_long4(input1->stride[0], input1->stride[1], input1->stride[2], input1->stride[3]);
const long4 output_size = make_long4(output->size[0], output->size[1], output->size[2], output->size[3]);
const long4 output_stride = make_long4(output->stride[0], output->stride[1], output->stride[2], output->stride[3]);
const long4 gradOutput_size = make_long4(gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]);
const long4 gradOutput_stride = make_long4(gradOutput->stride[0], gradOutput->stride[1], gradOutput->stride[2], gradOutput->stride[3]);
const long4 gradInput1_size = make_long4(gradInput1->size[0], gradInput1->size[1], gradInput1->size[2], gradInput1->size[3]);
const long4 gradInput1_stride = make_long4(gradInput1->stride[0], gradInput1->stride[1], gradInput1->stride[2], gradInput1->stride[3]);
n = THCudaTensor_nElement(state, gradInput1);
kernel_ChannelNorm_backward_input1<<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>(
n, THCudaTensor_data(state, input1), input1_size, input1_stride, THCudaTensor_data(state, output), output_size, output_stride,
THCudaTensor_data(state, gradOutput), gradOutput_size, gradOutput_stride, THCudaTensor_data(state, gradInput1), gradInput1_size, gradInput1_stride,
norm_deg
);
THCudaCheck(cudaGetLastError());
}
#ifdef __cplusplus
}
#endif |
79f26531777c72002fc1e3199a94db29136d01ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "dataStructures/array.hpp"
#include "dataStructures/hd_data.hpp"
#include "dataStructures/helper/vector_helper.h"
#include "helper/apply_operation.h"
#include "helper/cuda/cuda_thread_manager.hpp"
#include "matrixOperations/basic_operations.hpp"
#include "sstream"
__device__ __host__ void call_error(AccessError error) {
switch (error) {
    case AccessDeviceOnHost:
        printf("Error, trying to access device array from the host");
        break;
    case AccessHostOnDevice:
        printf("Error, trying to access host array from the device");
        break;
}
}
template <typename C>
__host__ d_array<C>::d_array(int n, bool is_device)
: n(n), is_device(is_device) {
mem_alloc();
}
template <typename C>
__host__ d_array<C>::d_array(const d_array<C> &m, bool copyToOtherMem)
: d_array<C>(m.n, m.is_device ^ copyToOtherMem) {
hipMemcpyKind memCpy =
(m.is_device)
? (is_device) ? hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost
: (is_device) ? hipMemcpyHostToDevice : hipMemcpyHostToHost;
gpuErrchk(hipMemcpy(data, m.data, sizeof(C) * n, memCpy));
}
template <typename C>
__host__ d_array<C>::d_array(d_array<C> &&other) : d_array(0, other.is_device) {
*this = other;
}
template <typename C>
__host__ void d_array<C>::operator=(const d_array<C> &other) {
if (is_device != other.is_device)
if (is_device)
throw("You cannot move an array host array into a device array");
else
throw("You cannot move an array device array into a host array");
mem_free();
n = other.n;
n_dataholders = other.n_dataholders;
*n_dataholders += 1;
data = other.data;
if (is_device)
_device = other._device;
}
template <typename C> __host__ void d_array<C>::resize(int n) {
mem_free();
this->n = n;
mem_alloc();
}
template <typename C> __host__ d_array<C>::~d_array<C>() { mem_free(); }
template <typename C> __host__ void d_array<C>::mem_alloc() {
if (n > 0) {
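        // n_dataholders is a shared reference count: copies made via operator= point
        // at the same buffer, and mem_free() only releases it once the last holder is gone.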
n_dataholders = new int[1];
*n_dataholders = 1;
if (is_device) {
gpuErrchk(hipMalloc(&data, n * sizeof(T)));
gpuErrchk(hipMalloc(&_device, sizeof(d_array<C>)));
gpuErrchk(hipMemcpy(_device, this, sizeof(d_array<C>),
hipMemcpyHostToDevice));
} else {
data = new C[n];
}
}
}
template <typename C> __host__ void d_array<C>::mem_free() {
if (n > 0) {
*n_dataholders -= 1;
if (*n_dataholders == 0) {
if (is_device) {
gpuErrchk(hipFree(data));
gpuErrchk(hipFree(_device));
gpuErrchk(hipDeviceSynchronize());
} else {
delete[] data;
}
}
}
}
template <typename C>
__host__ __device__ void d_array<C>::print(int printCount) const {
#ifndef __CUDA_ARCH__
if (is_device) {
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( print_vectorK), dim3(1), dim3(1), 0, 0, *_device, printCount);
gpuErrchk(hipDeviceSynchronize());
} else
#else
if (!is_device)
call_error(AccessHostOnDevice);
else
#endif
print_vectorBody(*this, printCount);
}
template <typename C> __host__ __device__ C &d_array<C>::at(int i) {
#ifndef __CUDA_ARCH__
if (is_device)
call_error(AccessDeviceOnHost);
#else
if (!is_device)
call_error(AccessHostOnDevice);
#endif
return data[i];
}
template <typename C> __host__ __device__ int d_array<C>::size() { return n; }
template <typename C>
__host__ hipsparseDnVecDescr_t d_array<C>::make_descriptor() {
hipsparseDnVecDescr_t descr;
cusparseErrchk(hipsparseCreateDnVec(&descr, n, data, T_Cuda));
return descr;
}
template <typename C> __host__ void d_array<C>::fill(C value) {
auto setTo = [value] __device__(C & a) { a = value; };
apply_func(*this, setTo);
}
#define quote(x) #x
__host__ void d_vector::prune(T value) {
auto setTo = [value] __device__(T & a) {
if (a < value)
a = value;
};
apply_func(*this, setTo);
}
__host__ void d_vector::prune_under(T value) {
auto setTo = [value] __device__(T & a) {
if (a > value)
a = value;
};
apply_func(*this, setTo);
}
__host__ std::string d_vector::to_string() {
int printCount = 5;
std::stringstream strs;
strs << "[ ";
T *printBuffer = new T[printCount + 1];
hipMemcpy(printBuffer, data, sizeof(T) * printCount,
(is_device) ? hipMemcpyDeviceToHost : hipMemcpyHostToHost);
hipMemcpy(printBuffer + printCount, data + n - 1, sizeof(T),
(is_device) ? hipMemcpyDeviceToHost : hipMemcpyHostToHost);
for (int i = 0; i < (n - 1) && i < printCount; i++)
strs << printBuffer[i] << ", ";
if (printCount < n - 1)
strs << "... ";
strs << printBuffer[printCount] << "]";
delete[] printBuffer;
return strs.str();
}
| 79f26531777c72002fc1e3199a94db29136d01ca.cu | #include "dataStructures/array.hpp"
#include "dataStructures/hd_data.hpp"
#include "dataStructures/helper/vector_helper.h"
#include "helper/apply_operation.h"
#include "helper/cuda/cuda_thread_manager.hpp"
#include "matrixOperations/basic_operations.hpp"
#include "sstream"
__device__ __host__ void call_error(AccessError error) {
switch (error) {
    case AccessDeviceOnHost:
        printf("Error, trying to access device array from the host");
        break;
    case AccessHostOnDevice:
        printf("Error, trying to access host array from the device");
        break;
}
}
template <typename C>
__host__ d_array<C>::d_array(int n, bool is_device)
: n(n), is_device(is_device) {
mem_alloc();
}
template <typename C>
__host__ d_array<C>::d_array(const d_array<C> &m, bool copyToOtherMem)
: d_array<C>(m.n, m.is_device ^ copyToOtherMem) {
cudaMemcpyKind memCpy =
(m.is_device)
? (is_device) ? cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost
: (is_device) ? cudaMemcpyHostToDevice : cudaMemcpyHostToHost;
gpuErrchk(cudaMemcpy(data, m.data, sizeof(C) * n, memCpy));
}
template <typename C>
__host__ d_array<C>::d_array(d_array<C> &&other) : d_array(0, other.is_device) {
*this = other;
}
template <typename C>
__host__ void d_array<C>::operator=(const d_array<C> &other) {
if (is_device != other.is_device)
if (is_device)
throw("You cannot move an array host array into a device array");
else
throw("You cannot move an array device array into a host array");
mem_free();
n = other.n;
n_dataholders = other.n_dataholders;
*n_dataholders += 1;
data = other.data;
if (is_device)
_device = other._device;
}
template <typename C> __host__ void d_array<C>::resize(int n) {
mem_free();
this->n = n;
mem_alloc();
}
template <typename C> __host__ d_array<C>::~d_array<C>() { mem_free(); }
template <typename C> __host__ void d_array<C>::mem_alloc() {
if (n > 0) {
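        // n_dataholders is a shared reference count: copies made via operator= point
        // at the same buffer, and mem_free() only releases it once the last holder is gone.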
n_dataholders = new int[1];
*n_dataholders = 1;
if (is_device) {
gpuErrchk(cudaMalloc(&data, n * sizeof(T)));
gpuErrchk(cudaMalloc(&_device, sizeof(d_array<C>)));
gpuErrchk(cudaMemcpy(_device, this, sizeof(d_array<C>),
cudaMemcpyHostToDevice));
} else {
data = new C[n];
}
}
}
template <typename C> __host__ void d_array<C>::mem_free() {
if (n > 0) {
*n_dataholders -= 1;
if (*n_dataholders == 0) {
if (is_device) {
gpuErrchk(cudaFree(data));
gpuErrchk(cudaFree(_device));
gpuErrchk(cudaDeviceSynchronize());
} else {
delete[] data;
}
}
}
}
template <typename C>
__host__ __device__ void d_array<C>::print(int printCount) const {
#ifndef __CUDA_ARCH__
if (is_device) {
gpuErrchk(cudaDeviceSynchronize());
print_vectorK<<<1, 1>>>(*_device, printCount);
gpuErrchk(cudaDeviceSynchronize());
} else
#else
if (!is_device)
call_error(AccessHostOnDevice);
else
#endif
print_vectorBody(*this, printCount);
}
template <typename C> __host__ __device__ C &d_array<C>::at(int i) {
#ifndef __CUDA_ARCH__
if (is_device)
call_error(AccessDeviceOnHost);
#else
if (!is_device)
call_error(AccessHostOnDevice);
#endif
return data[i];
}
template <typename C> __host__ __device__ int d_array<C>::size() { return n; }
template <typename C>
__host__ cusparseDnVecDescr_t d_array<C>::make_descriptor() {
cusparseDnVecDescr_t descr;
cusparseErrchk(cusparseCreateDnVec(&descr, n, data, T_Cuda));
return descr;
}
template <typename C> __host__ void d_array<C>::fill(C value) {
auto setTo = [value] __device__(C & a) { a = value; };
apply_func(*this, setTo);
}
#define quote(x) #x
__host__ void d_vector::prune(T value) {
auto setTo = [value] __device__(T & a) {
if (a < value)
a = value;
};
apply_func(*this, setTo);
}
__host__ void d_vector::prune_under(T value) {
auto setTo = [value] __device__(T & a) {
if (a > value)
a = value;
};
apply_func(*this, setTo);
}
__host__ std::string d_vector::to_string() {
int printCount = 5;
std::stringstream strs;
strs << "[ ";
T *printBuffer = new T[printCount + 1];
cudaMemcpy(printBuffer, data, sizeof(T) * printCount,
(is_device) ? cudaMemcpyDeviceToHost : cudaMemcpyHostToHost);
cudaMemcpy(printBuffer + printCount, data + n - 1, sizeof(T),
(is_device) ? cudaMemcpyDeviceToHost : cudaMemcpyHostToHost);
for (int i = 0; i < (n - 1) && i < printCount; i++)
strs << printBuffer[i] << ", ";
if (printCount < n - 1)
strs << "... ";
strs << printBuffer[printCount] << "]";
delete[] printBuffer;
return strs.str();
}
|
4e9bc0fc9ebe37293c88d6a87266de34f5f1fe5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_mparticles.h"
#include <cstdio>
#include <cassert>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
// ----------------------------------------------------------------------
// cuda_mparticles_set_domain_info
void
cuda_mparticles_set_domain_info(struct cuda_mparticles *cuda_mprts,
const struct cuda_domain_info *info)
{
cuda_mprts->nr_patches = info->nr_patches;
for (int d = 0; d < 3; d++) {
cuda_mprts->mx[d] = info->mx[d];
cuda_mprts->b_mx[d] = info->mx[d] / info->bs[d];
cuda_mprts->dx[d] = info->dx[d];
cuda_mprts->b_dxi[d] = 1.f / (info->bs[d] * info->dx[d]);
}
cuda_mprts->nr_blocks_per_patch =
cuda_mprts->b_mx[0] * cuda_mprts->b_mx[1] * cuda_mprts->b_mx[2];
cuda_mprts->nr_blocks = info->nr_patches * cuda_mprts->nr_blocks_per_patch;
}
// ----------------------------------------------------------------------
// cuda_mparticles_alloc
void
cuda_mparticles_alloc(struct cuda_mparticles *cuda_mprts, unsigned int *nr_prts_by_patch)
{
hipError_t ierr;
cuda_mprts->nr_prts = 0;
for (int p = 0; p < cuda_mprts->nr_patches; p++) {
cuda_mprts->nr_prts += nr_prts_by_patch[p];
}
unsigned int nr_alloced = cuda_mprts->nr_prts * 1.4;
cuda_mprts->nr_alloced = nr_alloced;
ierr = hipMalloc((void **) &cuda_mprts->d_xi4, nr_alloced * sizeof(float4)); cudaCheck(ierr);
ierr = hipMalloc((void **) &cuda_mprts->d_pxi4, nr_alloced * sizeof(float4)); cudaCheck(ierr);
ierr = hipMalloc((void **) &cuda_mprts->d_alt_xi4, nr_alloced * sizeof(float4)); cudaCheck(ierr);
ierr = hipMalloc((void **) &cuda_mprts->d_alt_pxi4, nr_alloced * sizeof(float4)); cudaCheck(ierr);
ierr = hipMalloc((void **) &cuda_mprts->d_bidx, nr_alloced * sizeof(unsigned int)); cudaCheck(ierr);
ierr = hipMalloc((void **) &cuda_mprts->d_id, nr_alloced * sizeof(unsigned int)); cudaCheck(ierr);
ierr = hipMalloc((void **) &cuda_mprts->d_nr_prts_by_patch, cuda_mprts->nr_patches * sizeof(unsigned int)); cudaCheck(ierr);
ierr = hipMalloc((void **) &cuda_mprts->d_off, (cuda_mprts->nr_blocks + 1) * sizeof(unsigned int)); cudaCheck(ierr);
}
// ----------------------------------------------------------------------
// cuda_mparticles_free
void
cuda_mparticles_free(struct cuda_mparticles *cuda_mprts)
{
hipError_t ierr;
ierr = hipFree(cuda_mprts->d_xi4); cudaCheck(ierr);
ierr = hipFree(cuda_mprts->d_pxi4); cudaCheck(ierr);
ierr = hipFree(cuda_mprts->d_alt_xi4); cudaCheck(ierr);
ierr = hipFree(cuda_mprts->d_alt_pxi4); cudaCheck(ierr);
ierr = hipFree(cuda_mprts->d_bidx); cudaCheck(ierr);
ierr = hipFree(cuda_mprts->d_id); cudaCheck(ierr);
ierr = hipFree(cuda_mprts->d_nr_prts_by_patch); cudaCheck(ierr);
ierr = hipFree(cuda_mprts->d_off); cudaCheck(ierr);
}
// ----------------------------------------------------------------------
// cuda_mparticles_dump
void
cuda_mparticles_dump(struct cuda_mparticles *cuda_mprts)
{
int nr_prts = cuda_mprts->nr_prts;
thrust::device_ptr<float4> d_xi4(cuda_mprts->d_xi4);
thrust::device_ptr<float4> d_pxi4(cuda_mprts->d_pxi4);
thrust::device_ptr<unsigned int> d_bidx(cuda_mprts->d_bidx);
thrust::device_ptr<unsigned int> d_id(cuda_mprts->d_id);
thrust::device_ptr<unsigned int> d_off(cuda_mprts->d_off);
printf("cuda_mparticles_dump: nr_prts = %d\n", nr_prts);
for (int n = 0; n < nr_prts; n++) {
float4 xi4 = d_xi4[n], pxi4 = d_pxi4[n];
unsigned int bidx = d_bidx[n], id = d_id[n];
printf("cuda_mparticles_dump: [%d] %g %g %g // %g // %g %g %g // %g || bidx %d id %d\n",
n, xi4.x, xi4.y, xi4.z, xi4.w, pxi4.x, pxi4.y, pxi4.z, pxi4.w,
bidx, id);
}
for (int b = 0; b <= cuda_mprts->nr_blocks; b++) {
unsigned int off = d_off[b];
printf("cuda_mparticles_dump: off[%d] = %d\n", b, off);
}
}
// ----------------------------------------------------------------------
// cuda_mparticles_swap_alt
void
cuda_mparticles_swap_alt(struct cuda_mparticles *cuda_mprts)
{
float4 *tmp_xi4 = cuda_mprts->d_alt_xi4;
float4 *tmp_pxi4 = cuda_mprts->d_alt_pxi4;
cuda_mprts->d_alt_xi4 = cuda_mprts->d_xi4;
cuda_mprts->d_alt_pxi4 = cuda_mprts->d_pxi4;
cuda_mprts->d_xi4 = tmp_xi4;
cuda_mprts->d_pxi4 = tmp_pxi4;
}
// ----------------------------------------------------------------------
// cuda_params
struct cuda_params {
unsigned int b_mx[3];
float b_dxi[3];
};
static void
cuda_params_set(struct cuda_params *prm, const struct cuda_mparticles *cuda_mprts)
{
for (int d = 0; d < 3; d++) {
prm->b_mx[d] = cuda_mprts->b_mx[d];
prm->b_dxi[d] = cuda_mprts->b_dxi[d];
}
}
static void
cuda_params_free(struct cuda_params *prm)
{
}
// ----------------------------------------------------------------------
// cuda_mparticles_find_block_indices_ids_total
#define THREADS_PER_BLOCK 512
__global__ static void
mprts_find_block_indices_ids_total(struct cuda_params prm, float4 *d_xi4,
unsigned int *d_nr_prts_by_patch,
unsigned int *d_bidx, unsigned int *d_id,
int nr_patches)
{
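    // Thread n handles the n-th particle of every patch: compute its block index
    // from the y/z cell position (blocks are laid out per patch) and record the
    // particle's original index for the subsequent sort and reorder.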
int n = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
int nr_blocks = prm.b_mx[1] * prm.b_mx[2];
unsigned int off = 0;
for (int p = 0; p < nr_patches; p++) {
if (n < d_nr_prts_by_patch[p]) {
float4 xi4 = d_xi4[n + off];
unsigned int block_pos_y = __float2int_rd(xi4.y * prm.b_dxi[1]);
unsigned int block_pos_z = __float2int_rd(xi4.z * prm.b_dxi[2]);
int block_idx;
if (block_pos_y >= prm.b_mx[1] || block_pos_z >= prm.b_mx[2]) {
block_idx = -1; // not supposed to happen here!
} else {
block_idx = block_pos_z * prm.b_mx[1] + block_pos_y + p * nr_blocks;
}
d_bidx[n + off] = block_idx;
d_id[n + off] = n + off;
}
off += d_nr_prts_by_patch[p];
}
}
void
cuda_mparticles_find_block_indices_ids_total(struct cuda_mparticles *cuda_mprts,
unsigned int *nr_prts_by_patch)
{
hipError_t ierr;
if (cuda_mprts->nr_patches == 0) {
return;
}
int max_nr_prts = 0;
for (int p = 0; p < cuda_mprts->nr_patches; p++) {
if (nr_prts_by_patch[p] > max_nr_prts) {
max_nr_prts = nr_prts_by_patch[p];
}
}
ierr = hipMemcpy(cuda_mprts->d_nr_prts_by_patch, nr_prts_by_patch,
cuda_mprts->nr_patches * sizeof(unsigned int),
hipMemcpyHostToDevice); cudaCheck(ierr);
struct cuda_params prm;
cuda_params_set(&prm, cuda_mprts);
dim3 dimGrid((max_nr_prts + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK);
dim3 dimBlock(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( mprts_find_block_indices_ids_total), dim3(dimGrid), dim3(dimBlock), 0, 0, prm, cuda_mprts->d_xi4,
cuda_mprts->d_nr_prts_by_patch,
cuda_mprts->d_bidx,
cuda_mprts->d_id,
cuda_mprts->nr_patches);
cuda_sync_if_enabled();
cuda_params_free(&prm);
}
// ----------------------------------------------------------------------
// cuda_mparticles_reorder_and_offsets
__global__ static void
mprts_reorder_and_offsets(int nr_prts, float4 *xi4, float4 *pxi4, float4 *alt_xi4, float4 *alt_pxi4,
unsigned int *d_bidx, unsigned int *d_ids, unsigned int *d_off, int last_block)
{
int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
if (i > nr_prts)
return;
int block, prev_block;
if (i < nr_prts) {
alt_xi4[i] = xi4[d_ids[i]];
alt_pxi4[i] = pxi4[d_ids[i]];
block = d_bidx[i];
} else { // needed if there is no particle in the last block
block = last_block;
}
// OPT: d_bidx[i-1] could use shmem
// create offsets per block into particle array
prev_block = -1;
if (i > 0) {
prev_block = d_bidx[i-1];
}
for (int b = prev_block + 1; b <= block; b++) {
d_off[b] = i;
}
}
void
cuda_mparticles_reorder_and_offsets(struct cuda_mparticles *cuda_mprts)
{
if (cuda_mprts->nr_patches == 0) {
return;
}
dim3 dimGrid((cuda_mprts->nr_prts + 1 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK);
dim3 dimBlock(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( mprts_reorder_and_offsets), dim3(dimGrid), dim3(dimBlock), 0, 0, cuda_mprts->nr_prts, cuda_mprts->d_xi4, cuda_mprts->d_pxi4,
cuda_mprts->d_alt_xi4, cuda_mprts->d_alt_pxi4,
cuda_mprts->d_bidx, cuda_mprts->d_id,
cuda_mprts->d_off, cuda_mprts->nr_blocks);
cuda_sync_if_enabled();
cuda_mparticles_swap_alt(cuda_mprts);
}
| 4e9bc0fc9ebe37293c88d6a87266de34f5f1fe5e.cu |
#include "cuda_mparticles.h"
#include <cstdio>
#include <cassert>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
// ----------------------------------------------------------------------
// cuda_mparticles_set_domain_info
void
cuda_mparticles_set_domain_info(struct cuda_mparticles *cuda_mprts,
const struct cuda_domain_info *info)
{
cuda_mprts->nr_patches = info->nr_patches;
for (int d = 0; d < 3; d++) {
cuda_mprts->mx[d] = info->mx[d];
cuda_mprts->b_mx[d] = info->mx[d] / info->bs[d];
cuda_mprts->dx[d] = info->dx[d];
cuda_mprts->b_dxi[d] = 1.f / (info->bs[d] * info->dx[d]);
}
cuda_mprts->nr_blocks_per_patch =
cuda_mprts->b_mx[0] * cuda_mprts->b_mx[1] * cuda_mprts->b_mx[2];
cuda_mprts->nr_blocks = info->nr_patches * cuda_mprts->nr_blocks_per_patch;
}
// ----------------------------------------------------------------------
// cuda_mparticles_alloc
void
cuda_mparticles_alloc(struct cuda_mparticles *cuda_mprts, unsigned int *nr_prts_by_patch)
{
cudaError_t ierr;
cuda_mprts->nr_prts = 0;
for (int p = 0; p < cuda_mprts->nr_patches; p++) {
cuda_mprts->nr_prts += nr_prts_by_patch[p];
}
unsigned int nr_alloced = cuda_mprts->nr_prts * 1.4;
cuda_mprts->nr_alloced = nr_alloced;
ierr = cudaMalloc((void **) &cuda_mprts->d_xi4, nr_alloced * sizeof(float4)); cudaCheck(ierr);
ierr = cudaMalloc((void **) &cuda_mprts->d_pxi4, nr_alloced * sizeof(float4)); cudaCheck(ierr);
ierr = cudaMalloc((void **) &cuda_mprts->d_alt_xi4, nr_alloced * sizeof(float4)); cudaCheck(ierr);
ierr = cudaMalloc((void **) &cuda_mprts->d_alt_pxi4, nr_alloced * sizeof(float4)); cudaCheck(ierr);
ierr = cudaMalloc((void **) &cuda_mprts->d_bidx, nr_alloced * sizeof(unsigned int)); cudaCheck(ierr);
ierr = cudaMalloc((void **) &cuda_mprts->d_id, nr_alloced * sizeof(unsigned int)); cudaCheck(ierr);
ierr = cudaMalloc((void **) &cuda_mprts->d_nr_prts_by_patch, cuda_mprts->nr_patches * sizeof(unsigned int)); cudaCheck(ierr);
ierr = cudaMalloc((void **) &cuda_mprts->d_off, (cuda_mprts->nr_blocks + 1) * sizeof(unsigned int)); cudaCheck(ierr);
}
// ----------------------------------------------------------------------
// cuda_mparticles_free
void
cuda_mparticles_free(struct cuda_mparticles *cuda_mprts)
{
cudaError_t ierr;
ierr = cudaFree(cuda_mprts->d_xi4); cudaCheck(ierr);
ierr = cudaFree(cuda_mprts->d_pxi4); cudaCheck(ierr);
ierr = cudaFree(cuda_mprts->d_alt_xi4); cudaCheck(ierr);
ierr = cudaFree(cuda_mprts->d_alt_pxi4); cudaCheck(ierr);
ierr = cudaFree(cuda_mprts->d_bidx); cudaCheck(ierr);
ierr = cudaFree(cuda_mprts->d_id); cudaCheck(ierr);
ierr = cudaFree(cuda_mprts->d_nr_prts_by_patch); cudaCheck(ierr);
ierr = cudaFree(cuda_mprts->d_off); cudaCheck(ierr);
}
// ----------------------------------------------------------------------
// cuda_mparticles_dump
void
cuda_mparticles_dump(struct cuda_mparticles *cuda_mprts)
{
int nr_prts = cuda_mprts->nr_prts;
thrust::device_ptr<float4> d_xi4(cuda_mprts->d_xi4);
thrust::device_ptr<float4> d_pxi4(cuda_mprts->d_pxi4);
thrust::device_ptr<unsigned int> d_bidx(cuda_mprts->d_bidx);
thrust::device_ptr<unsigned int> d_id(cuda_mprts->d_id);
thrust::device_ptr<unsigned int> d_off(cuda_mprts->d_off);
printf("cuda_mparticles_dump: nr_prts = %d\n", nr_prts);
for (int n = 0; n < nr_prts; n++) {
float4 xi4 = d_xi4[n], pxi4 = d_pxi4[n];
unsigned int bidx = d_bidx[n], id = d_id[n];
printf("cuda_mparticles_dump: [%d] %g %g %g // %g // %g %g %g // %g || bidx %d id %d\n",
n, xi4.x, xi4.y, xi4.z, xi4.w, pxi4.x, pxi4.y, pxi4.z, pxi4.w,
bidx, id);
}
for (int b = 0; b <= cuda_mprts->nr_blocks; b++) {
unsigned int off = d_off[b];
printf("cuda_mparticles_dump: off[%d] = %d\n", b, off);
}
}
// ----------------------------------------------------------------------
// cuda_mparticles_swap_alt
void
cuda_mparticles_swap_alt(struct cuda_mparticles *cuda_mprts)
{
float4 *tmp_xi4 = cuda_mprts->d_alt_xi4;
float4 *tmp_pxi4 = cuda_mprts->d_alt_pxi4;
cuda_mprts->d_alt_xi4 = cuda_mprts->d_xi4;
cuda_mprts->d_alt_pxi4 = cuda_mprts->d_pxi4;
cuda_mprts->d_xi4 = tmp_xi4;
cuda_mprts->d_pxi4 = tmp_pxi4;
}
// ----------------------------------------------------------------------
// cuda_params
struct cuda_params {
unsigned int b_mx[3];
float b_dxi[3];
};
static void
cuda_params_set(struct cuda_params *prm, const struct cuda_mparticles *cuda_mprts)
{
for (int d = 0; d < 3; d++) {
prm->b_mx[d] = cuda_mprts->b_mx[d];
prm->b_dxi[d] = cuda_mprts->b_dxi[d];
}
}
static void
cuda_params_free(struct cuda_params *prm)
{
}
// ----------------------------------------------------------------------
// cuda_mparticles_find_block_indices_ids_total
#define THREADS_PER_BLOCK 512
__global__ static void
mprts_find_block_indices_ids_total(struct cuda_params prm, float4 *d_xi4,
unsigned int *d_nr_prts_by_patch,
unsigned int *d_bidx, unsigned int *d_id,
int nr_patches)
{
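    // Thread n handles the n-th particle of every patch: compute its block index
    // from the y/z cell position (blocks are laid out per patch) and record the
    // particle's original index for the subsequent sort and reorder.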
int n = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
int nr_blocks = prm.b_mx[1] * prm.b_mx[2];
unsigned int off = 0;
for (int p = 0; p < nr_patches; p++) {
if (n < d_nr_prts_by_patch[p]) {
float4 xi4 = d_xi4[n + off];
unsigned int block_pos_y = __float2int_rd(xi4.y * prm.b_dxi[1]);
unsigned int block_pos_z = __float2int_rd(xi4.z * prm.b_dxi[2]);
int block_idx;
if (block_pos_y >= prm.b_mx[1] || block_pos_z >= prm.b_mx[2]) {
block_idx = -1; // not supposed to happen here!
} else {
block_idx = block_pos_z * prm.b_mx[1] + block_pos_y + p * nr_blocks;
}
d_bidx[n + off] = block_idx;
d_id[n + off] = n + off;
}
off += d_nr_prts_by_patch[p];
}
}
void
cuda_mparticles_find_block_indices_ids_total(struct cuda_mparticles *cuda_mprts,
unsigned int *nr_prts_by_patch)
{
cudaError_t ierr;
if (cuda_mprts->nr_patches == 0) {
return;
}
int max_nr_prts = 0;
for (int p = 0; p < cuda_mprts->nr_patches; p++) {
if (nr_prts_by_patch[p] > max_nr_prts) {
max_nr_prts = nr_prts_by_patch[p];
}
}
ierr = cudaMemcpy(cuda_mprts->d_nr_prts_by_patch, nr_prts_by_patch,
cuda_mprts->nr_patches * sizeof(unsigned int),
cudaMemcpyHostToDevice); cudaCheck(ierr);
struct cuda_params prm;
cuda_params_set(&prm, cuda_mprts);
dim3 dimGrid((max_nr_prts + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK);
dim3 dimBlock(THREADS_PER_BLOCK);
mprts_find_block_indices_ids_total<<<dimGrid, dimBlock>>>(prm, cuda_mprts->d_xi4,
cuda_mprts->d_nr_prts_by_patch,
cuda_mprts->d_bidx,
cuda_mprts->d_id,
cuda_mprts->nr_patches);
cuda_sync_if_enabled();
cuda_params_free(&prm);
}
// ----------------------------------------------------------------------
// cuda_mparticles_reorder_and_offsets
__global__ static void
mprts_reorder_and_offsets(int nr_prts, float4 *xi4, float4 *pxi4, float4 *alt_xi4, float4 *alt_pxi4,
unsigned int *d_bidx, unsigned int *d_ids, unsigned int *d_off, int last_block)
{
int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
if (i > nr_prts)
return;
int block, prev_block;
if (i < nr_prts) {
alt_xi4[i] = xi4[d_ids[i]];
alt_pxi4[i] = pxi4[d_ids[i]];
block = d_bidx[i];
} else { // needed if there is no particle in the last block
block = last_block;
}
// OPT: d_bidx[i-1] could use shmem
// create offsets per block into particle array
prev_block = -1;
if (i > 0) {
prev_block = d_bidx[i-1];
}
for (int b = prev_block + 1; b <= block; b++) {
d_off[b] = i;
}
}
void
cuda_mparticles_reorder_and_offsets(struct cuda_mparticles *cuda_mprts)
{
if (cuda_mprts->nr_patches == 0) {
return;
}
dim3 dimGrid((cuda_mprts->nr_prts + 1 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK);
dim3 dimBlock(THREADS_PER_BLOCK);
mprts_reorder_and_offsets<<<dimGrid, dimBlock>>>(cuda_mprts->nr_prts, cuda_mprts->d_xi4, cuda_mprts->d_pxi4,
cuda_mprts->d_alt_xi4, cuda_mprts->d_alt_pxi4,
cuda_mprts->d_bidx, cuda_mprts->d_id,
cuda_mprts->d_off, cuda_mprts->nr_blocks);
cuda_sync_if_enabled();
cuda_mparticles_swap_alt(cuda_mprts);
}
|
8622d597c39b46598542eb8766b6acc6737739e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <hip/hip_vector_types.h>
#include <optix_device.h>
#include "optixWhitted.h"
#include "helpers.h"
extern "C" {
__constant__ Params params;
}
static __device__ __inline__ RadiancePRD getRadiancePRD()
{
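    // Unpack the per-ray radiance data from the five ray payload registers:
    // three floats of accumulated color, one float importance, one uint depth.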
RadiancePRD prd;
prd.result.x = int_as_float( optixGetPayload_0() );
prd.result.y = int_as_float( optixGetPayload_1() );
prd.result.z = int_as_float( optixGetPayload_2() );
prd.importance = int_as_float( optixGetPayload_3() );
prd.depth = optixGetPayload_4();
return prd;
}
static __device__ __inline__ void setRadiancePRD( const RadiancePRD &prd )
{
optixSetPayload_0( float_as_int(prd.result.x) );
optixSetPayload_1( float_as_int(prd.result.y) );
optixSetPayload_2( float_as_int(prd.result.z) );
optixSetPayload_3( float_as_int(prd.importance) );
optixSetPayload_4( prd.depth );
}
static __device__ __inline__ OcclusionPRD getOcclusionPRD()
{
OcclusionPRD prd;
prd.attenuation.x = int_as_float( optixGetPayload_0() );
prd.attenuation.y = int_as_float( optixGetPayload_1() );
prd.attenuation.z = int_as_float( optixGetPayload_2() );
return prd;
}
static __device__ __inline__ void setOcclusionPRD( const OcclusionPRD &prd )
{
optixSetPayload_0( float_as_int(prd.attenuation.x) );
optixSetPayload_1( float_as_int(prd.attenuation.y) );
optixSetPayload_2( float_as_int(prd.attenuation.z) );
}
static __device__ __inline__ float3
traceRadianceRay(
float3 origin,
float3 direction,
int depth,
float importance)
{
RadiancePRD prd;
prd.depth = depth;
prd.importance = importance;
optixTrace(
params.handle,
origin,
direction,
params.scene_epsilon,
1e16f,
0.0f,
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_RADIANCE,
RAY_TYPE_COUNT,
RAY_TYPE_RADIANCE,
float3_as_args(prd.result),
        /* Can't use float_as_int() because it returns an rvalue but the payload requires an lvalue */
reinterpret_cast<unsigned int&>(prd.importance),
reinterpret_cast<unsigned int&>(prd.depth) );
return prd.result;
}
static
__device__ void phongShadowed()
{
// this material is opaque, so it fully attenuates all shadow rays
OcclusionPRD prd;
prd.attenuation = make_float3(0.f);
setOcclusionPRD(prd);
}
static
__device__ void phongShade( float3 p_Kd,
float3 p_Ka,
float3 p_Ks,
float3 p_Kr,
float p_phong_exp,
float3 p_normal )
{
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_t = optixGetRayTmax();
RadiancePRD prd = getRadiancePRD();
float3 hit_point = ray_orig + ray_t * ray_dir;
// ambient contribution
float3 result = p_Ka * params.ambient_light_color;
// compute direct lighting
BasicLight light = params.light;
float Ldist = length(light.pos - hit_point);
float3 L = normalize(light.pos - hit_point);
float nDl = dot( p_normal, L);
// cast shadow ray
float3 light_attenuation = make_float3(static_cast<float>( nDl > 0.0f ));
if ( nDl > 0.0f )
{
OcclusionPRD shadow_prd;
shadow_prd.attenuation = make_float3(1.0f);
optixTrace(
params.handle,
hit_point,
L,
0.01f,
Ldist,
0.0f,
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_OCCLUSION,
RAY_TYPE_COUNT,
RAY_TYPE_OCCLUSION,
float3_as_args(shadow_prd.attenuation) );
light_attenuation = shadow_prd.attenuation;
}
// If not completely shadowed, light the hit point
if( fmaxf(light_attenuation) > 0.0f )
{
float3 Lc = light.color * light_attenuation;
result += p_Kd * nDl * Lc;
float3 H = normalize(L - ray_dir);
float nDh = dot( p_normal, H );
if(nDh > 0)
{
float power = pow(nDh, p_phong_exp);
result += p_Ks * power * Lc;
}
}
if( fmaxf( p_Kr ) > 0 )
{
// ray tree attenuation
float new_importance = prd.importance * luminance( p_Kr );
int new_depth = prd.depth + 1;
// reflection ray
// compare new_depth to max_depth - 1 to leave room for a potential shadow ray trace
if( new_importance >= 0.01f && new_depth <= params.max_depth - 1)
{
float3 R = reflect( ray_dir, p_normal );
result += p_Kr * traceRadianceRay(
hit_point,
R,
new_depth,
new_importance);
}
}
// pass the color back
prd.result = result;
setRadiancePRD(prd);
}
extern "C" __global__ void __closesthit__checker_radiance()
{
const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer();
const CheckerPhong &checker = sbt_data->shading.checker;
float3 Kd, Ka, Ks, Kr;
float phong_exp;
float2 texcoord = make_float2(
int_as_float( optixGetAttribute_3() ),
int_as_float( optixGetAttribute_4() ) );
float2 t = texcoord * checker.inv_checker_size;
t.x = floorf(t.x);
t.y = floorf(t.y);
int which_check = ( static_cast<int>( t.x ) +
static_cast<int>( t.y ) ) & 1;
if ( which_check )
{
Kd = checker.Kd1;
Ka = checker.Ka1;
Ks = checker.Ks1;
Kr = checker.Kr1;
phong_exp = checker.phong_exp1;
} else
{
Kd = checker.Kd2;
Ka = checker.Ka2;
Ks = checker.Ks2;
Kr = checker.Kr2;
phong_exp = checker.phong_exp2;
}
float3 object_normal = make_float3(
int_as_float( optixGetAttribute_0() ),
int_as_float( optixGetAttribute_1() ),
int_as_float( optixGetAttribute_2() ));
float3 world_normal = normalize( optixTransformNormalFromObjectToWorldSpace(object_normal) );
float3 ffnormal = faceforward( world_normal, -optixGetWorldRayDirection(), world_normal );
phongShade( Kd, Ka, Ks, Kr, phong_exp, ffnormal );
}
extern "C" __global__ void __closesthit__metal_radiance()
{
const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer();
const Phong &phong = sbt_data->shading.metal;
float3 object_normal = make_float3(
int_as_float( optixGetAttribute_0() ),
int_as_float( optixGetAttribute_1() ),
int_as_float( optixGetAttribute_2() ));
float3 world_normal = normalize( optixTransformNormalFromObjectToWorldSpace( object_normal ) );
float3 ffnormal = faceforward( world_normal, -optixGetWorldRayDirection(), world_normal );
phongShade( phong.Kd, phong.Ka, phong.Ks, phong.Kr, phong.phong_exp, ffnormal );
}
extern "C" __global__ void __closesthit__full_occlusion()
{
phongShadowed();
}
extern "C" __global__ void __closesthit__glass_radiance()
{
const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer();
const Glass &glass = sbt_data->shading.glass;
RadiancePRD prd_radiance = getRadiancePRD();
float3 object_normal = make_float3(
int_as_float( optixGetAttribute_0() ),
int_as_float( optixGetAttribute_1() ),
int_as_float( optixGetAttribute_2() ));
object_normal = normalize( object_normal );
// intersection vectors
const float3 n = normalize( optixTransformNormalFromObjectToWorldSpace( object_normal) ); // normal
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection(); // incident direction
const float ray_t = optixGetRayTmax();
float3 t; // transmission direction
float3 r; // reflection direction
float3 hit_point = ray_orig + ray_t * ray_dir;
SphereShellHitType hit_type = (SphereShellHitType) optixGetHitKind();
float3 front_hit_point = hit_point, back_hit_point = hit_point;
if (hit_type & HIT_OUTSIDE_FROM_OUTSIDE || hit_type & HIT_INSIDE_FROM_INSIDE)
{
front_hit_point += params.scene_epsilon * object_normal;
back_hit_point -= params.scene_epsilon * object_normal;
}
else
{
front_hit_point -= params.scene_epsilon * object_normal;
back_hit_point += params.scene_epsilon * object_normal;
}
const float3 fhp = optixTransformPointFromObjectToWorldSpace( front_hit_point );
const float3 bhp = optixTransformPointFromObjectToWorldSpace( back_hit_point );
float reflection = 1.0f;
float3 result = make_float3(0.0f);
const int depth = prd_radiance.depth;
float3 beer_attenuation;
if(dot(n, ray_dir) > 0)
{
// Beer's law attenuation
beer_attenuation = exp(glass.extinction_constant * ray_t);
} else
{
beer_attenuation = make_float3(1);
}
// refraction
// compare depth to max_depth - 1 to leave room for a potential shadow ray trace
if (depth < min(glass.refraction_maxdepth, params.max_depth - 1))
{
if ( refract(t, ray_dir, n, glass.refraction_index) )
{
// check for external or internal reflection
float cos_theta = dot(ray_dir, n);
if (cos_theta < 0.0f)
cos_theta = -cos_theta;
else
cos_theta = dot(t, n);
reflection = fresnel_schlick(
cos_theta,
glass.fresnel_exponent,
glass.fresnel_minimum,
glass.fresnel_maximum);
float importance =
prd_radiance.importance
* (1.0f-reflection)
* luminance( glass.refraction_color * beer_attenuation );
float3 color = glass.cutoff_color;
if ( importance > glass.importance_cutoff )
{
color = traceRadianceRay(bhp, t, depth+1, importance);
}
result += (1.0f - reflection) * glass.refraction_color * color;
}
// else TIR
} // else reflection==1 so refraction has 0 weight
// reflection
// compare depth to max_depth - 1 to leave room for a potential shadow ray trace
float3 color = glass.cutoff_color;
if (depth < min(glass.reflection_maxdepth, params.max_depth - 1))
{
r = reflect(ray_dir, n);
float importance =
prd_radiance.importance
* reflection
* luminance( glass.reflection_color * beer_attenuation );
if ( importance > glass.importance_cutoff )
{
color = traceRadianceRay( fhp, r, depth+1, importance );
}
}
result += reflection * glass.reflection_color * color;
result = result * beer_attenuation;
prd_radiance.result = result;
setRadiancePRD(prd_radiance);
}
extern "C" __global__ void __anyhit__glass_occlusion()
{
const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer();
const Glass &glass = sbt_data->shading.glass;
float3 object_normal = make_float3(
int_as_float( optixGetAttribute_0() ),
int_as_float( optixGetAttribute_1() ),
int_as_float( optixGetAttribute_2() ));
OcclusionPRD shadow_prd = getOcclusionPRD();
float3 world_normal = normalize( optixTransformNormalFromObjectToWorldSpace( object_normal ) );
float nDi = fabs(dot(world_normal, optixGetWorldRayDirection()));
shadow_prd.attenuation *= 1-fresnel_schlick(nDi, 5, 1-glass.shadow_attenuation, make_float3(1));
setOcclusionPRD(shadow_prd);
// Test the attenuation of the light from the glass shell
if(luminance(shadow_prd.attenuation) < glass.importance_cutoff)
        // The attenuation is so high (> 99% of the light is blocked) that we can consider occlusion testing done.
optixTerminateRay();
else
        // There is still some light coming through the glass shell, so we should keep testing for other occluders.
// We "ignore" the intersection with the glass shell, meaning that shadow testing will continue.
// If the ray does not hit another occluder, the light's attenuation from this glass shell
// (along with other glass shells) is then used.
optixIgnoreIntersection();
}
extern "C" __global__ void __miss__constant_bg()
{
const MissData* sbt_data = (MissData*) optixGetSbtDataPointer();
RadiancePRD prd = getRadiancePRD();
prd.result = sbt_data->bg_color;
setRadiancePRD(prd);
}
| 8622d597c39b46598542eb8766b6acc6737739e1.cu | //
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <vector_types.h>
#include <optix_device.h>
#include "optixWhitted.h"
#include "helpers.h"
extern "C" {
__constant__ Params params;
}
static __device__ __inline__ RadiancePRD getRadiancePRD()
{
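    // Unpack the per-ray radiance data from the five ray payload registers:
    // three floats of accumulated color, one float importance, one uint depth.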
RadiancePRD prd;
prd.result.x = int_as_float( optixGetPayload_0() );
prd.result.y = int_as_float( optixGetPayload_1() );
prd.result.z = int_as_float( optixGetPayload_2() );
prd.importance = int_as_float( optixGetPayload_3() );
prd.depth = optixGetPayload_4();
return prd;
}
static __device__ __inline__ void setRadiancePRD( const RadiancePRD &prd )
{
optixSetPayload_0( float_as_int(prd.result.x) );
optixSetPayload_1( float_as_int(prd.result.y) );
optixSetPayload_2( float_as_int(prd.result.z) );
optixSetPayload_3( float_as_int(prd.importance) );
optixSetPayload_4( prd.depth );
}
static __device__ __inline__ OcclusionPRD getOcclusionPRD()
{
OcclusionPRD prd;
prd.attenuation.x = int_as_float( optixGetPayload_0() );
prd.attenuation.y = int_as_float( optixGetPayload_1() );
prd.attenuation.z = int_as_float( optixGetPayload_2() );
return prd;
}
static __device__ __inline__ void setOcclusionPRD( const OcclusionPRD &prd )
{
optixSetPayload_0( float_as_int(prd.attenuation.x) );
optixSetPayload_1( float_as_int(prd.attenuation.y) );
optixSetPayload_2( float_as_int(prd.attenuation.z) );
}
static __device__ __inline__ float3
traceRadianceRay(
float3 origin,
float3 direction,
int depth,
float importance)
{
RadiancePRD prd;
prd.depth = depth;
prd.importance = importance;
optixTrace(
params.handle,
origin,
direction,
params.scene_epsilon,
1e16f,
0.0f,
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_RADIANCE,
RAY_TYPE_COUNT,
RAY_TYPE_RADIANCE,
float3_as_args(prd.result),
        /* Can't use float_as_int() because it returns an rvalue but the payload requires an lvalue */
reinterpret_cast<unsigned int&>(prd.importance),
reinterpret_cast<unsigned int&>(prd.depth) );
return prd.result;
}
static
__device__ void phongShadowed()
{
// this material is opaque, so it fully attenuates all shadow rays
OcclusionPRD prd;
prd.attenuation = make_float3(0.f);
setOcclusionPRD(prd);
}
static
__device__ void phongShade( float3 p_Kd,
float3 p_Ka,
float3 p_Ks,
float3 p_Kr,
float p_phong_exp,
float3 p_normal )
{
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_t = optixGetRayTmax();
RadiancePRD prd = getRadiancePRD();
float3 hit_point = ray_orig + ray_t * ray_dir;
// ambient contribution
float3 result = p_Ka * params.ambient_light_color;
// compute direct lighting
BasicLight light = params.light;
float Ldist = length(light.pos - hit_point);
float3 L = normalize(light.pos - hit_point);
float nDl = dot( p_normal, L);
// cast shadow ray
float3 light_attenuation = make_float3(static_cast<float>( nDl > 0.0f ));
if ( nDl > 0.0f )
{
OcclusionPRD shadow_prd;
shadow_prd.attenuation = make_float3(1.0f);
optixTrace(
params.handle,
hit_point,
L,
0.01f,
Ldist,
0.0f,
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_OCCLUSION,
RAY_TYPE_COUNT,
RAY_TYPE_OCCLUSION,
float3_as_args(shadow_prd.attenuation) );
light_attenuation = shadow_prd.attenuation;
}
// If not completely shadowed, light the hit point
if( fmaxf(light_attenuation) > 0.0f )
{
float3 Lc = light.color * light_attenuation;
result += p_Kd * nDl * Lc;
float3 H = normalize(L - ray_dir);
float nDh = dot( p_normal, H );
if(nDh > 0)
{
float power = pow(nDh, p_phong_exp);
result += p_Ks * power * Lc;
}
}
if( fmaxf( p_Kr ) > 0 )
{
// ray tree attenuation
float new_importance = prd.importance * luminance( p_Kr );
int new_depth = prd.depth + 1;
// reflection ray
// compare new_depth to max_depth - 1 to leave room for a potential shadow ray trace
if( new_importance >= 0.01f && new_depth <= params.max_depth - 1)
{
float3 R = reflect( ray_dir, p_normal );
result += p_Kr * traceRadianceRay(
hit_point,
R,
new_depth,
new_importance);
}
}
// pass the color back
prd.result = result;
setRadiancePRD(prd);
}
extern "C" __global__ void __closesthit__checker_radiance()
{
const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer();
const CheckerPhong &checker = sbt_data->shading.checker;
float3 Kd, Ka, Ks, Kr;
float phong_exp;
float2 texcoord = make_float2(
int_as_float( optixGetAttribute_3() ),
int_as_float( optixGetAttribute_4() ) );
float2 t = texcoord * checker.inv_checker_size;
t.x = floorf(t.x);
t.y = floorf(t.y);
int which_check = ( static_cast<int>( t.x ) +
static_cast<int>( t.y ) ) & 1;
if ( which_check )
{
Kd = checker.Kd1;
Ka = checker.Ka1;
Ks = checker.Ks1;
Kr = checker.Kr1;
phong_exp = checker.phong_exp1;
} else
{
Kd = checker.Kd2;
Ka = checker.Ka2;
Ks = checker.Ks2;
Kr = checker.Kr2;
phong_exp = checker.phong_exp2;
}
float3 object_normal = make_float3(
int_as_float( optixGetAttribute_0() ),
int_as_float( optixGetAttribute_1() ),
int_as_float( optixGetAttribute_2() ));
float3 world_normal = normalize( optixTransformNormalFromObjectToWorldSpace(object_normal) );
float3 ffnormal = faceforward( world_normal, -optixGetWorldRayDirection(), world_normal );
phongShade( Kd, Ka, Ks, Kr, phong_exp, ffnormal );
}
extern "C" __global__ void __closesthit__metal_radiance()
{
const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer();
const Phong &phong = sbt_data->shading.metal;
float3 object_normal = make_float3(
int_as_float( optixGetAttribute_0() ),
int_as_float( optixGetAttribute_1() ),
int_as_float( optixGetAttribute_2() ));
float3 world_normal = normalize( optixTransformNormalFromObjectToWorldSpace( object_normal ) );
float3 ffnormal = faceforward( world_normal, -optixGetWorldRayDirection(), world_normal );
phongShade( phong.Kd, phong.Ka, phong.Ks, phong.Kr, phong.phong_exp, ffnormal );
}
extern "C" __global__ void __closesthit__full_occlusion()
{
phongShadowed();
}
extern "C" __global__ void __closesthit__glass_radiance()
{
const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer();
const Glass &glass = sbt_data->shading.glass;
RadiancePRD prd_radiance = getRadiancePRD();
float3 object_normal = make_float3(
int_as_float( optixGetAttribute_0() ),
int_as_float( optixGetAttribute_1() ),
int_as_float( optixGetAttribute_2() ));
object_normal = normalize( object_normal );
// intersection vectors
const float3 n = normalize( optixTransformNormalFromObjectToWorldSpace( object_normal) ); // normal
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection(); // incident direction
const float ray_t = optixGetRayTmax();
float3 t; // transmission direction
float3 r; // reflection direction
float3 hit_point = ray_orig + ray_t * ray_dir;
SphereShellHitType hit_type = (SphereShellHitType) optixGetHitKind();
float3 front_hit_point = hit_point, back_hit_point = hit_point;
if (hit_type & HIT_OUTSIDE_FROM_OUTSIDE || hit_type & HIT_INSIDE_FROM_INSIDE)
{
front_hit_point += params.scene_epsilon * object_normal;
back_hit_point -= params.scene_epsilon * object_normal;
}
else
{
front_hit_point -= params.scene_epsilon * object_normal;
back_hit_point += params.scene_epsilon * object_normal;
}
const float3 fhp = optixTransformPointFromObjectToWorldSpace( front_hit_point );
const float3 bhp = optixTransformPointFromObjectToWorldSpace( back_hit_point );
float reflection = 1.0f;
float3 result = make_float3(0.0f);
const int depth = prd_radiance.depth;
float3 beer_attenuation;
if(dot(n, ray_dir) > 0)
{
// Beer's law attenuation
beer_attenuation = exp(glass.extinction_constant * ray_t);
} else
{
beer_attenuation = make_float3(1);
}
// refraction
// compare depth to max_depth - 1 to leave room for a potential shadow ray trace
if (depth < min(glass.refraction_maxdepth, params.max_depth - 1))
{
if ( refract(t, ray_dir, n, glass.refraction_index) )
{
// check for external or internal reflection
float cos_theta = dot(ray_dir, n);
if (cos_theta < 0.0f)
cos_theta = -cos_theta;
else
cos_theta = dot(t, n);
reflection = fresnel_schlick(
cos_theta,
glass.fresnel_exponent,
glass.fresnel_minimum,
glass.fresnel_maximum);
float importance =
prd_radiance.importance
* (1.0f-reflection)
* luminance( glass.refraction_color * beer_attenuation );
float3 color = glass.cutoff_color;
if ( importance > glass.importance_cutoff )
{
color = traceRadianceRay(bhp, t, depth+1, importance);
}
result += (1.0f - reflection) * glass.refraction_color * color;
}
// else: total internal reflection (TIR)
} // else reflection==1 so refraction has 0 weight
// reflection
// compare depth to max_depth - 1 to leave room for a potential shadow ray trace
float3 color = glass.cutoff_color;
if (depth < min(glass.reflection_maxdepth, params.max_depth - 1))
{
r = reflect(ray_dir, n);
float importance =
prd_radiance.importance
* reflection
* luminance( glass.reflection_color * beer_attenuation );
if ( importance > glass.importance_cutoff )
{
color = traceRadianceRay( fhp, r, depth+1, importance );
}
}
result += reflection * glass.reflection_color * color;
result = result * beer_attenuation;
prd_radiance.result = result;
setRadiancePRD(prd_radiance);
}
extern "C" __global__ void __anyhit__glass_occlusion()
{
const HitGroupData* sbt_data = (HitGroupData*) optixGetSbtDataPointer();
const Glass &glass = sbt_data->shading.glass;
float3 object_normal = make_float3(
int_as_float( optixGetAttribute_0() ),
int_as_float( optixGetAttribute_1() ),
int_as_float( optixGetAttribute_2() ));
OcclusionPRD shadow_prd = getOcclusionPRD();
float3 world_normal = normalize( optixTransformNormalFromObjectToWorldSpace( object_normal ) );
float nDi = fabs(dot(world_normal, optixGetWorldRayDirection()));
shadow_prd.attenuation *= 1-fresnel_schlick(nDi, 5, 1-glass.shadow_attenuation, make_float3(1));
setOcclusionPRD(shadow_prd);
// Test the attenuation of the light from the glass shell
if(luminance(shadow_prd.attenuation) < glass.importance_cutoff)
// The attenuation is so high, > 99% blocked, that we can consider testing to be done.
optixTerminateRay();
else
// There is still some light coming through the glass shell, so we should test other occluders.
// We "ignore" the intersection with the glass shell, meaning that shadow testing will continue.
// If the ray does not hit another occluder, the light's attenuation from this glass shell
// (along with other glass shells) is then used.
optixIgnoreIntersection();
}
extern "C" __global__ void __miss__constant_bg()
{
const MissData* sbt_data = (MissData*) optixGetSbtDataPointer();
RadiancePRD prd = getRadiancePRD();
prd.result = sbt_data->bg_color;
setRadiancePRD(prd);
}
|
881fbfa8ff84c8332294256eac72a7b97bc67e1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/softmax_op.h"
namespace caffe2 {
#define SOFTMAX_NUM_THREADS 128
namespace {
// The softmax kernel. This kernel has to be called with the number of threads
// per block being no more than SOFTMAX_NUM_THREADS.
__global__ void softmax_kernel(
const int dim, const float* data, float* out) {
// For the softmax kernel, each block is a data example.
data += blockIdx.x * dim;
out += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
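// Numerically stable softmax in three passes over the row: (1) reduce to the row max,
// (2) accumulate the sum of exp(x - max) while writing the exponentials to out,
// (3) divide by the sum. Each pass reduces through the shared reduction_buffer.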
// A two-level reduction to get the max.
tmp = -FLT_MAX;
for (int i = idx; i < dim; i += blockDim.x) {
tmp = fmaxf(data[i], tmp);
}
reduction_buffer[idx] = tmp;
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i) {
tmp = fmaxf(reduction_buffer[i], tmp);
}
reduction_buffer[0] = tmp;
}
__syncthreads();
// compute sum with a two-level reduction.
float maxval = reduction_buffer[0];
reduction_buffer[idx] = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp = __expf(data[i] - maxval);
reduction_buffer[idx] += tmp;
out[i] = tmp;
}
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i) {
tmp += reduction_buffer[i];
}
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute the softmax.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
out[i] /= tmp;
}
}
// The softmax gradient kernel. This kernel has to be called with the number of
// threads per block being no more than SOFTMAX_NUM_THREADS.
__global__ void softmax_gradient_kernel(
const int dim, const float* Y, const float* dY, float* dX) {
Y += blockIdx.x * dim;
dY += blockIdx.x * dim;
dX += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
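// Softmax backward: dX_i = Y_i * (dY_i - sum_j dY_j * Y_j). The reduction below computes
// the per-row inner product sum_j dY_j * Y_j.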
// A two-level reduction to compute the inner products.
tmp = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp += dY[i] * Y[i];
}
reduction_buffer[idx] = tmp;
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i) tmp += reduction_buffer[i];
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute gradient.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
dX[i] = Y[i] * (dY[i] - tmp);
}
}
} // namespace
// Implementation for the CUDA context.
template <>
bool SoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
DCHECK_EQ(X.ndim(), 2);
int N = X.dim32(0);
int D = X.dim32(1);
Y->ResizeLike(X);
hipLaunchKernelGGL(( softmax_kernel), dim3(N), dim3(SOFTMAX_NUM_THREADS), 0, context_.cuda_stream(),
D, X.data<float>(), Y->mutable_data<float>());
return true;
}
// Implementation for the CUDA context.
template <>
bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
DCHECK_EQ(Y.ndim(), 2);
int N = Y.dim32(0);
int D = Y.dim32(1);
DCHECK_EQ(dY.dim32(0), N);
DCHECK_EQ(dY.dim32(1), D);
dX->ResizeLike(Y);
hipLaunchKernelGGL(( softmax_gradient_kernel), dim3(N), dim3(SOFTMAX_NUM_THREADS), 0,
context_.cuda_stream(),
D, Y.data<float>(), dY.data<float>(), dX->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
| 881fbfa8ff84c8332294256eac72a7b97bc67e1a.cu | #include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/softmax_op.h"
namespace caffe2 {
#define SOFTMAX_NUM_THREADS 128
namespace {
// The softmax kernel. This kernel has to be called with the number of threads
// per block being no more than SOFTMAX_NUM_THREADS.
__global__ void softmax_kernel(
const int dim, const float* data, float* out) {
// For the softmax kernel, each block is a data example.
data += blockIdx.x * dim;
out += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
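// Numerically stable softmax in three passes over the row: (1) reduce to the row max,
// (2) accumulate the sum of exp(x - max) while writing the exponentials to out,
// (3) divide by the sum. Each pass reduces through the shared reduction_buffer.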
// A two-level reduction to get the max.
tmp = -FLT_MAX;
for (int i = idx; i < dim; i += blockDim.x) {
tmp = fmaxf(data[i], tmp);
}
reduction_buffer[idx] = tmp;
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i) {
tmp = fmaxf(reduction_buffer[i], tmp);
}
reduction_buffer[0] = tmp;
}
__syncthreads();
// compute sum with a two-level reduction.
float maxval = reduction_buffer[0];
reduction_buffer[idx] = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp = __expf(data[i] - maxval);
reduction_buffer[idx] += tmp;
out[i] = tmp;
}
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i) {
tmp += reduction_buffer[i];
}
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute the softmax.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
out[i] /= tmp;
}
}
// The softmax gradient kernel. This kernel has to be called with the number of
// threads per block being no more than SOFTMAX_NUM_THREADS.
__global__ void softmax_gradient_kernel(
const int dim, const float* Y, const float* dY, float* dX) {
Y += blockIdx.x * dim;
dY += blockIdx.x * dim;
dX += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
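// Softmax backward: dX_i = Y_i * (dY_i - sum_j dY_j * Y_j). The reduction below computes
// the per-row inner product sum_j dY_j * Y_j.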
// A two-level reduction to compute the inner products.
tmp = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp += dY[i] * Y[i];
}
reduction_buffer[idx] = tmp;
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i) tmp += reduction_buffer[i];
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute gradient.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
dX[i] = Y[i] * (dY[i] - tmp);
}
}
} // namespace
// Implementation for the CUDA context.
template <>
bool SoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
DCHECK_EQ(X.ndim(), 2);
int N = X.dim32(0);
int D = X.dim32(1);
Y->ResizeLike(X);
softmax_kernel<<<N, SOFTMAX_NUM_THREADS, 0, context_.cuda_stream()>>>(
D, X.data<float>(), Y->mutable_data<float>());
return true;
}
// Implementation for the CUDA context.
template <>
bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
DCHECK_EQ(Y.ndim(), 2);
int N = Y.dim32(0);
int D = Y.dim32(1);
DCHECK_EQ(dY.dim32(0), N);
DCHECK_EQ(dY.dim32(1), D);
dX->ResizeLike(Y);
softmax_gradient_kernel<<<N, SOFTMAX_NUM_THREADS, 0,
context_.cuda_stream()>>>(
D, Y.data<float>(), dY.data<float>(), dX->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
|
b69fc7a73e7affb32450e84427f14814d4f0559c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THH/THH.h>
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Use 1024 threads per block, which requires cuda sm_2x or above
const int CUDA_NUM_THREADS = 1024;
// CUDA: number of blocks for threads.
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
// kernels borrowed from Caffe
__global__ void MaxPoolForward(const int nthreads, const float* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, float* top_data,
int* mask, float* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
float maxval = -FLT_MAX;
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_data[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_data[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
extern "C"
void SpatialMaxPoolingCaffe_updateOutput(THCState* state, THCudaTensor* input,
THCudaTensor* output, THCudaTensor* indices, int kW, int kH, int dW, int dH, bool train)
{
long nInputCols, nInputRows, nInputPlane, batchSize;
if (input->nDimension == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
long nOutputCols = ceil(float(nInputCols - kW) / float(dW)) + 1;
long nOutputRows = ceil(float(nInputRows - kH) / float(dH)) + 1;
int pW = 0, pH = 0; //TODO
input = THCudaTensor_newContiguous(state, input);
float* input_data = THCudaTensor_data(state, input);
THCudaTensor_resize4d(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_resizeAs(state, indices, output);
float* indices_data = THCudaTensor_data(state, indices);
float* output_data = THCudaTensor_data(state, output);
int count = THCudaTensor_nElement(state, output);
hipLaunchKernelGGL(( MaxPoolForward) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS) , 0, 0, count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, pH, pW, output_data, NULL, indices_data);
if(input->nDimension == 3)
THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols);
// clean
THCudaTensor_free(state, input);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in SpatialMaxPoolingCaffe_updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
}
__global__ void MaxPoolBackward(const int nthreads, const float* top_diff,
const int* mask, const float* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
float* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat input index into its local (n, c, h, w) offsets.
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
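// Every pooled output whose window covers (h, w) may have selected this element as its
// max; scan that range of windows and accumulate the matching gradients.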
int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
int phend = min((h + pad_h) / stride_h + 1, pooled_height);
int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
float gradient = 0;
int offset = (n * channels + c) * pooled_height * pooled_width;
top_diff += offset;
if (mask) {
mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
} else {
top_mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
extern "C"
void SpatialMaxPoolingCaffe_updateGradInput(THCState* state, THCudaTensor* input,
THCudaTensor* gradInput, THCudaTensor* gradOutput, THCudaTensor* indices, int kW, int kH, int dW, int dH)
{
long nInputCols, nInputRows, nInputPlane, batchSize;
if (input->nDimension == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
long nOutputCols = ceil(float(nInputCols - kW) / float(dW)) + 1;
long nOutputRows = ceil(float(nInputRows - kH) / float(dH)) + 1;
int pW = 0, pH = 0; //TODO
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
float* gradOutput_data = THCudaTensor_data(state, gradOutput);
THCudaTensor_resizeAs(state, gradInput, input);
int count = THCudaTensor_nElement(state, input);
hipLaunchKernelGGL(( MaxPoolBackward) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS) , 0, 0, count,
gradOutput_data,
NULL, THCudaTensor_data(state, indices),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, pH, pW,
THCudaTensor_data(state, gradInput));
// clean
THCudaTensor_free(state, gradOutput);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in SpatialMaxPoolingCaffe_updateGradInput: %s\n", hipGetErrorString(err));
THError("aborting");
}
}
| b69fc7a73e7affb32450e84427f14814d4f0559c.cu | #include <THC/THC.h>
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Use 1024 threads per block, which requires cuda sm_2x or above
const int CUDA_NUM_THREADS = 1024;
// CUDA: number of blocks for threads.
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
// kernels borrowed from Caffe
__global__ void MaxPoolForward(const int nthreads, const float* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, float* top_data,
int* mask, float* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
float maxval = -FLT_MAX;
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_data[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_data[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
extern "C"
void SpatialMaxPoolingCaffe_updateOutput(THCState* state, THCudaTensor* input,
THCudaTensor* output, THCudaTensor* indices, int kW, int kH, int dW, int dH, bool train)
{
long nInputCols, nInputRows, nInputPlane, batchSize;
if (input->nDimension == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
long nOutputCols = ceil(float(nInputCols - kW) / float(dW)) + 1;
long nOutputRows = ceil(float(nInputRows - kH) / float(dH)) + 1;
int pW = 0, pH = 0; //TODO
input = THCudaTensor_newContiguous(state, input);
float* input_data = THCudaTensor_data(state, input);
THCudaTensor_resize4d(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_resizeAs(state, indices, output);
float* indices_data = THCudaTensor_data(state, indices);
float* output_data = THCudaTensor_data(state, output);
int count = THCudaTensor_nElement(state, output);
MaxPoolForward <<< GET_BLOCKS(count), CUDA_NUM_THREADS >>> (count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, pH, pW, output_data, NULL, indices_data);
if(input->nDimension == 3)
THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols);
// clean
THCudaTensor_free(state, input);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SpatialMaxPoolingCaffe_updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
}
__global__ void MaxPoolBackward(const int nthreads, const float* top_diff,
const int* mask, const float* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
float* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat input index into its local (n, c, h, w) offsets.
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
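// Every pooled output whose window covers (h, w) may have selected this element as its
// max; scan that range of windows and accumulate the matching gradients.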
int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
int phend = min((h + pad_h) / stride_h + 1, pooled_height);
int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
float gradient = 0;
int offset = (n * channels + c) * pooled_height * pooled_width;
top_diff += offset;
if (mask) {
mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
} else {
top_mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
extern "C"
void SpatialMaxPoolingCaffe_updateGradInput(THCState* state, THCudaTensor* input,
THCudaTensor* gradInput, THCudaTensor* gradOutput, THCudaTensor* indices, int kW, int kH, int dW, int dH)
{
long nInputCols, nInputRows, nInputPlane, batchSize;
if (input->nDimension == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
long nOutputCols = ceil(float(nInputCols - kW) / float(dW)) + 1;
long nOutputRows = ceil(float(nInputRows - kH) / float(dH)) + 1;
int pW = 0, pH = 0; //TODO
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
float* gradOutput_data = THCudaTensor_data(state, gradOutput);
THCudaTensor_resizeAs(state, gradInput, input);
int count = THCudaTensor_nElement(state, input);
MaxPoolBackward <<< GET_BLOCKS(count), CUDA_NUM_THREADS >>> (count,
gradOutput_data,
NULL, THCudaTensor_data(state, indices),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, pH, pW,
THCudaTensor_data(state, gradInput));
// clean
THCudaTensor_free(state, gradOutput);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SpatialMaxPoolingCaffe_updateGradInput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
}
|
7e20ef1d17b7c1db4f31b3b927487fc75d827843.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
/* added helper: integer ceiling division, standing in for THCCeilDiv in the grid-size computation below */
int ceil_div_2(int a, int b){
return (a + b - 1) / b;
}
template <typename T>
__global__ void RoIPoolFForward(const int nthreads, const T* bottom_data,
const T spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const T* bottom_rois, T* top_data, int* argmax_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
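// Split the ROI into a pooled_height x pooled_width grid of bins; this thread max-pools
// over the single bin addressed by (ph, pw).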
T bin_size_h = static_cast<T>(roi_height)
/ static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width)
/ static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (offset_bottom_data[bottom_index] > maxval) {
maxval = offset_bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename T>
__global__ void RoIPoolFBackward(const int nthreads, const T* top_diff,
const int* argmax_data, const int num_rois, const T spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int bottom_offset = (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
T* offset_bottom_diff = bottom_diff + bottom_offset;
const int* offset_argmax_data = argmax_data + top_offset;
int argmax = offset_argmax_data[ph * pooled_width + pw];
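// Route the upstream gradient to the input element that won the max for this bin;
// atomicAdd is required because bins from overlapping ROIs can map to the same input cell.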
if (argmax != -1) {
atomicAdd(
offset_bottom_diff + argmax,
static_cast<T>(offset_top_diff[ph * pooled_width + pw]));
}
}
}
std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
auto argmax = at::zeros({num_rois, channels, pooled_height, pooled_width}, input.options().dtype(at::kInt));
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(ceil_div_2((int)output_size, 512), 4096));
//dim3 grid(::min(THCCeilDiv(output_size, 512L), 4096L));
dim3 block(512);
if (output.numel() == 0) {
THCudaCheck(hipGetLastError());
return std::make_tuple(output, argmax);
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIPool_forward", [&] {
hipLaunchKernelGGL(( RoIPoolFForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>(),
argmax.data<int>());
});
THCudaCheck(hipGetLastError());
return std::make_tuple(output, argmax);
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor ROIPool_backward_cuda(const at::Tensor& grad,
const at::Tensor& input,
const at::Tensor& rois,
const at::Tensor& argmax,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
// TODO add more checks
auto num_rois = rois.size(0);
auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(ceil_div_2((int)grad.numel(), 512), 4096));
//dim3 grid(::min(THCCeilDiv(grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(hipGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIPool_backward", [&] {
hipLaunchKernelGGL(( RoIPoolFBackward<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.contiguous().data<scalar_t>(),
argmax.data<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return grad_input;
}
| 7e20ef1d17b7c1db4f31b3b927487fc75d827843.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
/* added function */
int ceil_div_2(int a, int b){
return (a + b - 1) / b;
}
template <typename T>
__global__ void RoIPoolFForward(const int nthreads, const T* bottom_data,
const T spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const T* bottom_rois, T* top_data, int* argmax_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
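// Split the ROI into a pooled_height x pooled_width grid of bins; this thread max-pools
// over the single bin addressed by (ph, pw).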
T bin_size_h = static_cast<T>(roi_height)
/ static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width)
/ static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (offset_bottom_data[bottom_index] > maxval) {
maxval = offset_bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename T>
__global__ void RoIPoolFBackward(const int nthreads, const T* top_diff,
const int* argmax_data, const int num_rois, const T spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int bottom_offset = (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
T* offset_bottom_diff = bottom_diff + bottom_offset;
const int* offset_argmax_data = argmax_data + top_offset;
int argmax = offset_argmax_data[ph * pooled_width + pw];
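// Route the upstream gradient to the input element that won the max for this bin;
// atomicAdd is required because bins from overlapping ROIs can map to the same input cell.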
if (argmax != -1) {
atomicAdd(
offset_bottom_diff + argmax,
static_cast<T>(offset_top_diff[ph * pooled_width + pw]));
}
}
}
std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
auto argmax = at::zeros({num_rois, channels, pooled_height, pooled_width}, input.options().dtype(at::kInt));
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(ceil_div_2((int)output_size, 512), 4096));
//dim3 grid(std::min(THCCeilDiv(output_size, 512L), 4096L));
dim3 block(512);
if (output.numel() == 0) {
THCudaCheck(cudaGetLastError());
return std::make_tuple(output, argmax);
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIPool_forward", [&] {
RoIPoolFForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>(),
argmax.data<int>());
});
THCudaCheck(cudaGetLastError());
return std::make_tuple(output, argmax);
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor ROIPool_backward_cuda(const at::Tensor& grad,
const at::Tensor& input,
const at::Tensor& rois,
const at::Tensor& argmax,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
// TODO add more checks
auto num_rois = rois.size(0);
auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(ceil_div_2((int)grad.numel(), 512), 4096));
//dim3 grid(std::min(THCCeilDiv(grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIPool_backward", [&] {
RoIPoolFBackward<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data<scalar_t>(),
argmax.data<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return grad_input;
}
|
387afafacf9db726e419a23b53fa1babc3a3edca.hip | // !!! This is a file automatically generated by hipify!!!
#include "config.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
//#define DEBUG_GPU
#ifdef DEBUG_GPU
#define __global__
#define __device__
#define __constant__
#define __inline__ static
#endif
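// xLog2 converts x to float and extracts the exponent bits of the IEEE-754 representation;
// for the power-of-two block sizes used here (4..64) the result is exactly log2(x), with no loop.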
__inline__ static int xLog2(UInt32 x)
{
UInt32 r;
union {
float f;
unsigned i;
} tmp;
tmp.f = (float)x;
r = (tmp.i >> 23);
r &= 7;
r += 1;
return r;
}
__inline__ __device__ UInt32 xLog2_new( UInt32 x )
{
#if 0//defined(DEBUG_GPU)
UInt32 r;
union {
float f;
unsigned i;
} tmp;
tmp.f = x;
r = (tmp.i >> 23);
r &= 7;
r += 1;
return r;
#else
return __log2f( x );
#endif
}
__inline__ __device__ UInt32 Clip3( Int32 minVal, Int32 maxVal, Int32 a )
{
if ( a < minVal )
a = minVal;
if ( a > maxVal )
a = maxVal;
return a;
}
#define Clip(x) Clip3( 0, 255, (x))
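// ucModeIdx appears to list, per block size (4/8/16/32) and intra prediction mode, the
// indices of the reference samples consumed by that mode; the per-mode Info comments give
// each mode's projected range and sample count.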
__constant__ __device__ __declspec(align(4)) static CUInt8 ucModeIdx[4][NUM_INTRA_MODE][2*MAX_CU_SIZE] = {
// 4x4
{
// Mode 0
{
0,
},
// Mode 1
{
0x07, 0x06, 0x05, 0x04,
0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 2 Info:( 2-> 5), [ 2, 8], size= 7
{
0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
},
// Mode 3 Info:( 1-> 4), [ 1, 8], size= 8
{
0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
},
// Mode 4 Info:( 1-> 3), [ 1, 7], size= 7
{
0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01,
},
// Mode 5 Info:( 1-> 3), [ 1, 7], size= 7
{
0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01,
},
// Mode 6 Info:( 1-> 2), [ 1, 6], size= 6
{
0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
},
// Mode 7 Info:( 1-> 2), [ 1, 6], size= 6
{
0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
},
// Mode 8 Info:( 1-> 1), [ 1, 5], size= 5
{
0x07, 0x06, 0x05, 0x04, 0x03,
},
// Mode 9 Info:( 1-> 1), [ 1, 5], size= 5
{
0x07, 0x06, 0x05, 0x04, 0x03,
},
// Mode 10 Info:( 1-> 1), [ 1, 4], size= 4
{
0x07, 0x06, 0x05, 0x04,
},
// Mode 11 Info:( 0-> 0), [ 0, 4], size= 5
{
0x00, 0x00, 0x00, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 12 Info:( 0-> 0), [ 0, 4], size= 5
{
0x00, 0x00, 0x00, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 13 Info:( 0-> -1), [ -1, 4], size= 6
{
0x00, 0x00, 0x0C, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 14 Info:( 0-> -1), [ -1, 4], size= 6
{
0x00, 0x00, 0x0A, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 15 Info:( 0-> -2), [ -2, 4], size= 7
{
0x00, 0x0C, 0x0A, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 16 Info:( 0-> -2), [ -2, 4], size= 7
{
0x00, 0x0B, 0x0A, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 17 Info:( 0-> -3), [ -3, 4], size= 8
{
0x0C, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 18 Info:( 0-> -3), [ -3, 3], size= 7
{
0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
},
// Mode 19 Info:( 0-> -3), [ -3, 4], size= 8
{
0x04, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 20 Info:( 0-> -2), [ -2, 4], size= 7
{
0x00, 0x05, 0x06, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 21 Info:( 0-> -2), [ -2, 4], size= 7
{
0x00, 0x04, 0x06, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 22 Info:( 0-> -1), [ -1, 4], size= 6
{
0x00, 0x00, 0x06, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 23 Info:( 0-> -1), [ -1, 4], size= 6
{
0x00, 0x00, 0x04, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 24 Info:( 0-> 0), [ 0, 4], size= 5
{
0x00, 0x00, 0x00, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 25 Info:( 0-> 0), [ 0, 4], size= 5
{
0x00, 0x00, 0x00, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 26 Info:( 1-> 1), [ 1, 4], size= 4
{
0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 27 Info:( 1-> 1), [ 1, 5], size= 5
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D,
},
// Mode 28 Info:( 1-> 1), [ 1, 5], size= 5
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D,
},
// Mode 29 Info:( 1-> 2), [ 1, 6], size= 6
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
},
// Mode 30 Info:( 1-> 2), [ 1, 6], size= 6
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
},
// Mode 31 Info:( 1-> 3), [ 1, 7], size= 7
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
},
// Mode 32 Info:( 1-> 3), [ 1, 7], size= 7
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
},
// Mode 33 Info:( 1-> 4), [ 1, 8], size= 8
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
},
// Mode 34 Info:( 2-> 5), [ 2, 8], size= 7
{
0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
},
},
// 8x8
{
// Mode 0
{
0,
},
// Mode 1
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 2 Info:( 2-> 9), [ 2, 16], size=15
{
0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
},
// Mode 3 Info:( 1-> 7), [ 1, 15], size=15
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01,
},
// Mode 4 Info:( 1-> 6), [ 1, 14], size=14
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
},
// Mode 5 Info:( 1-> 5), [ 1, 13], size=13
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03,
},
// Mode 6 Info:( 1-> 4), [ 1, 12], size=12
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 7 Info:( 1-> 3), [ 1, 11], size=11
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05,
},
// Mode 8 Info:( 1-> 2), [ 1, 10], size=10
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06,
},
// Mode 9 Info:( 1-> 1), [ 1, 9], size= 9
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07,
},
// Mode 10 Info:( 1-> 1), [ 1, 8], size= 8
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 11 Info:( 0-> 0), [ 0, 8], size= 9
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 12 Info:( 0-> -1), [ -1, 8], size=10
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 13 Info:( 0-> -2), [ -2, 8], size=11
{
0x00, 0x00, 0x00, 0x00, 0x17, 0x14, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 14 Info:( 0-> -3), [ -3, 8], size=12
{
0x00, 0x00, 0x00, 0x17, 0x15, 0x12, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 15 Info:( 0-> -4), [ -4, 8], size=13
{
0x00, 0x00, 0x18, 0x16, 0x14, 0x12, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 16 Info:( 0-> -5), [ -5, 8], size=14
{
0x00, 0x18, 0x16, 0x15, 0x13, 0x12, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 17 Info:( 0-> -6), [ -6, 8], size=15
{
0x17, 0x16, 0x15, 0x14, 0x12, 0x11, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 18 Info:( 0-> -7), [ -7, 7], size=15
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
},
// Mode 19 Info:( 0-> -6), [ -6, 8], size=15
{
0x09, 0x0A, 0x0B, 0x0C, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 20 Info:( 0-> -5), [ -5, 8], size=14
{
0x00, 0x08, 0x0A, 0x0B, 0x0D, 0x0E, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 21 Info:( 0-> -4), [ -4, 8], size=13
{
0x00, 0x00, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 22 Info:( 0-> -3), [ -3, 8], size=12
{
0x00, 0x00, 0x00, 0x09, 0x0B, 0x0E, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 23 Info:( 0-> -2), [ -2, 8], size=11
{
0x00, 0x00, 0x00, 0x00, 0x09, 0x0C, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 24 Info:( 0-> -1), [ -1, 8], size=10
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 25 Info:( 0-> 0), [ 0, 8], size= 9
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 26 Info:( 1-> 1), [ 1, 8], size= 8
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 27 Info:( 1-> 1), [ 1, 9], size= 9
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
},
// Mode 28 Info:( 1-> 2), [ 1, 10], size=10
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A,
},
// Mode 29 Info:( 1-> 3), [ 1, 11], size=11
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B,
},
// Mode 30 Info:( 1-> 4), [ 1, 12], size=12
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C,
},
// Mode 31 Info:( 1-> 5), [ 1, 13], size=13
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
},
// Mode 32 Info:( 1-> 6), [ 1, 14], size=14
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E,
},
// Mode 33 Info:( 1-> 7), [ 1, 15], size=15
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
},
// Mode 34 Info:( 2-> 9), [ 2, 16], size=15
{
0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20,
},
},
// 16x16
{
// Mode 0
{
0,
},
// Mode 1
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 2 Info:( 2-> 17), [ 2, 32], size=31
{
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0F,
0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
},
// Mode 3 Info:( 1-> 14), [ 1, 29], size=29
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03,
},
// Mode 4 Info:( 1-> 11), [ 1, 27], size=27
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05,
},
// Mode 5 Info:( 1-> 9), [ 1, 25], size=25
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07,
},
// Mode 6 Info:( 1-> 7), [ 1, 23], size=23
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09,
},
// Mode 7 Info:( 1-> 5), [ 1, 21], size=21
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B,
},
// Mode 8 Info:( 1-> 3), [ 1, 19], size=19
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D,
},
// Mode 9 Info:( 1-> 2), [ 1, 17], size=17
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F,
},
// Mode 10 Info:( 1-> 1), [ 1, 16], size=16
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 11 Info:( 0-> 0), [ 0, 16], size=17
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 12 Info:( 0-> -2), [ -2, 16], size=19
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2D, 0x26, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 13 Info:( 0-> -4), [ -4, 16], size=21
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2E, 0x2B, 0x27, 0x24, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 14 Info:( 0-> -6), [ -6, 16], size=23
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2F, 0x2C, 0x2A, 0x27, 0x25, 0x22, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 15 Info:( 0-> -8), [ -8, 16], size=25
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2F, 0x2D, 0x2B, 0x29, 0x28, 0x26, 0x24, 0x22, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 16 Info:( 0->-10), [-10, 16], size=27
{
0x00, 0x00, 0x00, 0x00, 0x2F, 0x2E, 0x2C, 0x2B, 0x29, 0x28, 0x26, 0x25, 0x23, 0x22, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 17 Info:( 0->-12), [-12, 16], size=29
{
0x00, 0x00, 0x2F, 0x2E, 0x2C, 0x2B, 0x2A, 0x29, 0x27, 0x26, 0x25, 0x24, 0x22, 0x21, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 18 Info:( 0->-15), [-15, 15], size=31
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20,
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
},
// Mode 19 Info:( 0->-12), [-12, 16], size=29
{
0x00, 0x00, 0x11, 0x12, 0x14, 0x15, 0x16, 0x17, 0x19, 0x1A, 0x1B, 0x1C, 0x1E, 0x1F, 0x20, 0x21,
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 20 Info:( 0->-10), [-10, 16], size=27
{
0x00, 0x00, 0x00, 0x00, 0x11, 0x12, 0x14, 0x15, 0x17, 0x18, 0x1A, 0x1B, 0x1D, 0x1E, 0x20, 0x21,
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 21 Info:( 0-> -8), [ -8, 16], size=25
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x13, 0x15, 0x17, 0x18, 0x1A, 0x1C, 0x1E, 0x20, 0x21,
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 22 Info:( 0-> -6), [ -6, 16], size=23
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x14, 0x16, 0x19, 0x1B, 0x1E, 0x20, 0x21,
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 23 Info:( 0-> -4), [ -4, 16], size=21
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x15, 0x19, 0x1C, 0x20, 0x21,
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 24 Info:( 0-> -2), [ -2, 16], size=19
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x1A, 0x20, 0x21,
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 25 Info:( 0-> 0), [ 0, 16], size=17
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x21,
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 26 Info:( 1-> 1), [ 1, 16], size=16
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 27 Info:( 1-> 2), [ 1, 17], size=17
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31,
},
// Mode 28 Info:( 1-> 3), [ 1, 19], size=19
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31, 0x32, 0x33,
},
// Mode 29 Info:( 1-> 5), [ 1, 21], size=21
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35,
},
// Mode 30 Info:( 1-> 7), [ 1, 23], size=23
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
},
// Mode 31 Info:( 1-> 9), [ 1, 25], size=25
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
},
// Mode 32 Info:( 1-> 11), [ 1, 27], size=27
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B,
},
// Mode 33 Info:( 1-> 14), [ 1, 29], size=29
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D,
},
// Mode 34 Info:( 2-> 17), [ 2, 32], size=31
{
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31,
0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, 0x40,
},
},
// 32x32
{
// Mode 0
{
0,
},
// Mode 1
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 2 Info:( 2-> 33), [ 2, 64], size=63
{
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0F,
0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
},
// Mode 3 Info:( 1-> 27), [ 1, 58], size=58
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06,
},
// Mode 4 Info:( 1-> 22), [ 1, 53], size=53
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B,
},
// Mode 5 Info:( 1-> 18), [ 1, 49], size=49
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F,
},
// Mode 6 Info:( 1-> 14), [ 1, 45], size=45
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13,
},
// Mode 7 Info:( 1-> 10), [ 1, 41], size=41
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17,
},
// Mode 8 Info:( 1-> 6), [ 1, 37], size=37
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x1F, 0x1E, 0x1D, 0x1C, 0x1B,
},
// Mode 9 Info:( 1-> 3), [ 1, 34], size=34
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x1F, 0x1E,
},
// Mode 10 Info:( 1-> 1), [ 1, 32], size=32
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 11 Info:( 0-> -1), [ -1, 32], size=34
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x40, 0x3F,
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 12 Info:( 0-> -4), [ -4, 32], size=37
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5A, 0x53, 0x4D, 0x46, 0x40, 0x3F,
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 13 Info:( 0-> -8), [ -8, 32], size=41
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5C, 0x59, 0x55, 0x52, 0x4E, 0x4B, 0x47, 0x44, 0x40, 0x3F,
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 14 Info:( 0->-12), [-12, 32], size=45
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x5E, 0x5B, 0x59, 0x56, 0x54, 0x51, 0x4F, 0x4C, 0x4A, 0x47, 0x45, 0x42, 0x40, 0x3F,
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 15 Info:( 0->-16), [-16, 32], size=49
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5E, 0x5C,
0x5A, 0x58, 0x57, 0x55, 0x53, 0x51, 0x4F, 0x4D, 0x4B, 0x49, 0x48, 0x46, 0x44, 0x42, 0x40, 0x3F,
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 16 Info:( 0->-20), [-20, 32], size=53
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5E, 0x5D, 0x5B, 0x5A, 0x58, 0x57,
0x55, 0x54, 0x52, 0x51, 0x4F, 0x4E, 0x4C, 0x4B, 0x49, 0x48, 0x46, 0x45, 0x43, 0x42, 0x40, 0x3F,
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 17 Info:( 0->-25), [-25, 32], size=58
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x5F, 0x5E, 0x5C, 0x5B, 0x5A, 0x59, 0x57, 0x56, 0x55, 0x54, 0x52,
0x51, 0x50, 0x4F, 0x4E, 0x4C, 0x4B, 0x4A, 0x49, 0x47, 0x46, 0x45, 0x44, 0x42, 0x41, 0x40, 0x3F,
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 18 Info:( 0->-31), [-31, 31], size=63
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, 0x40,
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
},
// Mode 19 Info:( 0->-25), [-25, 32], size=58
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x22, 0x24, 0x25, 0x26, 0x27, 0x29, 0x2A, 0x2B, 0x2C, 0x2E,
0x2F, 0x30, 0x31, 0x32, 0x34, 0x35, 0x36, 0x37, 0x39, 0x3A, 0x3B, 0x3C, 0x3E, 0x3F, 0x40, 0x41,
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 20 Info:( 0->-20), [-20, 32], size=53
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x23, 0x25, 0x26, 0x28, 0x29,
0x2B, 0x2C, 0x2E, 0x2F, 0x31, 0x32, 0x34, 0x35, 0x37, 0x38, 0x3A, 0x3B, 0x3D, 0x3E, 0x40, 0x41,
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 21 Info:( 0->-16), [-16, 32], size=49
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x24,
0x26, 0x28, 0x29, 0x2B, 0x2D, 0x2F, 0x31, 0x33, 0x35, 0x37, 0x38, 0x3A, 0x3C, 0x3E, 0x40, 0x41,
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 22 Info:( 0->-12), [-12, 32], size=45
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x22, 0x25, 0x27, 0x2A, 0x2C, 0x2F, 0x31, 0x34, 0x36, 0x39, 0x3B, 0x3E, 0x40, 0x41,
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 23 Info:( 0-> -8), [ -8, 32], size=41
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x27, 0x2B, 0x2E, 0x32, 0x35, 0x39, 0x3C, 0x40, 0x41,
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 24 Info:( 0-> -4), [ -4, 32], size=37
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x2D, 0x33, 0x3A, 0x40, 0x41,
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 25 Info:( 0-> -1), [ -1, 32], size=34
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x40, 0x41,
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 26 Info:( 1-> 1), [ 1, 32], size=32
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 27 Info:( 1-> 3), [ 1, 34], size=34
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
0x61, 0x62,
},
// Mode 28 Info:( 1-> 6), [ 1, 37], size=37
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
0x61, 0x62, 0x63, 0x64, 0x65,
},
// Mode 29 Info:( 1-> 10), [ 1, 41], size=41
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
},
// Mode 30 Info:( 1-> 14), [ 1, 45], size=45
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D,
},
// Mode 31 Info:( 1-> 18), [ 1, 49], size=49
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
0x71,
},
// Mode 32 Info:( 1-> 22), [ 1, 53], size=53
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
0x71, 0x72, 0x73, 0x74, 0x75,
},
// Mode 33 Info:( 1-> 27), [ 1, 58], size=58
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A,
},
// Mode 34 Info:( 2-> 33), [ 2, 64], size=63
{
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60, 0x61,
0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71,
0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, 0x80,
},
},
};
//Int offset = (nIntraPredAngle < 0 ? 2*nSize-1-nSize+1-1 + (nMode == 18 && nSize != 4) + (nSize == 4) : 0);
// __constant__ static CUInt8 ucOffset[4][NUM_INTRA_MODE] = {
// // 4x4
// {
// 0, 0, // Invalid
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0, 4, 4, 4, 4, 4, 4, 4,
// 4, 4, 4, 4, 4, 4, 4, 4,
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0
// },
// // 8x8
// {
// 0, 0, // Invalid
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0, 7, 7, 7, 7, 7, 7, 7,
// 8, 7, 7, 7, 7, 7, 7, 7,
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0
// },
// // 16x16
// {
// 0, 0, // Invalid
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0, 15, 15, 15, 15, 15, 15, 15,
// 16, 15, 15, 15, 15, 15, 15, 15,
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0
// },
// // 32x32
// {
// 0, 0, // Invalid
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0, 31, 31, 31, 31, 31, 31, 31,
// 32, 31, 31, 31, 31, 31, 31, 31,
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0
// },
// };
// Table 8-5 Specification of intraPredAngle
__constant__ CInt8 cxg_aucIntraPredAngle[NUM_INTRA_MODE] = {
0, 0, // Invalid
32, 26, 21, 17, 13, 9, 5, 2,
0, -2, -5, -9,-13,-17,-21,-26,
-32,-26,-21,-17,-13, -9, -5, -2,
0, 2, 5, 9, 13, 17, 21, 26,
32
};
// Table 8-4 (Changed)
__constant__ UInt64 cxg_aucIntraFilterTypePacked[5] = {
0x000000000ULL,
0x400040005ULL,
0x7F1FFF1FDULL,
0x7FBFFFBFDULL,
0x000000000ULL,
};
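// Packed form of the commented-out cxg_aucIntraFilterType table below: each entry is
// indexed by log2(nSize)-2 and carries one bit per intra mode, so testing bit nMode
// tells the kernel whether the smoothed reference row is used for that mode and size.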
// __constant__ CUInt8 cxg_aucIntraFilterType[5][NUM_INTRA_MODE] = {
// // Index: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11, 12,13,14,15, 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31, 32,33,34
// // Diff: 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8
// /* 4x4 */ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
// /* 8x8 */ { 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 },
// /* 16x16 */ { 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1 },
// /* 32x32 */ { 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
// /* 64x64 */ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
// };
// typedef union PackedUInt8
// {
// struct __align__(8)
// {
// UInt8 uc[4];
// };
// UInt32 hInt;
// } PackedUInt8;
//
static __device__ __inline__ void __prefetch(void *ptr)
{
asm volatile ("prefetch.L1 [%0];" : : "r"(ptr));
}
#ifndef DEBUG_GPU
__global__
#endif
void xPredIntraAng_gpu0(
UInt8 *pucDst,
CUInt8 *pucRef,
CUInt32 nSize,
CUInt32 bLuma
)
{
CUInt32 nMode = 1 + threadIdx.x + blockIdx.x * (NUM_INTRA_MODE - 1)/2;
CUInt32 nLog2Size = xLog2_new( nSize );
CInt32 nIntraPredAngle = cxg_aucIntraPredAngle[nMode];
//CUInt bFilter = cxg_aucIntraFilterType[nLog2Size-2][nMode];
CUInt bFilter = (cxg_aucIntraFilterTypePacked[nLog2Size-2] >> nMode) & 1;
CUInt bModeHor = (nMode < 18);
CUInt8 *pucTopLeft;
UInt32 i, x, k;
__shared__ __declspec(align(8)) UInt8 ucRef_s[2][4*MAX_CU_SIZE+1+7]; // +3 -> +7
UInt8 pucRefMain[2*MAX_CU_SIZE];
//__shared__ UInt8 ucRefMain[NUM_INTRA_MODE][2*MAX_CU_SIZE];
//UInt8 *pucRefMain = ucRefMain[nMode];
UInt8 uclDst[MAX_CU_SIZE*MAX_CU_SIZE];
Int8 pucFilterPix[MAX_CU_SIZE];
UInt8 ucDcVal;
UInt32 uiSumTop=0, uiSumLeft=0;
CUInt8 *pModeIdx = ucModeIdx[nLog2Size-2][nMode];
Int offset;
if ( threadIdx.x == 0 ) {
UInt64 *P0 = (UInt64 *)&ucRef_s[0][0];
UInt64 *P1 = (UInt64 *)&ucRef_s[1][0];
UInt64 *Q0 = (UInt64 *)(pucRef );
UInt64 *Q1 = (UInt64 *)(pucRef + 4*MAX_CU_SIZE+1+7);
for( i=0; i<nSize>>1; i++ ) {
*P0++ = *Q0++;
*P1++ = *Q1++;
}
ucRef_s[0][4*nSize] = pucRef[4*nSize];
ucRef_s[1][4*nSize] = pucRef[4*nSize+4*MAX_CU_SIZE+1+7];
//memcpy( ucRef_s, pucRef, 4*nSize+1 );
}
__syncthreads();
pucRef = ucRef_s[bFilter];
pucDst += nMode * MAX_CU_SIZE * MAX_CU_SIZE;
//pucRef += (bFilter ? 4*MAX_CU_SIZE+1+7 : 0);
//__prefetch( (void *)(pucRef ) );
//__prefetch( (void *)(pucRef+64) );
pucTopLeft = pucRef + 2 * nSize;
// Get reference pixel
// for( i=0; i<2*nSize; i++ ) {
// CUInt8 ucIdx = ucModeIdx[nLog2Size-2][nMode][i];
// pucRefMain[i] = pucRef[ ucIdx ];
// }
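	// Gather the projected main reference: ucModeIdx pre-computes, per size and mode, which
	// of the packed boundary samples each entry of pucRefMain comes from (including samples
	// projected from the side reference for negative angles). The same unrolled pass also
	// accumulates the top/left sums for the DC value and the deltas used by the edge filter.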
for( i=0; i<nSize; i+=4 ) {
pucRefMain[i*2+0] = pucRef[ *pModeIdx++ ];
pucRefMain[i*2+1] = pucRef[ *pModeIdx++ ];
pucRefMain[i*2+2] = pucRef[ *pModeIdx++ ];
pucRefMain[i*2+3] = pucRef[ *pModeIdx++ ];
pucRefMain[i*2+4] = pucRef[ *pModeIdx++ ];
pucRefMain[i*2+5] = pucRef[ *pModeIdx++ ];
pucRefMain[i*2+6] = pucRef[ *pModeIdx++ ];
pucRefMain[i*2+7] = pucRef[ *pModeIdx++ ];
pucFilterPix[i+0] = (pucTopLeft[(i+1+0)*(bModeHor ? 1 : -1)] - pucTopLeft[0]) >> 1;
uiSumLeft += pucTopLeft[i+0-nSize];
uiSumTop += pucTopLeft[i+0+1];
pucFilterPix[i+1] = (pucTopLeft[(i+1+1)*(bModeHor ? 1 : -1)] - pucTopLeft[0]) >> 1;
uiSumLeft += pucTopLeft[i+1-nSize];
uiSumTop += pucTopLeft[i+1+1];
pucFilterPix[i+2] = (pucTopLeft[(i+1+2)*(bModeHor ? 1 : -1)] - pucTopLeft[0]) >> 1;
uiSumLeft += pucTopLeft[i+2-nSize];
uiSumTop += pucTopLeft[i+2+1];
pucFilterPix[i+3] = (pucTopLeft[(i+1+3)*(bModeHor ? 1 : -1)] - pucTopLeft[0]) >> 1;
uiSumLeft += pucTopLeft[i+3-nSize];
uiSumTop += pucTopLeft[i+3+1];
}
//ucDcVal = (uiSumTop + uiSumLeft + nSize) / (nSize + nSize);
ucDcVal = (uiSumTop + uiSumLeft + nSize) >> (1 + nLog2Size);
// 8.4.3.1.6
Int deltaPos = ((nMode == 2) || (nMode == 34) ? -32 : 0);
offset = (nMode > 10 && nMode < 26) ? nSize-1 + (nMode == 18 || nSize == 4 ? 1 : 0) : 0;
//offset = (nIntraPredAngle < 0 ? 2*nSize-1-nSize+1-1 + (nMode == 18 && nSize != 4) + (nSize == 4) : 0);
//offset = ucOffset[nLog2Size-2][nMode];
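	// Angular prediction: deltaPos advances by the mode angle once per output row/column;
	// iIdx is the whole-sample offset into pucRefMain and iFact the 1/32-sample fraction
	// driving the two-tap linear interpolation below.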
for( k=0; k<nSize; k++ ) {
deltaPos += nIntraPredAngle;
Int32 iIdx = deltaPos >> 5; // (8-53)
UInt32 iFact = deltaPos & 31; // (8-54)
Int refMainIndex = iIdx+offset;
UInt32 pix0 = pucRefMain[refMainIndex ];
UInt32 pix1 = pucRefMain[refMainIndex+1];
UInt32 pix = (nMode == DC_IDX) ? ucDcVal : ( ( ((32-iFact)*pix0+8) + (iFact*pix1+8) ) >> 5 );
Int8 pix_filter = ( bLuma && (nIntraPredAngle == 0) ) ? pucFilterPix[k] : 0;
UInt px, py;
//pix = Clip( pix + pix_filter );
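	// Saturating byte add in PTX: clamps pix + pix_filter to [0,255] on the low byte,
	// replacing the commented-out Clip() call above.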
asm volatile ( "vadd.u32.u32.s32.sat %0.b0, %1.b0, %2.b0, 0;" : "=r"(pix) : "r"(pix), "r"((int)pix_filter) );
px = bModeHor ? k : k*MAX_CU_SIZE;
uclDst[px] = pix;
// Do linear filtering
for( x=1; x<nSize; x++ ) {
refMainIndex++;
pix0 = pix1;
pix1 = pucRefMain[refMainIndex+1];
pix = ( nMode == DC_IDX ) ? ucDcVal : ( ( (32-iFact)*pix0 + (iFact*pix1+16) ) >> 5 );
px = bModeHor ? k : x;
py = bModeHor ? x : k;
uclDst[py*MAX_CU_SIZE+px] = pix;
}
}
if ( bLuma && nMode == DC_IDX ) {
for( x=1; x<nSize; x++ ) {
uclDst[x ] = ( (pucRefMain[x+nSize] + 3 * ucDcVal + 2) >> 2 );
uclDst[x*MAX_CU_SIZE] = ( (pucRefMain[x ] + 3 * ucDcVal + 2) >> 2 );
}
uclDst[0] = ( pucRefMain[0] + pucRefMain[0+nSize] + 2 * ucDcVal + 2 ) >> 2;
}
{
for( i=0; i<nSize; i++ ) {
UInt64 *P0 = (UInt64 *)&pucDst[i*MAX_CU_SIZE];
UInt64 *Q0 = (UInt64 *)&uclDst[i*MAX_CU_SIZE];
for( x=0; x<nSize; x+=8 ) {
*P0++ = *Q0++;
}
}
}
#ifndef DEBUG_GPU
__syncthreads();
#endif
}
#ifndef DEBUG_GPU
__global__
#endif
void xSads_gpu0(
CUInt8 *pucDst,
CUInt8 *pucPix,
UInt32 *puiSads,
CUInt32 nSize,
CUInt32 lambda,
CUInt32 ucMostModeY0,
CUInt32 ucMostModeY1,
CUInt32 ucMostModeY2
)
{
CUInt32 bid = blockIdx.x; // Mode-1
CUInt32 tid = threadIdx.x; // Line
	CUInt32 nMode = bid + 1;
// __shared__ UInt32 uiSads[MAX_CU_SIZE];
UInt32 *P0, *P1, *Q0, *Q1;
UInt32 *P2, *Q2;
Int32 uiSad = 0;
UInt32 uiSadL = 0;
UInt32 i;
__declspec(align(8)) UInt8 ucSrc[MAX_CU_SIZE*MAX_CU_SIZE];
__declspec(align(8)) UInt8 ucRef[MAX_CU_SIZE*MAX_CU_SIZE];
if( nMode == ucMostModeY0 )
uiSadL = 1 * lambda;
else if( nMode == ucMostModeY1 || nMode == ucMostModeY2 )
uiSadL = 2 * lambda;
else
uiSadL = 3 * lambda;
pucDst += nMode * MAX_CU_SIZE * MAX_CU_SIZE + tid * MAX_CU_SIZE;
pucPix += tid * MAX_CU_SIZE;
puiSads += nMode;
P0 = (UInt32*)ucSrc;
P1 = (UInt32*)ucRef;
Q0 = (UInt32*)pucPix;
Q1 = (UInt32*)pucDst;
for( i=0; i<nSize; i+=4 ) {
*P0++ = *Q0++;
*P1++ = *Q1++;
}
// UInt32 uiSad0 = 0;
// for( i=0; i<nSize; i++ ) {
// uiSad0 = __usad( ucRef[i], ucSrc[i], uiSad0 );
// }
P2 = (UInt32*)ucSrc;
Q2 = (UInt32*)ucRef;
//P0[0] = 0x01020304;
//P1[0] = 0x05060708;
//UInt32 tmp = 0, tmp1=0;
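	// SIMD-in-register SAD: each vabsdiff4 accumulates the absolute differences of four
	// packed bytes at once, so one row of nSize pixels costs nSize/4 instructions.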
for( i=0; i<nSize>>2; i++ ) {
asm volatile ( "vabsdiff4.u32.u32.u32.add %0, %1, %2, %3;" : "=r"(uiSad) : "r"(P2[i]), "r"(Q2[i]), "r"(uiSad) );
//asm volatile ( "vabsdiff4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(tmp) : "r"(P0[i]), "r"(P1[i]), "r"(uiSad) );
}
// if ( uiSad != uiSad0 ) {
// printf("[%2d]: uiSad=%08X, uiSad0=%08X\n", tid, uiSad, uiSad0);
// }
// uiSads[tid] = uiSad;
//uiSad = uiSad0;
//printf("[%2d]: uiSad=%08X, tmp=%08X\n", tid, uiSad, tmp);
#if 1
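	// Warp shuffle reduction: the __shfl_xor butterfly sums the per-row SADs across lanes,
	// after which lane 0 adds the mode cost (uiSadL) and stores the block total.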
for( i=16; i>=1; i>>=1 ) {
uiSad += __shfl_xor(uiSad, i, 32);
}
if ( tid == 0 ) {
*puiSads = uiSad + uiSadL;
//printf("(%2d,%2d) -> %08X\n", nMode, tid, uiSad);
}
#else
if ( tid == 0 ) {
if( nMode == ucMostModeY0 )
uiSad += 1 * lambda;
else if( nMode == ucMostModeY1 || nMode == ucMostModeY2 )
uiSad += 2 * lambda;
else
uiSad += 3 * lambda;
for( i=1; i<nSize; i++ ) {
uiSad += uiSads[i];
}
*puiSads = uiSad;
//printf("(%2d,%2d) -> %08X\n", nMode, tid, uiSad);
}
#endif
__syncthreads();
}
extern "C" __host__
void xInitGPU()
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
if ( deviceCount > 0 ) {
hipSetDevice( 0 );
}
}
extern "C" __host__
void xFreeGPU()
{
hipDeviceReset();
}
typedef UInt32 __cdecl xSad( const UInt N, const UInt8 *pSrc, const UInt nStrideSrc, const UInt8 *pRef, const UInt nStrideRef );
extern xSad *xSadN[MAX_CU_DEPTH+1];
extern CUInt8 xg_aucIntraFilterType[5][NUM_INTRA_MODE];
extern void __cdecl xEncIntraPred( UInt8 *pucDstY, UInt8 *pucRefY, UInt nStride, UInt nMode, UInt nSize, UInt bIsLuma );
void __cdecl tPrintMatrix( FILE *fp, char *name, UInt8 *P, UInt uiStride, Int iSize );
extern "C" __host__
void __cdecl xEncIntraPred_gpu(
UInt8 *pucDst[2],
UInt8 *pucRef,
UInt nSize,
UInt bLuma,
UInt32 lambda,
UInt8 aucMostModeY[3],
UInt8 *pucPixY,
UInt32 *puiSad,
UInt32 *puiBestModeY,
UInt32 *puiTmpIdx,
hipStream_t stream
)
{
CUInt nLog2Size = xLog2( nSize );
UInt32 /*nMode, */nBestModeY;
UInt32 uiSad, uiBestSadY;
UInt32 nTmpIdx;
__declspec(align(128)) UInt8 my_dst[NUM_INTRA_MODE][MAX_CU_SIZE*MAX_CU_SIZE];
UInt32 uiSads[NUM_INTRA_MODE];
UInt32 changed = FALSE;
UInt32 i;
// UInt8 *pucDst_gpu[2];
UInt8 *pucRef_gpu;
UInt8 *pucPixY_gpu;
UInt8 *my_dst_gpu;
UInt32 *puiSads_gpu;
float elapsedTimeInMs = 0.0f;
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
#ifndef DEBUG_GPU
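	// Zero-copy setup: pin the host buffers and map them into the device address space,
	// then fetch device pointers that alias the host memory so the kernels read and write
	// it directly instead of going through explicit copies.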
// hipHostRegister( pucDst[0], nSize*nSize, hipHostRegisterMapped );
// hipHostRegister( pucDst[1], nSize*nSize, hipHostRegisterMapped );
hipHostRegister( pucRef, 2*(4*MAX_CU_SIZE+1+7), hipHostRegisterMapped );
hipHostRegister( pucPixY, nSize*MAX_CU_SIZE, hipHostRegisterMapped );
//hipHostRegister( my_tmp, sizeof(my_tmp), hipHostRegisterMapped );
hipHostRegister( my_dst[0], sizeof(my_dst), hipHostRegisterMapped );
hipHostRegister( uiSads, sizeof(uiSads), hipHostRegisterMapped );
// hipHostGetDevicePointer( &pucDst_gpu[0], pucDst[0], 0 );
// hipHostGetDevicePointer( &pucDst_gpu[1], pucDst[1], 0 );
hipHostGetDevicePointer( &pucRef_gpu, pucRef, 0 );
hipHostGetDevicePointer( &pucPixY_gpu, pucPixY, 0 );
//hipHostGetDevicePointer( &pucMyTmp_gpu, my_tmp, 0 );
hipHostGetDevicePointer( &my_dst_gpu, my_dst[0], 0 );
hipHostGetDevicePointer( &puiSads_gpu, uiSads, 0 );
#else
// pucDst_gpu[0] = pucDst[0];
// pucDst_gpu[1] = pucDst[1];
pucRef_gpu = pucRef;
#endif
//memset( my_dst, 0xCD, sizeof(my_dst) );
nBestModeY = *puiBestModeY;
uiBestSadY = *puiSad;
nTmpIdx = *puiTmpIdx;
/*for( nMode=1; nMode<NUM_INTRA_MODE; nMode+=34 ) */{
hipEventRecord( start, 0 );
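		// Launch layout: xPredIntraAng_gpu0 runs 2 blocks of (NUM_INTRA_MODE-1)/2 threads,
		// one thread per prediction mode; xSads_gpu0 then runs one block per mode with
		// nSize threads, one thread per row of the predicted block.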
hipLaunchKernelGGL(( xPredIntraAng_gpu0), dim3(2), (NUM_INTRA_MODE-1)/2
#ifdef _OPENMP
, 0, stream
#endif
,
my_dst_gpu,//pucDst_gpu[nTmpIdx],
pucRef_gpu,
nSize,
bLuma
);
hipLaunchKernelGGL(( xSads_gpu0), dim3(NUM_INTRA_MODE-1), nSize
#ifdef _OPENMP
, 0, stream
#endif
,
my_dst_gpu,
pucPixY_gpu,
puiSads_gpu,
nSize,
lambda,
aucMostModeY[0],
aucMostModeY[1],
aucMostModeY[2]
);
hipEventRecord( stop, 0 );
hipEventSynchronize(stop);
//hipDeviceSynchronize();
hipEventElapsedTime( &elapsedTimeInMs, start, stop );
//printf("GPU Time=%f ms\n", elapsedTimeInMs);
{
static int sn = 0;
static int ss[4] = {0, 0, 0, 0};
static double sum[4] = {0, 0, 0, 0};
sum[nLog2Size-2] += elapsedTimeInMs;
ss[nLog2Size-2]++;
sn++;
if ( sn % 1024 == 0 ) {
printf("Avg = (%.4lf, %.4lf, %.4lf, %.4lf)/(%4d, %4d, %4d, %4d)\n", sum[0]/ss[0], sum[1]/ss[1], sum[2]/ss[2], sum[3]/ss[3], ss[0], ss[1], ss[2], ss[3]);
}
}
UInt32 modeOffset;
for( modeOffset=0; modeOffset<34; modeOffset++ ) {
UInt32 nRealMode = 1 + modeOffset;
#if 1//defined(DEBUG_GPU)
UInt i, j;
UInt8 tmp0[MAX_CU_SIZE*MAX_CU_SIZE];
UInt bFilter;
bFilter = xg_aucIntraFilterType[nLog2Size-2][nRealMode];
xEncIntraPred( tmp0, pucRef + (bFilter ? 4*MAX_CU_SIZE+1+7 : 0), MAX_CU_SIZE, nRealMode, nSize, TRUE );
for( i=0; i<nSize; i++ ) {
for( j=0; j<nSize; j++ ) {
if ( tmp0[i*MAX_CU_SIZE+j] != my_dst[nRealMode][i*MAX_CU_SIZE+j] ) {
printf("ERROR: Mode %2d at (%2d,%2d): %02X -> %02X\n", nRealMode, i, j, tmp0[i*MAX_CU_SIZE+j] & 0xFF, my_dst[nRealMode][i*MAX_CU_SIZE+j] & 0xFF );
tPrintMatrix( stdout, "Gold\n", tmp0, MAX_CU_SIZE, nSize );
tPrintMatrix( stdout, "My\n", my_dst[nRealMode], MAX_CU_SIZE, nSize );
xEncIntraPred( tmp0, pucRef + (bFilter ? 4*MAX_CU_SIZE+1+7 : 0), MAX_CU_SIZE, nRealMode, nSize, TRUE );
hipLaunchKernelGGL(( xPredIntraAng_gpu0), dim3(1), dim3(1) , 0, 0,
my_dst_gpu,
pucRef_gpu,
nSize,
bLuma
);
}
}
}
if( nRealMode == aucMostModeY[0] )
uiSad = 1 * lambda;
else if( nRealMode == aucMostModeY[1] || nRealMode == aucMostModeY[2] )
uiSad = 2 * lambda;
else
uiSad = 3 * lambda;
uiSad += xSadN[nLog2Size-2]( nSize,
pucPixY, MAX_CU_SIZE,
my_dst[nRealMode], MAX_CU_SIZE );
if ( uiSad != uiSads[nRealMode] ) {
printf( "ERROR: Mode %2d, Sad=%d -> %d\n", nRealMode, uiSad, uiSads[nRealMode] );
}
#endif
uiSad = uiSads[nRealMode];
if( uiSad < uiBestSadY ) {
uiBestSadY = uiSad;
nBestModeY = nRealMode;
//nTmpIdx ^= 1;
changed = TRUE;
}
}
}
if ( changed ) {
		for( i=0; i<nSize; i++ ) {
memcpy( pucDst[nTmpIdx^1] + i*MAX_CU_SIZE, &my_dst[nBestModeY][i*MAX_CU_SIZE], nSize );
}
}
*puiSad = uiBestSadY;
*puiBestModeY = nBestModeY;
*puiTmpIdx = nTmpIdx;
#ifndef DEBUG_GPU
hipHostUnregister( uiSads );
hipHostUnregister( my_dst[0] );
hipHostUnregister( pucPixY );
// hipHostUnregister( pucDst[0] );
// hipHostUnregister( pucDst[1] );
hipHostUnregister( pucRef );
hipEventDestroy(stop);
hipEventDestroy(start);
#endif
}
| 387afafacf9db726e419a23b53fa1babc3a3edca.cu | #include "config.h"
#include <cuda.h>
#include <stdio.h>
//#define DEBUG_GPU
#ifdef DEBUG_GPU
#define __global__
#define __device__
#define __constant__
#define __inline__ static
#endif
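// Host-side integer log2 via the float exponent field: for the power-of-two block sizes
// used here this recovers log2(x) without a loop.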
__inline__ static int xLog2(UInt32 x)
{
UInt32 r;
union {
float f;
unsigned i;
} tmp;
tmp.f = (float)x;
r = (tmp.i >> 23);
r &= 7;
r += 1;
return r;
}
__inline__ __device__ UInt32 xLog2_new( UInt32 x )
{
#if 0//defined(DEBUG_GPU)
UInt32 r;
union {
float f;
unsigned i;
} tmp;
tmp.f = x;
r = (tmp.i >> 23);
r &= 7;
r += 1;
return r;
#else
return __log2f( x );
#endif
}
__inline__ __device__ UInt32 Clip3( Int32 minVal, Int32 maxVal, Int32 a )
{
if ( a < minVal )
a = minVal;
if ( a > maxVal )
a = maxVal;
return a;
}
#define Clip(x) Clip3( 0, 255, (x))
__constant__ __device__ __declspec(align(4)) static CUInt8 ucModeIdx[4][NUM_INTRA_MODE][2*MAX_CU_SIZE] = {
// 4x4
{
// Mode 0
{
0,
},
// Mode 1
{
0x07, 0x06, 0x05, 0x04,
0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 2 Info:( 2-> 5), [ 2, 8], size= 7
{
0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
},
// Mode 3 Info:( 1-> 4), [ 1, 8], size= 8
{
0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
},
// Mode 4 Info:( 1-> 3), [ 1, 7], size= 7
{
0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01,
},
// Mode 5 Info:( 1-> 3), [ 1, 7], size= 7
{
0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01,
},
// Mode 6 Info:( 1-> 2), [ 1, 6], size= 6
{
0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
},
// Mode 7 Info:( 1-> 2), [ 1, 6], size= 6
{
0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
},
// Mode 8 Info:( 1-> 1), [ 1, 5], size= 5
{
0x07, 0x06, 0x05, 0x04, 0x03,
},
// Mode 9 Info:( 1-> 1), [ 1, 5], size= 5
{
0x07, 0x06, 0x05, 0x04, 0x03,
},
// Mode 10 Info:( 1-> 1), [ 1, 4], size= 4
{
0x07, 0x06, 0x05, 0x04,
},
// Mode 11 Info:( 0-> 0), [ 0, 4], size= 5
{
0x00, 0x00, 0x00, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 12 Info:( 0-> 0), [ 0, 4], size= 5
{
0x00, 0x00, 0x00, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 13 Info:( 0-> -1), [ -1, 4], size= 6
{
0x00, 0x00, 0x0C, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 14 Info:( 0-> -1), [ -1, 4], size= 6
{
0x00, 0x00, 0x0A, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 15 Info:( 0-> -2), [ -2, 4], size= 7
{
0x00, 0x0C, 0x0A, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 16 Info:( 0-> -2), [ -2, 4], size= 7
{
0x00, 0x0B, 0x0A, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 17 Info:( 0-> -3), [ -3, 4], size= 8
{
0x0C, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 18 Info:( 0-> -3), [ -3, 3], size= 7
{
0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
},
// Mode 19 Info:( 0-> -3), [ -3, 4], size= 8
{
0x04, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 20 Info:( 0-> -2), [ -2, 4], size= 7
{
0x00, 0x05, 0x06, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 21 Info:( 0-> -2), [ -2, 4], size= 7
{
0x00, 0x04, 0x06, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 22 Info:( 0-> -1), [ -1, 4], size= 6
{
0x00, 0x00, 0x06, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 23 Info:( 0-> -1), [ -1, 4], size= 6
{
0x00, 0x00, 0x04, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 24 Info:( 0-> 0), [ 0, 4], size= 5
{
0x00, 0x00, 0x00, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 25 Info:( 0-> 0), [ 0, 4], size= 5
{
0x00, 0x00, 0x00, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 26 Info:( 1-> 1), [ 1, 4], size= 4
{
0x09, 0x0A, 0x0B, 0x0C,
},
// Mode 27 Info:( 1-> 1), [ 1, 5], size= 5
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D,
},
// Mode 28 Info:( 1-> 1), [ 1, 5], size= 5
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D,
},
// Mode 29 Info:( 1-> 2), [ 1, 6], size= 6
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
},
// Mode 30 Info:( 1-> 2), [ 1, 6], size= 6
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
},
// Mode 31 Info:( 1-> 3), [ 1, 7], size= 7
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
},
// Mode 32 Info:( 1-> 3), [ 1, 7], size= 7
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
},
// Mode 33 Info:( 1-> 4), [ 1, 8], size= 8
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
},
// Mode 34 Info:( 2-> 5), [ 2, 8], size= 7
{
0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
},
},
// 8x8
{
// Mode 0
{
0,
},
// Mode 1
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 2 Info:( 2-> 9), [ 2, 16], size=15
{
0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
},
// Mode 3 Info:( 1-> 7), [ 1, 15], size=15
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01,
},
// Mode 4 Info:( 1-> 6), [ 1, 14], size=14
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
},
// Mode 5 Info:( 1-> 5), [ 1, 13], size=13
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03,
},
// Mode 6 Info:( 1-> 4), [ 1, 12], size=12
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04,
},
// Mode 7 Info:( 1-> 3), [ 1, 11], size=11
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05,
},
// Mode 8 Info:( 1-> 2), [ 1, 10], size=10
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06,
},
// Mode 9 Info:( 1-> 1), [ 1, 9], size= 9
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07,
},
// Mode 10 Info:( 1-> 1), [ 1, 8], size= 8
{
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 11 Info:( 0-> 0), [ 0, 8], size= 9
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 12 Info:( 0-> -1), [ -1, 8], size=10
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 13 Info:( 0-> -2), [ -2, 8], size=11
{
0x00, 0x00, 0x00, 0x00, 0x17, 0x14, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 14 Info:( 0-> -3), [ -3, 8], size=12
{
0x00, 0x00, 0x00, 0x17, 0x15, 0x12, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 15 Info:( 0-> -4), [ -4, 8], size=13
{
0x00, 0x00, 0x18, 0x16, 0x14, 0x12, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 16 Info:( 0-> -5), [ -5, 8], size=14
{
0x00, 0x18, 0x16, 0x15, 0x13, 0x12, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 17 Info:( 0-> -6), [ -6, 8], size=15
{
0x17, 0x16, 0x15, 0x14, 0x12, 0x11, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
},
// Mode 18 Info:( 0-> -7), [ -7, 7], size=15
{
0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
},
// Mode 19 Info:( 0-> -6), [ -6, 8], size=15
{
0x09, 0x0A, 0x0B, 0x0C, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 20 Info:( 0-> -5), [ -5, 8], size=14
{
0x00, 0x08, 0x0A, 0x0B, 0x0D, 0x0E, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 21 Info:( 0-> -4), [ -4, 8], size=13
{
0x00, 0x00, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 22 Info:( 0-> -3), [ -3, 8], size=12
{
0x00, 0x00, 0x00, 0x09, 0x0B, 0x0E, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 23 Info:( 0-> -2), [ -2, 8], size=11
{
0x00, 0x00, 0x00, 0x00, 0x09, 0x0C, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 24 Info:( 0-> -1), [ -1, 8], size=10
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 25 Info:( 0-> 0), [ 0, 8], size= 9
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 26 Info:( 1-> 1), [ 1, 8], size= 8
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
},
// Mode 27 Info:( 1-> 1), [ 1, 9], size= 9
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
},
// Mode 28 Info:( 1-> 2), [ 1, 10], size=10
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A,
},
// Mode 29 Info:( 1-> 3), [ 1, 11], size=11
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B,
},
// Mode 30 Info:( 1-> 4), [ 1, 12], size=12
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C,
},
// Mode 31 Info:( 1-> 5), [ 1, 13], size=13
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
},
// Mode 32 Info:( 1-> 6), [ 1, 14], size=14
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E,
},
// Mode 33 Info:( 1-> 7), [ 1, 15], size=15
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
},
// Mode 34 Info:( 2-> 9), [ 2, 16], size=15
{
0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20,
},
},
// 16x16
{
// Mode 0
{
0,
},
// Mode 1
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 2 Info:( 2-> 17), [ 2, 32], size=31
{
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0F,
0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
},
// Mode 3 Info:( 1-> 14), [ 1, 29], size=29
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03,
},
// Mode 4 Info:( 1-> 11), [ 1, 27], size=27
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05,
},
// Mode 5 Info:( 1-> 9), [ 1, 25], size=25
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07,
},
// Mode 6 Info:( 1-> 7), [ 1, 23], size=23
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09,
},
// Mode 7 Info:( 1-> 5), [ 1, 21], size=21
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B,
},
// Mode 8 Info:( 1-> 3), [ 1, 19], size=19
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D,
},
// Mode 9 Info:( 1-> 2), [ 1, 17], size=17
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F,
},
// Mode 10 Info:( 1-> 1), [ 1, 16], size=16
{
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 11 Info:( 0-> 0), [ 0, 16], size=17
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 12 Info:( 0-> -2), [ -2, 16], size=19
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2D, 0x26, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 13 Info:( 0-> -4), [ -4, 16], size=21
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2E, 0x2B, 0x27, 0x24, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 14 Info:( 0-> -6), [ -6, 16], size=23
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2F, 0x2C, 0x2A, 0x27, 0x25, 0x22, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 15 Info:( 0-> -8), [ -8, 16], size=25
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2F, 0x2D, 0x2B, 0x29, 0x28, 0x26, 0x24, 0x22, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 16 Info:( 0->-10), [-10, 16], size=27
{
0x00, 0x00, 0x00, 0x00, 0x2F, 0x2E, 0x2C, 0x2B, 0x29, 0x28, 0x26, 0x25, 0x23, 0x22, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 17 Info:( 0->-12), [-12, 16], size=29
{
0x00, 0x00, 0x2F, 0x2E, 0x2C, 0x2B, 0x2A, 0x29, 0x27, 0x26, 0x25, 0x24, 0x22, 0x21, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
},
// Mode 18 Info:( 0->-15), [-15, 15], size=31
{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20,
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
},
// Mode 19 Info:( 0->-12), [-12, 16], size=29
{
0x00, 0x00, 0x11, 0x12, 0x14, 0x15, 0x16, 0x17, 0x19, 0x1A, 0x1B, 0x1C, 0x1E, 0x1F, 0x20, 0x21,
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 20 Info:( 0->-10), [-10, 16], size=27
{
0x00, 0x00, 0x00, 0x00, 0x11, 0x12, 0x14, 0x15, 0x17, 0x18, 0x1A, 0x1B, 0x1D, 0x1E, 0x20, 0x21,
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 21 Info:( 0-> -8), [ -8, 16], size=25
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x13, 0x15, 0x17, 0x18, 0x1A, 0x1C, 0x1E, 0x20, 0x21,
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 22 Info:( 0-> -6), [ -6, 16], size=23
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x14, 0x16, 0x19, 0x1B, 0x1E, 0x20, 0x21,
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 23 Info:( 0-> -4), [ -4, 16], size=21
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x15, 0x19, 0x1C, 0x20, 0x21,
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 24 Info:( 0-> -2), [ -2, 16], size=19
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x1A, 0x20, 0x21,
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 25 Info:( 0-> 0), [ 0, 16], size=17
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x21,
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 26 Info:( 1-> 1), [ 1, 16], size=16
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
},
// Mode 27 Info:( 1-> 2), [ 1, 17], size=17
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31,
},
// Mode 28 Info:( 1-> 3), [ 1, 19], size=19
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31, 0x32, 0x33,
},
// Mode 29 Info:( 1-> 5), [ 1, 21], size=21
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35,
},
// Mode 30 Info:( 1-> 7), [ 1, 23], size=23
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
},
// Mode 31 Info:( 1-> 9), [ 1, 25], size=25
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
},
// Mode 32 Info:( 1-> 11), [ 1, 27], size=27
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B,
},
// Mode 33 Info:( 1-> 14), [ 1, 29], size=29
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D,
},
// Mode 34 Info:( 2-> 17), [ 2, 32], size=31
{
0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31,
0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, 0x40,
},
},
// 32x32
{
// Mode 0
{
0,
},
// Mode 1
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 2 Info:( 2-> 33), [ 2, 64], size=63
{
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20, 0x1F,
0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0F,
0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
},
// Mode 3 Info:( 1-> 27), [ 1, 58], size=58
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06,
},
// Mode 4 Info:( 1-> 22), [ 1, 53], size=53
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B,
},
// Mode 5 Info:( 1-> 18), [ 1, 49], size=49
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F,
},
// Mode 6 Info:( 1-> 14), [ 1, 45], size=45
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13,
},
// Mode 7 Info:( 1-> 10), [ 1, 41], size=41
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17,
},
// Mode 8 Info:( 1-> 6), [ 1, 37], size=37
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x1F, 0x1E, 0x1D, 0x1C, 0x1B,
},
// Mode 9 Info:( 1-> 3), [ 1, 34], size=34
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x1F, 0x1E,
},
// Mode 10 Info:( 1-> 1), [ 1, 32], size=32
{
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 11 Info:( 0-> -1), [ -1, 32], size=34
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x40, 0x3F,
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 12 Info:( 0-> -4), [ -4, 32], size=37
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5A, 0x53, 0x4D, 0x46, 0x40, 0x3F,
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 13 Info:( 0-> -8), [ -8, 32], size=41
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5C, 0x59, 0x55, 0x52, 0x4E, 0x4B, 0x47, 0x44, 0x40, 0x3F,
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 14 Info:( 0->-12), [-12, 32], size=45
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x5E, 0x5B, 0x59, 0x56, 0x54, 0x51, 0x4F, 0x4C, 0x4A, 0x47, 0x45, 0x42, 0x40, 0x3F,
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 15 Info:( 0->-16), [-16, 32], size=49
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5E, 0x5C,
0x5A, 0x58, 0x57, 0x55, 0x53, 0x51, 0x4F, 0x4D, 0x4B, 0x49, 0x48, 0x46, 0x44, 0x42, 0x40, 0x3F,
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 16 Info:( 0->-20), [-20, 32], size=53
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5E, 0x5D, 0x5B, 0x5A, 0x58, 0x57,
0x55, 0x54, 0x52, 0x51, 0x4F, 0x4E, 0x4C, 0x4B, 0x49, 0x48, 0x46, 0x45, 0x43, 0x42, 0x40, 0x3F,
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 17 Info:( 0->-25), [-25, 32], size=58
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x5F, 0x5E, 0x5C, 0x5B, 0x5A, 0x59, 0x57, 0x56, 0x55, 0x54, 0x52,
0x51, 0x50, 0x4F, 0x4E, 0x4C, 0x4B, 0x4A, 0x49, 0x47, 0x46, 0x45, 0x44, 0x42, 0x41, 0x40, 0x3F,
0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F,
0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
},
// Mode 18 Info:( 0->-31), [-31, 31], size=63
{
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, 0x40,
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
},
// Mode 19 Info:( 0->-25), [-25, 32], size=58
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x22, 0x24, 0x25, 0x26, 0x27, 0x29, 0x2A, 0x2B, 0x2C, 0x2E,
0x2F, 0x30, 0x31, 0x32, 0x34, 0x35, 0x36, 0x37, 0x39, 0x3A, 0x3B, 0x3C, 0x3E, 0x3F, 0x40, 0x41,
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 20 Info:( 0->-20), [-20, 32], size=53
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x23, 0x25, 0x26, 0x28, 0x29,
0x2B, 0x2C, 0x2E, 0x2F, 0x31, 0x32, 0x34, 0x35, 0x37, 0x38, 0x3A, 0x3B, 0x3D, 0x3E, 0x40, 0x41,
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 21 Info:( 0->-16), [-16, 32], size=49
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x24,
0x26, 0x28, 0x29, 0x2B, 0x2D, 0x2F, 0x31, 0x33, 0x35, 0x37, 0x38, 0x3A, 0x3C, 0x3E, 0x40, 0x41,
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 22 Info:( 0->-12), [-12, 32], size=45
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x22, 0x25, 0x27, 0x2A, 0x2C, 0x2F, 0x31, 0x34, 0x36, 0x39, 0x3B, 0x3E, 0x40, 0x41,
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 23 Info:( 0-> -8), [ -8, 32], size=41
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x27, 0x2B, 0x2E, 0x32, 0x35, 0x39, 0x3C, 0x40, 0x41,
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 24 Info:( 0-> -4), [ -4, 32], size=37
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x2D, 0x33, 0x3A, 0x40, 0x41,
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 25 Info:( 0-> -1), [ -1, 32], size=34
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x40, 0x41,
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 26 Info:( 1-> 1), [ 1, 32], size=32
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
},
// Mode 27 Info:( 1-> 3), [ 1, 34], size=34
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
0x61, 0x62,
},
// Mode 28 Info:( 1-> 6), [ 1, 37], size=37
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
0x61, 0x62, 0x63, 0x64, 0x65,
},
// Mode 29 Info:( 1-> 10), [ 1, 41], size=41
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
},
// Mode 30 Info:( 1-> 14), [ 1, 45], size=45
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D,
},
// Mode 31 Info:( 1-> 18), [ 1, 49], size=49
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
0x71,
},
// Mode 32 Info:( 1-> 22), [ 1, 53], size=53
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
0x71, 0x72, 0x73, 0x74, 0x75,
},
// Mode 33 Info:( 1-> 27), [ 1, 58], size=58
{
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60,
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A,
},
// Mode 34 Info:( 2-> 33), [ 2, 64], size=63
{
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60, 0x61,
0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71,
0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, 0x80,
},
},
};
//Int offset = (nIntraPredAngle < 0 ? 2*nSize-1-nSize+1-1 + (nMode == 18 && nSize != 4) + (nSize == 4) : 0);
// __constant__ static CUInt8 ucOffset[4][NUM_INTRA_MODE] = {
// // 4x4
// {
// 0, 0, // Invalid
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0, 4, 4, 4, 4, 4, 4, 4,
// 4, 4, 4, 4, 4, 4, 4, 4,
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0
// },
// // 8x8
// {
// 0, 0, // Invalid
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0, 7, 7, 7, 7, 7, 7, 7,
// 8, 7, 7, 7, 7, 7, 7, 7,
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0
// },
// // 16x16
// {
// 0, 0, // Invalid
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0, 15, 15, 15, 15, 15, 15, 15,
// 16, 15, 15, 15, 15, 15, 15, 15,
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0
// },
// // 32x32
// {
// 0, 0, // Invalid
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0, 31, 31, 31, 31, 31, 31, 31,
// 32, 31, 31, 31, 31, 31, 31, 31,
// 0, 0, 0, 0, 0, 0, 0, 0,
// 0
// },
// };
// Table 8-5 Specification of intraPredAngle
__constant__ CInt8 cxg_aucIntraPredAngle[NUM_INTRA_MODE] = {
0, 0, // Invalid
32, 26, 21, 17, 13, 9, 5, 2,
0, -2, -5, -9,-13,-17,-21,-26,
-32,-26,-21,-17,-13, -9, -5, -2,
0, 2, 5, 9, 13, 17, 21, 26,
32
};
// Table 8-4 (Changed)
__constant__ UInt64 cxg_aucIntraFilterTypePacked[5] = {
0x000000000ULL,
0x400040005ULL,
0x7F1FFF1FDULL,
0x7FBFFFBFDULL,
0x000000000ULL,
};
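// Bit-packed replacement for the cxg_aucIntraFilterType matrix kept in comments below:
// bit nMode of entry [log2(nSize)-2] is the reference-smoothing flag for that mode.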
// __constant__ CUInt8 cxg_aucIntraFilterType[5][NUM_INTRA_MODE] = {
// // Index: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11, 12,13,14,15, 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31, 32,33,34
// // Diff: 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8
// /* 4x4 */ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
// /* 8x8 */ { 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 },
// /* 16x16 */ { 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1 },
// /* 32x32 */ { 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
// /* 64x64 */ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
// };
// typedef union PackedUInt8
// {
// struct __align__(8)
// {
// UInt8 uc[4];
// };
// UInt32 hInt;
// } PackedUInt8;
//
static __device__ __inline__ void __prefetch(void *ptr)
{
asm volatile ("prefetch.L1 [%0];" : : "r"(ptr));
}
#ifndef DEBUG_GPU
__global__
#endif
void xPredIntraAng_gpu0(
UInt8 *pucDst,
CUInt8 *pucRef,
CUInt32 nSize,
CUInt32 bLuma
)
{
CUInt32 nMode = 1 + threadIdx.x + blockIdx.x * (NUM_INTRA_MODE - 1)/2;
CUInt32 nLog2Size = xLog2_new( nSize );
CInt32 nIntraPredAngle = cxg_aucIntraPredAngle[nMode];
//CUInt bFilter = cxg_aucIntraFilterType[nLog2Size-2][nMode];
CUInt bFilter = (cxg_aucIntraFilterTypePacked[nLog2Size-2] >> nMode) & 1;
CUInt bModeHor = (nMode < 18);
CUInt8 *pucTopLeft;
UInt32 i, x, k;
__shared__ __declspec(align(8)) UInt8 ucRef_s[2][4*MAX_CU_SIZE+1+7]; // +3 -> +7
UInt8 pucRefMain[2*MAX_CU_SIZE];
//__shared__ UInt8 ucRefMain[NUM_INTRA_MODE][2*MAX_CU_SIZE];
//UInt8 *pucRefMain = ucRefMain[nMode];
UInt8 uclDst[MAX_CU_SIZE*MAX_CU_SIZE];
Int8 pucFilterPix[MAX_CU_SIZE];
UInt8 ucDcVal;
UInt32 uiSumTop=0, uiSumLeft=0;
CUInt8 *pModeIdx = ucModeIdx[nLog2Size-2][nMode];
Int offset;
if ( threadIdx.x == 0 ) {
UInt64 *P0 = (UInt64 *)&ucRef_s[0][0];
UInt64 *P1 = (UInt64 *)&ucRef_s[1][0];
UInt64 *Q0 = (UInt64 *)(pucRef );
UInt64 *Q1 = (UInt64 *)(pucRef + 4*MAX_CU_SIZE+1+7);
for( i=0; i<nSize>>1; i++ ) {
*P0++ = *Q0++;
*P1++ = *Q1++;
}
ucRef_s[0][4*nSize] = pucRef[4*nSize];
ucRef_s[1][4*nSize] = pucRef[4*nSize+4*MAX_CU_SIZE+1+7];
//memcpy( ucRef_s, pucRef, 4*nSize+1 );
}
__syncthreads();
pucRef = ucRef_s[bFilter];
pucDst += nMode * MAX_CU_SIZE * MAX_CU_SIZE;
//pucRef += (bFilter ? 4*MAX_CU_SIZE+1+7 : 0);
//__prefetch( (void *)(pucRef ) );
//__prefetch( (void *)(pucRef+64) );
pucTopLeft = pucRef + 2 * nSize;
// Get reference pixel
// for( i=0; i<2*nSize; i++ ) {
// CUInt8 ucIdx = ucModeIdx[nLog2Size-2][nMode][i];
// pucRefMain[i] = pucRef[ ucIdx ];
// }
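	// Build the main reference array through the per-mode ucModeIdx lookup (which already
	// folds in the side-reference projection for negative angles), while summing the top
	// and left neighbours for DC and precomputing the edge-filter deltas.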
for( i=0; i<nSize; i+=4 ) {
pucRefMain[i*2+0] = pucRef[ *pModeIdx++ ];
pucRefMain[i*2+1] = pucRef[ *pModeIdx++ ];
pucRefMain[i*2+2] = pucRef[ *pModeIdx++ ];
pucRefMain[i*2+3] = pucRef[ *pModeIdx++ ];
pucRefMain[i*2+4] = pucRef[ *pModeIdx++ ];
pucRefMain[i*2+5] = pucRef[ *pModeIdx++ ];
pucRefMain[i*2+6] = pucRef[ *pModeIdx++ ];
pucRefMain[i*2+7] = pucRef[ *pModeIdx++ ];
pucFilterPix[i+0] = (pucTopLeft[(i+1+0)*(bModeHor ? 1 : -1)] - pucTopLeft[0]) >> 1;
uiSumLeft += pucTopLeft[i+0-nSize];
uiSumTop += pucTopLeft[i+0+1];
pucFilterPix[i+1] = (pucTopLeft[(i+1+1)*(bModeHor ? 1 : -1)] - pucTopLeft[0]) >> 1;
uiSumLeft += pucTopLeft[i+1-nSize];
uiSumTop += pucTopLeft[i+1+1];
pucFilterPix[i+2] = (pucTopLeft[(i+1+2)*(bModeHor ? 1 : -1)] - pucTopLeft[0]) >> 1;
uiSumLeft += pucTopLeft[i+2-nSize];
uiSumTop += pucTopLeft[i+2+1];
pucFilterPix[i+3] = (pucTopLeft[(i+1+3)*(bModeHor ? 1 : -1)] - pucTopLeft[0]) >> 1;
uiSumLeft += pucTopLeft[i+3-nSize];
uiSumTop += pucTopLeft[i+3+1];
}
//ucDcVal = (uiSumTop + uiSumLeft + nSize) / (nSize + nSize);
ucDcVal = (uiSumTop + uiSumLeft + nSize) >> (1 + nLog2Size);
// 8.4.3.1.6
Int deltaPos = ((nMode == 2) || (nMode == 34) ? -32 : 0);
offset = (nMode > 10 && nMode < 26) ? nSize-1 + (nMode == 18 || nSize == 4 ? 1 : 0) : 0;
//offset = (nIntraPredAngle < 0 ? 2*nSize-1-nSize+1-1 + (nMode == 18 && nSize != 4) + (nSize == 4) : 0);
//offset = ucOffset[nLog2Size-2][nMode];
for( k=0; k<nSize; k++ ) {
deltaPos += nIntraPredAngle;
Int32 iIdx = deltaPos >> 5; // (8-53)
UInt32 iFact = deltaPos & 31; // (8-54)
Int refMainIndex = iIdx+offset;
UInt32 pix0 = pucRefMain[refMainIndex ];
UInt32 pix1 = pucRefMain[refMainIndex+1];
UInt32 pix = (nMode == DC_IDX) ? ucDcVal : ( ( ((32-iFact)*pix0+8) + (iFact*pix1+8) ) >> 5 );
Int8 pix_filter = ( bLuma && (nIntraPredAngle == 0) ) ? pucFilterPix[k] : 0;
UInt px, py;
//pix = Clip( pix + pix_filter );
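	// The inline PTX below performs a saturating add on the low byte, i.e. the Clip() of
	// pix + pix_filter without a branch.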
asm volatile ( "vadd.u32.u32.s32.sat %0.b0, %1.b0, %2.b0, 0;" : "=r"(pix) : "r"(pix), "r"((int)pix_filter) );
px = bModeHor ? k : k*MAX_CU_SIZE;
uclDst[px] = pix;
// Do linear filtering
for( x=1; x<nSize; x++ ) {
refMainIndex++;
pix0 = pix1;
pix1 = pucRefMain[refMainIndex+1];
pix = ( nMode == DC_IDX ) ? ucDcVal : ( ( (32-iFact)*pix0 + (iFact*pix1+16) ) >> 5 );
px = bModeHor ? k : x;
py = bModeHor ? x : k;
uclDst[py*MAX_CU_SIZE+px] = pix;
}
}
if ( bLuma && nMode == DC_IDX ) {
for( x=1; x<nSize; x++ ) {
uclDst[x ] = ( (pucRefMain[x+nSize] + 3 * ucDcVal + 2) >> 2 );
uclDst[x*MAX_CU_SIZE] = ( (pucRefMain[x ] + 3 * ucDcVal + 2) >> 2 );
}
uclDst[0] = ( pucRefMain[0] + pucRefMain[0+nSize] + 2 * ucDcVal + 2 ) >> 2;
}
{
for( i=0; i<nSize; i++ ) {
UInt64 *P0 = (UInt64 *)&pucDst[i*MAX_CU_SIZE];
UInt64 *Q0 = (UInt64 *)&uclDst[i*MAX_CU_SIZE];
for( x=0; x<nSize; x+=8 ) {
*P0++ = *Q0++;
}
}
}
#ifndef DEBUG_GPU
__syncthreads();
#endif
}
#ifndef DEBUG_GPU
__global__
#endif
void xSads_gpu0(
CUInt8 *pucDst,
CUInt8 *pucPix,
UInt32 *puiSads,
CUInt32 nSize,
CUInt32 lambda,
CUInt32 ucMostModeY0,
CUInt32 ucMostModeY1,
CUInt32 ucMostModeY2
)
{
CUInt32 bid = blockIdx.x; // Mode-1
CUInt32 tid = threadIdx.x; // Line
	CUInt32 nMode = bid + 1;
// __shared__ UInt32 uiSads[MAX_CU_SIZE];
UInt32 *P0, *P1, *Q0, *Q1;
UInt32 *P2, *Q2;
Int32 uiSad = 0;
UInt32 uiSadL = 0;
UInt32 i;
__declspec(align(8)) UInt8 ucSrc[MAX_CU_SIZE*MAX_CU_SIZE];
__declspec(align(8)) UInt8 ucRef[MAX_CU_SIZE*MAX_CU_SIZE];
if( nMode == ucMostModeY0 )
uiSadL = 1 * lambda;
else if( nMode == ucMostModeY1 || nMode == ucMostModeY2 )
uiSadL = 2 * lambda;
else
uiSadL = 3 * lambda;
pucDst += nMode * MAX_CU_SIZE * MAX_CU_SIZE + tid * MAX_CU_SIZE;
pucPix += tid * MAX_CU_SIZE;
puiSads += nMode;
P0 = (UInt32*)ucSrc;
P1 = (UInt32*)ucRef;
Q0 = (UInt32*)pucPix;
Q1 = (UInt32*)pucDst;
for( i=0; i<nSize; i+=4 ) {
*P0++ = *Q0++;
*P1++ = *Q1++;
}
// UInt32 uiSad0 = 0;
// for( i=0; i<nSize; i++ ) {
// uiSad0 = __usad( ucRef[i], ucSrc[i], uiSad0 );
// }
P2 = (UInt32*)ucSrc;
Q2 = (UInt32*)ucRef;
//P0[0] = 0x01020304;
//P1[0] = 0x05060708;
//UInt32 tmp = 0, tmp1=0;
for( i=0; i<nSize>>2; i++ ) {
asm volatile ( "vabsdiff4.u32.u32.u32.add %0, %1, %2, %3;" : "=r"(uiSad) : "r"(P2[i]), "r"(Q2[i]), "r"(uiSad) );
//asm volatile ( "vabsdiff4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(tmp) : "r"(P0[i]), "r"(P1[i]), "r"(uiSad) );
}
// if ( uiSad != uiSad0 ) {
// printf("[%2d]: uiSad=%08X, uiSad0=%08X\n", tid, uiSad, uiSad0);
// }
// uiSads[tid] = uiSad;
//uiSad = uiSad0;
//printf("[%2d]: uiSad=%08X, tmp=%08X\n", tid, uiSad, tmp);
#if 1
for( i=16; i>=1; i>>=1 ) {
uiSad += __shfl_xor(uiSad, i, 32);
}
if ( tid == 0 ) {
*puiSads = uiSad + uiSadL;
//printf("(%2d,%2d) -> %08X\n", nMode, tid, uiSad);
}
#else
if ( tid == 0 ) {
if( nMode == ucMostModeY0 )
uiSad += 1 * lambda;
else if( nMode == ucMostModeY1 || nMode == ucMostModeY2 )
uiSad += 2 * lambda;
else
uiSad += 3 * lambda;
for( i=1; i<nSize; i++ ) {
uiSad += uiSads[i];
}
*puiSads = uiSad;
//printf("(%2d,%2d) -> %08X\n", nMode, tid, uiSad);
}
#endif
__syncthreads();
}
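// Minimal sketch (not part of the original source) of the __shfl_xor butterfly reduction
// used above: after the five exchange steps every lane of the warp holds the full sum, so
// lane 0 can write the result. Assumes a full 32-thread warp is active.
//
// __device__ Int32 warpReduceSum(Int32 v)
// {
//     for( int mask=16; mask>=1; mask>>=1 )
//         v += __shfl_xor(v, mask, 32); // add the value held by lane (laneId ^ mask)
//     return v;
// }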
extern "C" __host__
void xInitGPU()
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if ( deviceCount > 0 ) {
cudaSetDevice( 0 );
}
}
extern "C" __host__
void xFreeGPU()
{
cudaDeviceReset();
}
typedef UInt32 __cdecl xSad( const UInt N, const UInt8 *pSrc, const UInt nStrideSrc, const UInt8 *pRef, const UInt nStrideRef );
extern xSad *xSadN[MAX_CU_DEPTH+1];
extern CUInt8 xg_aucIntraFilterType[5][NUM_INTRA_MODE];
extern void __cdecl xEncIntraPred( UInt8 *pucDstY, UInt8 *pucRefY, UInt nStride, UInt nMode, UInt nSize, UInt bIsLuma );
void __cdecl tPrintMatrix( FILE *fp, char *name, UInt8 *P, UInt uiStride, Int iSize );
extern "C" __host__
void __cdecl xEncIntraPred_gpu(
UInt8 *pucDst[2],
UInt8 *pucRef,
UInt nSize,
UInt bLuma,
UInt32 lambda,
UInt8 aucMostModeY[3],
UInt8 *pucPixY,
UInt32 *puiSad,
UInt32 *puiBestModeY,
UInt32 *puiTmpIdx,
cudaStream_t stream
)
{
CUInt nLog2Size = xLog2( nSize );
UInt32 /*nMode, */nBestModeY;
UInt32 uiSad, uiBestSadY;
UInt32 nTmpIdx;
__declspec(align(128)) UInt8 my_dst[NUM_INTRA_MODE][MAX_CU_SIZE*MAX_CU_SIZE];
UInt32 uiSads[NUM_INTRA_MODE];
UInt32 changed = FALSE;
UInt32 i;
// UInt8 *pucDst_gpu[2];
UInt8 *pucRef_gpu;
UInt8 *pucPixY_gpu;
UInt8 *my_dst_gpu;
UInt32 *puiSads_gpu;
float elapsedTimeInMs = 0.0f;
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
#ifndef DEBUG_GPU
// cudaHostRegister( pucDst[0], nSize*nSize, cudaHostRegisterMapped );
// cudaHostRegister( pucDst[1], nSize*nSize, cudaHostRegisterMapped );
cudaHostRegister( pucRef, 2*(4*MAX_CU_SIZE+1+7), cudaHostRegisterMapped );
cudaHostRegister( pucPixY, nSize*MAX_CU_SIZE, cudaHostRegisterMapped );
//cudaHostRegister( my_tmp, sizeof(my_tmp), cudaHostRegisterMapped );
cudaHostRegister( my_dst[0], sizeof(my_dst), cudaHostRegisterMapped );
cudaHostRegister( uiSads, sizeof(uiSads), cudaHostRegisterMapped );
// cudaHostGetDevicePointer( &pucDst_gpu[0], pucDst[0], 0 );
// cudaHostGetDevicePointer( &pucDst_gpu[1], pucDst[1], 0 );
cudaHostGetDevicePointer( &pucRef_gpu, pucRef, 0 );
cudaHostGetDevicePointer( &pucPixY_gpu, pucPixY, 0 );
//cudaHostGetDevicePointer( &pucMyTmp_gpu, my_tmp, 0 );
cudaHostGetDevicePointer( &my_dst_gpu, my_dst[0], 0 );
cudaHostGetDevicePointer( &puiSads_gpu, uiSads, 0 );
#else
// pucDst_gpu[0] = pucDst[0];
// pucDst_gpu[1] = pucDst[1];
pucRef_gpu = pucRef;
#endif
//memset( my_dst, 0xCD, sizeof(my_dst) );
nBestModeY = *puiBestModeY;
uiBestSadY = *puiSad;
nTmpIdx = *puiTmpIdx;
/*for( nMode=1; nMode<NUM_INTRA_MODE; nMode+=34 ) */{
cudaEventRecord( start, 0 );
xPredIntraAng_gpu0<<< 2, (NUM_INTRA_MODE-1)/2
#ifdef _OPENMP
, 0, stream
#endif
>>>(
my_dst_gpu,//pucDst_gpu[nTmpIdx],
pucRef_gpu,
nSize,
bLuma
);
xSads_gpu0<<< NUM_INTRA_MODE-1, nSize
#ifdef _OPENMP
, 0, stream
#endif
>>>(
my_dst_gpu,
pucPixY_gpu,
puiSads_gpu,
nSize,
lambda,
aucMostModeY[0],
aucMostModeY[1],
aucMostModeY[2]
);
cudaEventRecord( stop, 0 );
cudaEventSynchronize(stop);
//cudaDeviceSynchronize();
cudaEventElapsedTime( &elapsedTimeInMs, start, stop );
//printf("GPU Time=%f ms\n", elapsedTimeInMs);
{
static int sn = 0;
static int ss[4] = {0, 0, 0, 0};
static double sum[4] = {0, 0, 0, 0};
sum[nLog2Size-2] += elapsedTimeInMs;
ss[nLog2Size-2]++;
sn++;
if ( sn % 1024 == 0 ) {
printf("Avg = (%.4lf, %.4lf, %.4lf, %.4lf)/(%4d, %4d, %4d, %4d)\n", sum[0]/ss[0], sum[1]/ss[1], sum[2]/ss[2], sum[3]/ss[3], ss[0], ss[1], ss[2], ss[3]);
}
}
UInt32 modeOffset;
for( modeOffset=0; modeOffset<34; modeOffset++ ) {
UInt32 nRealMode = 1 + modeOffset;
#if 1//defined(DEBUG_GPU)
UInt i, j;
UInt8 tmp0[MAX_CU_SIZE*MAX_CU_SIZE];
UInt bFilter;
bFilter = xg_aucIntraFilterType[nLog2Size-2][nRealMode];
xEncIntraPred( tmp0, pucRef + (bFilter ? 4*MAX_CU_SIZE+1+7 : 0), MAX_CU_SIZE, nRealMode, nSize, TRUE );
for( i=0; i<nSize; i++ ) {
for( j=0; j<nSize; j++ ) {
if ( tmp0[i*MAX_CU_SIZE+j] != my_dst[nRealMode][i*MAX_CU_SIZE+j] ) {
printf("ERROR: Mode %2d at (%2d,%2d): %02X -> %02X\n", nRealMode, i, j, tmp0[i*MAX_CU_SIZE+j] & 0xFF, my_dst[nRealMode][i*MAX_CU_SIZE+j] & 0xFF );
tPrintMatrix( stdout, "Gold\n", tmp0, MAX_CU_SIZE, nSize );
tPrintMatrix( stdout, "My\n", my_dst[nRealMode], MAX_CU_SIZE, nSize );
xEncIntraPred( tmp0, pucRef + (bFilter ? 4*MAX_CU_SIZE+1+7 : 0), MAX_CU_SIZE, nRealMode, nSize, TRUE );
xPredIntraAng_gpu0<<< 1, 1 >>>(
my_dst_gpu,
pucRef_gpu,
nSize,
bLuma
);
}
}
}
if( nRealMode == aucMostModeY[0] )
uiSad = 1 * lambda;
else if( nRealMode == aucMostModeY[1] || nRealMode == aucMostModeY[2] )
uiSad = 2 * lambda;
else
uiSad = 3 * lambda;
uiSad += xSadN[nLog2Size-2]( nSize,
pucPixY, MAX_CU_SIZE,
my_dst[nRealMode], MAX_CU_SIZE );
if ( uiSad != uiSads[nRealMode] ) {
printf( "ERROR: Mode %2d, Sad=%d -> %d\n", nRealMode, uiSad, uiSads[nRealMode] );
}
#endif
uiSad = uiSads[nRealMode];
if( uiSad < uiBestSadY ) {
uiBestSadY = uiSad;
nBestModeY = nRealMode;
//nTmpIdx ^= 1;
changed = TRUE;
}
}
}
if ( changed ) {
for( i=0;i<nSize; i++ ) {
memcpy( pucDst[nTmpIdx^1] + i*MAX_CU_SIZE, &my_dst[nBestModeY][i*MAX_CU_SIZE], nSize );
}
}
*puiSad = uiBestSadY;
*puiBestModeY = nBestModeY;
*puiTmpIdx = nTmpIdx;
#ifndef DEBUG_GPU
cudaHostUnregister( uiSads );
cudaHostUnregister( my_dst[0] );
cudaHostUnregister( pucPixY );
// cudaHostUnregister( pucDst[0] );
// cudaHostUnregister( pucDst[1] );
cudaHostUnregister( pucRef );
cudaEventDestroy(stop);
cudaEventDestroy(start);
#endif
}
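// Illustrative sketch (not part of the original source) of the zero-copy pattern used in
// xEncIntraPred_gpu: page-lock an existing host buffer, obtain a device-visible alias,
// launch, then unregister. Error checks are omitted and buf/size/someKernel are placeholders.
//
// void runZeroCopySketch(UInt8 *buf, size_t size)
// {
//     UInt8 *d_buf;
//     cudaHostRegister( buf, size, cudaHostRegisterMapped ); // pin + map the host buffer
//     cudaHostGetDevicePointer( &d_buf, buf, 0 );            // device alias of the same memory
//     // someKernel<<< grid, block >>>( d_buf );             // device accesses go over the bus
//     cudaDeviceSynchronize();
//     cudaHostUnregister( buf );
// }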
|
52d0b6ac70b2df9f97ff9752bdb105a5ef44e27f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//Copyright 2019, UChicago Argonne, LLC
//
//All Rights Reserved
//
//Software Name: ptychopy
//
//By: Argonne National Laboratory
//
//OPEN SOURCE LICENSE
//Any publication using the package should cite for
//Yue K, Deng J, Jiang Y, Nashed Y, Vine D, Vogt S.
//Ptychopy: GPU framework for ptychographic data analysis.
//X-Ray Nanoimaging: Instruments and Methods V 2021 .
//International Society for Optics and Photonics.
//
//
//Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
//following conditions are met:
//
//1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
//disclaimer.
//
//2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
//disclaimer in the documentation and/or other materials provided with the distribution.
//
//3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
//derived from this software without specific prior written permission.
//
//DISCLAIMER
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
//INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
//DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
//SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
//SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
//WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
//OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifndef UTILITIESKERNELS_CU_
#define UTILITIESKERNELS_CU_
#include "utilities.h"
#include "reductions.cu"
#include <math.h>
#include <math_constants.h>
#include <float.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include <thrust/extrema.h>
#define EPS 1e-3
//#include <hipcub/hipcub.hpp>
//using namespace cub;
/* extern shared memory for dynamic allocation */
extern __shared__ real_t shared_array[];
// ~10800 entries is roughly the maximum that fits in constant memory (64 KB total)
const unsigned int MAX_IND_READ = 3000;
//__constant__ unsigned int gC_ind_read[MAX_IND_READ];
__constant__ unsigned int gC_pos_X[MAX_IND_READ];
__constant__ unsigned int gC_pos_Y[MAX_IND_READ];
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
texture<float4, 1, hipReadModeElementType> g_transferTex; // 1D transfer function texture
hipArray *d_transferFuncArray = 0;
struct complexSum
{
complex_t normalizeBy;
complexSum(complex_t f=make_complex_t(1,0)) : normalizeBy(f)
{}
__host__ __device__ complex_t operator()(const complex_t&lhs, const complex_t&rhs) const
{return mul_complex_t(add_complex_t(lhs,rhs),normalizeBy);}
};
struct maxFloat2
{
__host__ __device__ float2 operator()(float2 lhs, float2 rhs)
{return make_float2(thrust::max(lhs.x, rhs.x), thrust::max(lhs.y, rhs.y));}
};
struct minFloat2
{
__host__ __device__ float2 operator()(float2 lhs, float2 rhs)
{return make_float2(thrust::min(lhs.x, rhs.x), thrust::min(lhs.y, rhs.y));}
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
//__global__ void d_check(real_t* d_data)
//{
//
// unsigned int Index = (blockIdx.x * blockDim.x) + threadIdx.x;
//
//// real_t temp=d_data[Index];
//// unsigned int sq1=1;
//}
//__global__ void d_checkcomplex(complex_t* d_data)
//{
//
// unsigned int Index = (blockIdx.x * blockDim.x) + threadIdx.x;
//
//// complex_t temp=d_data[Index];
//// unsigned int sq1=1;
//}
template<unsigned int threadNum>
__global__ void d_reduceToSum(const complex_t* d_u, complex_t* d_output, unsigned int x1, unsigned int y1,
unsigned int xNum, unsigned int yNum, unsigned int alignedY, bool enoughThreads)
{
complex_t* s_addends = (complex_t*)shared_array;
unsigned int row = blockIdx.x;//(blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int outIndex = enoughThreads? blockIdx.x : (blockIdx.y*gridDim.x) + blockIdx.x;
unsigned int index = ((row+x1)*alignedY) + col + y1;
if(row<xNum)
{
s_addends[threadIdx.x] = (col<yNum)? d_u[index] : make_complex_t(0,0);
reduceToSumComplex<threadNum>(s_addends, threadIdx.x);
if(threadIdx.x == 0)
d_output[outIndex] = s_addends[0];
}
}
template<unsigned int threadNum>
__global__ void d_reduceToSum(const real_t* d_u, real_t* d_output, unsigned int x1, unsigned int y1,
unsigned int xNum, unsigned int yNum, unsigned int alignedY, bool enoughThreads)
{
real_t* s_addends = (real_t*)shared_array;
unsigned int row = blockIdx.x;//(blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int outIndex = enoughThreads? blockIdx.x : (blockIdx.y*gridDim.x) + blockIdx.x;
unsigned int index = ((row+x1)*alignedY) + col + y1;
if(row<xNum)
{
s_addends[threadIdx.x] = (col<yNum)? d_u[index] : 0;
reduceToSum<real_t,threadNum>(s_addends, threadIdx.x);
if(threadIdx.x == 0)
d_output[outIndex] = s_addends[0];
}
}
__global__ void d_complexSubtract(const complex_t* a, const complex_t* b, complex_t* result, unsigned int y)
{
unsigned int index = (((blockIdx.x*blockDim.y)+threadIdx.y) * blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
result[index] = sub_complex_t(a[index], b[index]);
}
__global__ void d_subtract(const real_t* a, const real_t* b, real_t* result, unsigned int y)
{
unsigned int index = (((blockIdx.x*blockDim.y)+threadIdx.y) * blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
result[index] = a[index]-b[index];
}
__global__ void d_addFactorDivide(real_t* a, real_t* result, real_t factor, unsigned int y)
{
unsigned int index = (((blockIdx.x*blockDim.y)+threadIdx.y) * blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
{
real_t tmp=a[index];
result[index]=tmp/(tmp+factor);
}
}
__global__ void d_object_sum_update_Gfun(complex_t* a, real_t* b, complex_t* result, real_t factor, unsigned int y)
{
unsigned int index = (((blockIdx.x*blockDim.y)+threadIdx.y) * blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
{
complex_t tmp= make_complex_t((b[index]+factor), 0);
result[index]=div_complex_t(a[index], tmp);
}
}
__global__ void d_addFactor(complex_t* a, complex_t* result, complex_t factor, unsigned int y)
{
unsigned int index = (((blockIdx.x*blockDim.y)+threadIdx.y) * blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
{
result[index]=add_complex_t(a[index], factor);
}
}
__global__ void d_addFactor(real_t* a, real_t* result, real_t factor, unsigned int y)
{
unsigned int index = (((blockIdx.x*blockDim.y)+threadIdx.y) * blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
{
result[index]=a[index]+factor;
}
}
template<bool enoughThreads>
__global__ void d_multiplyConju(const complex_t* a, const complex_t* b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c,
unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset,
unsigned int byOffset)
{
unsigned int aRow = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int aCol = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int bRow = aRow + bxOffset;
unsigned int bCol = aCol + byOffset;
unsigned int bIndex = (bRow * alignedY) + bCol;
aRow += axOffset;
aCol += ayOffset;
unsigned int aIndex = (aRow * alignedY) + aCol;
if(max(aRow,bRow)<x && max(aCol,bCol)<y)
{
complex_t temp = mul_complex_t(a[aIndex], conj_complex_t(b[bIndex]));
result[bIndex] = mul_complex_t(temp, make_complex_t(c, 0));
}
}
template<bool enoughThreads>
__global__ void d_complexMultiply(const complex_t* a, const complex_t* b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c,
unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset,
unsigned int byOffset)
{
unsigned int aRow = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int aCol = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int bRow = aRow + bxOffset;
unsigned int bCol = aCol + byOffset;
unsigned int bIndex = (bRow * alignedY) + bCol;
aRow += axOffset;
aCol += ayOffset;
unsigned int aIndex = (aRow * alignedY) + aCol;
if(max(aRow,bRow)<x && max(aCol,bCol)<y)
{
complex_t temp = mul_complex_t(a[aIndex], b[bIndex]);
result[bIndex] = mul_complex_t(temp, make_complex_t(c, 0));
}
}
template<bool enoughThreads>
__global__ void d_complexMultiply(const real_t* a, const complex_t* b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c,
unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset,
unsigned int byOffset)
{
unsigned int aRow = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int aCol = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int bRow = aRow + bxOffset;
unsigned int bCol = aCol + byOffset;
unsigned int bIndex = (bRow * alignedY) + bCol;
aRow += axOffset;
aCol += ayOffset;
unsigned int aIndex = (aRow * alignedY) + aCol;
if(max(aRow,bRow)<x && max(aCol,bCol)<y)
{
result[bIndex] = mul_complex_t(make_complex_t(a[aIndex], 0), b[bIndex]);
}
}
template<bool enoughThreads>
__global__ void d_complexMultiply(const complex_t* a, complex_t b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t temp = mul_complex_t(a[index], b);
result[index] = mul_complex_t(temp, make_complex_t(c, 0));
}
}
template<bool enoughThreads>
__global__ void d_multiply(const real_t* a, real_t b, real_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
result[index] = a[index]*b;
}
}
template<bool enoughThreads>
__global__ void d_mul_rca_mulc_rcr(complex_t* a, complex_t* b, complex_t* c, real_t* weight_proj,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t temp1=mul_complex_t(a[index],b[index]);
float sum2denom=abs_complex_t(temp1);
complex_t temp3=mul_complex_t(c[index], conj_complex_t(temp1));
float sum2nom=real_complex_t(temp3);
weight_proj[index]=0.1*sum2nom/(sum2denom*sum2denom);
}
}
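// Illustrative note (not part of the original source): per element the kernel above evaluates
//   weight_proj = 0.1 * Re( c * conj(a*b) ) / |a*b|^2 ,
// i.e. a damped scalar projection of c onto the product wave a*b, which can serve as a
// per-pixel relaxation weight.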
// b holds a single page of pagex rows; every page of a is multiplied element-wise by it
template<bool enoughThreads>
__global__ void d_multiplyPage(const complex_t* a, complex_t* b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, unsigned int pagex)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
// bcol=col
unsigned int brow=row%pagex;
unsigned int aindex = (row * alignedY) + col;
unsigned int bindex = (brow * alignedY) + col;
if(row<x && col<y)
{
result[aindex] = mul_complex_t(a[aindex], b[bindex]);
}
}
template<bool enoughThreads>
__global__ void d_multiplyAbsConjuRealWhole(const complex_t* a, complex_t* b, complex_t* c, real_t* result1,
real_t* result2, unsigned int x, unsigned int y, unsigned int alignedY,
unsigned int pagex)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t img = mul_complex_t(a[index], b[index]);
real_t temp = abs_complex_t(img);
result1[index] = temp*temp;
img = mul_complex_t(c[index], conj_complex_t(img));
result2[index]=real_complex_t(img);
}
}
template<bool enoughThreads>
__global__ void d_multiplyAbsConjuReal(const complex_t* a, complex_t* b, complex_t* c, real_t* result1,
real_t* result2, unsigned int x, unsigned int y, unsigned int alignedY,
unsigned int pagex)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
// bcol=col
unsigned int brow=row%pagex;
unsigned int aindex = (row * alignedY) + col;
unsigned int bindex = (brow * alignedY) + col;
if(row<x && col<y)
{
complex_t img = mul_complex_t(a[aindex], b[bindex]);
real_t temp = abs_complex_t(img);
result1[aindex] = temp*temp;
img = mul_complex_t(c[aindex], conj_complex_t(img));
result2[aindex]=real_complex_t(img);
}
}
// b holds a single row of factors, one per column; every row of a is scaled element-wise by it
template<bool enoughThreads>
__global__ void d_multiplyRow(const complex_t* a, real_t* b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t temp = mul_complex_t(a[index], make_complex_t(b[col], 0));
result[index] = temp;
}
}
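// Minimal host-side sketch (not part of the original source) of how d_multiplyRow could be
// launched to scale every row of a by the 1 x y factor vector b; the grid/block setup copies
// the h_multiply wrappers further below and assumes alignedY fits in one thread block.
//
// __host__ void h_multiplyRowSketch(const complex_t* a, real_t* b, complex_t* result,
//         unsigned int x, unsigned int y, unsigned int alignedY)
// {
//     unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
//     dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
//     dim3 block(alignedY, sliceNum, 1);
//     hipLaunchKernelGGL(( d_multiplyRow<true>), dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, 1.0);
//     cutilCheckMsg("d_multiplyRow() execution failed!\n");
// }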
// b holds a single column of factors, one per row (indices 0..x-1); every element of a row is scaled by that row's factor
template<bool enoughThreads>
__global__ void d_multiplyColum(const complex_t* a, real_t* b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t temp = mul_complex_t(a[index], make_complex_t(b[row], 0));
result[index] = temp;
}
}
//function [AA1,AA2,AA4, Atb1,Atb2] = ...
// get_optimal_step_lsq(chi,dO,dP,O,P, lambda)
// % fast kernel for estimation of optimal probe and object steps
// dOP = dO.*P;
// dPO = dP.*O;
// cdOP = conj(dOP);
// cdPO = conj(dPO);
//
// AA1 = real(dOP .* cdOP)+lambda;
// AA2 = (dOP .* cdPO);
// AA4 = real(dPO .* cdPO)+lambda;
// Atb1 = real(cdOP .* chi);
// Atb2 = real(cdPO .* chi);
//end
template<bool enoughThreads>
__global__ void d_get_optimal_step_lsq(complex_t* chi, complex_t* object_update_proj, complex_t* dPO, complex_t* probe, real_t lambda,
real_t* AA1, complex_t* AA2, real_t* AA4, real_t* Atb1, real_t* Atb2,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t dOP=mul_complex_t(object_update_proj[index], probe[index]);
complex_t cdOP=conj_complex_t(dOP);
complex_t cdPO=conj_complex_t(dPO[index]);
AA1[index]=real_complex_t(mul_complex_t(dOP, cdOP))+lambda;
AA2[index]=mul_complex_t(dOP, cdPO);
AA4[index] = real_complex_t(mul_complex_t(dPO[index], cdPO))+lambda;
Atb1[index]=real_complex_t(mul_complex_t(cdOP, chi[index]));
Atb2[index]=real_complex_t(mul_complex_t(cdPO, chi[index]));
}
}
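// Minimal host-side sketch (not part of the original source) of a wrapper for the LSQ step
// kernel above; argument names follow the MATLAB reference in the comment and the grid/block
// layout copies the other h_* wrappers in this file (alignedY assumed to fit in one block).
//
// __host__ void h_get_optimal_step_lsq_sketch(complex_t* chi, complex_t* dO_proj, complex_t* dPO,
//         complex_t* probe, real_t lambda, real_t* AA1, complex_t* AA2, real_t* AA4,
//         real_t* Atb1, real_t* Atb2, unsigned int x, unsigned int y, unsigned int alignedY)
// {
//     unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
//     dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
//     dim3 block(alignedY, sliceNum, 1);
//     hipLaunchKernelGGL(( d_get_optimal_step_lsq<true>), dim3(grid), dim3(block), 0, 0, chi, dO_proj, dPO, probe, lambda,
//             AA1, AA2, AA4, Atb1, Atb2, x, y, alignedY);
//     cutilCheckMsg("d_get_optimal_step_lsq() execution failed!\n");
// }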
template<bool enoughThreads>
__global__ void d_complexMultiply(const complex_t* a, real_t c, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t temp=mul_complex_t(a[index], make_complex_t(c, 0.0));
result[index] = temp;
}
// int temp=1;
}
template<bool enoughThreads>
__global__ void d_realMultiply(real_t* a, real_t* b, real_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c,
unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset,
unsigned int byOffset)
{
unsigned int aRow = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int aCol = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int bRow = aRow + bxOffset;
unsigned int bCol = aCol + byOffset;
unsigned int bIndex = (bRow * alignedY) + bCol;
aRow += axOffset;
aCol += ayOffset;
unsigned int aIndex = (aRow * alignedY) + aCol;
if(max(aRow,bRow)<x && max(aCol,bCol)<y)
{
result[aIndex]=a[aIndex]*b[bIndex];
}
}
template<bool enoughThreads>
__global__ void d_realMultiply(real_t* a, real_t* result,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
real_t temp=a[index]*result[index];
result[index] = temp;
}
}
template<bool enoughThreads>
__global__ void d_realToRGBA(const real_t* a, real_t c, float4* result, unsigned int X, unsigned int Y, unsigned int alignedY, float transferOffset, float transferScale)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
unsigned int oIndex = (row * Y) + col;
if(row<X && col<Y)
{
float normalizedV = (float) (a[index]*c);
result[oIndex] = tex1D(g_transferTex, (normalizedV-transferOffset)*transferScale);
}
}
template<bool enoughThreads>
__global__ void d_realToGray(const real_t* a, real_t c, float* result, unsigned int X, unsigned int Y, unsigned int alignedY, bool outAligned)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
unsigned int oIndex = (row * Y) + col;
if(row<X && col<Y)
{
float normalizedV = (float) (a[index]*c);
result[outAligned?index:oIndex] = normalizedV;
}
}
template<unsigned char op, bool enoughThreads>
__global__ void d_complexToDouble(const complex_t* a, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY, bool squared)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
real_t temp = 0;
switch(op)
{
case 'a': temp = abs_complex_t(a[index]); break;
case 'p': temp = atan2_real_t(a[index].y, a[index].x); break;
case 'r': temp = real_complex_t(a[index]); break;
case 'i': temp = imag_complex_t(a[index]); break;
default: temp = 0; break;
}
result[index] = squared? temp*temp: temp;
}
}
template<bool enoughThreads>
__global__ void d_realComplexExp(const real_t* src, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY, real_t factor)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t temp=make_complex_t((cos_real_t(src[index]*factor)), (sin_real_t(src[index]*factor)));
result[index]=temp;
}
}
template<bool enoughThreads>
__global__ void d_realsquareRoot(real_t* d_arr, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
// real_t temp = 0;
// temp= sqrt_real_t(d_arr[index]);
result[index]=sqrt_real_t(d_arr[index]);
}
}
template<bool enoughThreads>
__global__ void d_square(real_t* d_arr, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
real_t tmp=d_arr[index];
result[index]=tmp*tmp;
}
}
__device__ bool letFloat(const real_t* beamstopMask, unsigned int index, const real_t saturationValue, const real_t diffValue)
{
bool toFloat = beamstopMask? beamstopMask[index]<0.99:false;
toFloat = toFloat || (diffValue-saturationValue)>=0;
return toFloat;
}
__device__ complex_t modulusConstraint(complex_t psi, real_t det_mod)
{
real_t sinFunc, cosFunc;
sincos_real_t(atan2_real_t(psi.y,psi.x),&sinFunc,&cosFunc);
return make_complex_t(det_mod*cosFunc, det_mod*sinFunc);
}
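// Illustrative note (not part of the original source): the modulus constraint keeps the phase
// of psi and replaces its magnitude with the measured amplitude,
//   psi' = det_mod * exp( i*arg(psi) ) ,
// which is exactly what the sincos of atan2 above computes.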
__global__ void d_adjustFFT(const complex_t* d_psi, complex_t* d_output, const real_t* d_det_mod, const real_t* d_mask,
const real_t saturationValue, unsigned int y, real_t normalizeBy)
{
unsigned int row = (blockIdx.x*blockDim.y)+threadIdx.y;
unsigned int col = threadIdx.x;
unsigned int psiIndex = (row * blockDim.x) + col;
if(col < y)
{
complex_t psi = d_psi[psiIndex];
real_t diffValue = d_det_mod[psiIndex];
// if(diffValue>=saturationValue)
// {
// printf("diffValue is %f, saturationValue is %f \n", diffValue, saturationValue);
// printf(" row is %u, column is %u, complex_t psi x is %f, psi y is %f \n", row, col, psi.x, psi.y);
// }
bool toFloat = letFloat(d_mask,psiIndex,saturationValue, diffValue);
// d_output[psiIndex] = toFloat?psi:mul_complex_t(modulusConstraint(psi, diffValue), make_complex_t(normalizeBy, 0.0));
d_output[psiIndex] = mul_complex_t(toFloat?psi:modulusConstraint(psi, diffValue), make_complex_t(normalizeBy, 0.0));
}
}
__global__ void d_adjustModalFFT(const complex_t* d_psi, complex_t* d_output, const real_t* d_det_mod, const real_t* d_mask,
const real_t saturationValue, unsigned int modeNum, unsigned int x, unsigned int y, real_t normalizeBy)
{
unsigned int modeIndex = (blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int detIndex = (modeIndex*blockDim.x) + threadIdx.x;
if(modeIndex<x && threadIdx.x<y)
{
real_t modalSum = 0, avdata = d_det_mod[detIndex];
for(unsigned int i=0; i<modeNum; ++i)
{
unsigned int psiIndex = ((modeIndex+(i*x))*blockDim.x) + threadIdx.x;
real_t psiFFtAbs = abs_complex_t(d_psi[psiIndex]);
modalSum += psiFFtAbs * psiFFtAbs;
}
modalSum = rsqrt_real_t(modalSum);
for(unsigned int i=0; i<modeNum; ++i)
{
unsigned int psiIndex = ((modeIndex+(i*x))*blockDim.x) + threadIdx.x;
bool toFloat = letFloat(d_mask, detIndex, saturationValue, avdata);
// d_output[psiIndex] = toFloat?d_psi[psiIndex]:mul_complex_t(d_psi[psiIndex], make_complex_t(avdata*modalSum*normalizeBy, 0.0));
d_output[psiIndex] = mul_complex_t(d_psi[psiIndex], make_complex_t((toFloat?1:avdata)*modalSum*normalizeBy, 0.0));
}
}
}
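// Illustrative note (not part of the original source): for every detector pixel the loops above
// renormalise all probe modes jointly,
//   psi_i' = psi_i * det_mod / sqrt( sum_k |psi_k|^2 )   (times the FFT scale normalizeBy),
// so the summed modal intensity matches the measured diffraction amplitude while the relative
// modal weights are preserved; masked or saturated pixels skip the amplitude replacement and
// only pick up the normalisation.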
template<unsigned int threadNum>
__global__ void d_calculateER(const complex_t* d_psi, const real_t* d_detMod, real_t* d_output,
unsigned int x, unsigned int y, unsigned int alignedY, unsigned int modeNum, bool enoughThreads)
{
real_t* s_addends = (real_t*)shared_array;
unsigned int row = blockIdx.x;//(blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int outIndex = enoughThreads? blockIdx.x : (blockIdx.y*gridDim.x) + blockIdx.x;
unsigned int index = (row*alignedY) + col;
if(row<x)
{
complex_t modalSum = make_complex_t(0,0);
for(unsigned int i=0; i<modeNum; ++i)
{
unsigned int psiIndex = ((row+(i*x))*alignedY) + col;
modalSum = add_complex_t(modalSum, d_psi[psiIndex]);
}
s_addends[threadIdx.x] = (col<y)? abs_real_t( d_detMod[index] - abs_complex_t(modalSum) ) : 0;
reduceToSum<real_t, threadNum>(s_addends, threadIdx.x);
if(threadIdx.x == 0)
d_output[outIndex] = s_addends[0];
}
}
template<bool enoughThreads>
__global__ void d_realSpaceER(const complex_t* d_GT, const complex_t* d_obj, real_t* d_output,
unsigned int qx, unsigned int qy,
unsigned int outX, unsigned int outY,
unsigned int x1, unsigned int y1, unsigned int alignedY1,
unsigned int x2, unsigned int y2, unsigned int alignedY2)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int outIndex = (row*outY) + col;
unsigned int gtIndex = ((row+qx)*alignedY1) + col + qy;
unsigned int objIndex = ((row+qx)*alignedY2) + col + qy;
if(row<outX && col<outY)
{
complex_t gtVal = d_GT[gtIndex];
complex_t objVal = d_obj[objIndex];
real_t diff = abs_complex_t(gtVal) - abs_complex_t(objVal);
d_output[outIndex] = diff*diff;
}
}
template<typename T, bool enoughThreads>
__global__ void d_shiftY(const T* d_objectArray, T* d_output, float nx,
unsigned int offset, unsigned int X, unsigned int Y, unsigned int alignedY)
{
unsigned int xIndex = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int yIndex = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int objectArrayIndex = (xIndex * alignedY) + yIndex;
T saved = d_objectArray[objectArrayIndex];
if(xIndex<X && yIndex<Y)
{
int offsetY = yIndex - (roundf(nx) - offset);
if(offsetY < 0) offsetY += Y;
if(offsetY >= Y) offsetY -= Y;
offsetY += (xIndex * alignedY);
__syncthreads();
d_output[offsetY] = saved;
}
}
template<typename T, bool enoughThreads>
__global__ void d_shiftX(const T* d_objectArray, T* d_output, float ny,
unsigned int offset, unsigned int X, unsigned int Y, unsigned int alignedY)
{
unsigned int xIndex = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int yIndex = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
if(xIndex<X && yIndex<Y)
{
unsigned int objectArrayIndex = (xIndex * alignedY) + yIndex;
T saved = d_objectArray[objectArrayIndex];
int offsetX = xIndex - (roundf(ny) - offset);
if(offsetX < 0) offsetX += X;
if(offsetX >= X) offsetX -= X;
offsetX = (offsetX * alignedY) + yIndex;
__syncthreads();
d_output[offsetX] = saved;
}
}
template<typename T>
__global__ void d_imshift_fft(T* d_data, unsigned int midx, unsigned int midy, float radNo1, float radNo2,
unsigned int X, unsigned int Y, unsigned int alignedY)
{
unsigned int xIndex = threadIdx.x;
unsigned int yIndex = (blockIdx.x*blockDim.y) + threadIdx.y;
if(xIndex<Y && yIndex<X)
{
unsigned int objectArrayIndex = (yIndex * alignedY) + xIndex;
T saved = d_data[objectArrayIndex];
float xgridindex=xIndex;
float ygridindex=yIndex;
if (xIndex < midx)
xgridindex+=midx;
else
xgridindex-=midx;
if (yIndex < midy)
ygridindex+=midy;
else
ygridindex-=midy;
xgridindex=radNo1*(xgridindex/X-0.5);
ygridindex=radNo2*(ygridindex/Y-0.5);
real_t sumInitx=2*CUDART_PI*xgridindex;
real_t sumInity=2*CUDART_PI*ygridindex;
real_t costx=cos_real_t(sumInitx);
real_t sintx=-1*sin_real_t(sumInitx);
real_t costy=cos_real_t(sumInity);
real_t sinty=-1*sin_real_t(sumInity);
complex_t tempmulx = make_complex_t(costx, sintx);
complex_t tempmuly = make_complex_t(costy, sinty);
d_data[objectArrayIndex]=mul_complex_t(saved,mul_complex_t(tempmulx, tempmuly));
}
}
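// Illustrative note (not part of the original source): d_imshift_fft applies the Fourier shift
// theorem to an fftshifted spectrum. Each sample is multiplied by a phase ramp of the form
//   exp( -2*pi*i * ( radNo1*(u - 0.5) + radNo2*(v - 0.5) ) ) ,  u,v = normalised frequencies,
// so that after the inverse FFT the image is shifted by roughly (radNo1, radNo2) pixels,
// allowing sub-pixel shifts without real-space interpolation (axis conventions follow the code above).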
template<typename T>
__global__ void d_mirrorY(const T* d_objectArray, T* d_output, unsigned int objectArrayY)
{
unsigned int objectArrayIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
T saved = d_objectArray[objectArrayIndex];
if(threadIdx.x < objectArrayY)
{
unsigned int mirrorIndex = (blockIdx.x * blockDim.x) + (objectArrayY-threadIdx.x);
d_output[--mirrorIndex] = saved;
}
}
template<typename T>
__global__ void d_rot90(const T* src, T* dst, unsigned int rows, unsigned int cols, unsigned int pitch)
{
unsigned int row = (blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int col = threadIdx.x;
unsigned int tid = row * pitch + col;
unsigned int tid_out = (rows-col-1) * pitch + row;
//saved[threadIdx.x*blockDim.y+threadIdx.y] = srcDst[tid];
if(row<rows && col<cols)
dst[tid_out] = src[tid];//saved[threadIdx.x*blockDim.y+threadIdx.y];
}
template<unsigned int threadNum>
__global__ void d_innerProduct(const complex_t* d_u, const complex_t* d_v, complex_t* d_output,
real_t oneOverN, unsigned int y)
{
complex_t* s_addends = (complex_t*)shared_array;
unsigned int probeIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
complex_t value = (threadIdx.x<y)?mul_complex_t( conj_complex_t(d_u[probeIndex]), d_v[probeIndex]): make_complex_t(0,0);
s_addends[threadIdx.x] = make_complex_t(value.x*oneOverN,value.y*oneOverN);
reduceToSumComplex<threadNum>(s_addends,threadIdx.x);
if(threadIdx.x == 0)
d_output[blockIdx.x] = s_addends[0];
}
template<unsigned int threadNum>
__global__ void d_innerProductOne(const complex_t* d_u, const complex_t* d_v, complex_t* d_output,
real_t oneOverN, unsigned int y)
{
complex_t* s_addends = (complex_t*)shared_array;
unsigned int probeIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
complex_t value = (threadIdx.x<y)?mul_complex_t( conj_complex_t(d_u[probeIndex]), d_v[probeIndex]): make_complex_t(0,0);
s_addends[threadIdx.x] = make_complex_t(value.x*oneOverN,value.y*oneOverN);
reduceToSumComplex<threadNum>(s_addends,threadIdx.x);
if(threadIdx.x == 0)
d_output[blockIdx.x] = s_addends[0];
}
__global__ void d_innerProductModes(complex_t* d_u, complex_t* d_v, complex_t* d_factor,
unsigned int index, unsigned int ModeNumber, unsigned int probeX, unsigned int probeY, unsigned int offset)
{
unsigned int row = (blockIdx.x*blockDim.y) + threadIdx.y;
// unsigned int modeIndex = ((row+((blockIdx.x) * probeX)) * blockDim.x) + threadIdx.x;
unsigned int baseIndex = (row*blockDim.x) + threadIdx.x;
if(row<probeX && threadIdx.x<probeY)
{
complex_t value=make_complex_t(0, 0);
for(int i=0; i< ModeNumber; i++)
{
value = add_complex_t(value, mul_complex_t(d_u[baseIndex+offset*i], d_factor[index+i*ModeNumber]));
}
// complex_t value=add_complex_t(mul_complex_t(d_u[baseIndex], d_factor[index]), mul_complex_t(d_u[baseIndex+offset], d_factor[index+5]));
// value = add_complex_t(value, mul_complex_t(d_u[baseIndex+offset*2], d_factor[index+2*5]));
// value = add_complex_t(value, mul_complex_t(d_u[baseIndex+offset*3], d_factor[index+3*5]));
// value = add_complex_t(value, mul_complex_t(d_u[baseIndex+offset*4], d_factor[index+4*5]));
d_v[baseIndex]=value;
}
}
template<typename T>
__global__ void d_modalSum(const T* d_modes, T* d_output, unsigned int modeNum, unsigned int x, unsigned int y, bool squareRoot)
{
//unsigned int baseIndex = (blockIdx.x * probeX) + ((blockIdx.y*blockDim.y) + threadIdx.y);
unsigned int modeIndex = (blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int outIndex = (modeIndex*blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
{
T val = d_modes[outIndex];
for(unsigned int i=1; i<modeNum; ++i)
val += d_modes[((modeIndex+(i*x))*blockDim.x) + threadIdx.x];
		d_output[outIndex] = squareRoot? sqrt_real_t(val) : val;
}
}
__global__ void d_modalSumComplex(const complex_t* d_modes, complex_t* d_output, unsigned int modeNum, unsigned int x, unsigned int y, bool squareRoot)
{
unsigned int modeIndex = (blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int outIndex = (modeIndex*blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
{
complex_t val = d_modes[outIndex];
for(unsigned int i=1; i<modeNum; ++i)
val=add_complex_t(val, d_modes[((modeIndex+(i*x))*blockDim.x) + threadIdx.x]);
d_output[outIndex]=val;
}
}
__global__ void d_complexSum(complex_t* d_leftArr, complex_t* d_rightArr, complex_t* d_result, real_t leftFactor, real_t rightFactor, unsigned int x, unsigned int y,
unsigned int alignedY)
{
unsigned int row = (blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int col = threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(threadIdx.x < y)
{
complex_t leftOp=mul_complex_t(d_leftArr[index], make_complex_t(leftFactor,0));
complex_t rightOp=mul_complex_t(d_rightArr[index], make_complex_t(rightFactor,0));
d_result[index]=add_complex_t(leftOp, rightOp);
}
}
__global__ void d_realSum(real_t* d_leftArr, real_t* d_rightArr, real_t* d_result, real_t leftFactor, real_t rightFactor, unsigned int x, unsigned int y,
unsigned int alignedY)
{
unsigned int row = (blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int col = threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(threadIdx.x < y)
{
d_result[index]=d_leftArr[index]*leftFactor+d_rightArr[index]*rightFactor;
}
}
// Broadcast-combine a single row and a single column into a new matrix by adding the duplicated elements of each line
// 26*256 26*256
__global__ void d_realSingleSum(real_t* d_leftArr, real_t* d_rightArr, real_t* d_result, unsigned int x, unsigned int y,
unsigned int alignedY)
{
unsigned int row = (blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int col = threadIdx.x;
unsigned int leftIndex= col;
// unsigned int rightindex = row * alignedY;
unsigned int rightindex= row;
unsigned int index = (row * alignedY) + col;
if(threadIdx.x < y)
{
d_result[index]=d_leftArr[leftIndex]+d_rightArr[rightindex];
}
}
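// Illustrative note (not part of the original source): d_realSingleSum broadcasts a row vector
// and a column vector into a full matrix,
//   d_result[row][col] = d_leftArr[col] + d_rightArr[row] ,
// e.g. a 1x256 row plus a 26x1 column yields the 26x256 sum matrix mentioned above.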
template<bool enoughThreads>
__global__ void d_extractArrReal(real_t* d_objectArray, real_t* d_output, unsigned int sampleX, unsigned int sampleY,
float offsetX, float offsetY, unsigned int alignedSampleY, unsigned int alignedObjectArrayY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int outputIndex = (row * alignedSampleY) + col;
unsigned int inputIndex=(row+offsetX)*alignedObjectArrayY+col+offsetY;
if(row<sampleX && col<sampleY)
{
d_output[outputIndex] = d_objectArray[inputIndex];
}
}
template<bool enoughThreads>
__global__ void d_extractArrComplex(complex_t* d_objectArray, complex_t* d_output, unsigned int sampleX, unsigned int sampleY,
float offsetX, float offsetY, unsigned int alignedSampleY, unsigned int alignedObjectArrayY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int outputIndex = (row * alignedSampleY) + col;
unsigned int inputIndex=(row+offsetX)*alignedObjectArrayY+col+offsetY;
if(row<sampleX && col<sampleY)
{
d_output[outputIndex] = d_objectArray[inputIndex];
}
}
__global__ void d_addToArray_r(float * sarray, float* larray, unsigned int* pos_X, unsigned int* posY,
unsigned int Np_px, unsigned int Np_py, unsigned int Np_ox, unsigned int Np_oy,
unsigned int Npos, unsigned int alignedObjectY, const bool isFlat)
{
// Location in a 3D matrix
int idx= blockIdx.x * blockDim.x + threadIdx.x;
int idy= blockIdx.y * blockDim.y + threadIdx.y;
int id = blockIdx.z * blockDim.z + threadIdx.z;
if ( idx < Np_px && idy < Np_py && id < Npos)
{
int idz = id; // go only through some of the indices
int id_large = alignedObjectY*(pos_X[idz]+idx)+(posY[idz]+idy);
int id_small = Np_py*idx + idy ;
// if (!isFlat)
// id_small = id_small + Np_px*Np_py*idz ;
if (!isFlat)
id_small = id_small + Np_px*alignedObjectY*idz ;
atomicAdd(&larray[id_large] ,sarray[id_small]);
}
}
__global__ void d_addToArray_c(complex_t * sarray, complex_t* larray, unsigned int* pos_X, unsigned int* posY,
unsigned int Np_px, unsigned int Np_py, unsigned int Np_ox, unsigned int Np_oy,
unsigned int Npos, unsigned int alignedObjectY, unsigned int alignedProbeY, const bool isFlat)
{
// Location in a 3D matrix
int idx= blockIdx.x * blockDim.x + threadIdx.x;
int idy= blockIdx.y * blockDim.y + threadIdx.y;
int id = blockIdx.z * blockDim.z + threadIdx.z;
if ( idx < Np_px && idy < Np_py && id < Npos)
{
int idz = id; // go only through some of the indices
int id_large = alignedObjectY*(pos_X[idz]+idx)+(posY[idz]+idy);
int id_small = Np_py*idx + idy ;
if (!isFlat)
id_small = id_small + Np_px*alignedProbeY*idz ;
// id_small = id_small + Np_px*Np_py*idz ;
atomicAdd(&larray[id_large].x ,sarray[id_small].x);
atomicAdd(&larray[id_large].y ,sarray[id_small].y);
}
}
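// Minimal host-side sketch (not part of the original source) of a 3D launch for the scatter-add
// kernels above; the block shape is a placeholder and pos_X/pos_Y are assumed to already live in
// device memory. atomicAdd lets overlapping probe positions accumulate into the object safely.
//
// __host__ void h_addToArraySketch(complex_t* probes, complex_t* object,
//         unsigned int* pos_X, unsigned int* pos_Y,
//         unsigned int Np_px, unsigned int Np_py, unsigned int Np_ox, unsigned int Np_oy,
//         unsigned int Npos, unsigned int alignedObjectY, unsigned int alignedProbeY)
// {
//     dim3 block(16, 16, 1);
//     dim3 grid(gh_iDivUp(Np_px, block.x), gh_iDivUp(Np_py, block.y), Npos);
//     hipLaunchKernelGGL(( d_addToArray_c), dim3(grid), dim3(block), 0, 0, probes, object, pos_X, pos_Y,
//             Np_px, Np_py, Np_ox, Np_oy, Npos, alignedObjectY, alignedProbeY, false);
//     cutilCheckMsg("d_addToArray_c() execution failed!\n");
// }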
__global__ void d_readFromArray_c(complex_t * sarray, const complex_t * larray, /*unsigned int* ind_read,*/ unsigned int* pos_X, unsigned int* pos_Y,
unsigned int Np_px, unsigned int Np_py, unsigned int Np_pz, unsigned int Np_ox, unsigned int Np_oy,
unsigned int alignedObjectY, unsigned int alignedProbeY, unsigned int Npos) {
// Location in a 3D matrix
int idx= blockIdx.x * blockDim.x + threadIdx.x;
int idy= blockIdx.y * blockDim.y + threadIdx.y;
int id = blockIdx.z * blockDim.z + threadIdx.z;
	if ( idx < Np_px && idy < Np_py && id < Npos)
{
// int idz = ind_read[id]; // go only through some of the indices
int idz = id;
int id_large = alignedObjectY*(pos_X[idz]+idx)+pos_Y[idz]+idy;
// int id_large = pos_X[idz]+idx + Np_ox*(pos_Y[idz]+idy);
// int id_small = idx + Np_px*idy + Np_px*Np_py*idz ;
int id_small = alignedProbeY*idx + idy + Np_px*alignedProbeY*idz ;
// sarray[id_small].x = larray[id_large].x ;
// sarray[id_small].y = larray[id_large].y ;
sarray[id_small]= larray[id_large];
}
}
__global__ void d_readFromArray_r(real_t * sarray, const real_t * larray, /*unsigned int* ind_read,*/ unsigned int* pos_X, unsigned int* pos_Y,
unsigned int Np_px, unsigned int Np_py, unsigned int Np_pz, unsigned int Np_ox, unsigned int Np_oy,
unsigned int alignedObjectY, unsigned int alignedProbeY, unsigned int Npos)
{
// Location in a 3D matrix
int idx= blockIdx.x * blockDim.x + threadIdx.x;
int idy= blockIdx.y * blockDim.y + threadIdx.y;
int id = blockIdx.z * blockDim.z + threadIdx.z;
	if ( idx < Np_px && idy < Np_py && id < Npos)
{
// int idz = ind_read[id]; // go only through some of the indices
int idz = id;
int id_large = alignedObjectY*(pos_X[idz]+idx)+pos_Y[idz]+idy;
// int id_large = pos_X[idz]+idx + Np_ox*(pos_Y[idz]+idy);
// int id_small = idx + Np_px*idy + Np_px*Np_py*idz ;
int id_small = alignedProbeY*idx + idy + Np_px*alignedProbeY*idz ;
// sarray[id_small].x = larray[id_large].x ;
// sarray[id_small].y = larray[id_large].y ;
sarray[id_small]= larray[id_large];
}
}
__global__ void d_readFromArray_r_fast(real_t * sarray, const real_t * larray,
unsigned int Np_px, unsigned int Np_py, unsigned int Np_pz, unsigned int Np_ox, unsigned int Np_oy,
unsigned int alignedObjectY, unsigned int alignedProbeY, unsigned int Npos)
{
// Location in a 3D matrix
int idx= blockIdx.x * blockDim.x + threadIdx.x;
int idy= blockIdx.y * blockDim.y + threadIdx.y;
int id = blockIdx.z * blockDim.z + threadIdx.z;
	if ( idx < Np_px && idy < Np_py && id < Npos)
{
// int idz = gC_ind_read[id]-1; // go only through some of the indices
int idz = id;
// int id_large = gC_pos_X[idz]+idx + Np_ox*(gC_pos_Y[idz]+idy);
int id_large = alignedObjectY*(gC_pos_X[idz]+idx) + gC_pos_Y[idz]+idy;
// int id_small = idx + Np_px*idy + Np_px*Np_py*idz ;
int id_small = alignedProbeY*idx + idy + Np_px*alignedProbeY*idz ;
sarray[id_small] = larray[id_large];
}
}
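// Minimal sketch (not part of the original source) of how the constant-memory position tables
// used by d_readFromArray_r_fast might be filled; assumes Npos <= MAX_IND_READ and that
// h_posX/h_posY are host arrays of scan positions.
//
// __host__ void h_uploadPositionsSketch(const unsigned int* h_posX, const unsigned int* h_posY, unsigned int Npos)
// {
//     hipMemcpyToSymbol(HIP_SYMBOL(gC_pos_X), h_posX, Npos*sizeof(unsigned int), 0, hipMemcpyHostToDevice);
//     hipMemcpyToSymbol(HIP_SYMBOL(gC_pos_Y), h_posY, Npos*sizeof(unsigned int), 0, hipMemcpyHostToDevice);
//     cutilCheckMsg("hipMemcpyToSymbol(gC_pos_X/gC_pos_Y) failed!\n");
// }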
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__host__ int getReductionThreadNum(int size) {return (int) rint( pow(2.0f, (int)ceil( log2( (float) size) ) ) );}
__host__ void h_initColorTransferTexture()
{
// create transfer function texture
float4 transferFunc[] =
{
{ 0.0, 0.0, 0.0, 0.0, },
{ 1.0, 0.0, 0.0, 1.0, },
{ 1.0, 0.5, 0.0, 1.0, },
{ 1.0, 1.0, 0.0, 1.0, },
{ 0.0, 1.0, 0.0, 1.0, },
{ 0.0, 1.0, 1.0, 1.0, },
{ 0.0, 0.0, 1.0, 1.0, },
{ 1.0, 0.0, 1.0, 1.0, },
{ 0.0, 0.0, 0.0, 0.0, },
};
hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>();
hipMallocArray(&d_transferFuncArray, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1);
cutilCheckMsg("h_initColorTransferTexture() hipMallocArray execution failed!\n");
hipMemcpyToArray(d_transferFuncArray, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice);
cutilCheckMsg("h_initColorTransferTexture() hipMemcpyToArray execution failed!\n");
g_transferTex.filterMode = hipFilterModeLinear;
g_transferTex.normalized = true; // access with normalized texture coordinates
g_transferTex.addressMode[0] = hipAddressModeWrap; // wrap texture coordinates
// Bind the array to the texture
hipBindTextureToArray(&g_transferTex, d_transferFuncArray, &channelDesc2);
cutilCheckMsg("h_initColorTransferTexture() hipBindTextureToArray execution failed!\n");
}
__host__ void h_freeColorTransferTexture()
{
if(d_transferFuncArray)
{
hipUnbindTexture(&g_transferTex);
cutilCheckMsg("h_freeColorTransferTexture()::hipUnbindTexture() execution failed!\n");
hipFreeArray(d_transferFuncArray);
		cutilCheckMsg("h_freeColorTransferTexture()::hipFreeArray() execution failed!\n");
}
}
template<typename T>
void h_reduceToSum(const T* a, thrust::device_vector<T>& out, unsigned int x1, unsigned int x2, unsigned int y1, unsigned int y2, unsigned int alignedY)
{
unsigned int xNum = x2-x1;
unsigned int yNum = y2-y1;
unsigned int maxThreads = GPUQuery::getInstance()->getGPUMaxThreads();
unsigned int reductionThreads = getReductionThreadNum(yNum);
dim3 grid;
dim3 block;
bool enoughThreads = true;
if(reductionThreads<=maxThreads)
{
grid = dim3(xNum, 1, 1);
block = dim3(reductionThreads, 1, 1);
out.resize(xNum);
}
else
{
enoughThreads = false;
unsigned int sliceNum = gh_iDivUp(reductionThreads, maxThreads);
grid = dim3(xNum, sliceNum, 1);
block = dim3(maxThreads, 1, 1);
out.resize(xNum*sliceNum);
}
unsigned int threadNum = block.x * block.y;
size_t shared_mem_size = (threadNum <= 32) ? 2* threadNum * sizeof(T) : threadNum * sizeof(T);
switch (threadNum)
{
case 8:hipLaunchKernelGGL(( d_reduceToSum< 8>), dim3(grid), dim3(block), shared_mem_size, 0, a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
case 16:hipLaunchKernelGGL(( d_reduceToSum< 16>), dim3(grid), dim3(block), shared_mem_size, 0, a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
case 32:hipLaunchKernelGGL(( d_reduceToSum< 32>), dim3(grid), dim3(block), shared_mem_size, 0, a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
case 64:hipLaunchKernelGGL(( d_reduceToSum< 64>), dim3(grid), dim3(block), shared_mem_size, 0, a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
case 128:hipLaunchKernelGGL(( d_reduceToSum< 128>), dim3(grid), dim3(block), shared_mem_size, 0, a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
case 256:hipLaunchKernelGGL(( d_reduceToSum< 256>), dim3(grid), dim3(block), shared_mem_size, 0, a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
case 512:hipLaunchKernelGGL(( d_reduceToSum< 512>), dim3(grid), dim3(block), shared_mem_size, 0, a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
case 1024:hipLaunchKernelGGL(( d_reduceToSum<1024>), dim3(grid), dim3(block), shared_mem_size, 0, a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
}
cutilCheckMsg("d_reduceToSum() execution failed!\n");
}
__host__ complex_t h_complexSum(const complex_t* a, unsigned int x1, unsigned int x2, unsigned int y1, unsigned int y2, unsigned int alignedY)
{
thrust::device_vector<complex_t> output;
h_reduceToSum<complex_t>(a, output, x1, x2, y1, y2, alignedY);
return thrust::reduce(output.begin(), output.end(), make_complex_t(0,0), complexSum());
}
__host__ real_t h_realSum(real_t* a, unsigned int x, unsigned int y, unsigned int alignedY)
{
// thrust::device_ptr<real_t> devPtr_a = thrust::device_pointer_cast(a);
// return thrust::reduce(devPtr_a, devPtr_a+(x*alignedY));
double sum=h_realSum(a, 0, x, 0, y, alignedY);
// real_t sum = h_realSumCUB(a, x, y, alignedY);
return sum;
}
__host__ real_t h_mean2(real_t* a, unsigned int x, unsigned int y, unsigned int alignedY)
{
double sum=h_realSum(a, 0, x, 0, y, alignedY);
// double sum=h_realSumCUB(a, x, y, alignedY);
return sum/(x*y);
}
__host__ real_t h_realSum(const real_t* a, unsigned int x1, unsigned int x2, unsigned int y1, unsigned int y2, unsigned int alignedY)
{
thrust::device_vector<real_t> output;
h_reduceToSum<real_t>(a, output, x1, x2, y1, y2, alignedY);
return thrust::reduce(output.begin(), output.end());
}
//__host__ real_t h_realSumCUB(real_t* d_in, unsigned int x, unsigned int y, unsigned int alignedY)
//{
//
// real_t* d_out;
// hipMalloc((void **)&d_out, sizeof(real_t));
//
// // Request and allocate temporary storage
// void *d_temp_storage = NULL;
// size_t temp_storage_bytes = 0;
// int num_items=x*alignedY;
//
// DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
// hipMalloc((void**)&d_temp_storage, temp_storage_bytes);
// DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
// hipDeviceSynchronize();
//
// real_t sum=0;
// hipMemcpy(&sum, d_out, sizeof(real_t), hipMemcpyDeviceToHost);
//
// hipFree(d_out);
// hipFree(d_temp_storage);
//// printf("sum: %15e.\n", sum);
//
// cutilCheckMsg("h_realSumCUB() execution failed!\n");
//
// return sum;
//}
__host__ float2 h_maxFloat2(float2* a, unsigned int x, unsigned int y, unsigned int alignedY)
{
thrust::device_ptr<float2> devPtr_a = thrust::device_pointer_cast(a);
return thrust::reduce(devPtr_a, devPtr_a+(x*alignedY), make_float2(FLT_MIN,FLT_MIN), maxFloat2());
}
__host__ float2 h_minFloat2(float2* a, unsigned int x, unsigned int y, unsigned int alignedY)
{
thrust::device_ptr<float2> devPtr_a = thrust::device_pointer_cast(a);
return thrust::reduce(devPtr_a, devPtr_a+(x*alignedY), make_float2(FLT_MAX,FLT_MAX), minFloat2());
}
__host__ real_t h_maxFloat(real_t* a, unsigned int x, unsigned int y, unsigned int alignedY)
{
// thrust::device_ptr<real_t> devPtr_a = thrust::device_pointer_cast(a);
// return thrust::reduce(devPtr_a, devPtr_a+(x*alignedY), make_float2(FLT_MIN,FLT_MIN), maxFloat2());
thrust::device_ptr<real_t> devPtr_a = thrust::device_pointer_cast(a);
// thrust::device_vector<real_t> devPtr_a(devPtr);
	thrust::device_ptr<real_t> iter = thrust::max_element(devPtr_a, devPtr_a+(x*alignedY));
real_t max_val = *iter;
return max_val;
}
//__host__ float2 h_subtractFloat2(const float2* a, const float* b,
// unsigned int x, unsigned int y, unsigned int alignedY)
//{
//// unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
//// dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
//// dim3 block(alignedY, sliceNum, 1);
//
// d_float2Subtract<<<x, alignedY>>>(a, b, result, y);
// cutilCheckMsg("d_complexSubtract() execution failed!\n");
//}
//
//__global__ void d_float2Subtract(const float2* a, const float* b, complex_t* result, unsigned int y)
//{
// unsigned int index = (((blockIdx.x*blockDim.y)+threadIdx.y) * blockDim.x) + threadIdx.x;
// if(threadIdx.x < y)
// result[index] = sub_complex_t(a[index], b[index]);
//
// unsigned int posIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
//
//}
__host__ void h_subtract(const complex_t* a, const complex_t* b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_complexSubtract), dim3(grid), dim3(block), 0, 0, a, b, result, y);
cutilCheckMsg("d_complexSubtract() execution failed!\n");
}
__host__ void h_subtract(const real_t* a, const real_t* b, real_t* result,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_subtract), dim3(grid), dim3(block), 0, 0, a, b, result, y);
	cutilCheckMsg("d_subtract() execution failed!\n");
}
__host__ void h_addFactorDivide(real_t* a, real_t* result, real_t factor,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_addFactorDivide), dim3(grid), dim3(block), 0, 0, a, result, factor, y);
	cutilCheckMsg("d_addFactorDivide() execution failed!\n");
}
__host__ void h_object_sum_update_Gfun(complex_t* a, real_t* b, complex_t* result, real_t factor,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_object_sum_update_Gfun), dim3(grid), dim3(block), 0, 0, a, b, result, factor, y);
	cutilCheckMsg("d_object_sum_update_Gfun() execution failed!\n");
}
void h_addFactor(complex_t* a, complex_t* result, complex_t factor,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_addFactor), dim3(grid), dim3(block), 0, 0, a, result, factor, y);
cutilCheckMsg("d_addFactor() execution failed!\n");
}
void h_addFactor(real_t* a, real_t* result, real_t factor,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_addFactor), dim3(grid), dim3(block), 0, 0, a, result, factor, y);
cutilCheckMsg("d_addFactor() execution failed!\n");
}
__host__ void h_square(real_t* a, real_t* result,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_square<true>), dim3(grid), dim3(block), 0, 0, a, result, x, y, alignedY);
//hipLaunchKernelGGL(( d_square<true>), dim3(grid), dim3(block), 0, 0, d_arr, d_result, x, y, alignedY);
	cutilCheckMsg("d_square() execution failed!\n");
}
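// Element-wise result = a * conj(b), optionally scaled by 1/(x*y).
// The launch configuration depends on whether a full row of alignedY elements fits in one block.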
__host__ void h_multiplyConju(complex_t* a, complex_t* b, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY,
bool normalize, unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset, unsigned int byOffset)
{
unsigned int maxThreads = GPUQuery::getInstance()->getGPUMaxThreads();
unsigned int blockOffset = max(axOffset,bxOffset);
if(blockOffset<x && max(ayOffset,byOffset)<y)
{
if (alignedY <= maxThreads)
{
unsigned int sliceNum = gh_iDivDown(maxThreads, alignedY);
dim3 grid(gh_iDivUp(x-blockOffset, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_multiplyConju<true>), dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
else
{
unsigned int sliceNum = gh_iDivUp(alignedY, maxThreads);
dim3 grid(x-blockOffset, sliceNum, 1);
dim3 block(maxThreads, 1, 1);
hipLaunchKernelGGL(( d_multiplyConju<false>), dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
}
cutilCheckMsg("d_multiplyConju() execution failed!\n");
}
__host__ void h_multiply(complex_t* a, complex_t* b, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY,
bool normalize, unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset, unsigned int byOffset)
{
unsigned int maxThreads = GPUQuery::getInstance()->getGPUMaxThreads();
unsigned int blockOffset = max(axOffset,bxOffset);
if(blockOffset<x && max(ayOffset,byOffset)<y)
{
if (alignedY <= maxThreads)
{
unsigned int sliceNum = gh_iDivDown(maxThreads, alignedY);
dim3 grid(gh_iDivUp(x-blockOffset, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_complexMultiply<true>), dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
else
{
unsigned int sliceNum = gh_iDivUp(alignedY, maxThreads);
dim3 grid(x-blockOffset, sliceNum, 1);
dim3 block(maxThreads, 1, 1);
hipLaunchKernelGGL(( d_complexMultiply<false>), dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
}
cutilCheckMsg("d_complexMultiply() execution failed!\n");
}
__host__ void h_multiply(real_t* a, complex_t* b, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY,
bool normalize, unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset, unsigned int byOffset)
{
unsigned int maxThreads = GPUQuery::getInstance()->getGPUMaxThreads();
unsigned int blockOffset = max(axOffset,bxOffset);
if(blockOffset<x && max(ayOffset,byOffset)<y)
{
if (alignedY <= maxThreads)
{
unsigned int sliceNum = gh_iDivDown(maxThreads, alignedY);
dim3 grid(gh_iDivUp(x-blockOffset, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_complexMultiply<true>), dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
else
{
unsigned int sliceNum = gh_iDivUp(alignedY, maxThreads);
dim3 grid(x-blockOffset, sliceNum, 1);
dim3 block(maxThreads, 1, 1);
hipLaunchKernelGGL(( d_complexMultiply<false>), dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
}
cutilCheckMsg("d_complexMultiply() execution failed!\n");
}
__host__ void h_multiply(real_t* a, real_t* b, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY,
bool normalize, unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset, unsigned int byOffset)
{
unsigned int maxThreads = GPUQuery::getInstance()->getGPUMaxThreads();
unsigned int blockOffset = max(axOffset,bxOffset);
if(blockOffset<x && max(ayOffset,byOffset)<y)
{
if (alignedY <= maxThreads)
{
unsigned int sliceNum = gh_iDivDown(maxThreads, alignedY);
dim3 grid(gh_iDivUp(x-blockOffset, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_realMultiply<true>), dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
else
{
unsigned int sliceNum = gh_iDivUp(alignedY, maxThreads);
dim3 grid(x-blockOffset, sliceNum, 1);
dim3 block(maxThreads, 1, 1);
hipLaunchKernelGGL(( d_realMultiply<false>), dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
}
cutilCheckMsg("d_realMultiply() execution failed!\n");
}
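// Pushes the solver flags and the 1/objMax, 1/probeMax normalization factors to the device
// vectors, copying only the entries that changed since the previous call (host-side cache).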
__host__ void h_checkCache( thrust::device_vector<real_t>& m_factors,
thrust::host_vector<bool>& m_cachedFlags,
thrust::host_vector<real_t>& m_cachedFactors, thrust::device_vector<bool>& m_flags, real_t objMax, real_t probeMax,
bool phaseConstraint,bool updateProbe, bool updateProbeModes, bool RMS)
{
bool passedFlags[3] = {phaseConstraint, updateProbe, updateProbeModes};
for(size_t i=0; i<m_cachedFlags.size();++i)
if(m_cachedFlags[i]!=passedFlags[i])
{
m_cachedFlags[i]=passedFlags[i];
m_flags[i] = m_cachedFlags[i];
}
real_t passedFactors[2] = {1.0/objMax, 1.0/probeMax};
for(size_t i=0; i<m_cachedFactors.size();++i)
{
if(fabs(m_cachedFactors[i]-passedFactors[i])>EPS)
{
m_cachedFactors[i]=passedFactors[i];
m_factors[i] = m_cachedFactors[i];
}
}
}
__host__ void h_multiply(const complex_t* a, const complex_t& b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, bool normalize)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_complexMultiply<true>) , dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
else hipLaunchKernelGGL(( d_complexMultiply<false>), dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
cutilCheckMsg("h_multiply() execution failed!\n");
}
__host__ void h_multiply(const real_t* a, const real_t& b, real_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, bool normalize)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_multiply<true>) , dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
else hipLaunchKernelGGL(( d_multiply<false>), dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
cutilCheckMsg("h_multiply() execution failed!\n");
}
__host__ void h_multiplyPage(complex_t* a, complex_t* b, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY,
unsigned int pagex, unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset, unsigned int byOffset)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_multiplyPage<true>) , dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, pagex);
else hipLaunchKernelGGL(( d_multiplyPage<false>), dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, pagex);
cutilCheckMsg("h_multiplyPage() execution failed!\n");
}
__host__ void h_multiplyAbsConjuRealWhole(complex_t* a, complex_t* b, complex_t* c, real_t* result1, real_t* result2, unsigned int x, unsigned int y, unsigned int alignedY,
unsigned int pagex)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_multiplyAbsConjuRealWhole<true>) , dim3(grid), dim3(block), 0, 0, a, b, c, result1, result2, x, y, alignedY, pagex);
else hipLaunchKernelGGL(( d_multiplyAbsConjuRealWhole<false>), dim3(grid), dim3(block), 0, 0, a, b, c, result1, result2, x, y, alignedY, pagex);
	cutilCheckMsg("h_multiplyAbsConjuRealWhole() execution failed!\n");
}
__host__ void h_multiplyAbsConjuReal(complex_t* a, complex_t* b, complex_t* c, real_t* result1, real_t* result2, unsigned int x, unsigned int y, unsigned int alignedY,
unsigned int pagex)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_multiplyAbsConjuReal<true>) , dim3(grid), dim3(block), 0, 0, a, b, c, result1, result2, x, y, alignedY, pagex);
else hipLaunchKernelGGL(( d_multiplyAbsConjuReal<false>), dim3(grid), dim3(block), 0, 0, a, b, c, result1, result2, x, y, alignedY, pagex);
	cutilCheckMsg("h_multiplyAbsConjuReal() execution failed!\n");
}
__host__ void h_multiplyRow(complex_t* a, real_t* b, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY,
bool normalize, unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset, unsigned int byOffset)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_multiplyRow<true>) , dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
else hipLaunchKernelGGL(( d_multiplyRow<false>), dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
cutilCheckMsg("h_multiplyRow() execution failed!\n");
}
__host__ void h_multiplyColumn(complex_t* a, real_t* b, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY,
bool normalize, unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset, unsigned int byOffset)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_multiplyColum<true>) , dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
else hipLaunchKernelGGL(( d_multiplyColum<false>), dim3(grid), dim3(block), 0, 0, a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
cutilCheckMsg("h_multiplyColumn() execution failed!\n");
}
__host__ void h_get_optimal_step_lsq(complex_t* chi,complex_t* object_update_proj, complex_t* dPO, complex_t* probe, real_t lambda,
real_t* AA1, complex_t* AA2, real_t* AA4, real_t* Atb1, real_t* Atb2, unsigned int x, unsigned int y, unsigned int alignedY)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_get_optimal_step_lsq<true>) , dim3(grid), dim3(block), 0, 0, chi, object_update_proj, dPO, probe, lambda,
AA1, AA2, AA4, Atb1, Atb2, x, y, alignedY);
else hipLaunchKernelGGL(( d_get_optimal_step_lsq<false>), dim3(grid), dim3(block), 0, 0, chi, object_update_proj, dPO, probe, lambda,
AA1, AA2, AA4, Atb1, Atb2, x, y, alignedY);
cutilCheckMsg("h_get_optimal_step_lsq() execution failed!\n");
}
__host__ void h_mul_rca_mulc_rcr(complex_t* obj_proj_i, complex_t* modes_i, complex_t* chi_i, real_t* weight_proj,
unsigned int x, unsigned int y, unsigned int alignedY)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_mul_rca_mulc_rcr<true>) , dim3(grid), dim3(block), 0, 0, obj_proj_i, modes_i, chi_i, weight_proj, x, y, alignedY);
else hipLaunchKernelGGL(( d_mul_rca_mulc_rcr<false>), dim3(grid), dim3(block), 0, 0, obj_proj_i, modes_i, chi_i, weight_proj, x, y, alignedY);
cutilCheckMsg("h_mul_rca_mulc_rcr() execution failed!\n");
}
__host__ void h_multiplyReal(real_t* a, real_t* result,
unsigned int x, unsigned int y, unsigned int alignedY)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_realMultiply<true>) , dim3(grid), dim3(block), 0, 0, a, result, x, y, alignedY);
else hipLaunchKernelGGL(( d_realMultiply<false>) , dim3(grid), dim3(block), 0, 0, a, result, x, y, alignedY);
cutilCheckMsg("h_multiplyReal() execution failed!\n");
}
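// In-place scaling of a complex array by a real factor.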
__host__ void h_normalize(complex_t* a, unsigned int x, unsigned int y, unsigned int alignedY, real_t factor)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_complexMultiply<true>) , dim3(grid), dim3(block), 0, 0, a, factor, a, x, y, alignedY);
else hipLaunchKernelGGL(( d_complexMultiply<false>), dim3(grid), dim3(block), 0, 0, a, factor, a, x, y, alignedY);
cutilCheckMsg("h_normalize() execution failed\n");
}
__host__ void h_normalize(const complex_t* a, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY, real_t factor)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_complexMultiply<true>) , dim3(grid), dim3(block), 0, 0, a, factor, result, x, y, alignedY);
else hipLaunchKernelGGL(( d_complexMultiply<false>), dim3(grid), dim3(block), 0, 0, a, factor, result, x, y, alignedY);
cutilCheckMsg("h_normalize() execution failed\n");
}
__host__ void h_normalize(real_t* a, unsigned int x, unsigned int y, unsigned int alignedY)
{
thrust::device_ptr<real_t> devPtr_a = thrust::device_pointer_cast(a);
thrust::constant_iterator<real_t> maxValue(h_realMax(a,x,y,alignedY));
thrust::transform(devPtr_a, devPtr_a+(x*alignedY), maxValue, devPtr_a, thrust::divides<real_t>());
cutilCheckMsg("h_normalize() execution failed\n");
}
__host__ void h_normalize(real_t* a, real_t factor, unsigned int x, unsigned int y, unsigned int alignedY)
{
thrust::device_ptr<real_t> devPtr_a = thrust::device_pointer_cast(a);
thrust::constant_iterator<real_t> factorValue(factor);
thrust::transform(devPtr_a, devPtr_a+(x*alignedY), factorValue, devPtr_a, thrust::divides<real_t>());
cutilCheckMsg("h_normalize() execution failed\n");
}
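// Maximum value of a real device array via a thrust reduction.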
__host__ real_t h_realMax(real_t* a, unsigned int x, unsigned int y, unsigned int alignedY)
{
thrust::device_ptr<real_t> devPtr_a = thrust::device_pointer_cast(a);
return thrust::reduce(devPtr_a, devPtr_a+(x*alignedY), DBL_MIN, thrust::maximum<real_t>() );
}
__host__ void h_realToRGBA(const real_t* d_arr, float4* d_output, unsigned int x, unsigned int y, unsigned int alignedY,
real_t factor, float tf, float ts)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_realToRGBA<true>) , dim3(grid), dim3(block), 0, 0, d_arr, factor, d_output, x, y, alignedY, tf, ts);
else hipLaunchKernelGGL(( d_realToRGBA<false>), dim3(grid), dim3(block), 0, 0, d_arr, factor, d_output, x, y, alignedY, tf, ts);
cutilCheckMsg("h_realToRGBA() execution failed\n");
}
__host__ void h_realToGray(const real_t* d_arr, float* d_output, unsigned int x, unsigned int y, unsigned int alignedY,
real_t factor, bool outAligned)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_realToGray<true>) , dim3(grid), dim3(block), 0, 0, d_arr, factor, d_output, x, y, alignedY, outAligned);
else hipLaunchKernelGGL(( d_realToGray<false>), dim3(grid), dim3(block), 0, 0, d_arr, factor, d_output, x, y, alignedY, outAligned);
cutilCheckMsg("h_realToGray() execution failed\n");
}
__host__ real_t h_norm2Mat(real_t* d_arr, real_t* d_result, unsigned int x, unsigned int y, unsigned int alignedY)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_square<true>), dim3(grid), dim3(block), 0, 0, d_arr, d_result, x, y, alignedY);
else hipLaunchKernelGGL(( d_square<false>), dim3(grid), dim3(block), 0, 0, d_arr, d_result, x, y, alignedY);
	cutilCheckMsg("d_square() execution failed\n");
real_t result=h_realSum(d_result, x, y, alignedY);
// real_t result=h_realSum(d_result, 0, x, 0, y, alignedY);
real_t xresult=sqrt_real_t(result/(x*y));
return xresult;
}
__host__ real_t h_norm2Mat(complex_t* d_arr, real_t* d_result, unsigned int x, unsigned int y, unsigned int alignedY)
{
h_realComplexAbs(d_arr, d_result, x, y, alignedY, true);
real_t result=h_realSum(d_result, x, y, alignedY);
// real_t result=h_realSum(d_result, 0, x, 0, y, alignedY);
real_t xresult=sqrt_real_t(result/(x*y));
return xresult;
}
__host__ void h_squareRoot(real_t* d_arr, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_realsquareRoot<true>), dim3(grid), dim3(block), 0, 0, d_arr, result, x, y, alignedY);
else hipLaunchKernelGGL(( d_realsquareRoot<false>), dim3(grid), dim3(block), 0, 0, d_arr, result, x, y, alignedY);
	cutilCheckMsg("d_realsquareRoot() execution failed\n");
}
__host__ void h_realComplexAbs(const complex_t* a, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY, bool squared)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_complexToDouble<'a', true>) , dim3(grid), dim3(block), 0, 0, a, result, x, y, alignedY, squared);
else hipLaunchKernelGGL(( d_complexToDouble<'a', false>), dim3(grid), dim3(block), 0, 0, a, result, x, y, alignedY, squared);
	cutilCheckMsg("h_realComplexAbs() execution failed\n");
}
__host__ void h_realComplexPhase(const complex_t* a, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY, bool squared)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_complexToDouble<'p', true>) , dim3(grid), dim3(block), 0, 0, a, result, x, y, alignedY, squared);
else hipLaunchKernelGGL(( d_complexToDouble<'p', false>), dim3(grid), dim3(block), 0, 0, a, result, x, y, alignedY, squared);
cutilCheckMsg("h_realComplexPhase() execution failed\n");
}
__host__ void h_realComplexReal(const complex_t* a, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY, bool squared)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_complexToDouble<'r', true>) , dim3(grid), dim3(block), 0, 0, a, result, x, y, alignedY, squared);
else hipLaunchKernelGGL(( d_complexToDouble<'r', false>), dim3(grid), dim3(block), 0, 0, a, result, x, y, alignedY, squared);
cutilCheckMsg("h_realComplexReal() execution failed\n");
}
__host__ void h_realComplexImag(const complex_t* a, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY, bool squared)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_complexToDouble<'i', true>) , dim3(grid), dim3(block), 0, 0, a, result, x, y, alignedY, squared);
else hipLaunchKernelGGL(( d_complexToDouble<'i', false>), dim3(grid), dim3(block), 0, 0, a, result, x, y, alignedY, squared);
cutilCheckMsg("h_realComplexImag() execution failed\n");
}
__host__ void h_realComplexExp(const real_t* src, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY, real_t factor)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_realComplexExp<true>) , dim3(grid), dim3(block), 0, 0, src, result, x, y, alignedY, factor);
else hipLaunchKernelGGL(( d_realComplexExp<false>), dim3(grid), dim3(block), 0, 0, src, result, x, y, alignedY, factor);
cutilCheckMsg("realComplexExp() execution failed\n");
}
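// Adds probe-sized patches (proj) into the object array at the given scan positions;
// the launch grid carries one z-slice of blocks per position.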
__host__ void h_set_projections(real_t* p_object, real_t* proj, unsigned int* p_positions_x, unsigned int* p_positions_y,
unsigned int objectX, unsigned int objectY, unsigned int alignedObjectY, unsigned int probeX, unsigned int probeY, unsigned int Npos)
{
int const threadsPerBlockEachDim = 32;
int const blocksPerGrid_M = (probeX + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_N = (probeY + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_O = Npos;
dim3 const dimBlock(blocksPerGrid_M, blocksPerGrid_N, blocksPerGrid_O);
dim3 const dimThread(threadsPerBlockEachDim, threadsPerBlockEachDim, 1);
bool isFlat=true;
hipLaunchKernelGGL(( d_addToArray_r), dim3(dimBlock), dim3(dimThread), 0, 0, proj, p_object, p_positions_x, p_positions_y ,probeX , probeY, objectX, objectY, Npos, alignedObjectY, isFlat);
}
__host__ void h_set_projections(complex_t* p_object, complex_t* proj, unsigned int* p_positions_x, unsigned int* p_positions_y,
unsigned int objectX, unsigned int objectY, unsigned int alignedObjectY, unsigned int probeX, unsigned int probeY, unsigned int alignedProbeY,
unsigned int Npos, bool isFlat)
{
int const threadsPerBlockEachDim = 32;
int const blocksPerGrid_M = (probeX + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_N = (probeY + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_O = Npos;
dim3 const dimBlock(blocksPerGrid_M, blocksPerGrid_N, blocksPerGrid_O);
dim3 const dimThread(threadsPerBlockEachDim, threadsPerBlockEachDim, 1);
hipLaunchKernelGGL(( d_addToArray_c), dim3(dimBlock), dim3(dimThread), 0, 0, proj, p_object, p_positions_x, p_positions_y ,probeX , probeY, objectX, objectY, Npos, alignedObjectY, alignedProbeY, isFlat);
}
__host__ void h_get_projections(const complex_t* p_object, complex_t* proj, unsigned int* p_positions_x, unsigned int* p_positions_y,
unsigned int objectX, unsigned int objectY, unsigned int alignedObjectY, unsigned int probeX, unsigned int probeY, unsigned int probeZ,
unsigned int alignedProbeY, unsigned int Npos)
{
int const threadsPerBlockEachDim = 32;
int const blocksPerGrid_M = (probeX + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_N = (probeY + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_O = Npos;
dim3 const dimBlock(blocksPerGrid_M, blocksPerGrid_N, blocksPerGrid_O);
dim3 const dimThread(threadsPerBlockEachDim, threadsPerBlockEachDim, 1);
hipLaunchKernelGGL(( d_readFromArray_c), dim3(dimBlock), dim3(dimThread), 0, 0, proj, p_object, p_positions_x, p_positions_y , probeX, probeY, probeZ,
objectX, objectY, alignedObjectY, alignedProbeY, Npos);
}
__host__ void h_get_projections(const real_t* p_object, real_t* proj, unsigned int* p_positions_x, unsigned int* p_positions_y,
unsigned int objectX, unsigned int objectY, unsigned int alignedObjectY, unsigned int probeX, unsigned int probeY, unsigned int probeZ,
unsigned int alignedProbeY, unsigned int Npos)
{
int const threadsPerBlockEachDim = 32;
int const blocksPerGrid_M = (probeX + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_N = (probeY + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_O = Npos;
dim3 const dimBlock(blocksPerGrid_M, blocksPerGrid_N, blocksPerGrid_O);
dim3 const dimThread(threadsPerBlockEachDim, threadsPerBlockEachDim, 1);
hipLaunchKernelGGL(( d_readFromArray_r), dim3(dimBlock), dim3(dimThread), 0, 0, proj, p_object, p_positions_x, p_positions_y , probeX, probeY, probeZ,
objectX, objectY, alignedObjectY, alignedProbeY, Npos);
// if(Npos<MAX_IND_READ)
// {
// hipMemcpyToSymbol(gC_pos_X, p_positions_x, Npos*sizeof(unsigned int), 0, hipMemcpyHostToDevice);
// hipMemcpyToSymbol(gC_pos_Y, p_positions_y, Npos*sizeof(unsigned int), 0, hipMemcpyHostToDevice);
// d_readFromArray_r_fast<<<dimBlock, dimThread>>>(proj, p_object, probeX, probeY, probeZ,
// objectX, objectY, alignedObjectY, alignedProbeY, Npos);
// }
// else
// {
// hipLaunchKernelGGL(( d_readFromArray_r), dim3(dimBlock), dim3(dimThread), 0, 0, proj, p_object, p_positions_x, p_positions_y , probeX, probeY, probeZ,
// objectX, objectY, alignedObjectY, alignedProbeY, Npos);
// }
}
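// Applies the measured diffraction moduli (d_det_mod) and detector mask to the propagated
// waves d_psi, honoring the saturation threshold; the modal kernel is used when modeNum > 1.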
__host__ void h_adjustFFT(const complex_t* d_psi, complex_t* d_output, const real_t* d_det_mod, const real_t* d_mask,
const real_t saturationValue, unsigned int modeNum, unsigned int x, unsigned int y, unsigned int alignedY, bool normalize)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
if(modeNum>1)
hipLaunchKernelGGL(( d_adjustModalFFT), dim3(grid), dim3(block), 0, 0, d_psi, d_output, d_det_mod, d_mask, saturationValue, modeNum, x, y, normalize?1.0/(real_t)(x*y):1);
else
hipLaunchKernelGGL(( d_adjustFFT), dim3(grid), dim3(block), 0, 0, d_psi, d_output, d_det_mod, d_mask, saturationValue, y, normalize?1.0/(real_t)(x*y):1);
cutilCheckMsg("h_adjustFFT() execution failed!\n");
}
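// Error metric between the propagated waves d_psi and the measured moduli d_det_mod:
// a per-row reduction kernel followed by a host-side sum, averaged over the probe modes.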
__host__ real_t h_calculateER(const complex_t* d_psi, const real_t* d_det_mod, unsigned int modeNum,
unsigned int x, unsigned int y, unsigned int alignedY)
{
thrust::device_vector<real_t> output;
unsigned int maxThreads = GPUQuery::getInstance()->getGPUMaxThreads();
unsigned int reductionThreads = getReductionThreadNum(y);
dim3 grid;
dim3 block;
bool enoughThreads = true;
if(reductionThreads<=maxThreads)
{
grid = dim3(x, 1, 1);
block = dim3(reductionThreads, 1, 1);
output.resize(x);
}
else
{
enoughThreads = false;
unsigned int sliceNum = gh_iDivUp(reductionThreads, maxThreads);
grid = dim3(x, sliceNum, 1);
block = dim3(maxThreads, 1, 1);
output.resize(x*sliceNum);
}
unsigned int threadNum = block.x * block.y;
size_t shared_mem_size = (threadNum <= 32) ? 2* threadNum * sizeof(real_t) : threadNum * sizeof(real_t);
switch (threadNum)
{
case 8:hipLaunchKernelGGL(( d_calculateER< 8>), dim3(grid), dim3(block), shared_mem_size, 0, d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
case 16:hipLaunchKernelGGL(( d_calculateER< 16>), dim3(grid), dim3(block), shared_mem_size, 0, d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
case 32:hipLaunchKernelGGL(( d_calculateER< 32>), dim3(grid), dim3(block), shared_mem_size, 0, d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
case 64:hipLaunchKernelGGL(( d_calculateER< 64>), dim3(grid), dim3(block), shared_mem_size, 0, d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
case 128:hipLaunchKernelGGL(( d_calculateER< 128>), dim3(grid), dim3(block), shared_mem_size, 0, d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
case 256:hipLaunchKernelGGL(( d_calculateER< 256>), dim3(grid), dim3(block), shared_mem_size, 0, d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
case 512:hipLaunchKernelGGL(( d_calculateER< 512>), dim3(grid), dim3(block), shared_mem_size, 0, d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
case 1024:hipLaunchKernelGGL(( d_calculateER<1024>), dim3(grid), dim3(block), shared_mem_size, 0, d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
}
cutilCheckMsg("h_calculateER() execution failed!\n");
return thrust::reduce(output.begin(), output.end())/modeNum;
}
__host__ real_t h_calculateER(const complex_t* d_GT, const complex_t* d_obj,
unsigned int sx, unsigned int sy, unsigned int qx, unsigned int qy,
unsigned int x1, unsigned int y1, unsigned int alignedY1,
unsigned int x2, unsigned int y2, unsigned int alignedY2)
{
thrust::device_vector<real_t> output(sx*sy);
dim3 grid, block;
bool enoughThreads = calcGrids(sx,sy,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_realSpaceER<true>) , dim3(grid), dim3(block), 0, 0, d_GT, d_obj, thrust::raw_pointer_cast(output.data()),
qx, qy, sx, sy, x1, y1, alignedY1, x2, y2, alignedY2);
else hipLaunchKernelGGL(( d_realSpaceER<false>), dim3(grid), dim3(block), 0, 0, d_GT, d_obj, thrust::raw_pointer_cast(output.data()),
qx, qy, sx, sy, x1, y1, alignedY1, x2, y2, alignedY2);
cutilCheckMsg("d_realSpaceER() execution failed\n");
return sqrt(thrust::reduce(output.begin(), output.end()))/output.size();
}
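// fftshift of a real array: circular shift by half the size in X, then in Y, using d_temp as scratch.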
__host__ void h_shiftFFT(real_t* d_data, real_t* d_temp, unsigned int x, unsigned int y, unsigned int alignedY, hipStream_t* stream)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_shiftX<real_t, true>), dim3(grid), dim3(block),0,(stream?*stream:0), d_data, d_temp, (float)x/2.0, 0, x, y, alignedY);
cutilCheckMsg("h_shiftFFT() execution failed!\n");
hipDeviceSynchronize();
hipLaunchKernelGGL(( d_shiftY<real_t, true>), dim3(grid), dim3(block),0,(stream?*stream:0), d_temp, d_data, (float)y/2.0, 0, x, y, alignedY);
cutilCheckMsg("h_shiftFFT() execution failed!\n");
hipDeviceSynchronize();
// d_check<<<x, y>>>(d_data);
}
__host__ void h_shiftFFTy(real_t* d_data, real_t* d_temp, unsigned int x, unsigned int y, unsigned int alignedY, hipStream_t* stream)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_shiftY<real_t, true>), dim3(grid), dim3(block),0,(stream?*stream:0), d_data, d_temp, (float)y/2.0, 0, x, y, alignedY);
cutilCheckMsg("h_shiftFFT() execution failed!\n");
hipDeviceSynchronize();
}
__host__ void h_shiftFFTtmp(complex_t* d_probe, complex_t* d_tempprobe, complex_t* d_copyprobe, unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_shiftX<complex_t, true>), dim3(grid), dim3(block),0,0, d_copyprobe, d_tempprobe, (float)x/2.0, 0, x, y, alignedY);
cutilCheckMsg("h_shiftFFTtmp() execution failed!\n");
hipDeviceSynchronize();
hipLaunchKernelGGL(( d_shiftY<complex_t, true>), dim3(grid), dim3(block),0,0, d_tempprobe, d_probe, (float)y/2.0, 0, x, y, alignedY);
cutilCheckMsg("h_shiftFFTtmp() execution failed!\n");
hipDeviceSynchronize();
// d_checkcomplex<<<x, y>>>(d_probe);
}
__host__ void h_shiftFFTtwo(complex_t* d_probe, complex_t* d_tempprobe, unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_shiftX<complex_t, true>), dim3(grid), dim3(block),0,0, d_probe, d_tempprobe, (float)x/2.0, 0, x, y, alignedY);
	cutilCheckMsg("h_shiftFFTtwo() execution failed!\n");
hipDeviceSynchronize();
hipLaunchKernelGGL(( d_shiftY<complex_t, true>), dim3(grid), dim3(block),0,0, d_tempprobe, d_probe, (float)y/2.0, 0, x, y, alignedY);
	cutilCheckMsg("h_shiftFFTtwo() execution failed!\n");
hipDeviceSynchronize();
// d_checkcomplex<<<x, y>>>(d_probe);
}
__host__ void imshift_fft(complex_t* d_probe, unsigned int x, unsigned int y, unsigned int alignedY, float radNo1, float radNo2)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_imshift_fft), dim3(grid), dim3(block), 0, 0, d_probe, x/2, y/2, radNo1, radNo2, x, y, alignedY);
// d_checkcomplex<<<x, y>>>(d_probe);
}
__host__ void h_realRotate90(const real_t* d_data, real_t* d_out, unsigned int x, unsigned int y, unsigned int alignedY, unsigned int times, hipStream_t* stream)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
switch(times % 4)
{
case 0: break;
case 1:
hipLaunchKernelGGL(( d_rot90<real_t>), dim3(grid),dim3(block),0,(stream?*stream:0), d_data, d_out, x, y, alignedY);
break;
case 2:
//d_mirrorY<real_t><<<x, alignedY, alignedY*sizeof(real_t)>>>(d_data, d_data, y);
break;
case 3:
break;
}
cutilCheckMsg("h_realRotate90() execution failed!\n");
}
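// Inner product of two complex arrays: per-row shared-memory reduction scaled by 1/(x*y),
// followed by a thrust sum of the row results.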
__host__ complex_t h_innerProduct(const complex_t* d_u, const complex_t* d_v, complex_t* d_output,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int reductionThreads = getReductionThreadNum(alignedY);
dim3 grid(x, 1, 1);
dim3 block(reductionThreads, 1, 1);
size_t shared_mem_size = (block.x <= 32) ? 2* block.x * sizeof(complex_t) : block.x * sizeof(complex_t);
switch (block.x)
{
case 8:hipLaunchKernelGGL(( d_innerProduct< 8>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
case 16:hipLaunchKernelGGL(( d_innerProduct< 16>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
case 32:hipLaunchKernelGGL(( d_innerProduct< 32>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
case 64:hipLaunchKernelGGL(( d_innerProduct< 64>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
case 128:hipLaunchKernelGGL(( d_innerProduct< 128>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
case 256:hipLaunchKernelGGL(( d_innerProduct< 256>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
case 512:hipLaunchKernelGGL(( d_innerProduct< 512>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
case 1024:hipLaunchKernelGGL(( d_innerProduct<1024>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
}
cutilCheckMsg("d_innerProduct() execution failed!\n");
thrust::device_ptr<complex_t> devPtr = thrust::device_pointer_cast(d_output);
return thrust::reduce(devPtr, devPtr+x, make_complex_t(0,0), complexSum());
}
__host__ complex_t h_innerProductOne(const complex_t* d_u, const complex_t* d_v, complex_t* d_output,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int reductionThreads = getReductionThreadNum(alignedY);
dim3 grid(x, 1, 1);
dim3 block(reductionThreads, 1, 1);
size_t shared_mem_size = (block.x <= 32) ? 2* block.x * sizeof(complex_t) : block.x * sizeof(complex_t);
switch (block.x)
{
case 8:hipLaunchKernelGGL(( d_innerProductOne< 8>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0, y);
break;
case 16:hipLaunchKernelGGL(( d_innerProductOne< 16>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0, y);
break;
case 32:hipLaunchKernelGGL(( d_innerProductOne< 32>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0, y);
break;
case 64:hipLaunchKernelGGL(( d_innerProductOne< 64>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0, y);
break;
case 128:hipLaunchKernelGGL(( d_innerProductOne< 128>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0, y);
break;
case 256:hipLaunchKernelGGL(( d_innerProductOne< 256>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0, y);
break;
case 512:hipLaunchKernelGGL(( d_innerProductOne< 512>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0, y);
break;
case 1024:hipLaunchKernelGGL(( d_innerProductOne<1024>), dim3(grid), dim3(block), shared_mem_size, 0, d_u, d_v, d_output, 1.0, y);
break;
}
cutilCheckMsg("d_innerProduct() execution failed!\n");
thrust::device_ptr<complex_t> devPtr = thrust::device_pointer_cast(d_output);
complex_t result = thrust::reduce(devPtr, devPtr+x, make_complex_t(0,0), complexSum());
return result;
}
__host__ void h_innerProductModes(complex_t* d_u, complex_t* d_v, complex_t* d_factor, unsigned int index,
unsigned int modesNum, unsigned int x, unsigned int y, unsigned int alignedY)
{
// unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
// dim3 grid(modesNum, gh_iDivUp(x,sliceNum), 1);
// dim3 block(alignedY, sliceNum, 1);
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x,sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
unsigned int offset=x*alignedY;
hipLaunchKernelGGL(( d_innerProductModes), dim3(grid), dim3(block), 0, 0, d_u, d_v, d_factor, index, modesNum, x, y, offset);
cutilCheckMsg("d_innerProductModes() execution failed!\n");
}
__host__ void h_extracSubArrReal(real_t* d_objectArray, real_t* d_output, unsigned int offsetX, unsigned int offsetY,
unsigned int sampleX, unsigned int sampleY, unsigned int alignedSampleY,
unsigned int objectArrayX, unsigned int objectArrayY, unsigned int alignedObjectArrayY)
{
dim3 grid, block;
bool enoughThreads = calcGrids(sampleX,alignedSampleY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_extractArrReal<true>) , dim3(grid), dim3(block), 0, 0, d_objectArray, d_output, sampleX, sampleY, offsetX, offsetY, alignedSampleY, alignedObjectArrayY);
else hipLaunchKernelGGL(( d_extractArrReal<false>), dim3(grid), dim3(block), 0, 0, d_objectArray, d_output, sampleX, sampleY, offsetX, offsetY, alignedSampleY, alignedObjectArrayY);
	cutilCheckMsg("h_extracSubArrReal() execution failed!\n");
// d_check<<<sampleX, alignedSampleY>>>(d_output);
}
__host__ void h_extracSubArrComplex(complex_t* d_objectArray, complex_t* d_output, unsigned int offsetX, unsigned int offsetY,
unsigned int sampleX, unsigned int sampleY, unsigned int alignedSampleY,
unsigned int objectArrayX, unsigned int objectArrayY, unsigned int alignedObjectArrayY)
{
dim3 grid, block;
bool enoughThreads = calcGrids(sampleX,alignedSampleY,grid,block);
if(enoughThreads)hipLaunchKernelGGL(( d_extractArrComplex<true>) , dim3(grid), dim3(block), 0, 0, d_objectArray, d_output, sampleX, sampleY, offsetX, offsetY, alignedSampleY, alignedObjectArrayY);
else hipLaunchKernelGGL(( d_extractArrComplex<false>), dim3(grid), dim3(block), 0, 0, d_objectArray, d_output, sampleX, sampleY, offsetX, offsetY, alignedSampleY, alignedObjectArrayY);
	cutilCheckMsg("h_extracSubArrComplex() execution failed!\n");
}
__host__ void h_realModalSum(const real_t* d_modes, real_t* d_output, unsigned int modesNum,
		unsigned int x, unsigned int y, unsigned int alignedY, bool squareRoot)
{
	// d_modes is a 3D stack of modesNum slices along z; sum the slices into d_output.
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x,sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
	hipLaunchKernelGGL(( d_modalSum<real_t>), dim3(grid), dim3(block), 0, 0, d_modes, d_output, modesNum, x, y, squareRoot);
cutilCheckMsg("d_modalSum() execution failed!\n");
}
__host__ void h_realModalSum(const complex_t* d_modes, complex_t* d_output, unsigned int modesNum,
		unsigned int x, unsigned int y, unsigned int alignedY, bool squareRoot)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x,sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
	hipLaunchKernelGGL(( d_modalSumComplex), dim3(grid), dim3(block), 0, 0, d_modes, d_output, modesNum, x, y, squareRoot);
	cutilCheckMsg("d_modalSumComplex() execution failed!\n");
}
__host__ void h_complexSum(complex_t* d_leftArr, complex_t* d_rightArr, complex_t* d_result, real_t leftFactor, real_t rightFactor, unsigned int x, unsigned int y,
unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x,sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_complexSum), dim3(grid), dim3(block), 0, 0, d_leftArr, d_rightArr, d_result, leftFactor, rightFactor, x, y, alignedY);
	cutilCheckMsg("d_complexSum() execution failed!\n");
}
__host__ void h_realSum(real_t* d_leftArr, real_t* d_rightArr, real_t* d_result, real_t leftFactor, real_t rightFactor, unsigned int x, unsigned int y,
unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x,sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_realSum), dim3(grid), dim3(block), 0, 0, d_leftArr, d_rightArr, d_result, leftFactor, rightFactor, x, y, alignedY);
cutilCheckMsg("d_realSum() execution failed!\n");
}
__host__ void h_realSingleSum(real_t* d_leftArr, real_t* d_rightArr, real_t* d_result, unsigned int x, unsigned int y,
unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x,sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
hipLaunchKernelGGL(( d_realSingleSum), dim3(grid), dim3(block), 0, 0, d_leftArr, d_rightArr, d_result, x, y, alignedY);
	cutilCheckMsg("d_realSingleSum() execution failed!\n");
}
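// Location of the maximum of a real 2D array (registration peak); along the requested
// direction the coordinate is wrapped to a signed offset.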
__host__ int2 h_realArgMax2D(real_t* d_ncc, unsigned int x, unsigned int y, unsigned int alignedY, unsigned char dir)
{
thrust::device_ptr<real_t> ncc_wrapper = thrust::device_pointer_cast(d_ncc);
int maxIndex = thrust::max_element(ncc_wrapper, ncc_wrapper+(x*alignedY)) - ncc_wrapper;
cutilCheckMsg("h_realArgMax2D():thrust::max_element() execution failed!\n");
int2 peak;
peak.x = maxIndex / alignedY;
peak.y = maxIndex % alignedY;
peak.x = (dir == 'h' && (peak.x >= (x/2)))? peak.x - x: peak.x;
peak.y = (dir == 'v' && (peak.y >= (y/2)))? peak.y - y: peak.y;
//printf("Registration point (%d,%d)...\n", peak.x, peak.y);
return peak;
}
__host__ void h_realComplexModulate(const complex_t* d_array1, complex_t* d_array2, int2& peak,
unsigned int x, unsigned int y, unsigned int alignedY, unsigned char dir)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)
{
if(dir == 'h' && peak.x!=0)
{
hipLaunchKernelGGL(( d_shiftX<complex_t, true>), dim3(grid), dim3(block), 0, 0, d_array2, d_array2, -(float)peak.x, 0, x, y, alignedY);
cutilCheckMsg("h_hMatchArrays()::shiftX() execution failed!\n");
peak.x = 0;
}
else if(dir == 'v' && peak.y!=0)
{
hipLaunchKernelGGL(( d_shiftY<complex_t, true>), dim3(grid), dim3(block), 0, 0, d_array2, d_array2, -(float)peak.y, 0, x, y, alignedY);
cutilCheckMsg("h_vMatchArrays()::shiftY() execution failed!\n");
peak.y=0;
}
}
else
{
if(dir == 'h' && peak.x!=0)
{
hipLaunchKernelGGL(( d_shiftX<complex_t, false>), dim3(grid), dim3(block), 0, 0, d_array2, d_array2, -(float)peak.x, 0, x, y, alignedY);
cutilCheckMsg("h_hMatchArrays()::shiftX() execution failed!\n");
peak.x = 0;
}
else if(dir == 'v' && peak.y!=0)
{
hipLaunchKernelGGL(( d_shiftY<complex_t, false>), dim3(grid), dim3(block), 0, 0, d_array2, d_array2, -(float)peak.y, 0, x, y, alignedY);
cutilCheckMsg("h_vMatchArrays()::shiftY() execution failed!\n");
peak.y=0;
}
}
complex_t m1 = h_complexSum(d_array1, peak.x, x, peak.y, y, alignedY);
complex_t m2 = h_complexSum(d_array2, 0, x-peak.x, 0, y-peak.y, alignedY);
complex_t ratio = div_complex_t(m1,m2);
h_multiply(d_array2, ratio, d_array2, x, y, alignedY, false);
}
#endif /* UTILITIESKERNELS_CU_ */
| 52d0b6ac70b2df9f97ff9752bdb105a5ef44e27f.cu | ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//Copyright © 2019, UChicago Argonne, LLC
//
//All Rights Reserved
//
//Software Name: ptychopy
//
//By: Argonne National Laboratory
//
//OPEN SOURCE LICENSE
//Any publication using the package should cite:
//Yue K, Deng J, Jiang Y, Nashed Y, Vine D, Vogt S.
//Ptychopy: GPU framework for ptychographic data analysis.
//X-Ray Nanoimaging: Instruments and Methods V 2021.
//International Society for Optics and Photonics.
//
//
//Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
//following conditions are met:
//
//1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
//disclaimer.
//
//2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
//disclaimer in the documentation and/or other materials provided with the distribution.
//
//3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
//derived from this software without specific prior written permission.
//
//DISCLAIMER
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
//INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
//DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
//SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
//SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
//WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
//OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifndef UTILITIESKERNELS_CU_
#define UTILITIESKERNELS_CU_
#include "utilities.h"
#include "reductions.cu"
#include <math.h>
#include <math_constants.h>
#include <float.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include <thrust/extrema.h>
#define EPS 1e-3
//#include <cub/device/device_reduce.cuh>
//using namespace cub;
/* extern shared memory for dynamic allocation */
extern __shared__ real_t shared_array[];
// ~10800 indices is roughly the maximum that fits in constant memory
const unsigned int MAX_IND_READ = 3000;
//__constant__ unsigned int gC_ind_read[MAX_IND_READ];
__constant__ unsigned int gC_pos_X[MAX_IND_READ];
__constant__ unsigned int gC_pos_Y[MAX_IND_READ];
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
texture<float4, 1, cudaReadModeElementType> g_transferTex; // 1D transfer function texture
cudaArray *d_transferFuncArray = 0;
struct complexSum
{
complex_t normalizeBy;
complexSum(complex_t f=make_complex_t(1,0)) : normalizeBy(f)
{}
__host__ __device__ complex_t operator()(const complex_t&lhs, const complex_t&rhs) const
{return mul_complex_t(add_complex_t(lhs,rhs),normalizeBy);}
};
struct maxFloat2
{
__host__ __device__ float2 operator()(float2 lhs, float2 rhs)
{return make_float2(thrust::max(lhs.x, rhs.x), thrust::max(lhs.y, rhs.y));}
};
struct minFloat2
{
__host__ __device__ float2 operator()(float2 lhs, float2 rhs)
{return make_float2(thrust::min(lhs.x, rhs.x), thrust::min(lhs.y, rhs.y));}
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
//__global__ void d_check(real_t* d_data)
//{
//
// unsigned int Index = (blockIdx.x * blockDim.x) + threadIdx.x;
//
//// real_t temp=d_data[Index];
//// unsigned int sq1=1;
//}
//__global__ void d_checkcomplex(complex_t* d_data)
//{
//
// unsigned int Index = (blockIdx.x * blockDim.x) + threadIdx.x;
//
//// complex_t temp=d_data[Index];
//// unsigned int sq1=1;
//}
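// Row-wise sum reduction over the sub-region [x1, x1+xNum) x [y1, y1+yNum):
// each block reduces one row in shared memory and writes a single partial sum.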
template<unsigned int threadNum>
__global__ void d_reduceToSum(const complex_t* d_u, complex_t* d_output, unsigned int x1, unsigned int y1,
unsigned int xNum, unsigned int yNum, unsigned int alignedY, bool enoughThreads)
{
complex_t* s_addends = (complex_t*)shared_array;
unsigned int row = blockIdx.x;//(blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int outIndex = enoughThreads? blockIdx.x : (blockIdx.y*gridDim.x) + blockIdx.x;
unsigned int index = ((row+x1)*alignedY) + col + y1;
if(row<xNum)
{
s_addends[threadIdx.x] = (col<yNum)? d_u[index] : make_complex_t(0,0);
reduceToSumComplex<threadNum>(s_addends, threadIdx.x);
if(threadIdx.x == 0)
d_output[outIndex] = s_addends[0];
}
}
template<unsigned int threadNum>
__global__ void d_reduceToSum(const real_t* d_u, real_t* d_output, unsigned int x1, unsigned int y1,
unsigned int xNum, unsigned int yNum, unsigned int alignedY, bool enoughThreads)
{
real_t* s_addends = (real_t*)shared_array;
unsigned int row = blockIdx.x;//(blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int outIndex = enoughThreads? blockIdx.x : (blockIdx.y*gridDim.x) + blockIdx.x;
unsigned int index = ((row+x1)*alignedY) + col + y1;
if(row<xNum)
{
s_addends[threadIdx.x] = (col<yNum)? d_u[index] : 0;
reduceToSum<real_t,threadNum>(s_addends, threadIdx.x);
if(threadIdx.x == 0)
d_output[outIndex] = s_addends[0];
}
}
__global__ void d_complexSubtract(const complex_t* a, const complex_t* b, complex_t* result, unsigned int y)
{
unsigned int index = (((blockIdx.x*blockDim.y)+threadIdx.y) * blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
result[index] = sub_complex_t(a[index], b[index]);
}
__global__ void d_subtract(const real_t* a, const real_t* b, real_t* result, unsigned int y)
{
unsigned int index = (((blockIdx.x*blockDim.y)+threadIdx.y) * blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
result[index] = a[index]-b[index];
}
__global__ void d_addFactorDivide(real_t* a, real_t* result, real_t factor, unsigned int y)
{
unsigned int index = (((blockIdx.x*blockDim.y)+threadIdx.y) * blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
{
real_t tmp=a[index];
result[index]=tmp/(tmp+factor);
}
}
__global__ void d_object_sum_update_Gfun(complex_t* a, real_t* b, complex_t* result, real_t factor, unsigned int y)
{
unsigned int index = (((blockIdx.x*blockDim.y)+threadIdx.y) * blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
{
complex_t tmp= make_complex_t((b[index]+factor), 0);
result[index]=div_complex_t(a[index], tmp);
}
}
__global__ void d_addFactor(complex_t* a, complex_t* result, complex_t factor, unsigned int y)
{
unsigned int index = (((blockIdx.x*blockDim.y)+threadIdx.y) * blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
{
result[index]=add_complex_t(a[index], factor);
}
}
__global__ void d_addFactor(real_t* a, real_t* result, real_t factor, unsigned int y)
{
unsigned int index = (((blockIdx.x*blockDim.y)+threadIdx.y) * blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
{
result[index]=a[index]+factor;
}
}
template<bool enoughThreads>
__global__ void d_multiplyConju(const complex_t* a, const complex_t* b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c,
unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset,
unsigned int byOffset)
{
unsigned int aRow = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int aCol = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int bRow = aRow + bxOffset;
unsigned int bCol = aCol + byOffset;
unsigned int bIndex = (bRow * alignedY) + bCol;
aRow += axOffset;
aCol += ayOffset;
unsigned int aIndex = (aRow * alignedY) + aCol;
if(max(aRow,bRow)<x && max(aCol,bCol)<y)
{
complex_t temp = mul_complex_t(a[aIndex], conj_complex_t(b[bIndex]));
result[bIndex] = mul_complex_t(temp, make_complex_t(c, 0));
}
}
template<bool enoughThreads>
__global__ void d_complexMultiply(const complex_t* a, const complex_t* b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c,
unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset,
unsigned int byOffset)
{
unsigned int aRow = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int aCol = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int bRow = aRow + bxOffset;
unsigned int bCol = aCol + byOffset;
unsigned int bIndex = (bRow * alignedY) + bCol;
aRow += axOffset;
aCol += ayOffset;
unsigned int aIndex = (aRow * alignedY) + aCol;
if(max(aRow,bRow)<x && max(aCol,bCol)<y)
{
complex_t temp = mul_complex_t(a[aIndex], b[bIndex]);
result[bIndex] = mul_complex_t(temp, make_complex_t(c, 0));
}
}
template<bool enoughThreads>
__global__ void d_complexMultiply(const real_t* a, const complex_t* b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c,
unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset,
unsigned int byOffset)
{
unsigned int aRow = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int aCol = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int bRow = aRow + bxOffset;
unsigned int bCol = aCol + byOffset;
unsigned int bIndex = (bRow * alignedY) + bCol;
aRow += axOffset;
aCol += ayOffset;
unsigned int aIndex = (aRow * alignedY) + aCol;
if(max(aRow,bRow)<x && max(aCol,bCol)<y)
{
result[bIndex] = mul_complex_t(make_complex_t(a[aIndex], 0), b[bIndex]);
}
}
template<bool enoughThreads>
__global__ void d_complexMultiply(const complex_t* a, complex_t b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t temp = mul_complex_t(a[index], b);
result[index] = mul_complex_t(temp, make_complex_t(c, 0));
}
}
template<bool enoughThreads>
__global__ void d_multiply(const real_t* a, real_t b, real_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
result[index] = a[index]*b;
}
}
template<bool enoughThreads>
__global__ void d_mul_rca_mulc_rcr(complex_t* a, complex_t* b, complex_t* c, real_t* weight_proj,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t temp1=mul_complex_t(a[index],b[index]);
float sum2denom=abs_complex_t(temp1);
complex_t temp3=mul_complex_t(c[index], conj_complex_t(temp1));
float sum2nom=real_complex_t(temp3);
weight_proj[index]=0.1*sum2nom/(sum2denom*sum2denom);
}
}
// Page-wise multiply: b holds pagex rows that are reused for every page of a (row index taken modulo pagex)
template<bool enoughThreads>
__global__ void d_multiplyPage(const complex_t* a, complex_t* b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, unsigned int pagex)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
// bcol=col
unsigned int brow=row%pagex;
unsigned int aindex = (row * alignedY) + col;
unsigned int bindex = (brow * alignedY) + col;
if(row<x && col<y)
{
result[aindex] = mul_complex_t(a[aindex], b[bindex]);
}
}
template<bool enoughThreads>
__global__ void d_multiplyAbsConjuRealWhole(const complex_t* a, complex_t* b, complex_t* c, real_t* result1,
real_t* result2, unsigned int x, unsigned int y, unsigned int alignedY,
unsigned int pagex)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t img = mul_complex_t(a[index], b[index]);
real_t temp = abs_complex_t(img);
result1[index] = temp*temp;
img = mul_complex_t(c[index], conj_complex_t(img));
result2[index]=real_complex_t(img);
}
}
template<bool enoughThreads>
__global__ void d_multiplyAbsConjuReal(const complex_t* a, complex_t* b, complex_t* c, real_t* result1,
real_t* result2, unsigned int x, unsigned int y, unsigned int alignedY,
unsigned int pagex)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
// bcol=col
unsigned int brow=row%pagex;
unsigned int aindex = (row * alignedY) + col;
unsigned int bindex = (brow * alignedY) + col;
if(row<x && col<y)
{
complex_t img = mul_complex_t(a[aindex], b[bindex]);
real_t temp = abs_complex_t(img);
result1[aindex] = temp*temp;
img = mul_complex_t(c[aindex], conj_complex_t(img));
result2[aindex]=real_complex_t(img);
}
}
// b holds a single row of factors: every row of a is scaled column-wise by b[col]
template<bool enoughThreads>
__global__ void d_multiplyRow(const complex_t* a, real_t* b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t temp = mul_complex_t(a[index], make_complex_t(b[col], 0));
result[index] = temp;
}
}
// b holds a single column of factors: every column of a is scaled row-wise by b[row]
template<bool enoughThreads>
__global__ void d_multiplyColum(const complex_t* a, real_t* b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t temp = mul_complex_t(a[index], make_complex_t(b[row], 0));
result[index] = temp;
}
}
//function [AA1,AA2,AA4, Atb1,Atb2] = ...
// get_optimal_step_lsq(chi,dO,dP,O,P, lambda)
// % fast kernel for estimation of optimal probe and object steps
// dOP = dO.*P;
// dPO = dP.*O;
// cdOP = conj(dOP);
// cdPO = conj(dPO);
//
// AA1 = real(dOP .* cdOP)+lambda;
// AA2 = (dOP .* cdPO);
// AA4 = real(dPO .* cdPO)+lambda;
// Atb1 = real(cdOP .* chi);
// Atb2 = real(cdPO .* chi);
//end
template<bool enoughThreads>
__global__ void d_get_optimal_step_lsq(complex_t* chi, complex_t* object_update_proj, complex_t* dPO, complex_t* probe, real_t lambda,
real_t* AA1, complex_t* AA2, real_t* AA4, real_t* Atb1, real_t* Atb2,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t dOP=mul_complex_t(object_update_proj[index], probe[index]);
complex_t cdOP=conj_complex_t(dOP);
complex_t cdPO=conj_complex_t(dPO[index]);
AA1[index]=real_complex_t(mul_complex_t(dOP, cdOP))+lambda;
AA2[index]=mul_complex_t(dOP, cdPO);
AA4[index] = real_complex_t(mul_complex_t(dPO[index], cdPO))+lambda;
Atb1[index]=real_complex_t(mul_complex_t(cdOP, chi[index]));
Atb2[index]=real_complex_t(mul_complex_t(cdPO, chi[index]));
}
}
template<bool enoughThreads>
__global__ void d_complexMultiply(const complex_t* a, real_t c, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t temp=mul_complex_t(a[index], make_complex_t(c, 0.0));
result[index] = temp;
}
}
template<bool enoughThreads>
__global__ void d_realMultiply(real_t* a, real_t* b, real_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, real_t c,
unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset,
unsigned int byOffset)
{
unsigned int aRow = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int aCol = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int bRow = aRow + bxOffset;
unsigned int bCol = aCol + byOffset;
unsigned int bIndex = (bRow * alignedY) + bCol;
aRow += axOffset;
aCol += ayOffset;
unsigned int aIndex = (aRow * alignedY) + aCol;
if(max(aRow,bRow)<x && max(aCol,bCol)<y)
{
result[aIndex]=a[aIndex]*b[bIndex];
}
}
template<bool enoughThreads>
__global__ void d_realMultiply(real_t* a, real_t* result,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
real_t temp=a[index]*result[index];
result[index] = temp;
}
}
template<bool enoughThreads>
__global__ void d_realToRGBA(const real_t* a, real_t c, float4* result, unsigned int X, unsigned int Y, unsigned int alignedY, float transferOffset, float transferScale)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
unsigned int oIndex = (row * Y) + col;
if(row<X && col<Y)
{
float normalizedV = (float) (a[index]*c);
result[oIndex] = tex1D(g_transferTex, (normalizedV-transferOffset)*transferScale);
}
}
template<bool enoughThreads>
__global__ void d_realToGray(const real_t* a, real_t c, float* result, unsigned int X, unsigned int Y, unsigned int alignedY, bool outAligned)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
unsigned int oIndex = (row * Y) + col;
if(row<X && col<Y)
{
float normalizedV = (float) (a[index]*c);
result[outAligned?index:oIndex] = normalizedV;
}
}
template<unsigned char op, bool enoughThreads>
__global__ void d_complexToDouble(const complex_t* a, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY, bool squared)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
real_t temp = 0;
switch(op)
{
case 'a': temp = abs_complex_t(a[index]); break;
case 'p': temp = atan2_real_t(a[index].y, a[index].x); break;
case 'r': temp = real_complex_t(a[index]); break;
case 'i': temp = imag_complex_t(a[index]); break;
default: temp = 0; break;
}
result[index] = squared? temp*temp: temp;
}
}
template<bool enoughThreads>
__global__ void d_realComplexExp(const real_t* src, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY, real_t factor)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
complex_t temp=make_complex_t((cos_real_t(src[index]*factor)), (sin_real_t(src[index]*factor)));
result[index]=temp;
}
}
template<bool enoughThreads>
__global__ void d_realsquareRoot(real_t* d_arr, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
// real_t temp = 0;
// temp= sqrt_real_t(d_arr[index]);
result[index]=sqrt_real_t(d_arr[index]);
}
}
template<bool enoughThreads>
__global__ void d_square(real_t* d_arr, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(row<x && col<y)
{
real_t tmp=d_arr[index];
result[index]=tmp*tmp;
}
}
__device__ bool letFloat(const real_t* beamstopMask, unsigned int index, const real_t saturationValue, const real_t diffValue)
{
bool toFloat = beamstopMask? beamstopMask[index]<0.99:false;
toFloat = toFloat || (diffValue-saturationValue)>=0;
return toFloat;
}
__device__ complex_t modulusConstraint(complex_t psi, real_t det_mod)
{
real_t sinFunc, cosFunc;
sincos_real_t(atan2_real_t(psi.y,psi.x),&sinFunc,&cosFunc);
return make_complex_t(det_mod*cosFunc, det_mod*sinFunc);
}
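// modulusConstraint keeps the phase of psi and replaces its magnitude with the measured
// modulus: psi' = det_mod * exp(i*angle(psi)), so |psi'| == det_mod and arg(psi') == arg(psi).
// Illustrative sketch with hypothetical values (not part of the library):
//   complex_t psi  = make_complex_t(3, 4);            // |psi| = 5
//   complex_t psi2 = modulusConstraint(psi, 10);      // ~(6, 8): magnitude 10, same phase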
__global__ void d_adjustFFT(const complex_t* d_psi, complex_t* d_output, const real_t* d_det_mod, const real_t* d_mask,
const real_t saturationValue, unsigned int y, real_t normalizeBy)
{
unsigned int row = (blockIdx.x*blockDim.y)+threadIdx.y;
unsigned int col = threadIdx.x;
unsigned int psiIndex = (row * blockDim.x) + col;
if(col < y)
{
complex_t psi = d_psi[psiIndex];
real_t diffValue = d_det_mod[psiIndex];
// if(diffValue>=saturationValue)
// {
// printf("diffValue is %f, saturationValue is %f \n", diffValue, saturationValue);
// printf(" row is %u, column is %u, complex_t psi x is %f, psi y is %f \n", row, col, psi.x, psi.y);
// }
bool toFloat = letFloat(d_mask,psiIndex,saturationValue, diffValue);
// d_output[psiIndex] = toFloat?psi:mul_complex_t(modulusConstraint(psi, diffValue), make_complex_t(normalizeBy, 0.0));
d_output[psiIndex] = mul_complex_t(toFloat?psi:modulusConstraint(psi, diffValue), make_complex_t(normalizeBy, 0.0));
}
}
__global__ void d_adjustModalFFT(const complex_t* d_psi, complex_t* d_output, const real_t* d_det_mod, const real_t* d_mask,
const real_t saturationValue, unsigned int modeNum, unsigned int x, unsigned int y, real_t normalizeBy)
{
unsigned int modeIndex = (blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int detIndex = (modeIndex*blockDim.x) + threadIdx.x;
if(modeIndex<x && threadIdx.x<y)
{
real_t modalSum = 0, avdata = d_det_mod[detIndex];
for(unsigned int i=0; i<modeNum; ++i)
{
unsigned int psiIndex = ((modeIndex+(i*x))*blockDim.x) + threadIdx.x;
real_t psiFFtAbs = abs_complex_t(d_psi[psiIndex]);
modalSum += psiFFtAbs * psiFFtAbs;
}
modalSum = rsqrt_real_t(modalSum);
for(unsigned int i=0; i<modeNum; ++i)
{
unsigned int psiIndex = ((modeIndex+(i*x))*blockDim.x) + threadIdx.x;
bool toFloat = letFloat(d_mask, detIndex, saturationValue, avdata);
// d_output[psiIndex] = toFloat?d_psi[psiIndex]:mul_complex_t(d_psi[psiIndex], make_complex_t(avdata*modalSum*normalizeBy, 0.0));
d_output[psiIndex] = mul_complex_t(d_psi[psiIndex], make_complex_t((toFloat?1:avdata)*modalSum*normalizeBy, 0.0));
}
}
}
template<unsigned int threadNum>
__global__ void d_calculateER(const complex_t* d_psi, const real_t* d_detMod, real_t* d_output,
unsigned int x, unsigned int y, unsigned int alignedY, unsigned int modeNum, bool enoughThreads)
{
real_t* s_addends = (real_t*)shared_array;
unsigned int row = blockIdx.x;//(blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int outIndex = enoughThreads? blockIdx.x : (blockIdx.y*gridDim.x) + blockIdx.x;
unsigned int index = (row*alignedY) + col;
if(row<x)
{
complex_t modalSum = make_complex_t(0,0);
for(unsigned int i=0; i<modeNum; ++i)
{
unsigned int psiIndex = ((row+(i*x))*alignedY) + col;
modalSum = add_complex_t(modalSum, d_psi[psiIndex]);
}
s_addends[threadIdx.x] = (col<y)? abs_real_t( d_detMod[index] - abs_complex_t(modalSum) ) : 0;
reduceToSum<real_t, threadNum>(s_addends, threadIdx.x);
if(threadIdx.x == 0)
d_output[outIndex] = s_addends[0];
}
}
template<bool enoughThreads>
__global__ void d_realSpaceER(const complex_t* d_GT, const complex_t* d_obj, real_t* d_output,
unsigned int qx, unsigned int qy,
unsigned int outX, unsigned int outY,
unsigned int x1, unsigned int y1, unsigned int alignedY1,
unsigned int x2, unsigned int y2, unsigned int alignedY2)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int outIndex = (row*outY) + col;
unsigned int gtIndex = ((row+qx)*alignedY1) + col + qy;
unsigned int objIndex = ((row+qx)*alignedY2) + col + qy;
if(row<outX && col<outY)
{
complex_t gtVal = d_GT[gtIndex];
complex_t objVal = d_obj[objIndex];
real_t diff = abs_complex_t(gtVal) - abs_complex_t(objVal);
d_output[outIndex] = diff*diff;
}
}
template<typename T, bool enoughThreads>
__global__ void d_shiftY(const T* d_objectArray, T* d_output, float nx,
unsigned int offset, unsigned int X, unsigned int Y, unsigned int alignedY)
{
unsigned int xIndex = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int yIndex = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int objectArrayIndex = (xIndex * alignedY) + yIndex;
	if(xIndex<X && yIndex<Y)
	{
		// read only inside the bounds check (as in d_shiftX) to avoid an out-of-range load
		T saved = d_objectArray[objectArrayIndex];
int offsetY = yIndex - (roundf(nx) - offset);
if(offsetY < 0) offsetY += Y;
if(offsetY >= Y) offsetY -= Y;
offsetY += (xIndex * alignedY);
__syncthreads();
d_output[offsetY] = saved;
}
}
template<typename T, bool enoughThreads>
__global__ void d_shiftX(const T* d_objectArray, T* d_output, float ny,
unsigned int offset, unsigned int X, unsigned int Y, unsigned int alignedY)
{
unsigned int xIndex = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int yIndex = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
if(xIndex<X && yIndex<Y)
{
unsigned int objectArrayIndex = (xIndex * alignedY) + yIndex;
T saved = d_objectArray[objectArrayIndex];
int offsetX = xIndex - (roundf(ny) - offset);
if(offsetX < 0) offsetX += X;
if(offsetX >= X) offsetX -= X;
offsetX = (offsetX * alignedY) + yIndex;
__syncthreads();
d_output[offsetX] = saved;
}
}
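// d_shiftY/d_shiftX perform a circular shift of the array along one axis by
// roundf(nx)-offset (resp. roundf(ny)-offset) pixels, modulo the array size: indices pushed
// past one edge wrap around to the other, so no data is lost.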
template<typename T>
__global__ void d_imshift_fft(T* d_data, unsigned int midx, unsigned int midy, float radNo1, float radNo2,
unsigned int X, unsigned int Y, unsigned int alignedY)
{
unsigned int xIndex = threadIdx.x;
unsigned int yIndex = (blockIdx.x*blockDim.y) + threadIdx.y;
if(xIndex<Y && yIndex<X)
{
unsigned int objectArrayIndex = (yIndex * alignedY) + xIndex;
T saved = d_data[objectArrayIndex];
float xgridindex=xIndex;
float ygridindex=yIndex;
if (xIndex < midx)
xgridindex+=midx;
else
xgridindex-=midx;
if (yIndex < midy)
ygridindex+=midy;
else
ygridindex-=midy;
xgridindex=radNo1*(xgridindex/X-0.5);
ygridindex=radNo2*(ygridindex/Y-0.5);
real_t sumInitx=2*CUDART_PI*xgridindex;
real_t sumInity=2*CUDART_PI*ygridindex;
real_t costx=cos_real_t(sumInitx);
real_t sintx=-1*sin_real_t(sumInitx);
real_t costy=cos_real_t(sumInity);
real_t sinty=-1*sin_real_t(sumInity);
complex_t tempmulx = make_complex_t(costx, sintx);
complex_t tempmuly = make_complex_t(costy, sinty);
d_data[objectArrayIndex]=mul_complex_t(saved,mul_complex_t(tempmulx, tempmuly));
}
}
template<typename T>
__global__ void d_mirrorY(const T* d_objectArray, T* d_output, unsigned int objectArrayY)
{
unsigned int objectArrayIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
T saved = d_objectArray[objectArrayIndex];
if(threadIdx.x < objectArrayY)
{
unsigned int mirrorIndex = (blockIdx.x * blockDim.x) + (objectArrayY-threadIdx.x);
d_output[--mirrorIndex] = saved;
}
}
template<typename T>
__global__ void d_rot90(const T* src, T* dst, unsigned int rows, unsigned int cols, unsigned int pitch)
{
unsigned int row = (blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int col = threadIdx.x;
unsigned int tid = row * pitch + col;
unsigned int tid_out = (rows-col-1) * pitch + row;
//saved[threadIdx.x*blockDim.y+threadIdx.y] = srcDst[tid];
if(row<rows && col<cols)
dst[tid_out] = src[tid];//saved[threadIdx.x*blockDim.y+threadIdx.y];
}
template<unsigned int threadNum>
__global__ void d_innerProduct(const complex_t* d_u, const complex_t* d_v, complex_t* d_output,
real_t oneOverN, unsigned int y)
{
complex_t* s_addends = (complex_t*)shared_array;
unsigned int probeIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
complex_t value = (threadIdx.x<y)?mul_complex_t( conj_complex_t(d_u[probeIndex]), d_v[probeIndex]): make_complex_t(0,0);
s_addends[threadIdx.x] = make_complex_t(value.x*oneOverN,value.y*oneOverN);
reduceToSumComplex<threadNum>(s_addends,threadIdx.x);
if(threadIdx.x == 0)
d_output[blockIdx.x] = s_addends[0];
}
template<unsigned int threadNum>
__global__ void d_innerProductOne(const complex_t* d_u, const complex_t* d_v, complex_t* d_output,
real_t oneOverN, unsigned int y)
{
complex_t* s_addends = (complex_t*)shared_array;
unsigned int probeIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
complex_t value = (threadIdx.x<y)?mul_complex_t( conj_complex_t(d_u[probeIndex]), d_v[probeIndex]): make_complex_t(0,0);
s_addends[threadIdx.x] = make_complex_t(value.x*oneOverN,value.y*oneOverN);
reduceToSumComplex<threadNum>(s_addends,threadIdx.x);
if(threadIdx.x == 0)
d_output[blockIdx.x] = s_addends[0];
}
__global__ void d_innerProductModes(complex_t* d_u, complex_t* d_v, complex_t* d_factor,
unsigned int index, unsigned int ModeNumber, unsigned int probeX, unsigned int probeY, unsigned int offset)
{
unsigned int row = (blockIdx.x*blockDim.y) + threadIdx.y;
// unsigned int modeIndex = ((row+((blockIdx.x) * probeX)) * blockDim.x) + threadIdx.x;
unsigned int baseIndex = (row*blockDim.x) + threadIdx.x;
if(row<probeX && threadIdx.x<probeY)
{
complex_t value=make_complex_t(0, 0);
for(int i=0; i< ModeNumber; i++)
{
value = add_complex_t(value, mul_complex_t(d_u[baseIndex+offset*i], d_factor[index+i*ModeNumber]));
}
// complex_t value=add_complex_t(mul_complex_t(d_u[baseIndex], d_factor[index]), mul_complex_t(d_u[baseIndex+offset], d_factor[index+5]));
// value = add_complex_t(value, mul_complex_t(d_u[baseIndex+offset*2], d_factor[index+2*5]));
// value = add_complex_t(value, mul_complex_t(d_u[baseIndex+offset*3], d_factor[index+3*5]));
// value = add_complex_t(value, mul_complex_t(d_u[baseIndex+offset*4], d_factor[index+4*5]));
d_v[baseIndex]=value;
}
}
template<typename T>
__global__ void d_modalSum(const T* d_modes, T* d_output, unsigned int modeNum, unsigned int x, unsigned int y, bool squareRoot)
{
//unsigned int baseIndex = (blockIdx.x * probeX) + ((blockIdx.y*blockDim.y) + threadIdx.y);
unsigned int modeIndex = (blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int outIndex = (modeIndex*blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
{
T val = d_modes[outIndex];
for(unsigned int i=1; i<modeNum; ++i)
val += d_modes[((modeIndex+(i*x))*blockDim.x) + threadIdx.x];
		d_output[outIndex] = squareRoot? sqrt_real_t(val) : val;
}
}
__global__ void d_modalSumComplex(const complex_t* d_modes, complex_t* d_output, unsigned int modeNum, unsigned int x, unsigned int y, bool squareRoot)
{
unsigned int modeIndex = (blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int outIndex = (modeIndex*blockDim.x) + threadIdx.x;
if(threadIdx.x < y)
{
complex_t val = d_modes[outIndex];
for(unsigned int i=1; i<modeNum; ++i)
val=add_complex_t(val, d_modes[((modeIndex+(i*x))*blockDim.x) + threadIdx.x]);
d_output[outIndex]=val;
}
}
__global__ void d_complexSum(complex_t* d_leftArr, complex_t* d_rightArr, complex_t* d_result, real_t leftFactor, real_t rightFactor, unsigned int x, unsigned int y,
unsigned int alignedY)
{
unsigned int row = (blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int col = threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(threadIdx.x < y)
{
complex_t leftOp=mul_complex_t(d_leftArr[index], make_complex_t(leftFactor,0));
complex_t rightOp=mul_complex_t(d_rightArr[index], make_complex_t(rightFactor,0));
d_result[index]=add_complex_t(leftOp, rightOp);
}
}
__global__ void d_realSum(real_t* d_leftArr, real_t* d_rightArr, real_t* d_result, real_t leftFactor, real_t rightFactor, unsigned int x, unsigned int y,
unsigned int alignedY)
{
unsigned int row = (blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int col = threadIdx.x;
unsigned int index = (row * alignedY) + col;
if(threadIdx.x < y)
{
d_result[index]=d_leftArr[index]*leftFactor+d_rightArr[index]*rightFactor;
}
}
// Broadcast sum: result(row,col) = leftArr[col] + rightArr[row], expanding a 1 x y row and
// an x x 1 column into a full x-by-y matrix (e.g. 26x256).
__global__ void d_realSingleSum(real_t* d_leftArr, real_t* d_rightArr, real_t* d_result, unsigned int x, unsigned int y,
unsigned int alignedY)
{
unsigned int row = (blockIdx.x*blockDim.y) + threadIdx.y;
unsigned int col = threadIdx.x;
unsigned int leftIndex= col;
// unsigned int rightindex = row * alignedY;
unsigned int rightindex= row;
unsigned int index = (row * alignedY) + col;
if(threadIdx.x < y)
{
d_result[index]=d_leftArr[leftIndex]+d_rightArr[rightindex];
}
}
template<bool enoughThreads>
__global__ void d_extractArrReal(real_t* d_objectArray, real_t* d_output, unsigned int sampleX, unsigned int sampleY,
float offsetX, float offsetY, unsigned int alignedSampleY, unsigned int alignedObjectArrayY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int outputIndex = (row * alignedSampleY) + col;
unsigned int inputIndex=(row+offsetX)*alignedObjectArrayY+col+offsetY;
if(row<sampleX && col<sampleY)
{
d_output[outputIndex] = d_objectArray[inputIndex];
}
}
template<bool enoughThreads>
__global__ void d_extractArrComplex(complex_t* d_objectArray, complex_t* d_output, unsigned int sampleX, unsigned int sampleY,
float offsetX, float offsetY, unsigned int alignedSampleY, unsigned int alignedObjectArrayY)
{
unsigned int row = enoughThreads? (blockIdx.x*blockDim.y) + threadIdx.y : blockIdx.x;
unsigned int col = enoughThreads? threadIdx.x : (blockIdx.y*blockDim.x) + threadIdx.x;
unsigned int outputIndex = (row * alignedSampleY) + col;
unsigned int inputIndex=(row+offsetX)*alignedObjectArrayY+col+offsetY;
if(row<sampleX && col<sampleY)
{
d_output[outputIndex] = d_objectArray[inputIndex];
}
}
__global__ void d_addToArray_r(float * sarray, float* larray, unsigned int* pos_X, unsigned int* posY,
unsigned int Np_px, unsigned int Np_py, unsigned int Np_ox, unsigned int Np_oy,
unsigned int Npos, unsigned int alignedObjectY, const bool isFlat)
{
// Location in a 3D matrix
int idx= blockIdx.x * blockDim.x + threadIdx.x;
int idy= blockIdx.y * blockDim.y + threadIdx.y;
int id = blockIdx.z * blockDim.z + threadIdx.z;
if ( idx < Np_px && idy < Np_py && id < Npos)
{
int idz = id; // go only through some of the indices
int id_large = alignedObjectY*(pos_X[idz]+idx)+(posY[idz]+idy);
int id_small = Np_py*idx + idy ;
// if (!isFlat)
// id_small = id_small + Np_px*Np_py*idz ;
if (!isFlat)
id_small = id_small + Np_px*alignedObjectY*idz ;
atomicAdd(&larray[id_large] ,sarray[id_small]);
}
}
__global__ void d_addToArray_c(complex_t * sarray, complex_t* larray, unsigned int* pos_X, unsigned int* posY,
unsigned int Np_px, unsigned int Np_py, unsigned int Np_ox, unsigned int Np_oy,
unsigned int Npos, unsigned int alignedObjectY, unsigned int alignedProbeY, const bool isFlat)
{
// Location in a 3D matrix
int idx= blockIdx.x * blockDim.x + threadIdx.x;
int idy= blockIdx.y * blockDim.y + threadIdx.y;
int id = blockIdx.z * blockDim.z + threadIdx.z;
if ( idx < Np_px && idy < Np_py && id < Npos)
{
int idz = id; // go only through some of the indices
int id_large = alignedObjectY*(pos_X[idz]+idx)+(posY[idz]+idy);
int id_small = Np_py*idx + idy ;
if (!isFlat)
id_small = id_small + Np_px*alignedProbeY*idz ;
// id_small = id_small + Np_px*Np_py*idz ;
atomicAdd(&larray[id_large].x ,sarray[id_small].x);
atomicAdd(&larray[id_large].y ,sarray[id_small].y);
}
}
__global__ void d_readFromArray_c(complex_t * sarray, const complex_t * larray, /*unsigned int* ind_read,*/ unsigned int* pos_X, unsigned int* pos_Y,
unsigned int Np_px, unsigned int Np_py, unsigned int Np_pz, unsigned int Np_ox, unsigned int Np_oy,
unsigned int alignedObjectY, unsigned int alignedProbeY, unsigned int Npos) {
// Location in a 3D matrix
int idx= blockIdx.x * blockDim.x + threadIdx.x;
int idy= blockIdx.y * blockDim.y + threadIdx.y;
int id = blockIdx.z * blockDim.z + threadIdx.z;
    if ( idx < Np_px && idy < Np_py && id < Npos)
{
// int idz = ind_read[id]; // go only through some of the indices
int idz = id;
int id_large = alignedObjectY*(pos_X[idz]+idx)+pos_Y[idz]+idy;
// int id_large = pos_X[idz]+idx + Np_ox*(pos_Y[idz]+idy);
// int id_small = idx + Np_px*idy + Np_px*Np_py*idz ;
int id_small = alignedProbeY*idx + idy + Np_px*alignedProbeY*idz ;
// sarray[id_small].x = larray[id_large].x ;
// sarray[id_small].y = larray[id_large].y ;
sarray[id_small]= larray[id_large];
}
}
__global__ void d_readFromArray_r(real_t * sarray, const real_t * larray, /*unsigned int* ind_read,*/ unsigned int* pos_X, unsigned int* pos_Y,
unsigned int Np_px, unsigned int Np_py, unsigned int Np_pz, unsigned int Np_ox, unsigned int Np_oy,
unsigned int alignedObjectY, unsigned int alignedProbeY, unsigned int Npos)
{
// Location in a 3D matrix
int idx= blockIdx.x * blockDim.x + threadIdx.x;
int idy= blockIdx.y * blockDim.y + threadIdx.y;
int id = blockIdx.z * blockDim.z + threadIdx.z;
    if ( idx < Np_px && idy < Np_py && id < Npos)
{
// int idz = ind_read[id]; // go only through some of the indices
int idz = id;
int id_large = alignedObjectY*(pos_X[idz]+idx)+pos_Y[idz]+idy;
// int id_large = pos_X[idz]+idx + Np_ox*(pos_Y[idz]+idy);
// int id_small = idx + Np_px*idy + Np_px*Np_py*idz ;
int id_small = alignedProbeY*idx + idy + Np_px*alignedProbeY*idz ;
// sarray[id_small].x = larray[id_large].x ;
// sarray[id_small].y = larray[id_large].y ;
sarray[id_small]= larray[id_large];
}
}
__global__ void d_readFromArray_r_fast(real_t * sarray, const real_t * larray,
unsigned int Np_px, unsigned int Np_py, unsigned int Np_pz, unsigned int Np_ox, unsigned int Np_oy,
unsigned int alignedObjectY, unsigned int alignedProbeY, unsigned int Npos)
{
// Location in a 3D matrix
int idx= blockIdx.x * blockDim.x + threadIdx.x;
int idy= blockIdx.y * blockDim.y + threadIdx.y;
int id = blockIdx.z * blockDim.z + threadIdx.z;
    if ( idx < Np_px && idy < Np_py && id < Npos)
{
// int idz = gC_ind_read[id]-1; // go only through some of the indices
int idz = id;
// int id_large = gC_pos_X[idz]+idx + Np_ox*(gC_pos_Y[idz]+idy);
int id_large = alignedObjectY*(gC_pos_X[idz]+idx) + gC_pos_Y[idz]+idy;
// int id_small = idx + Np_px*idy + Np_px*Np_py*idz ;
int id_small = alignedProbeY*idx + idy + Np_px*alignedProbeY*idz ;
sarray[id_small] = larray[id_large];
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__host__ int getReductionThreadNum(int size) {return (int) rint( pow(2.0f, (int)ceil( log2( (float) size) ) ) );}
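// getReductionThreadNum rounds a row length up to the next power of two so the shared-memory
// tree reductions (d_reduceToSum, d_calculateER) can halve the active stride every step.
// Illustrative: getReductionThreadNum(300) == 512, getReductionThreadNum(256) == 256.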
__host__ void h_initColorTransferTexture()
{
// create transfer function texture
float4 transferFunc[] =
{
{ 0.0, 0.0, 0.0, 0.0, },
{ 1.0, 0.0, 0.0, 1.0, },
{ 1.0, 0.5, 0.0, 1.0, },
{ 1.0, 1.0, 0.0, 1.0, },
{ 0.0, 1.0, 0.0, 1.0, },
{ 0.0, 1.0, 1.0, 1.0, },
{ 0.0, 0.0, 1.0, 1.0, },
{ 1.0, 0.0, 1.0, 1.0, },
{ 0.0, 0.0, 0.0, 0.0, },
};
cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>();
cudaMallocArray(&d_transferFuncArray, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1);
cutilCheckMsg("h_initColorTransferTexture() cudaMallocArray execution failed!\n");
cudaMemcpyToArray(d_transferFuncArray, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice);
cutilCheckMsg("h_initColorTransferTexture() cudaMemcpyToArray execution failed!\n");
g_transferTex.filterMode = cudaFilterModeLinear;
g_transferTex.normalized = true; // access with normalized texture coordinates
g_transferTex.addressMode[0] = cudaAddressModeWrap; // wrap texture coordinates
// Bind the array to the texture
cudaBindTextureToArray(&g_transferTex, d_transferFuncArray, &channelDesc2);
cutilCheckMsg("h_initColorTransferTexture() cudaBindTextureToArray execution failed!\n");
}
__host__ void h_freeColorTransferTexture()
{
if(d_transferFuncArray)
{
cudaUnbindTexture(&g_transferTex);
cutilCheckMsg("h_freeColorTransferTexture()::cudaUnbindTexture() execution failed!\n");
cudaFreeArray(d_transferFuncArray);
cutilCheckMsg("h_init3DTexture()::cudaFreeArray() execution failed!\n");
}
}
template<typename T>
void h_reduceToSum(const T* a, thrust::device_vector<T>& out, unsigned int x1, unsigned int x2, unsigned int y1, unsigned int y2, unsigned int alignedY)
{
unsigned int xNum = x2-x1;
unsigned int yNum = y2-y1;
unsigned int maxThreads = GPUQuery::getInstance()->getGPUMaxThreads();
unsigned int reductionThreads = getReductionThreadNum(yNum);
dim3 grid;
dim3 block;
bool enoughThreads = true;
if(reductionThreads<=maxThreads)
{
grid = dim3(xNum, 1, 1);
block = dim3(reductionThreads, 1, 1);
out.resize(xNum);
}
else
{
enoughThreads = false;
unsigned int sliceNum = gh_iDivUp(reductionThreads, maxThreads);
grid = dim3(xNum, sliceNum, 1);
block = dim3(maxThreads, 1, 1);
out.resize(xNum*sliceNum);
}
unsigned int threadNum = block.x * block.y;
size_t shared_mem_size = (threadNum <= 32) ? 2* threadNum * sizeof(T) : threadNum * sizeof(T);
switch (threadNum)
{
case 8: d_reduceToSum< 8><<<grid, block, shared_mem_size>>>(a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
case 16: d_reduceToSum< 16><<<grid, block, shared_mem_size>>>(a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
case 32: d_reduceToSum< 32><<<grid, block, shared_mem_size>>>(a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
case 64: d_reduceToSum< 64><<<grid, block, shared_mem_size>>>(a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
case 128: d_reduceToSum< 128><<<grid, block, shared_mem_size>>>(a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
case 256: d_reduceToSum< 256><<<grid, block, shared_mem_size>>>(a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
case 512: d_reduceToSum< 512><<<grid, block, shared_mem_size>>>(a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
case 1024: d_reduceToSum<1024><<<grid, block, shared_mem_size>>>(a, thrust::raw_pointer_cast(out.data()), x1, y1, xNum, yNum, alignedY, enoughThreads);
break;
}
cutilCheckMsg("d_reduceToSum() execution failed!\n");
}
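// Usage sketch (illustrative; assumes d_arr is a device buffer of x*alignedY real_t):
//   thrust::device_vector<real_t> partials;
//   h_reduceToSum<real_t>(d_arr, partials, 0, x, 0, y, alignedY);    // one partial sum per row (or row slice)
//   real_t total = thrust::reduce(partials.begin(), partials.end()); // same pattern h_realSum/h_complexSum use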
__host__ complex_t h_complexSum(const complex_t* a, unsigned int x1, unsigned int x2, unsigned int y1, unsigned int y2, unsigned int alignedY)
{
thrust::device_vector<complex_t> output;
h_reduceToSum<complex_t>(a, output, x1, x2, y1, y2, alignedY);
return thrust::reduce(output.begin(), output.end(), make_complex_t(0,0), complexSum());
}
__host__ real_t h_realSum(real_t* a, unsigned int x, unsigned int y, unsigned int alignedY)
{
// thrust::device_ptr<real_t> devPtr_a = thrust::device_pointer_cast(a);
// return thrust::reduce(devPtr_a, devPtr_a+(x*alignedY));
double sum=h_realSum(a, 0, x, 0, y, alignedY);
// real_t sum = h_realSumCUB(a, x, y, alignedY);
return sum;
}
__host__ real_t h_mean2(real_t* a, unsigned int x, unsigned int y, unsigned int alignedY)
{
double sum=h_realSum(a, 0, x, 0, y, alignedY);
// double sum=h_realSumCUB(a, x, y, alignedY);
return sum/(x*y);
}
__host__ real_t h_realSum(const real_t* a, unsigned int x1, unsigned int x2, unsigned int y1, unsigned int y2, unsigned int alignedY)
{
thrust::device_vector<real_t> output;
h_reduceToSum<real_t>(a, output, x1, x2, y1, y2, alignedY);
return thrust::reduce(output.begin(), output.end());
}
//__host__ real_t h_realSumCUB(real_t* d_in, unsigned int x, unsigned int y, unsigned int alignedY)
//{
//
// real_t* d_out;
// cudaMalloc((void **)&d_out, sizeof(real_t));
//
// // Request and allocate temporary storage
// void *d_temp_storage = NULL;
// size_t temp_storage_bytes = 0;
// int num_items=x*alignedY;
//
// DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
// cudaMalloc((void**)&d_temp_storage, temp_storage_bytes);
// DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
// cudaDeviceSynchronize();
//
// real_t sum=0;
// cudaMemcpy(&sum, d_out, sizeof(real_t), cudaMemcpyDeviceToHost);
//
// cudaFree(d_out);
// cudaFree(d_temp_storage);
//// printf("sum: %15e.\n", sum);
//
// cutilCheckMsg("h_realSumCUB() execution failed!\n");
//
// return sum;
//}
__host__ float2 h_maxFloat2(float2* a, unsigned int x, unsigned int y, unsigned int alignedY)
{
thrust::device_ptr<float2> devPtr_a = thrust::device_pointer_cast(a);
return thrust::reduce(devPtr_a, devPtr_a+(x*alignedY), make_float2(FLT_MIN,FLT_MIN), maxFloat2());
}
__host__ float2 h_minFloat2(float2* a, unsigned int x, unsigned int y, unsigned int alignedY)
{
thrust::device_ptr<float2> devPtr_a = thrust::device_pointer_cast(a);
return thrust::reduce(devPtr_a, devPtr_a+(x*alignedY), make_float2(FLT_MAX,FLT_MAX), minFloat2());
}
__host__ real_t h_maxFloat(real_t* a, unsigned int x, unsigned int y, unsigned int alignedY)
{
// thrust::device_ptr<real_t> devPtr_a = thrust::device_pointer_cast(a);
// return thrust::reduce(devPtr_a, devPtr_a+(x*alignedY), make_float2(FLT_MIN,FLT_MIN), maxFloat2());
thrust::device_ptr<real_t> devPtr_a = thrust::device_pointer_cast(a);
// thrust::device_vector<real_t> devPtr_a(devPtr);
	thrust::device_ptr<real_t> iter = thrust::max_element(devPtr_a, devPtr_a+(x*alignedY));
real_t max_val = *iter;
return max_val;
}
//__host__ float2 h_subtractFloat2(const float2* a, const float* b,
// unsigned int x, unsigned int y, unsigned int alignedY)
//{
//// unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
//// dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
//// dim3 block(alignedY, sliceNum, 1);
//
// d_float2Subtract<<<x, alignedY>>>(a, b, result, y);
// cutilCheckMsg("d_complexSubtract() execution failed!\n");
//}
//
//__global__ void d_float2Subtract(const float2* a, const float* b, complex_t* result, unsigned int y)
//{
// unsigned int index = (((blockIdx.x*blockDim.y)+threadIdx.y) * blockDim.x) + threadIdx.x;
// if(threadIdx.x < y)
// result[index] = sub_complex_t(a[index], b[index]);
//
// unsigned int posIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
//
//}
__host__ void h_subtract(const complex_t* a, const complex_t* b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_complexSubtract<<<grid, block>>>(a, b, result, y);
cutilCheckMsg("d_complexSubtract() execution failed!\n");
}
__host__ void h_subtract(const real_t* a, const real_t* b, real_t* result,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_subtract<<<grid, block>>>(a, b, result, y);
cutilCheckMsg("d_complexSubtract() execution failed!\n");
}
__host__ void h_addFactorDivide(real_t* a, real_t* result, real_t factor,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_addFactorDivide<<<grid, block>>>(a, result, factor, y);
cutilCheckMsg("d_complexSubtract() execution failed!\n");
}
__host__ void h_object_sum_update_Gfun(complex_t* a, real_t* b, complex_t* result, real_t factor,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_object_sum_update_Gfun<<<grid, block>>>(a, b, result, factor, y);
cutilCheckMsg("d_complexSubtract() execution failed!\n");
}
void h_addFactor(complex_t* a, complex_t* result, complex_t factor,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_addFactor<<<grid, block>>>(a, result, factor, y);
cutilCheckMsg("d_addFactor() execution failed!\n");
}
void h_addFactor(real_t* a, real_t* result, real_t factor,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_addFactor<<<grid, block>>>(a, result, factor, y);
cutilCheckMsg("d_addFactor() execution failed!\n");
}
__host__ void h_square(real_t* a, real_t* result,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_square<true><<<grid, block>>>(a, result, x, y, alignedY);
	cutilCheckMsg("d_square() execution failed!\n");
}
__host__ void h_multiplyConju(complex_t* a, complex_t* b, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY,
bool normalize, unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset, unsigned int byOffset)
{
unsigned int maxThreads = GPUQuery::getInstance()->getGPUMaxThreads();
unsigned int blockOffset = max(axOffset,bxOffset);
if(blockOffset<x && max(ayOffset,byOffset)<y)
{
if (alignedY <= maxThreads)
{
unsigned int sliceNum = gh_iDivDown(maxThreads, alignedY);
dim3 grid(gh_iDivUp(x-blockOffset, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_multiplyConju<true><<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
else
{
unsigned int sliceNum = gh_iDivUp(alignedY, maxThreads);
dim3 grid(x-blockOffset, sliceNum, 1);
dim3 block(maxThreads, 1, 1);
d_multiplyConju<false><<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
}
cutilCheckMsg("d_multiplyConju() execution failed!\n");
}
__host__ void h_multiply(complex_t* a, complex_t* b, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY,
bool normalize, unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset, unsigned int byOffset)
{
unsigned int maxThreads = GPUQuery::getInstance()->getGPUMaxThreads();
unsigned int blockOffset = max(axOffset,bxOffset);
if(blockOffset<x && max(ayOffset,byOffset)<y)
{
if (alignedY <= maxThreads)
{
unsigned int sliceNum = gh_iDivDown(maxThreads, alignedY);
dim3 grid(gh_iDivUp(x-blockOffset, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_complexMultiply<true><<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
else
{
unsigned int sliceNum = gh_iDivUp(alignedY, maxThreads);
dim3 grid(x-blockOffset, sliceNum, 1);
dim3 block(maxThreads, 1, 1);
d_complexMultiply<false><<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
}
cutilCheckMsg("d_complexMultiply() execution failed!\n");
}
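// Usage sketch (illustrative; d_a, d_b, d_c are device buffers of x*alignedY complex_t):
//   h_multiply(d_a, d_b, d_c, x, y, alignedY, false, 0, 0, 0, 0);   // d_c = d_a .* d_b
//   h_multiply(d_a, d_b, d_c, x, y, alignedY, true,  0, 0, 0, 0);   // same, scaled by 1/(x*y), e.g. after an unnormalized inverse FFT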
__host__ void h_multiply(real_t* a, complex_t* b, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY,
bool normalize, unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset, unsigned int byOffset)
{
unsigned int maxThreads = GPUQuery::getInstance()->getGPUMaxThreads();
unsigned int blockOffset = max(axOffset,bxOffset);
if(blockOffset<x && max(ayOffset,byOffset)<y)
{
if (alignedY <= maxThreads)
{
unsigned int sliceNum = gh_iDivDown(maxThreads, alignedY);
dim3 grid(gh_iDivUp(x-blockOffset, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_complexMultiply<true><<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
else
{
unsigned int sliceNum = gh_iDivUp(alignedY, maxThreads);
dim3 grid(x-blockOffset, sliceNum, 1);
dim3 block(maxThreads, 1, 1);
d_complexMultiply<false><<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
}
cutilCheckMsg("d_complexMultiply() execution failed!\n");
}
__host__ void h_multiply(real_t* a, real_t* b, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY,
bool normalize, unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset, unsigned int byOffset)
{
unsigned int maxThreads = GPUQuery::getInstance()->getGPUMaxThreads();
unsigned int blockOffset = max(axOffset,bxOffset);
if(blockOffset<x && max(ayOffset,byOffset)<y)
{
if (alignedY <= maxThreads)
{
unsigned int sliceNum = gh_iDivDown(maxThreads, alignedY);
dim3 grid(gh_iDivUp(x-blockOffset, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_realMultiply<true><<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
else
{
unsigned int sliceNum = gh_iDivUp(alignedY, maxThreads);
dim3 grid(x-blockOffset, sliceNum, 1);
dim3 block(maxThreads, 1, 1);
d_realMultiply<false><<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1,
axOffset, ayOffset, bxOffset, byOffset);
}
}
cutilCheckMsg("d_realMultiply() execution failed!\n");
}
__host__ void h_checkCache( thrust::device_vector<real_t>& m_factors,
thrust::host_vector<bool>& m_cachedFlags,
thrust::host_vector<real_t>& m_cachedFactors, thrust::device_vector<bool>& m_flags, real_t objMax, real_t probeMax,
bool phaseConstraint,bool updateProbe, bool updateProbeModes, bool RMS)
{
bool passedFlags[3] = {phaseConstraint, updateProbe, updateProbeModes};
for(size_t i=0; i<m_cachedFlags.size();++i)
if(m_cachedFlags[i]!=passedFlags[i])
{
m_cachedFlags[i]=passedFlags[i];
m_flags[i] = m_cachedFlags[i];
}
real_t passedFactors[2] = {1.0/objMax, 1.0/probeMax};
for(size_t i=0; i<m_cachedFactors.size();++i)
{
if(fabs(m_cachedFactors[i]-passedFactors[i])>EPS)
{
m_cachedFactors[i]=passedFactors[i];
m_factors[i] = m_cachedFactors[i];
}
}
}
__host__ void h_multiply(const complex_t* a, const complex_t& b, complex_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, bool normalize)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_complexMultiply<true> <<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
else d_complexMultiply<false><<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
cutilCheckMsg("h_multiply() execution failed!\n");
}
__host__ void h_multiply(const real_t* a, const real_t& b, real_t* result,
unsigned int x, unsigned int y, unsigned int alignedY, bool normalize)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_multiply<true> <<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
else d_multiply<false><<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
cutilCheckMsg("h_multiply() execution failed!\n");
}
__host__ void h_multiplyPage(complex_t* a, complex_t* b, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY,
unsigned int pagex, unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset, unsigned int byOffset)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_multiplyPage<true> <<<grid, block>>>(a, b, result, x, y, alignedY, pagex);
else d_multiplyPage<false><<<grid, block>>>(a, b, result, x, y, alignedY, pagex);
cutilCheckMsg("h_multiplyPage() execution failed!\n");
}
__host__ void h_multiplyAbsConjuRealWhole(complex_t* a, complex_t* b, complex_t* c, real_t* result1, real_t* result2, unsigned int x, unsigned int y, unsigned int alignedY,
unsigned int pagex)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_multiplyAbsConjuRealWhole<true> <<<grid, block>>>(a, b, c, result1, result2, x, y, alignedY, pagex);
else d_multiplyAbsConjuRealWhole<false><<<grid, block>>>(a, b, c, result1, result2, x, y, alignedY, pagex);
cutilCheckMsg("h_multiplyRow() execution failed!\n");
}
__host__ void h_multiplyAbsConjuReal(complex_t* a, complex_t* b, complex_t* c, real_t* result1, real_t* result2, unsigned int x, unsigned int y, unsigned int alignedY,
unsigned int pagex)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_multiplyAbsConjuReal<true> <<<grid, block>>>(a, b, c, result1, result2, x, y, alignedY, pagex);
else d_multiplyAbsConjuReal<false><<<grid, block>>>(a, b, c, result1, result2, x, y, alignedY, pagex);
cutilCheckMsg("h_multiplyRow() execution failed!\n");
}
__host__ void h_multiplyRow(complex_t* a, real_t* b, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY,
bool normalize, unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset, unsigned int byOffset)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_multiplyRow<true> <<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
else d_multiplyRow<false><<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
cutilCheckMsg("h_multiplyRow() execution failed!\n");
}
__host__ void h_multiplyColumn(complex_t* a, real_t* b, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY,
bool normalize, unsigned int axOffset, unsigned int ayOffset, unsigned int bxOffset, unsigned int byOffset)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_multiplyColum<true> <<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
else d_multiplyColum<false><<<grid, block>>>(a, b, result, x, y, alignedY, normalize?1.0/(real_t)(x*y):1);
cutilCheckMsg("h_multiplyColumn() execution failed!\n");
}
__host__ void h_get_optimal_step_lsq(complex_t* chi,complex_t* object_update_proj, complex_t* dPO, complex_t* probe, real_t lambda,
real_t* AA1, complex_t* AA2, real_t* AA4, real_t* Atb1, real_t* Atb2, unsigned int x, unsigned int y, unsigned int alignedY)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_get_optimal_step_lsq<true> <<<grid, block>>>(chi, object_update_proj, dPO, probe, lambda,
AA1, AA2, AA4, Atb1, Atb2, x, y, alignedY);
else d_get_optimal_step_lsq<false><<<grid, block>>>(chi, object_update_proj, dPO, probe, lambda,
AA1, AA2, AA4, Atb1, Atb2, x, y, alignedY);
cutilCheckMsg("h_get_optimal_step_lsq() execution failed!\n");
}
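// h_get_optimal_step_lsq only fills the per-pixel normal-equation terms; the 2x2 solve for the
// object/probe step lengths happens elsewhere (not in this file). As a hedged sketch based on
// the MATLAB reference above, after reducing the terms over the view the steps would follow from
//   [ AA1        AA2 ] [ dO_step ]   [ Atb1 ]
//   [ conj(AA2)  AA4 ] [ dP_step ] = [ Atb2 ]
// (assumption; the actual reduction and solve may differ).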
__host__ void h_mul_rca_mulc_rcr(complex_t* obj_proj_i, complex_t* modes_i, complex_t* chi_i, real_t* weight_proj,
unsigned int x, unsigned int y, unsigned int alignedY)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_mul_rca_mulc_rcr<true> <<<grid, block>>>(obj_proj_i, modes_i, chi_i, weight_proj, x, y, alignedY);
else d_mul_rca_mulc_rcr<false><<<grid, block>>>(obj_proj_i, modes_i, chi_i, weight_proj, x, y, alignedY);
cutilCheckMsg("h_mul_rca_mulc_rcr() execution failed!\n");
}
__host__ void h_multiplyReal(real_t* a, real_t* result,
unsigned int x, unsigned int y, unsigned int alignedY)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_realMultiply<true> <<<grid, block>>>(a, result, x, y, alignedY);
else d_realMultiply<false> <<<grid, block>>>(a, result, x, y, alignedY);
cutilCheckMsg("h_multiplyReal() execution failed!\n");
}
__host__ void h_normalize(complex_t* a, unsigned int x, unsigned int y, unsigned int alignedY, real_t factor)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_complexMultiply<true> <<<grid, block>>>(a, factor, a, x, y, alignedY);
else d_complexMultiply<false><<<grid, block>>>(a, factor, a, x, y, alignedY);
cutilCheckMsg("h_normalize() execution failed\n");
}
__host__ void h_normalize(const complex_t* a, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY, real_t factor)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_complexMultiply<true> <<<grid, block>>>(a, factor, result, x, y, alignedY);
else d_complexMultiply<false><<<grid, block>>>(a, factor, result, x, y, alignedY);
cutilCheckMsg("h_normalize() execution failed\n");
}
__host__ void h_normalize(real_t* a, unsigned int x, unsigned int y, unsigned int alignedY)
{
thrust::device_ptr<real_t> devPtr_a = thrust::device_pointer_cast(a);
thrust::constant_iterator<real_t> maxValue(h_realMax(a,x,y,alignedY));
thrust::transform(devPtr_a, devPtr_a+(x*alignedY), maxValue, devPtr_a, thrust::divides<real_t>());
cutilCheckMsg("h_normalize() execution failed\n");
}
__host__ void h_normalize(real_t* a, real_t factor, unsigned int x, unsigned int y, unsigned int alignedY)
{
thrust::device_ptr<real_t> devPtr_a = thrust::device_pointer_cast(a);
thrust::constant_iterator<real_t> factorValue(factor);
thrust::transform(devPtr_a, devPtr_a+(x*alignedY), factorValue, devPtr_a, thrust::divides<real_t>());
cutilCheckMsg("h_normalize() execution failed\n");
}
__host__ real_t h_realMax(real_t* a, unsigned int x, unsigned int y, unsigned int alignedY)
{
thrust::device_ptr<real_t> devPtr_a = thrust::device_pointer_cast(a);
	// DBL_MIN is the smallest positive double, not -infinity; this assumes the data are non-negative
	return thrust::reduce(devPtr_a, devPtr_a+(x*alignedY), DBL_MIN, thrust::maximum<real_t>() );
}
__host__ void h_realToRGBA(const real_t* d_arr, float4* d_output, unsigned int x, unsigned int y, unsigned int alignedY,
real_t factor, float tf, float ts)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_realToRGBA<true> <<<grid, block>>>(d_arr, factor, d_output, x, y, alignedY, tf, ts);
else d_realToRGBA<false><<<grid, block>>>(d_arr, factor, d_output, x, y, alignedY, tf, ts);
cutilCheckMsg("h_realToRGBA() execution failed\n");
}
__host__ void h_realToGray(const real_t* d_arr, float* d_output, unsigned int x, unsigned int y, unsigned int alignedY,
real_t factor, bool outAligned)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_realToGray<true> <<<grid, block>>>(d_arr, factor, d_output, x, y, alignedY, outAligned);
else d_realToGray<false><<<grid, block>>>(d_arr, factor, d_output, x, y, alignedY, outAligned);
cutilCheckMsg("h_realToGray() execution failed\n");
}
__host__ real_t h_norm2Mat(real_t* d_arr, real_t* d_result, unsigned int x, unsigned int y, unsigned int alignedY)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_square<true><<<grid, block>>>(d_arr, d_result, x, y, alignedY);
else d_square<false><<<grid, block>>>(d_arr, d_result, x, y, alignedY);
cutilCheckMsg("h_realComplexReal() execution failed\n");
real_t result=h_realSum(d_result, x, y, alignedY);
// real_t result=h_realSum(d_result, 0, x, 0, y, alignedY);
real_t xresult=sqrt_real_t(result/(x*y));
return xresult;
}
__host__ real_t h_norm2Mat(complex_t* d_arr, real_t* d_result, unsigned int x, unsigned int y, unsigned int alignedY)
{
h_realComplexAbs(d_arr, d_result, x, y, alignedY, true);
real_t result=h_realSum(d_result, x, y, alignedY);
// real_t result=h_realSum(d_result, 0, x, 0, y, alignedY);
real_t xresult=sqrt_real_t(result/(x*y));
return xresult;
}
__host__ void h_squareRoot(real_t* d_arr, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_realsquareRoot<true><<<grid, block>>>(d_arr, result, x, y, alignedY);
else d_realsquareRoot<false><<<grid, block>>>(d_arr, result, x, y, alignedY);
cutilCheckMsg("h_realComplexReal() execution failed\n");
}
__host__ void h_realComplexAbs(const complex_t* a, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY, bool squared)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_complexToDouble<'a', true> <<<grid, block>>>(a, result, x, y, alignedY, squared);
else d_complexToDouble<'a', false><<<grid, block>>>(a, result, x, y, alignedY, squared);
cutilCheckMsg("h_realComplexReal() execution failed\n");
}
__host__ void h_realComplexPhase(const complex_t* a, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY, bool squared)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_complexToDouble<'p', true> <<<grid, block>>>(a, result, x, y, alignedY, squared);
else d_complexToDouble<'p', false><<<grid, block>>>(a, result, x, y, alignedY, squared);
cutilCheckMsg("h_realComplexPhase() execution failed\n");
}
__host__ void h_realComplexReal(const complex_t* a, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY, bool squared)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_complexToDouble<'r', true> <<<grid, block>>>(a, result, x, y, alignedY, squared);
else d_complexToDouble<'r', false><<<grid, block>>>(a, result, x, y, alignedY, squared);
cutilCheckMsg("h_realComplexReal() execution failed\n");
}
__host__ void h_realComplexImag(const complex_t* a, real_t* result, unsigned int x, unsigned int y, unsigned int alignedY, bool squared)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_complexToDouble<'i', true> <<<grid, block>>>(a, result, x, y, alignedY, squared);
else d_complexToDouble<'i', false><<<grid, block>>>(a, result, x, y, alignedY, squared);
cutilCheckMsg("h_realComplexImag() execution failed\n");
}
__host__ void h_realComplexExp(const real_t* src, complex_t* result, unsigned int x, unsigned int y, unsigned int alignedY, real_t factor)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads) d_realComplexExp<true> <<<grid, block>>>(src, result, x, y, alignedY, factor);
else d_realComplexExp<false><<<grid, block>>>(src, result, x, y, alignedY, factor);
cutilCheckMsg("realComplexExp() execution failed\n");
}
__host__ void h_set_projections(real_t* p_object, real_t* proj, unsigned int* p_positions_x, unsigned int* p_positions_y,
unsigned int objectX, unsigned int objectY, unsigned int alignedObjectY, unsigned int probeX, unsigned int probeY, unsigned int Npos)
{
int const threadsPerBlockEachDim = 32;
int const blocksPerGrid_M = (probeX + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_N = (probeY + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_O = Npos;
dim3 const dimBlock(blocksPerGrid_M, blocksPerGrid_N, blocksPerGrid_O);
dim3 const dimThread(threadsPerBlockEachDim, threadsPerBlockEachDim, 1);
bool isFlat=true;
d_addToArray_r<<<dimBlock, dimThread>>>(proj, p_object, p_positions_x, p_positions_y ,probeX , probeY, objectX, objectY, Npos, alignedObjectY, isFlat);
}
__host__ void h_set_projections(complex_t* p_object, complex_t* proj, unsigned int* p_positions_x, unsigned int* p_positions_y,
unsigned int objectX, unsigned int objectY, unsigned int alignedObjectY, unsigned int probeX, unsigned int probeY, unsigned int alignedProbeY,
unsigned int Npos, bool isFlat)
{
int const threadsPerBlockEachDim = 32;
int const blocksPerGrid_M = (probeX + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_N = (probeY + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_O = Npos;
dim3 const dimBlock(blocksPerGrid_M, blocksPerGrid_N, blocksPerGrid_O);
dim3 const dimThread(threadsPerBlockEachDim, threadsPerBlockEachDim, 1);
d_addToArray_c<<<dimBlock, dimThread>>>(proj, p_object, p_positions_x, p_positions_y ,probeX , probeY, objectX, objectY, Npos, alignedObjectY, alignedProbeY, isFlat);
}
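// Usage sketch (illustrative): scatter Npos probe-sized patches (proj) back into the large
// object array (p_object), accumulating overlapping pixels with atomicAdd. p_positions_x/y are
// device arrays holding the top-left corner of each patch.
//   h_set_projections(d_object, d_proj, d_posX, d_posY,
//                     objectX, objectY, alignedObjectY, probeX, probeY, alignedProbeY,
//                     Npos, false /*proj holds Npos stacked patches*/);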
__host__ void h_get_projections(const complex_t* p_object, complex_t* proj, unsigned int* p_positions_x, unsigned int* p_positions_y,
unsigned int objectX, unsigned int objectY, unsigned int alignedObjectY, unsigned int probeX, unsigned int probeY, unsigned int probeZ,
unsigned int alignedProbeY, unsigned int Npos)
{
int const threadsPerBlockEachDim = 32;
int const blocksPerGrid_M = (probeX + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_N = (probeY + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_O = Npos;
dim3 const dimBlock(blocksPerGrid_M, blocksPerGrid_N, blocksPerGrid_O);
dim3 const dimThread(threadsPerBlockEachDim, threadsPerBlockEachDim, 1);
d_readFromArray_c<<<dimBlock, dimThread>>>(proj, p_object, p_positions_x, p_positions_y , probeX, probeY, probeZ,
objectX, objectY, alignedObjectY, alignedProbeY, Npos);
}
__host__ void h_get_projections(const real_t* p_object, real_t* proj, unsigned int* p_positions_x, unsigned int* p_positions_y,
unsigned int objectX, unsigned int objectY, unsigned int alignedObjectY, unsigned int probeX, unsigned int probeY, unsigned int probeZ,
unsigned int alignedProbeY, unsigned int Npos)
{
int const threadsPerBlockEachDim = 32;
int const blocksPerGrid_M = (probeX + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_N = (probeY + threadsPerBlockEachDim - 1) / threadsPerBlockEachDim;
int const blocksPerGrid_O = Npos;
dim3 const dimBlock(blocksPerGrid_M, blocksPerGrid_N, blocksPerGrid_O);
dim3 const dimThread(threadsPerBlockEachDim, threadsPerBlockEachDim, 1);
d_readFromArray_r<<<dimBlock, dimThread>>>(proj, p_object, p_positions_x, p_positions_y , probeX, probeY, probeZ,
objectX, objectY, alignedObjectY, alignedProbeY, Npos);
// if(Npos<MAX_IND_READ)
// {
// cudaMemcpyToSymbol(gC_pos_X, p_positions_x, Npos*sizeof(unsigned int), 0, cudaMemcpyHostToDevice);
// cudaMemcpyToSymbol(gC_pos_Y, p_positions_y, Npos*sizeof(unsigned int), 0, cudaMemcpyHostToDevice);
// d_readFromArray_r_fast<<<dimBlock, dimThread>>>(proj, p_object, probeX, probeY, probeZ,
// objectX, objectY, alignedObjectY, alignedProbeY, Npos);
// }
// else
// {
// d_readFromArray_r<<<dimBlock, dimThread>>>(proj, p_object, p_positions_x, p_positions_y , probeX, probeY, probeZ,
// objectX, objectY, alignedObjectY, alignedProbeY, Npos);
// }
}
__host__ void h_adjustFFT(const complex_t* d_psi, complex_t* d_output, const real_t* d_det_mod, const real_t* d_mask,
const real_t saturationValue, unsigned int modeNum, unsigned int x, unsigned int y, unsigned int alignedY, bool normalize)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
if(modeNum>1)
d_adjustModalFFT<<<grid, block>>>(d_psi, d_output, d_det_mod, d_mask, saturationValue, modeNum, x, y, normalize?1.0/(real_t)(x*y):1);
else
d_adjustFFT<<<grid, block>>>(d_psi, d_output, d_det_mod, d_mask, saturationValue, y, normalize?1.0/(real_t)(x*y):1);
cutilCheckMsg("h_adjustFFT() execution failed!\n");
}
__host__ real_t h_calculateER(const complex_t* d_psi, const real_t* d_det_mod, unsigned int modeNum,
unsigned int x, unsigned int y, unsigned int alignedY)
{
thrust::device_vector<real_t> output;
unsigned int maxThreads = GPUQuery::getInstance()->getGPUMaxThreads();
unsigned int reductionThreads = getReductionThreadNum(y);
dim3 grid;
dim3 block;
bool enoughThreads = true;
if(reductionThreads<=maxThreads)
{
grid = dim3(x, 1, 1);
block = dim3(reductionThreads, 1, 1);
output.resize(x);
}
else
{
enoughThreads = false;
unsigned int sliceNum = gh_iDivUp(reductionThreads, maxThreads);
grid = dim3(x, sliceNum, 1);
block = dim3(maxThreads, 1, 1);
output.resize(x*sliceNum);
}
unsigned int threadNum = block.x * block.y;
size_t shared_mem_size = (threadNum <= 32) ? 2* threadNum * sizeof(real_t) : threadNum * sizeof(real_t);
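// For a single warp (threadNum <= 32) twice the shared memory is reserved, presumably so the
// unrolled warp-level reduction in d_calculateER can read past threadNum elements without
// indexing shared memory out of bounds (same convention as the CUDA reduction SDK sample).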
switch (threadNum)
{
case 8: d_calculateER< 8><<<grid, block, shared_mem_size>>>(d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
case 16: d_calculateER< 16><<<grid, block, shared_mem_size>>>(d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
case 32: d_calculateER< 32><<<grid, block, shared_mem_size>>>(d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
case 64: d_calculateER< 64><<<grid, block, shared_mem_size>>>(d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
case 128: d_calculateER< 128><<<grid, block, shared_mem_size>>>(d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
case 256: d_calculateER< 256><<<grid, block, shared_mem_size>>>(d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
case 512: d_calculateER< 512><<<grid, block, shared_mem_size>>>(d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
case 1024: d_calculateER<1024><<<grid, block, shared_mem_size>>>(d_psi, d_det_mod, thrust::raw_pointer_cast(output.data()), x, y, alignedY, modeNum, enoughThreads);
break;
}
cutilCheckMsg("h_calculateER() execution failed!\n");
return thrust::reduce(output.begin(), output.end())/modeNum;
}
__host__ real_t h_calculateER(const complex_t* d_GT, const complex_t* d_obj,
unsigned int sx, unsigned int sy, unsigned int qx, unsigned int qy,
unsigned int x1, unsigned int y1, unsigned int alignedY1,
unsigned int x2, unsigned int y2, unsigned int alignedY2)
{
thrust::device_vector<real_t> output(sx*sy);
dim3 grid, block;
bool enoughThreads = calcGrids(sx,sy,grid,block);
if(enoughThreads) d_realSpaceER<true> <<<grid, block>>>( d_GT, d_obj, thrust::raw_pointer_cast(output.data()),
qx, qy, sx, sy, x1, y1, alignedY1, x2, y2, alignedY2);
else d_realSpaceER<false><<<grid, block>>>( d_GT, d_obj, thrust::raw_pointer_cast(output.data()),
qx, qy, sx, sy, x1, y1, alignedY1, x2, y2, alignedY2);
cutilCheckMsg("d_realSpaceER() execution failed\n");
return sqrt(thrust::reduce(output.begin(), output.end()))/output.size();
}
__host__ void h_shiftFFT(real_t* d_data, real_t* d_temp, unsigned int x, unsigned int y, unsigned int alignedY, cudaStream_t* stream)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_shiftX<real_t, true><<<grid, block,0,(stream?*stream:0)>>>(d_data, d_temp, (float)x/2.0, 0, x, y, alignedY);
cutilCheckMsg("h_shiftFFT() execution failed!\n");
cudaDeviceSynchronize();
d_shiftY<real_t, true><<<grid, block,0,(stream?*stream:0)>>>(d_temp, d_data, (float)y/2.0, 0, x, y, alignedY);
cutilCheckMsg("h_shiftFFT() execution failed!\n");
cudaDeviceSynchronize();
// d_check<<<x, y>>>(d_data);
}
__host__ void h_shiftFFTy(real_t* d_data, real_t* d_temp, unsigned int x, unsigned int y, unsigned int alignedY, cudaStream_t* stream)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_shiftY<real_t, true><<<grid, block,0,(stream?*stream:0)>>>(d_data, d_temp, (float)y/2.0, 0, x, y, alignedY);
cutilCheckMsg("h_shiftFFT() execution failed!\n");
cudaDeviceSynchronize();
}
__host__ void h_shiftFFTtmp(complex_t* d_probe, complex_t* d_tempprobe, complex_t* d_copyprobe, unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_shiftX<complex_t, true><<<grid, block,0,0>>>(d_copyprobe, d_tempprobe, (float)x/2.0, 0, x, y, alignedY);
cutilCheckMsg("h_shiftFFTtmp() execution failed!\n");
cudaDeviceSynchronize();
d_shiftY<complex_t, true><<<grid, block,0,0>>>(d_tempprobe, d_probe, (float)y/2.0, 0, x, y, alignedY);
cutilCheckMsg("h_shiftFFTtmp() execution failed!\n");
cudaDeviceSynchronize();
// d_checkcomplex<<<x, y>>>(d_probe);
}
__host__ void h_shiftFFTtwo(complex_t* d_probe, complex_t* d_tempprobe, unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_shiftX<complex_t, true><<<grid, block,0,0>>>(d_probe, d_tempprobe, (float)x/2.0, 0, x, y, alignedY);
cutilCheckMsg("h_shiftFFTtmp() execution failed!\n");
cudaDeviceSynchronize();
d_shiftY<complex_t, true><<<grid, block,0,0>>>(d_tempprobe, d_probe, (float)y/2.0, 0, x, y, alignedY);
cutilCheckMsg("h_shiftFFTtmp() execution failed!\n");
cudaDeviceSynchronize();
// d_checkcomplex<<<x, y>>>(d_probe);
}
__host__ void imshift_fft(complex_t* d_probe, unsigned int x, unsigned int y, unsigned int alignedY, float radNo1, float radNo2)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_imshift_fft<<<grid, block>>>(d_probe, x/2, y/2, radNo1, radNo2, x, y, alignedY);
// d_checkcomplex<<<x, y>>>(d_probe);
}
__host__ void h_realRotate90(const real_t* d_data, real_t* d_out, unsigned int x, unsigned int y, unsigned int alignedY, unsigned int times, cudaStream_t* stream)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x, sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
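// Note: only a single 90-degree rotation (times % 4 == 1) is actually performed below;
// the 180- and 270-degree cases currently fall through as no-ops.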
switch(times % 4)
{
case 0: break;
case 1:
d_rot90<real_t><<<grid,block,0,(stream?*stream:0)>>>(d_data, d_out, x, y, alignedY);
break;
case 2:
//d_mirrorY<real_t><<<x, alignedY, alignedY*sizeof(real_t)>>>(d_data, d_data, y);
break;
case 3:
break;
}
cutilCheckMsg("h_realRotate90() execution failed!\n");
}
__host__ complex_t h_innerProduct(const complex_t* d_u, const complex_t* d_v, complex_t* d_output,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int reductionThreads = getReductionThreadNum(alignedY);
dim3 grid(x, 1, 1);
dim3 block(reductionThreads, 1, 1);
size_t shared_mem_size = (block.x <= 32) ? 2* block.x * sizeof(complex_t) : block.x * sizeof(complex_t);
switch (block.x)
{
case 8: d_innerProduct< 8><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
case 16: d_innerProduct< 16><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
case 32: d_innerProduct< 32><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
case 64: d_innerProduct< 64><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
case 128: d_innerProduct< 128><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
case 256: d_innerProduct< 256><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
case 512: d_innerProduct< 512><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
case 1024: d_innerProduct<1024><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0/(real_t)(x*y), y);
break;
}
cutilCheckMsg("d_innerProduct() execution failed!\n");
thrust::device_ptr<complex_t> devPtr = thrust::device_pointer_cast(d_output);
return thrust::reduce(devPtr, devPtr+x, make_complex_t(0,0), complexSum());
}
__host__ complex_t h_innerProductOne(const complex_t* d_u, const complex_t* d_v, complex_t* d_output,
unsigned int x, unsigned int y, unsigned int alignedY)
{
unsigned int reductionThreads = getReductionThreadNum(alignedY);
dim3 grid(x, 1, 1);
dim3 block(reductionThreads, 1, 1);
size_t shared_mem_size = (block.x <= 32) ? 2* block.x * sizeof(complex_t) : block.x * sizeof(complex_t);
switch (block.x)
{
case 8: d_innerProductOne< 8><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0, y);
break;
case 16: d_innerProductOne< 16><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0, y);
break;
case 32: d_innerProductOne< 32><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0, y);
break;
case 64: d_innerProductOne< 64><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0, y);
break;
case 128: d_innerProductOne< 128><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0, y);
break;
case 256: d_innerProductOne< 256><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0, y);
break;
case 512: d_innerProductOne< 512><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0, y);
break;
case 1024: d_innerProductOne<1024><<<grid, block, shared_mem_size>>>(d_u, d_v, d_output, 1.0, y);
break;
}
cutilCheckMsg("d_innerProduct() execution failed!\n");
thrust::device_ptr<complex_t> devPtr = thrust::device_pointer_cast(d_output);
complex_t result = thrust::reduce(devPtr, devPtr+x, make_complex_t(0,0), complexSum());
return result;
}
__host__ void h_innerProductModes(complex_t* d_u, complex_t* d_v, complex_t* d_factor, unsigned int index,
unsigned int modesNum, unsigned int x, unsigned int y, unsigned int alignedY)
{
// unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
// dim3 grid(modesNum, gh_iDivUp(x,sliceNum), 1);
// dim3 block(alignedY, sliceNum, 1);
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x,sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
unsigned int offset=x*alignedY;
d_innerProductModes<<<grid, block>>>(d_u, d_v, d_factor, index, modesNum, x, y, offset);
cutilCheckMsg("d_innerProductModes() execution failed!\n");
}
__host__ void h_extracSubArrReal(real_t* d_objectArray, real_t* d_output, unsigned int offsetX, unsigned int offsetY,
unsigned int sampleX, unsigned int sampleY, unsigned int alignedSampleY,
unsigned int objectArrayX, unsigned int objectArrayY, unsigned int alignedObjectArrayY)
{
dim3 grid, block;
bool enoughThreads = calcGrids(sampleX,alignedSampleY,grid,block);
if(enoughThreads) d_extractArrReal<true> <<<grid, block>>>(d_objectArray, d_output, sampleX, sampleY, offsetX, offsetY, alignedSampleY, alignedObjectArrayY);
else d_extractArrReal<false><<<grid, block>>>(d_objectArray, d_output, sampleX, sampleY, offsetX, offsetY, alignedSampleY, alignedObjectArrayY);
cutilCheckMsg("h_extractObjectArray() execution failed!\n");
// d_check<<<sampleX, alignedSampleY>>>(d_output);
}
__host__ void h_extracSubArrComplex(complex_t* d_objectArray, complex_t* d_output, unsigned int offsetX, unsigned int offsetY,
unsigned int sampleX, unsigned int sampleY, unsigned int alignedSampleY,
unsigned int objectArrayX, unsigned int objectArrayY, unsigned int alignedObjectArrayY)
{
dim3 grid, block;
bool enoughThreads = calcGrids(sampleX,alignedSampleY,grid,block);
if(enoughThreads) d_extractArrComplex<true> <<<grid, block>>>(d_objectArray, d_output, sampleX, sampleY, offsetX, offsetY, alignedSampleY, alignedObjectArrayY);
else d_extractArrComplex<false><<<grid, block>>>(d_objectArray, d_output, sampleX, sampleY, offsetX, offsetY, alignedSampleY, alignedObjectArrayY);
cutilCheckMsg("h_extractObjectArray() execution failed!\n");
}
__host__ void h_realModalSum(const real_t* d_modes, real_t* d_output, unsigned int modesNum,
unsigned int x, unsigned int y, unsigned int alignedY, bool squareRoot)
{
// The modes are stacked along the z direction, so d_modes is treated as a 3D array
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x,sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_modalSum<real_t><<<grid, block>>>(d_modes, d_output, modesNum, x, y, squareRoot);
cutilCheckMsg("d_modalSum() execution failed!\n");
}
__host__ void h_realModalSum(const complex_t* d_modes, complex_t* d_output, unsigned int modesNum,
unsigned int x, unsigned int y, unsigned int alignedY, bool squareRoot)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x,sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_modalSumComplex<<<grid, block>>>(d_modes, d_output, modesNum, x, y, squareRoot);
cutilCheckMsg("d_modalSum() execution failed!\n");
}
__host__ void h_complexSum(complex_t* d_leftArr, complex_t* d_rightArr, complex_t* d_result, real_t leftFactor, real_t rightFactor, unsigned int x, unsigned int y,
unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x,sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_complexSum<<<grid, block>>>(d_leftArr, d_rightArr, d_result, leftFactor, rightFactor, x, y, alignedY);
cutilCheckMsg("d_modalSum() execution failed!\n");
}
__host__ void h_realSum(real_t* d_leftArr, real_t* d_rightArr, real_t* d_result, real_t leftFactor, real_t rightFactor, unsigned int x, unsigned int y,
unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x,sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_realSum<<<grid, block>>>(d_leftArr, d_rightArr, d_result, leftFactor, rightFactor, x, y, alignedY);
cutilCheckMsg("d_realSum() execution failed!\n");
}
__host__ void h_realSingleSum(real_t* d_leftArr, real_t* d_rightArr, real_t* d_result, unsigned int x, unsigned int y,
unsigned int alignedY)
{
unsigned int sliceNum = gh_iDivDown(GPUQuery::getInstance()->getGPUMaxThreads(), alignedY);
dim3 grid(gh_iDivUp(x,sliceNum), 1, 1);
dim3 block(alignedY, sliceNum, 1);
d_realSingleSum<<<grid, block>>>(d_leftArr, d_rightArr, d_result, x, y, alignedY);
cutilCheckMsg("d_modalSum() execution failed!\n");
}
__host__ int2 h_realArgMax2D(real_t* d_ncc, unsigned int x, unsigned int y, unsigned int alignedY, unsigned char dir)
{
thrust::device_ptr<real_t> ncc_wrapper = thrust::device_pointer_cast(d_ncc);
int maxIndex = thrust::max_element(ncc_wrapper, ncc_wrapper+(x*alignedY)) - ncc_wrapper;
cutilCheckMsg("h_realArgMax2D():thrust::max_element() execution failed!\n");
int2 peak;
peak.x = maxIndex / alignedY;
peak.y = maxIndex % alignedY;
peak.x = (dir == 'h' && (peak.x >= (x/2)))? peak.x - x: peak.x;
peak.y = (dir == 'v' && (peak.y >= (y/2)))? peak.y - y: peak.y;
//printf("Registration point (%d,%d)...\n", peak.x, peak.y);
return peak;
}
__host__ void h_realComplexModulate(const complex_t* d_array1, complex_t* d_array2, int2& peak,
unsigned int x, unsigned int y, unsigned int alignedY, unsigned char dir)
{
dim3 grid, block;
bool enoughThreads = calcGrids(x,alignedY,grid,block);
if(enoughThreads)
{
if(dir == 'h' && peak.x!=0)
{
d_shiftX<complex_t, true><<<grid, block>>>(d_array2, d_array2, -(float)peak.x, 0, x, y, alignedY);
cutilCheckMsg("h_hMatchArrays()::shiftX() execution failed!\n");
peak.x = 0;
}
else if(dir == 'v' && peak.y!=0)
{
d_shiftY<complex_t, true><<<grid, block>>>(d_array2, d_array2, -(float)peak.y, 0, x, y, alignedY);
cutilCheckMsg("h_vMatchArrays()::shiftY() execution failed!\n");
peak.y=0;
}
}
else
{
if(dir == 'h' && peak.x!=0)
{
d_shiftX<complex_t, false><<<grid, block>>>(d_array2, d_array2, -(float)peak.x, 0, x, y, alignedY);
cutilCheckMsg("h_hMatchArrays()::shiftX() execution failed!\n");
peak.x = 0;
}
else if(dir == 'v' && peak.y!=0)
{
d_shiftY<complex_t, false><<<grid, block>>>(d_array2, d_array2, -(float)peak.y, 0, x, y, alignedY);
cutilCheckMsg("h_vMatchArrays()::shiftY() execution failed!\n");
peak.y=0;
}
}
complex_t m1 = h_complexSum(d_array1, peak.x, x, peak.y, y, alignedY);
complex_t m2 = h_complexSum(d_array2, 0, x-peak.x, 0, y-peak.y, alignedY);
complex_t ratio = div_complex_t(m1,m2);
h_multiply(d_array2, ratio, d_array2, x, y, alignedY, false);
}
#endif /* UTILITIESKERNELS_CU_ */
|
4aa09fa4c47568e38bd64908823758579f685244.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <openacc.h>
#include <iostream>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include <chrono>
#include <cmath>
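// TILE_I, TILE_J, TILE_K and the bare `restrict` qualifier used in the kernel below are assumed
// to be supplied by the build, e.g. -DTILE_I=32 -DTILE_J=4 -DTILE_K=4 -Drestrict=__restrict__.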
namespace
{
template<typename T> T* acc_to_cuda(T* ptr) { return static_cast<T*>(acc_deviceptr(ptr)); }
}
#define CUDA_MACRO __device__
CUDA_MACRO inline float interp2(const float a, const float b)
{
return float(0.5) * (a + b);
}
CUDA_MACRO inline float interp4_ws(const float a, const float b, const float c, const float d)
{
constexpr float c0 = float(7./12.);
constexpr float c1 = float(1./12.);
return c0*(b+c) - c1*(a+d);
}
CUDA_MACRO inline float interp3_ws(const float a, const float b, const float c, const float d)
{
constexpr float c0 = float(3./12.);
constexpr float c1 = float(1./12.);
return c0*(c-b) - c1*(d-a);
}
CUDA_MACRO inline float interp6_ws(
const float a, const float b, const float c, const float d, const float e, const float f)
{
constexpr float c0 = float(37./60.);
constexpr float c1 = float(8./60.);
constexpr float c2 = float(1./60.);
return c0*(c+d) - c1*(b+e) + c2*(a+f);
}
CUDA_MACRO inline float interp5_ws(
const float a, const float b, const float c, const float d, const float e, const float f)
{
constexpr float c0 = float(10./60.);
constexpr float c1 = float(5./60.);
constexpr float c2 = float(1./60.);
return c0*(d-c) - c1*(e-b) + c2*(f-a);
}
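// interp2, interp4_ws and interp6_ws are the symmetric 2nd/4th/6th-order interpolants; interp3_ws
// and interp5_ws are the corresponding odd-order terms that the kernel multiplies by |velocity|
// to add upwind-biased dissipation (the _ws suffix presumably refers to Wicker-Skamarock advection).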
void init_zero(float* const __restrict__ a, const int ncells)
{
for (int n=0; n<ncells; ++n)
a[n] = float(0.);
}
void init_rand(float* const __restrict__ a, const int ncells)
{
for (int n=0; n<ncells; ++n)
a[n] = float(std::rand() % 1000) + float(0.001);
}
__global__
void advec_2i5(
float* const restrict ut,
const float* const restrict u,
const float* const restrict v,
const float* const restrict w,
const float* const restrict dzi,
const float dx, const float dy,
const float* const restrict rhoref,
const float* const restrict rhorefh,
const int istart, const int iend,
const int jstart, const int jend,
const int kstart, const int kend,
const int jj, const int kk)
{
const int ii1 = 1;
const int ii2 = 2;
const int ii3 = 3;
const int jj1 = jj;
const int jj2 = 2*jj;
const int jj3 = 3*jj;
const int kk1 = kk;
const int kk2 = 2*kk;
const int kk3 = 3*kk;
const float dxi = float(1.)/dx;
const float dyi = float(1.)/dy;
const int i = blockIdx.x*blockDim.x + threadIdx.x + istart;
const int j = blockIdx.y*blockDim.y + threadIdx.y + jstart;
const int k = blockIdx.z*blockDim.z + threadIdx.z + kstart;
if (i < iend && j < jend && k < kend)
{
const int ijk = i + j*jj1 + k*kk1;
ut[ijk] +=
// u*du/dx
- ( interp2(u[ijk ], u[ijk+ii1]) * interp6_ws(u[ijk-ii2], u[ijk-ii1], u[ijk ], u[ijk+ii1], u[ijk+ii2], u[ijk+ii3])
- interp2(u[ijk-ii1 ], u[ijk ]) * interp6_ws(u[ijk-ii3], u[ijk-ii2], u[ijk-ii1], u[ijk ], u[ijk+ii1], u[ijk+ii2]) ) * dxi
+ ( std::abs(interp2(u[ijk ], u[ijk+ii1])) * interp5_ws(u[ijk-ii2], u[ijk-ii1], u[ijk ], u[ijk+ii1], u[ijk+ii2], u[ijk+ii3])
- std::abs(interp2(u[ijk-ii1 ], u[ijk ])) * interp5_ws(u[ijk-ii3], u[ijk-ii2], u[ijk-ii1], u[ijk ], u[ijk+ii1], u[ijk+ii2]) ) * dxi
// v*du/dy
- ( interp2(v[ijk-ii1+jj1], v[ijk+jj1]) * interp6_ws(u[ijk-jj2], u[ijk-jj1], u[ijk ], u[ijk+jj1], u[ijk+jj2], u[ijk+jj3])
- interp2(v[ijk-ii1 ], v[ijk ]) * interp6_ws(u[ijk-jj3], u[ijk-jj2], u[ijk-jj1], u[ijk ], u[ijk+jj1], u[ijk+jj2]) ) * dyi
+ ( std::abs(interp2(v[ijk-ii1+jj1], v[ijk+jj1])) * interp5_ws(u[ijk-jj2], u[ijk-jj1], u[ijk ], u[ijk+jj1], u[ijk+jj2], u[ijk+jj3])
- std::abs(interp2(v[ijk-ii1 ], v[ijk ])) * interp5_ws(u[ijk-jj3], u[ijk-jj2], u[ijk-jj1], u[ijk ], u[ijk+jj1], u[ijk+jj2]) ) * dyi;
if (k >= kstart+3 && k < kend-3)
{
ut[ijk] +=
// w*du/dz
- ( rhorefh[k+1] * interp2(w[ijk-ii1+kk1], w[ijk+kk1]) * interp6_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2], u[ijk+kk3])
- rhorefh[k ] * interp2(w[ijk-ii1 ], w[ijk ]) * interp6_ws(u[ijk-kk3], u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2]) ) / rhoref[k] * dzi[k]
+ ( rhorefh[k+1] * std::abs(interp2(w[ijk-ii1+kk1], w[ijk+kk1])) * interp5_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2], u[ijk+kk3])
- rhorefh[k ] * std::abs(interp2(w[ijk-ii1 ], w[ijk ])) * interp5_ws(u[ijk-kk3], u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2]) ) / rhoref[k] * dzi[k];
}
else if (k == kstart)
{
ut[ijk] +=
// w*du/dz -> second order interpolation for fluxtop, fluxbot = 0. as w=0
- ( rhorefh[k+1] * interp2(w[ijk-ii1+kk1], w[ijk+kk1]) * interp2(u[ijk ], u[ijk+kk1]) ) / rhoref[k] * dzi[k];
}
else if (k == kstart+1)
{
ut[ijk] +=
// w*du/dz -> second order interpolation for fluxbot, fourth order for fluxtop
- ( rhorefh[k+1] * interp2(w[ijk-ii1+kk1], w[ijk+kk1]) * interp4_ws(u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2])
- rhorefh[k ] * interp2(w[ijk-ii1 ], w[ijk ]) * interp2( u[ijk-kk1], u[ijk ]) ) / rhoref[k] * dzi[k]
+ ( rhorefh[k+1] * std::abs(interp2(w[ijk-ii1+kk1], w[ijk+kk1])) * interp3_ws(u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2]) ) / rhoref[k] * dzi[k];
}
else if (k == kstart+2)
{
ut[ijk] +=
// w*du/dz -> fourth order interpolation for fluxbot
- ( rhorefh[k+1] * interp2(w[ijk-ii1+kk1], w[ijk+kk1]) * interp6_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2], u[ijk+kk3])
- rhorefh[k ] * interp2(w[ijk-ii1 ], w[ijk ]) * interp4_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1]) ) / rhoref[k] * dzi[k]
+ ( rhorefh[k+1] * std::abs(interp2(w[ijk-ii1+kk1], w[ijk+kk1])) * interp5_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2], u[ijk+kk3])
- rhorefh[k ] * std::abs(interp2(w[ijk-ii1 ], w[ijk ])) * interp3_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1]) ) / rhoref[k] * dzi[k];
}
else if (k == kend-3)
{
ut[ijk] +=
// w*du/dz -> fourth order interpolation for fluxtop
- ( rhorefh[k+1] * interp2(w[ijk-ii1+kk1], w[ijk+kk1]) * interp4_ws(u[ijk-kk1 ], u[ijk ], u[ijk+kk1], u[ijk+kk2])
- rhorefh[k ] * interp2(w[ijk-ii1 ], w[ijk ]) * interp6_ws(u[ijk-kk3], u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2]) ) / rhoref[k] * dzi[k]
+ ( rhorefh[k+1] * std::abs(interp2(w[ijk-ii1+kk1], w[ijk+kk1])) * interp3_ws(u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2])
- rhorefh[k ] * std::abs(interp2(w[ijk-ii1 ], w[ijk ])) * interp5_ws(u[ijk-kk3], u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2]) ) / rhoref[k] * dzi[k];
}
else if (k == kend-2)
{
ut[ijk] +=
// w*du/dz -> second order interpolation for fluxtop, fourth order for fluxbot
- ( rhorefh[k+1] * interp2(w[ijk-ii1+kk1], w[ijk+kk1]) * interp2(u[ijk ], u[ijk+kk1])
- rhorefh[k ] * interp2(w[ijk-ii1 ], w[ijk ]) * interp4_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1]) ) / rhoref[k] * dzi[k]
- ( rhorefh[k ] * std::abs(interp2(w[ijk-ii1 ], w[ijk ])) * interp3_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1]) ) / rhoref[k] * dzi[k];
}
else if (k == kend-1)
{
ut[ijk] +=
// w*du/dz -> second order interpolation for fluxbot, fluxtop=0 as w=0
- ( -rhorefh[k] * interp2(w[ijk-ii1 ], w[ijk ]) * interp2(u[ijk-kk1], u[ijk ]) ) / rhoref[k] * dzi[k];
}
}
}
int main(int argc, char* argv[])
{
if (argc != 2)
{
std::cout << "Add the grid size as an argument!" << std::endl;
return 1;
}
const int nloop = 30;
const int itot = std::stoi(argv[1]);
const int jtot = std::stoi(argv[1]);
const int ktot = std::stoi(argv[1]);
const int icells = itot + 2*3;
const int jcells = jtot + 2*3;
const int kcells = ktot + 2*3;
const int ncells = icells*jcells*kcells;
const int istart = 3;
const int jstart = 3;
const int kstart = 3;
const int iend = itot + 3;
const int jend = jtot + 3;
const int kend = ktot + 3;
const int jstride = icells;
const int kstride = icells*jcells;
// const int ijk_check = (istart + itot/2) + (jstart + jtot/2)*jstride + (kstart + ktot/2)*kstride;
float* ut = new float[ncells];
float* u = new float[ncells];
float* v = new float[ncells];
float* w = new float[ncells];
float* dzi = new float[kcells];
float* rhoref = new float[kcells];
float* rhorefh = new float[kcells];
const float dxi = 0.1;
const float dyi = 0.1;
init_zero(ut, ncells);
std::srand(123);
init_rand(u, ncells);
init_rand(v, ncells);
init_rand(w, ncells);
init_rand(dzi, kcells);
init_rand(rhoref, kcells);
init_rand(rhorefh, kcells);
// Send data to the GPU.
#pragma acc enter data copyin(ut[0:ncells], u[0:ncells], v[0:ncells], w[0:ncells], dzi[0:kcells], rhoref[0:kcells], rhorefh[0:kcells])
const int blocki = TILE_I;
const int blockj = TILE_J;
const int blockk = TILE_K;
const int gridi = itot/blocki + (itot%blocki > 0);
const int gridj = jtot/blockj + (jtot%blockj > 0);
const int gridk = ktot/blockk + (ktot%blockk > 0);
dim3 grid(gridi, gridj, gridk);
dim3 block(blocki, blockj, blockk);
// Check results
hipLaunchKernelGGL(( advec_2i5), dim3(grid), dim3(block), 0, 0,
acc_to_cuda(ut), acc_to_cuda(u), acc_to_cuda(v), acc_to_cuda(w),
acc_to_cuda(dzi), dxi, dyi,
acc_to_cuda(rhoref), acc_to_cuda(rhorefh),
istart, iend, jstart, jend, kstart, kend,
jstride, kstride);
hipDeviceSynchronize();
// Update the data.
// #pragma acc update self(ut[0:ncells])
// printf("ut=%.20f\n", ut[ijk_check]);
// Time performance
hipDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (int i=0; i<nloop; ++i)
hipLaunchKernelGGL(( advec_2i5), dim3(grid), dim3(block), 0, 0,
acc_to_cuda(ut), acc_to_cuda(u), acc_to_cuda(v), acc_to_cuda(w),
acc_to_cuda(dzi), dxi, dyi,
acc_to_cuda(rhoref), acc_to_cuda(rhorefh),
istart, iend, jstart, jend, kstart, kend,
jstride, kstride);
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
float duration = std::chrono::duration_cast<std::chrono::duration<float>>(end - start).count();
printf("time/iter = %E s (%i iters)\n",duration/(float)nloop, nloop);
// Remove data from the GPU.
#pragma acc exit data copyout(ut[0:ncells]) delete(u[0:ncells], v[0:ncells], w[0:ncells], dzi[0:kcells], rhoref[0:kcells], rhorefh[0:kcells])
std::ofstream binary_file("ut_cuda.bin", std::ios::out | std::ios::trunc | std::ios::binary);
if (binary_file)
binary_file.write(reinterpret_cast<const char*>(ut), ncells*sizeof(float));
else
{
std::string error = "Cannot write file \"ut_cuda.bin\"";
throw std::runtime_error(error);
}
return 0;
}
| 4aa09fa4c47568e38bd64908823758579f685244.cu | #include <openacc.h>
#include <iostream>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include <chrono>
#include <cmath>
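// TILE_I, TILE_J, TILE_K and the bare `restrict` qualifier used in the kernel below are assumed
// to be supplied by the build, e.g. -DTILE_I=32 -DTILE_J=4 -DTILE_K=4 -Drestrict=__restrict__.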
namespace
{
template<typename T> T* acc_to_cuda(T* ptr) { return static_cast<T*>(acc_deviceptr(ptr)); }
}
#define CUDA_MACRO __device__
CUDA_MACRO inline float interp2(const float a, const float b)
{
return float(0.5) * (a + b);
}
CUDA_MACRO inline float interp4_ws(const float a, const float b, const float c, const float d)
{
constexpr float c0 = float(7./12.);
constexpr float c1 = float(1./12.);
return c0*(b+c) - c1*(a+d);
}
CUDA_MACRO inline float interp3_ws(const float a, const float b, const float c, const float d)
{
constexpr float c0 = float(3./12.);
constexpr float c1 = float(1./12.);
return c0*(c-b) - c1*(d-a);
}
CUDA_MACRO inline float interp6_ws(
const float a, const float b, const float c, const float d, const float e, const float f)
{
constexpr float c0 = float(37./60.);
constexpr float c1 = float(8./60.);
constexpr float c2 = float(1./60.);
return c0*(c+d) - c1*(b+e) + c2*(a+f);
}
CUDA_MACRO inline float interp5_ws(
const float a, const float b, const float c, const float d, const float e, const float f)
{
constexpr float c0 = float(10./60.);
constexpr float c1 = float(5./60.);
constexpr float c2 = float(1./60.);
return c0*(d-c) - c1*(e-b) + c2*(f-a);
}
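// interp2, interp4_ws and interp6_ws are the symmetric 2nd/4th/6th-order interpolants; interp3_ws
// and interp5_ws are the corresponding odd-order terms that the kernel multiplies by |velocity|
// to add upwind-biased dissipation (the _ws suffix presumably refers to Wicker-Skamarock advection).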
void init_zero(float* const __restrict__ a, const int ncells)
{
for (int n=0; n<ncells; ++n)
a[n] = float(0.);
}
void init_rand(float* const __restrict__ a, const int ncells)
{
for (int n=0; n<ncells; ++n)
a[n] = float(std::rand() % 1000) + float(0.001);
}
__global__
void advec_2i5(
float* const restrict ut,
const float* const restrict u,
const float* const restrict v,
const float* const restrict w,
const float* const restrict dzi,
const float dx, const float dy,
const float* const restrict rhoref,
const float* const restrict rhorefh,
const int istart, const int iend,
const int jstart, const int jend,
const int kstart, const int kend,
const int jj, const int kk)
{
const int ii1 = 1;
const int ii2 = 2;
const int ii3 = 3;
const int jj1 = jj;
const int jj2 = 2*jj;
const int jj3 = 3*jj;
const int kk1 = kk;
const int kk2 = 2*kk;
const int kk3 = 3*kk;
const float dxi = float(1.)/dx;
const float dyi = float(1.)/dy;
const int i = blockIdx.x*blockDim.x + threadIdx.x + istart;
const int j = blockIdx.y*blockDim.y + threadIdx.y + jstart;
const int k = blockIdx.z*blockDim.z + threadIdx.z + kstart;
if (i < iend && j < jend && k < kend)
{
const int ijk = i + j*jj1 + k*kk1;
ut[ijk] +=
// u*du/dx
- ( interp2(u[ijk ], u[ijk+ii1]) * interp6_ws(u[ijk-ii2], u[ijk-ii1], u[ijk ], u[ijk+ii1], u[ijk+ii2], u[ijk+ii3])
- interp2(u[ijk-ii1 ], u[ijk ]) * interp6_ws(u[ijk-ii3], u[ijk-ii2], u[ijk-ii1], u[ijk ], u[ijk+ii1], u[ijk+ii2]) ) * dxi
+ ( std::abs(interp2(u[ijk ], u[ijk+ii1])) * interp5_ws(u[ijk-ii2], u[ijk-ii1], u[ijk ], u[ijk+ii1], u[ijk+ii2], u[ijk+ii3])
- std::abs(interp2(u[ijk-ii1 ], u[ijk ])) * interp5_ws(u[ijk-ii3], u[ijk-ii2], u[ijk-ii1], u[ijk ], u[ijk+ii1], u[ijk+ii2]) ) * dxi
// v*du/dy
- ( interp2(v[ijk-ii1+jj1], v[ijk+jj1]) * interp6_ws(u[ijk-jj2], u[ijk-jj1], u[ijk ], u[ijk+jj1], u[ijk+jj2], u[ijk+jj3])
- interp2(v[ijk-ii1 ], v[ijk ]) * interp6_ws(u[ijk-jj3], u[ijk-jj2], u[ijk-jj1], u[ijk ], u[ijk+jj1], u[ijk+jj2]) ) * dyi
+ ( std::abs(interp2(v[ijk-ii1+jj1], v[ijk+jj1])) * interp5_ws(u[ijk-jj2], u[ijk-jj1], u[ijk ], u[ijk+jj1], u[ijk+jj2], u[ijk+jj3])
- std::abs(interp2(v[ijk-ii1 ], v[ijk ])) * interp5_ws(u[ijk-jj3], u[ijk-jj2], u[ijk-jj1], u[ijk ], u[ijk+jj1], u[ijk+jj2]) ) * dyi;
if (k >= kstart+3 && k < kend-3)
{
ut[ijk] +=
// w*du/dz
- ( rhorefh[k+1] * interp2(w[ijk-ii1+kk1], w[ijk+kk1]) * interp6_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2], u[ijk+kk3])
- rhorefh[k ] * interp2(w[ijk-ii1 ], w[ijk ]) * interp6_ws(u[ijk-kk3], u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2]) ) / rhoref[k] * dzi[k]
+ ( rhorefh[k+1] * std::abs(interp2(w[ijk-ii1+kk1], w[ijk+kk1])) * interp5_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2], u[ijk+kk3])
- rhorefh[k ] * std::abs(interp2(w[ijk-ii1 ], w[ijk ])) * interp5_ws(u[ijk-kk3], u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2]) ) / rhoref[k] * dzi[k];
}
else if (k == kstart)
{
ut[ijk] +=
// w*du/dz -> second order interpolation for fluxtop, fluxbot = 0. as w=0
- ( rhorefh[k+1] * interp2(w[ijk-ii1+kk1], w[ijk+kk1]) * interp2(u[ijk ], u[ijk+kk1]) ) / rhoref[k] * dzi[k];
}
else if (k == kstart+1)
{
ut[ijk] +=
// w*du/dz -> second order interpolation for fluxbot, fourth order for fluxtop
- ( rhorefh[k+1] * interp2(w[ijk-ii1+kk1], w[ijk+kk1]) * interp4_ws(u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2])
- rhorefh[k ] * interp2(w[ijk-ii1 ], w[ijk ]) * interp2( u[ijk-kk1], u[ijk ]) ) / rhoref[k] * dzi[k]
+ ( rhorefh[k+1] * std::abs(interp2(w[ijk-ii1+kk1], w[ijk+kk1])) * interp3_ws(u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2]) ) / rhoref[k] * dzi[k];
}
else if (k == kstart+2)
{
ut[ijk] +=
// w*du/dz -> fourth order interpolation for fluxbot
- ( rhorefh[k+1] * interp2(w[ijk-ii1+kk1], w[ijk+kk1]) * interp6_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2], u[ijk+kk3])
- rhorefh[k ] * interp2(w[ijk-ii1 ], w[ijk ]) * interp4_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1]) ) / rhoref[k] * dzi[k]
+ ( rhorefh[k+1] * std::abs(interp2(w[ijk-ii1+kk1], w[ijk+kk1])) * interp5_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2], u[ijk+kk3])
- rhorefh[k ] * std::abs(interp2(w[ijk-ii1 ], w[ijk ])) * interp3_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1]) ) / rhoref[k] * dzi[k];
}
else if (k == kend-3)
{
ut[ijk] +=
// w*du/dz -> fourth order interpolation for fluxtop
- ( rhorefh[k+1] * interp2(w[ijk-ii1+kk1], w[ijk+kk1]) * interp4_ws(u[ijk-kk1 ], u[ijk ], u[ijk+kk1], u[ijk+kk2])
- rhorefh[k ] * interp2(w[ijk-ii1 ], w[ijk ]) * interp6_ws(u[ijk-kk3], u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2]) ) / rhoref[k] * dzi[k]
+ ( rhorefh[k+1] * std::abs(interp2(w[ijk-ii1+kk1], w[ijk+kk1])) * interp3_ws(u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2])
- rhorefh[k ] * std::abs(interp2(w[ijk-ii1 ], w[ijk ])) * interp5_ws(u[ijk-kk3], u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1], u[ijk+kk2]) ) / rhoref[k] * dzi[k];
}
else if (k == kend-2)
{
ut[ijk] +=
// w*du/dz -> second order interpolation for fluxtop, fourth order for fluxbot
- ( rhorefh[k+1] * interp2(w[ijk-ii1+kk1], w[ijk+kk1]) * interp2(u[ijk ], u[ijk+kk1])
- rhorefh[k ] * interp2(w[ijk-ii1 ], w[ijk ]) * interp4_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1]) ) / rhoref[k] * dzi[k]
- ( rhorefh[k ] * std::abs(interp2(w[ijk-ii1 ], w[ijk ])) * interp3_ws(u[ijk-kk2], u[ijk-kk1], u[ijk ], u[ijk+kk1]) ) / rhoref[k] * dzi[k];
}
else if (k == kend-1)
{
ut[ijk] +=
// w*du/dz -> second order interpolation for fluxbot, fluxtop=0 as w=0
- ( -rhorefh[k] * interp2(w[ijk-ii1 ], w[ijk ]) * interp2(u[ijk-kk1], u[ijk ]) ) / rhoref[k] * dzi[k];
}
}
}
int main(int argc, char* argv[])
{
if (argc != 2)
{
std::cout << "Add the grid size as an argument!" << std::endl;
return 1;
}
const int nloop = 30;
const int itot = std::stoi(argv[1]);
const int jtot = std::stoi(argv[1]);
const int ktot = std::stoi(argv[1]);
const int icells = itot + 2*3;
const int jcells = jtot + 2*3;
const int kcells = ktot + 2*3;
const int ncells = icells*jcells*kcells;
const int istart = 3;
const int jstart = 3;
const int kstart = 3;
const int iend = itot + 3;
const int jend = jtot + 3;
const int kend = ktot + 3;
const int jstride = icells;
const int kstride = icells*jcells;
// const int ijk_check = (istart + itot/2) + (jstart + jtot/2)*jstride + (kstart + ktot/2)*kstride;
float* ut = new float[ncells];
float* u = new float[ncells];
float* v = new float[ncells];
float* w = new float[ncells];
float* dzi = new float[kcells];
float* rhoref = new float[kcells];
float* rhorefh = new float[kcells];
const float dxi = 0.1;
const float dyi = 0.1;
init_zero(ut, ncells);
std::srand(123);
init_rand(u, ncells);
init_rand(v, ncells);
init_rand(w, ncells);
init_rand(dzi, kcells);
init_rand(rhoref, kcells);
init_rand(rhorefh, kcells);
// Send data to the GPU.
#pragma acc enter data copyin(ut[0:ncells], u[0:ncells], v[0:ncells], w[0:ncells], dzi[0:kcells], rhoref[0:kcells], rhorefh[0:kcells])
const int blocki = TILE_I;
const int blockj = TILE_J;
const int blockk = TILE_K;
const int gridi = itot/blocki + (itot%blocki > 0);
const int gridj = jtot/blockj + (jtot%blockj > 0);
const int gridk = ktot/blockk + (ktot%blockk > 0);
dim3 grid(gridi, gridj, gridk);
dim3 block(blocki, blockj, blockk);
// Check results
advec_2i5<<<grid, block>>>(
acc_to_cuda(ut), acc_to_cuda(u), acc_to_cuda(v), acc_to_cuda(w),
acc_to_cuda(dzi), dxi, dyi,
acc_to_cuda(rhoref), acc_to_cuda(rhorefh),
istart, iend, jstart, jend, kstart, kend,
jstride, kstride);
cudaDeviceSynchronize();
// Update the data.
// #pragma acc update self(ut[0:ncells])
// printf("ut=%.20f\n", ut[ijk_check]);
// Time performance
cudaDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (int i=0; i<nloop; ++i)
advec_2i5<<<grid, block>>>(
acc_to_cuda(ut), acc_to_cuda(u), acc_to_cuda(v), acc_to_cuda(w),
acc_to_cuda(dzi), dxi, dyi,
acc_to_cuda(rhoref), acc_to_cuda(rhorefh),
istart, iend, jstart, jend, kstart, kend,
jstride, kstride);
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
float duration = std::chrono::duration_cast<std::chrono::duration<float>>(end - start).count();
printf("time/iter = %E s (%i iters)\n",duration/(float)nloop, nloop);
// Remove data from the GPU.
#pragma acc exit data copyout(ut[0:ncells]) delete(u[0:ncells], v[0:ncells], w[0:ncells], dzi[0:kcells], rhoref[0:kcells], rhorefh[0:kcells])
std::ofstream binary_file("ut_cuda.bin", std::ios::out | std::ios::trunc | std::ios::binary);
if (binary_file)
binary_file.write(reinterpret_cast<const char*>(ut), ncells*sizeof(float));
else
{
std::string error = "Cannot write file \"ut_cuda.bin\"";
throw std::runtime_error(error);
}
return 0;
}
|
6928f8736b7383767995622f8f5a27adf2d1e563.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/// impact of prefetching the data - CUDA lab
#include <stdio.h>
void initWith(float num, float *a, int N)
{
for(int i = 0; i < N; ++i)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
int numberOfSMs;
hipGetDevice(&deviceId);
hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId);
printf("Device ID: %d\tNumber of SMs: %d\n", deviceId, numberOfSMs);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c, size);
initWith(3, a, N);
initWith(4, b, N);
initWith(0, c, N);
/*
* Add asynchronous prefetching after the data is initialized,
* and before launching the kernel, to avoid host to GPU page
* faulting.
*/
hipMemPrefetchAsync(a, size, deviceId);
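// Only `a` is prefetched here; `b` and `c` could be prefetched in the same way (and `c`
// prefetched back to the host after the kernel) to remove the remaining page faults, e.g.:
// hipMemPrefetchAsync(b, size, deviceId);
// hipMemPrefetchAsync(c, size, deviceId);
// /* after hipDeviceSynchronize(): */ hipMemPrefetchAsync(c, size, hipCpuDeviceId);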
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
hipError_t addVectorsErr;
hipError_t asyncErr;
hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N);
addVectorsErr = hipGetLastError();
if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr));
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
checkElementsAre(7, c, N);
hipFree(a);
hipFree(b);
hipFree(c);
}
| 6928f8736b7383767995622f8f5a27adf2d1e563.cu | /// impact of prefetching the data - CUDA lab
#include <stdio.h>
void initWith(float num, float *a, int N)
{
for(int i = 0; i < N; ++i)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
int numberOfSMs;
cudaGetDevice(&deviceId);
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
printf("Device ID: %d\tNumber of SMs: %d\n", deviceId, numberOfSMs);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c, size);
initWith(3, a, N);
initWith(4, b, N);
initWith(0, c, N);
/*
* Add asynchronous prefetching after the data is initialized,
* and before launching the kernel, to avoid host to GPU page
* faulting.
*/
cudaMemPrefetchAsync(a, size, deviceId);
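// Only `a` is prefetched here; `b` and `c` could be prefetched in the same way (and `c`
// prefetched back to the host after the kernel) to remove the remaining page faults, e.g.:
// cudaMemPrefetchAsync(b, size, deviceId);
// cudaMemPrefetchAsync(c, size, deviceId);
// /* after cudaDeviceSynchronize(): */ cudaMemPrefetchAsync(c, size, cudaCpuDeviceId);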
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
cudaError_t addVectorsErr;
cudaError_t asyncErr;
addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
addVectorsErr = cudaGetLastError();
if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
checkElementsAre(7, c, N);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
|
c7a3043a8dd80857f07e42b1797bf515e472338b.hip | // !!! This is a file automatically generated by hipify!!!
//nvcc -o pfb_cuda pfb_cuda.cu -lgomp -lfftw3f -lm -lcufft -lcufftw
//nvcc -Xcompiler -fPIC -shared -o libpfb_cuda.so pfb_cuda.cu -lcufft -lgomp to compile library
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <fftw3.h>
#include <omp.h>
#include <hipfft.h>
#define NTHREAD_PFB 128
/*--------------------------------------------------------------------------------*/
struct PFB_GPU_PLAN {
int n;
int nchan;
int nchunk;
int ntap;
int nthread;
float *win_gpu;
float *dat_gpu;
float *dat_tapered_gpu;
hipfftComplex *dat_trans_gpu;
float *pfb_gpu;
hipfftHandle cuplan;
};
/*--------------------------------------------------------------------------------*/
int get_nchunk(int n,int nchan,int ntap)
{
return n/nchan-ntap;
}
/*--------------------------------------------------------------------------------*/
void coswin(float *vec, int n)
{
for (int i=0;i<n;i++) {
float xx=2.0*(i-n/2)/(n-1);
vec[i]=0.5+0.5*cos(xx*M_PI);
}
}
/*--------------------------------------------------------------------------------*/
void mul_sinc(float *vec, int n, int ntap)
{
for (int i=0;i<n;i++) {
float xx=ntap*1.0*(i-n/2)/(n-1);
if (xx!=0)
vec[i]=vec[i]*sin(M_PI*xx)/(M_PI*xx);
}
}
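// Together coswin() and mul_sinc() build the PFB prototype filter: a raised-cosine (Hann) window
// of length nchan*ntap multiplied by a sinc spanning ntap periods. setup_pfb_plan copies it to
// win_gpu, and format_data_gpu applies it tap by tap before the per-chunk FFT.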
/*--------------------------------------------------------------------------------*/
struct PFB_GPU_PLAN *setup_pfb_plan(int n, int nchan, int ntap)
{
struct PFB_GPU_PLAN *tmp=(struct PFB_GPU_PLAN *)malloc(sizeof(struct PFB_GPU_PLAN));
int nn=nchan*ntap;
float *win=(float *)malloc(sizeof(float)*nn);
coswin(win,nn);
mul_sinc(win,nn,ntap);
int nchunk=get_nchunk(n,nchan,ntap);
tmp->n=n;
tmp->nchan=nchan;
tmp->nchunk=nchunk;
tmp->ntap=ntap;
tmp->nthread=NTHREAD_PFB;
if (hipMalloc((void **)&(tmp->dat_gpu),sizeof(float)*n)!=hipSuccess)
printf("Malloc error on dat_gpu.\n");
if (hipMalloc((void **)&(tmp->dat_tapered_gpu),sizeof(float)*nchunk*nchan)!=hipSuccess)
printf("Malloc error on dat_tapered_gpu.\n");
if (hipMalloc((void **)&(tmp->win_gpu),sizeof(float)*nn)!=hipSuccess)
printf("Malloc error on win_gpu.\n");
if (hipMalloc((void **)&(tmp->dat_trans_gpu),sizeof(hipfftComplex)*nchan*nchunk)!=hipSuccess)
printf("Malloc error on dat_trans_gpu.\n");
if (hipMalloc((void **)&(tmp->pfb_gpu),sizeof(float)*nchan)!=hipSuccess)
printf("Malloc error on pfb_gpu.\n");
if (hipMemcpy(tmp->win_gpu,win,nn*sizeof(float),hipMemcpyHostToDevice)!=hipSuccess)
printf("Copy error on win_gpu.\n");
if (hipfftPlan1d(&(tmp->cuplan),nchan,HIPFFT_R2C,nchunk)!=HIPFFT_SUCCESS)
printf("we had an issue creating plan.\n");
return tmp;
}
/*--------------------------------------------------------------------------------*/
extern "C" {
void setup_pfb_plan_wrapper(int n, int nchan, int ntap, struct PFB_GPU_PLAN **ptr)
//assumes **ptr has already been allocated by the caller, so ptr[0] will hold the plan pointer when done
{
//printf("n,nchan, and ntap are %d, %d, and %d\n",n,nchan,ntap);
ptr[0]=setup_pfb_plan(n,nchan,ntap);
//printf("plan address is %ld\n",(long)(ptr[0]));
//printf("n is now %d\n",ptr[0]->n);
}
}
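/* Typical usage from a host program linking against this library (sketch only):
struct PFB_GPU_PLAN *plan;
setup_pfb_plan_wrapper(n, nchan, ntap, &plan);
pfb_gpu16_wrapper(dat, pfb, &plan); // dat: n int16 samples, pfb: nchan floats of |FFT|^2 summed over chunks
destroy_pfb_gpu_plan_wrapper(&plan);
The double-pointer convention lets the opaque plan be handed back through a foreign-function
interface (e.g. Python ctypes). */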
/*--------------------------------------------------------------------------------*/
void destroy_pfb_gpu_plan(struct PFB_GPU_PLAN *plan)
{
hipfftDestroy(plan->cuplan);
hipFree(plan->dat_gpu);
hipFree(plan->win_gpu);
hipFree(plan->dat_tapered_gpu);
hipFree(plan->dat_trans_gpu);
hipFree(plan->pfb_gpu);
free(plan);
}
/*--------------------------------------------------------------------------------*/
extern "C" {
void destroy_pfb_gpu_plan_wrapper(struct PFB_GPU_PLAN **plan)
{
destroy_pfb_gpu_plan(plan[0]);
}
}
/*--------------------------------------------------------------------------------*/
extern "C" {
void print_pfb_plan(struct PFB_GPU_PLAN *plan)
{
printf("Printing PFB plan.\n");
printf("N is %d\n",plan->n);
printf("ntap is %d\n",plan->ntap);
printf("nchan is %d\n",plan->nchan);
}
}
/*--------------------------------------------------------------------------------*/
void format_data(float *dat, int n, int nchan, int ntap, float *win, float **dat_out, int *nchunk)
{
int nn=n/nchan-ntap;
float *dd=(float *)malloc(sizeof(float)*nn*nchan);
memset(dd,0,sizeof(float)*nn*nchan);
for (int i=0;i<nn;i++)
for (int j=0;j<ntap;j++)
for (int k=0;k<nchan;k++)
dd[i*nchan+k]+=dat[(i+j)*nchan+k]*win[j*nchan+k];
*nchunk=nn;
*dat_out=dd;
}
/*--------------------------------------------------------------------------------*/
__global__
void gpu_int162float32(short *in,float *out,int n)
{
int myi=blockIdx.x*blockDim.x+threadIdx.x;
int nthread=gridDim.x*blockDim.x;
for (int i=0;i<n;i+=nthread)
if (myi+i<n)
out[myi+i]=in[myi+i];
}
/*--------------------------------------------------------------------------------*/
__global__
void format_data_gpu(float *dat, int nchunk, int nchan, int ntap, float *win, float *dat_out)
{
int myi=blockIdx.x*blockDim.x+threadIdx.x;
for (int i=0;i<nchunk;i++) {
float tot=0;
for (int j=0;j<ntap;j++) {
tot+=dat[(i+j)*nchan+myi]*win[j*nchan+myi];
}
dat_out[i*nchan+myi]=tot;
}
}
/*--------------------------------------------------------------------------------*/
__global__
void sum_pfb_gpu(hipfftComplex *dat_trans, int nchan, int nchunk, float *pfb_out)
{
int myi=blockIdx.x*blockDim.x+threadIdx.x;
float tot=0;
for (int i=0;i<nchunk;i++) {
hipfftComplex tmp=dat_trans[myi+i*nchan];
tot+=tmp.x*tmp.x+tmp.y*tmp.y;
}
pfb_out[myi]=tot;
}
/*--------------------------------------------------------------------------------*/
void pfb_gpu(float *dat, float *pfb, struct PFB_GPU_PLAN *pfbplan)
{
if (hipMemcpy(pfbplan->dat_gpu,dat,pfbplan->n*sizeof(float),hipMemcpyHostToDevice)!=hipSuccess)
printf("Copy error on dat_gpu.\n");
for (int i=0;i<10;i++) {
hipLaunchKernelGGL(( format_data_gpu), dim3(pfbplan->nchan/pfbplan->nthread),dim3(pfbplan->nthread), 0, 0, pfbplan->dat_gpu,pfbplan->nchunk,pfbplan->nchan,pfbplan->ntap,pfbplan->win_gpu,pfbplan->dat_tapered_gpu);
if (hipfftExecR2C(pfbplan->cuplan, pfbplan->dat_tapered_gpu, pfbplan->dat_trans_gpu)!=HIPFFT_SUCCESS)
printf("Error executing FFT on GPU.\n");
hipLaunchKernelGGL(( sum_pfb_gpu), dim3(pfbplan->nchan/pfbplan->nthread),dim3(pfbplan->nthread), 0, 0, pfbplan->dat_trans_gpu,pfbplan->nchan,pfbplan->nchunk,pfbplan->pfb_gpu);
}
if (hipMemcpy(pfb,pfbplan->pfb_gpu,sizeof(float)*pfbplan->nchan,hipMemcpyDeviceToHost)!=hipSuccess)
printf("Error copying PFB to cpu.\n");
}
/*--------------------------------------------------------------------------------*/
void pfb_gpu16(short int *dat, float *pfb, struct PFB_GPU_PLAN *pfbplan)
{
if (hipMemcpy(pfbplan->dat_tapered_gpu,dat,pfbplan->n*sizeof(short int),hipMemcpyHostToDevice)!=hipSuccess)
printf("Copy error on dat_gpu.\n");
hipLaunchKernelGGL(( gpu_int162float32), dim3(8*pfbplan->nchan/pfbplan->nthread),dim3(pfbplan->nthread), 0, 0, (short int *)pfbplan->dat_tapered_gpu,pfbplan->dat_gpu,pfbplan->n);
hipLaunchKernelGGL(( format_data_gpu), dim3(pfbplan->nchan/pfbplan->nthread),dim3(pfbplan->nthread), 0, 0, pfbplan->dat_gpu,pfbplan->nchunk,pfbplan->nchan,pfbplan->ntap,pfbplan->win_gpu,pfbplan->dat_tapered_gpu);
if (hipfftExecR2C(pfbplan->cuplan, pfbplan->dat_tapered_gpu, pfbplan->dat_trans_gpu)!=HIPFFT_SUCCESS)
printf("Error executing FFT on GPU.\n");
hipLaunchKernelGGL(( sum_pfb_gpu), dim3(pfbplan->nchan/pfbplan->nthread),dim3(pfbplan->nthread), 0, 0, pfbplan->dat_trans_gpu,pfbplan->nchan,pfbplan->nchunk,pfbplan->pfb_gpu);
if (hipMemcpy(pfb,pfbplan->pfb_gpu,sizeof(float)*pfbplan->nchan,hipMemcpyDeviceToHost)!=hipSuccess)
printf("Error copying PFB to cpu.\n");
}
/*--------------------------------------------------------------------------------*/
extern "C" {
void pfb_gpu16_wrapper(short int *dat, float *pfb, struct PFB_GPU_PLAN **pfbplan)
{
pfb_gpu16(dat,pfb,pfbplan[0]);
}
}
/*================================================================================*/
#if 0
int main(int argc, char *argv[])
{
long n;
FILE *infile;
infile=fopen("random_dat.raw","r");
fread(&n,sizeof(long),1,infile);
printf("N is %ld\n",n);
float *dat=(float *)malloc(sizeof(float)*n);
fread(dat,sizeof(float),n,infile);
fclose(infile);
printf("First element is %f\n",dat[0]);
int nchan=3584*4;
//int nchan=4096*4;
int ntap=4;
//int nn=nchan*ntap;
int niter=1000;
struct PFB_GPU_PLAN *pfbplan=setup_pfb_plan(n,nchan,ntap);
float *pfb_sum=(float *)malloc(sizeof(float)*nchan);
memset(pfb_sum,0,sizeof(float)*nchan);
#if 0
short int *dd=(short int *)malloc(n*sizeof(short int));
#else
short int *dd;
if(hipHostMalloc(&dd,sizeof(short int)*n)!=hipSuccess)
printf("cuda malloc error on dd.\n");
#endif
memset(dd,0,sizeof(short int)*n);
for (int i=0;i<n;i++)
dd[i]=1000*dat[i];
double t1=omp_get_wtime();
for (int i=0;i<niter;i++) {
//pfb_gpu(dat,pfb_sum,pfbplan);// this is the float version
pfb_gpu16(dd,pfb_sum,pfbplan);
}
double t2=omp_get_wtime();
double throughput=1.0*nchan*pfbplan->nchunk*niter/(t2-t1)/1e6;
printf("pfb[0] is now %12.4g, with time per iteration %12.4e and throughput %12.4f Msamp/s\n",pfb_sum[0],(t2-t1)/niter,throughput);
float *tmpv=(float *)malloc(sizeof(float)*n);
if (hipMemcpy(tmpv,pfbplan->dat_gpu,n*sizeof(float),hipMemcpyDeviceToHost)!=hipSuccess)
printf("Error copying temp data back to memory.\n");
printf("vals are %12.4f %d\n",tmpv[0],dd[0]);
destroy_pfb_gpu_plan(pfbplan);
#if 0 //this will do the pfb via fftw on the cpu
int rank=1;
//fftwf_complex *crap=fftwf_malloc(nchunk*(nchan/2+1)*sizeof(fftwf_complex));
fftwf_complex *crap=fftwf_alloc_complex(nchunk*(nchan/2+1));
//fftwf_plan plan=fftwf_plan_many_dft_r2c(rank,&nchan,nchunk,dat_out,NULL,1,nchan,crap,NULL,1,nchan/2+1,FFTW_ESTIMATE);
fftwf_plan plan=fftwf_plan_many_dft_r2c(rank,&nchan,nchunk,dat_out,NULL,1,nchan,crap,NULL,1,nchan/2+1,FFTW_ESTIMATE);
fftwf_execute(plan);
outfile=fopen("out_dat.raw","w");
fwrite(&nchan,1,sizeof(int),outfile);
fwrite(&nchunk,1,sizeof(int),outfile);
fwrite(dat_out,nchan*nchunk,sizeof(float),outfile);
fclose(outfile);
outfile=fopen("out_trans.raw","w");
int asdf=nchan/2+1;
fwrite(&asdf,1,sizeof(int),outfile);
fwrite(&nchunk,1,sizeof(int),outfile);
fwrite(crap,asdf*nchunk,sizeof(fftwf_complex),outfile);
fclose(outfile);
#endif
}
#endif
| c7a3043a8dd80857f07e42b1797bf515e472338b.cu | //nvcc -o pfb_cuda pfb_cuda.cu -lgomp -lfftw3f -lm -lcufft -lcufftw
//nvcc -Xcompiler -fPIC -shared -o libpfb_cuda.so pfb_cuda.cu -lcufft -lgomp to compile library
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <cuda.h>
#include <fftw3.h>
#include <omp.h>
#include <cufft.h>
#define NTHREAD_PFB 128
/*--------------------------------------------------------------------------------*/
struct PFB_GPU_PLAN {
int n;
int nchan;
int nchunk;
int ntap;
int nthread;
float *win_gpu;
float *dat_gpu;
float *dat_tapered_gpu;
cufftComplex *dat_trans_gpu;
float *pfb_gpu;
cufftHandle cuplan;
};
/*--------------------------------------------------------------------------------*/
int get_nchunk(int n,int nchan,int ntap)
{
return n/nchan-ntap;
}
/*--------------------------------------------------------------------------------*/
void coswin(float *vec, int n)
{
for (int i=0;i<n;i++) {
float xx=2.0*(i-n/2)/(n-1);
vec[i]=0.5+0.5*cos(xx*M_PI);
}
}
/*--------------------------------------------------------------------------------*/
void mul_sinc(float *vec, int n, int ntap)
{
for (int i=0;i<n;i++) {
float xx=ntap*1.0*(i-n/2)/(n-1);
if (xx!=0)
vec[i]=vec[i]*sin(M_PI*xx)/(M_PI*xx);
}
}
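// Together coswin() and mul_sinc() build the PFB prototype filter: a raised-cosine (Hann) window
// of length nchan*ntap multiplied by a sinc spanning ntap periods. setup_pfb_plan copies it to
// win_gpu, and format_data_gpu applies it tap by tap before the per-chunk FFT.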
/*--------------------------------------------------------------------------------*/
struct PFB_GPU_PLAN *setup_pfb_plan(int n, int nchan, int ntap)
{
struct PFB_GPU_PLAN *tmp=(struct PFB_GPU_PLAN *)malloc(sizeof(struct PFB_GPU_PLAN));
int nn=nchan*ntap;
float *win=(float *)malloc(sizeof(float)*nn);
coswin(win,nn);
mul_sinc(win,nn,ntap);
int nchunk=get_nchunk(n,nchan,ntap);
tmp->n=n;
tmp->nchan=nchan;
tmp->nchunk=nchunk;
tmp->ntap=ntap;
tmp->nthread=NTHREAD_PFB;
if (cudaMalloc((void **)&(tmp->dat_gpu),sizeof(float)*n)!=cudaSuccess)
printf("Malloc error on dat_gpu.\n");
if (cudaMalloc((void **)&(tmp->dat_tapered_gpu),sizeof(float)*nchunk*nchan)!=cudaSuccess)
printf("Malloc error on dat_tapered_gpu.\n");
if (cudaMalloc((void **)&(tmp->win_gpu),sizeof(float)*nn)!=cudaSuccess)
printf("Malloc error on win_gpu.\n");
if (cudaMalloc((void **)&(tmp->dat_trans_gpu),sizeof(cufftComplex)*nchan*nchunk)!=cudaSuccess)
printf("Malloc error on dat_trans_gpu.\n");
if (cudaMalloc((void **)&(tmp->pfb_gpu),sizeof(float)*nchan)!=cudaSuccess)
printf("Malloc error on pfb_gpu.\n");
if (cudaMemcpy(tmp->win_gpu,win,nn*sizeof(float),cudaMemcpyHostToDevice)!=cudaSuccess)
printf("Copy error on win_gpu.\n");
if (cufftPlan1d(&(tmp->cuplan),nchan,CUFFT_R2C,nchunk)!=CUFFT_SUCCESS)
printf("we had an issue creating plan.\n");
return tmp;
}
/*--------------------------------------------------------------------------------*/
extern "C" {
void setup_pfb_plan_wrapper(int n, int nchan, int ntap, struct PFB_GPU_PLAN **ptr)
//assumes **ptr has already been allocated by the caller, so ptr[0] will hold the plan pointer when done
{
//printf("n,nchan, and ntap are %d, %d, and %d\n",n,nchan,ntap);
ptr[0]=setup_pfb_plan(n,nchan,ntap);
//printf("plan address is %ld\n",(long)(ptr[0]));
//printf("n is now %d\n",ptr[0]->n);
}
}
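/* Typical usage from a host program linking against this library (sketch only):
struct PFB_GPU_PLAN *plan;
setup_pfb_plan_wrapper(n, nchan, ntap, &plan);
pfb_gpu16_wrapper(dat, pfb, &plan); // dat: n int16 samples, pfb: nchan floats of |FFT|^2 summed over chunks
destroy_pfb_gpu_plan_wrapper(&plan);
The double-pointer convention lets the opaque plan be handed back through a foreign-function
interface (e.g. Python ctypes). */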
/*--------------------------------------------------------------------------------*/
void destroy_pfb_gpu_plan(struct PFB_GPU_PLAN *plan)
{
cufftDestroy(plan->cuplan);
cudaFree(plan->dat_gpu);
cudaFree(plan->win_gpu);
cudaFree(plan->dat_tapered_gpu);
cudaFree(plan->dat_trans_gpu);
cudaFree(plan->pfb_gpu);
free(plan);
}
/*--------------------------------------------------------------------------------*/
extern "C" {
void destroy_pfb_gpu_plan_wrapper(struct PFB_GPU_PLAN **plan)
{
destroy_pfb_gpu_plan(plan[0]);
}
}
/*--------------------------------------------------------------------------------*/
extern "C" {
void print_pfb_plan(struct PFB_GPU_PLAN *plan)
{
printf("Printing PFB plan.\n");
printf("N is %d\n",plan->n);
printf("ntap is %d\n",plan->ntap);
printf("nchan is %d\n",plan->nchan);
}
}
/*--------------------------------------------------------------------------------*/
void format_data(float *dat, int n, int nchan, int ntap, float *win, float **dat_out, int *nchunk)
{
int nn=n/nchan-ntap;
float *dd=(float *)malloc(sizeof(float)*nn*nchan);
memset(dd,0,sizeof(float)*nn*nchan);
for (int i=0;i<nn;i++)
for (int j=0;j<ntap;j++)
for (int k=0;k<nchan;k++)
dd[i*nchan+k]+=dat[(i+j)*nchan+k]*win[j*nchan+k];
*nchunk=nn;
*dat_out=dd;
}
/*--------------------------------------------------------------------------------*/
__global__
void gpu_int162float32(short *in,float *out,int n)
{
int myi=blockIdx.x*blockDim.x+threadIdx.x;
int nthread=gridDim.x*blockDim.x;
for (int i=0;i<n;i+=nthread)
if (myi+i<n)
out[myi+i]=in[myi+i];
}
/*--------------------------------------------------------------------------------*/
__global__
void format_data_gpu(float *dat, int nchunk, int nchan, int ntap, float *win, float *dat_out)
{
int myi=blockIdx.x*blockDim.x+threadIdx.x;
for (int i=0;i<nchunk;i++) {
float tot=0;
for (int j=0;j<ntap;j++) {
tot+=dat[(i+j)*nchan+myi]*win[j*nchan+myi];
}
dat_out[i*nchan+myi]=tot;
}
}
/*--------------------------------------------------------------------------------*/
__global__
void sum_pfb_gpu(cufftComplex *dat_trans, int nchan, int nchunk, float *pfb_out)
{
int myi=blockIdx.x*blockDim.x+threadIdx.x;
float tot=0;
for (int i=0;i<nchunk;i++) {
cufftComplex tmp=dat_trans[myi+i*nchan];
tot+=tmp.x*tmp.x+tmp.y*tmp.y;
}
pfb_out[myi]=tot;
}
/*--------------------------------------------------------------------------------*/
void pfb_gpu(float *dat, float *pfb, struct PFB_GPU_PLAN *pfbplan)
{
if (cudaMemcpy(pfbplan->dat_gpu,dat,pfbplan->n*sizeof(float),cudaMemcpyHostToDevice)!=cudaSuccess)
printf("Copy error on dat_gpu.\n");
for (int i=0;i<10;i++) {
format_data_gpu<<<pfbplan->nchan/pfbplan->nthread,pfbplan->nthread>>>(pfbplan->dat_gpu,pfbplan->nchunk,pfbplan->nchan,pfbplan->ntap,pfbplan->win_gpu,pfbplan->dat_tapered_gpu);
if (cufftExecR2C(pfbplan->cuplan, pfbplan->dat_tapered_gpu, pfbplan->dat_trans_gpu)!=CUFFT_SUCCESS)
printf("Error executing FFT on GPU.\n");
sum_pfb_gpu<<<pfbplan->nchan/pfbplan->nthread,pfbplan->nthread>>>(pfbplan->dat_trans_gpu,pfbplan->nchan,pfbplan->nchunk,pfbplan->pfb_gpu);
}
if (cudaMemcpy(pfb,pfbplan->pfb_gpu,sizeof(float)*pfbplan->nchan,cudaMemcpyDeviceToHost)!=cudaSuccess)
printf("Error copying PFB to cpu.\n");
}
/*--------------------------------------------------------------------------------*/
void pfb_gpu16(short int *dat, float *pfb, struct PFB_GPU_PLAN *pfbplan)
{
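  //dat_tapered_gpu doubles as staging space for the raw int16 samples before they are expanded to float32 in dat_gpu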
if (cudaMemcpy(pfbplan->dat_tapered_gpu,dat,pfbplan->n*sizeof(short int),cudaMemcpyHostToDevice)!=cudaSuccess)
printf("Copy error on dat_gpu.\n");
gpu_int162float32<<<8*pfbplan->nchan/pfbplan->nthread,pfbplan->nthread>>>((short int *)pfbplan->dat_tapered_gpu,pfbplan->dat_gpu,pfbplan->n);
format_data_gpu<<<pfbplan->nchan/pfbplan->nthread,pfbplan->nthread>>>(pfbplan->dat_gpu,pfbplan->nchunk,pfbplan->nchan,pfbplan->ntap,pfbplan->win_gpu,pfbplan->dat_tapered_gpu);
if (cufftExecR2C(pfbplan->cuplan, pfbplan->dat_tapered_gpu, pfbplan->dat_trans_gpu)!=CUFFT_SUCCESS)
printf("Error executing FFT on GPU.\n");
sum_pfb_gpu<<<pfbplan->nchan/pfbplan->nthread,pfbplan->nthread>>>(pfbplan->dat_trans_gpu,pfbplan->nchan,pfbplan->nchunk,pfbplan->pfb_gpu);
if (cudaMemcpy(pfb,pfbplan->pfb_gpu,sizeof(float)*pfbplan->nchan,cudaMemcpyDeviceToHost)!=cudaSuccess)
printf("Error copying PFB to cpu.\n");
}
/*--------------------------------------------------------------------------------*/
extern "C" {
void pfb_gpu16_wrapper(short int *dat, float *pfb, struct PFB_GPU_PLAN **pfbplan)
{
pfb_gpu16(dat,pfb,pfbplan[0]);
}
}
/*================================================================================*/
#if 0
int main(int argc, char *argv[])
{
long n;
FILE *infile;
infile=fopen("random_dat.raw","r");
fread(&n,sizeof(long),1,infile);
printf("N is %ld\n",n);
float *dat=(float *)malloc(sizeof(float)*n);
fread(dat,sizeof(float),n,infile);
fclose(infile);
printf("First element is %f\n",dat[0]);
int nchan=3584*4;
//int nchan=4096*4;
int ntap=4;
//int nn=nchan*ntap;
int niter=1000;
struct PFB_GPU_PLAN *pfbplan=setup_pfb_plan(n,nchan,ntap);
float *pfb_sum=(float *)malloc(sizeof(float)*nchan);
memset(pfb_sum,0,sizeof(float)*nchan);
#if 0
short int *dd=(short int *)malloc(n*sizeof(short int));
#else
short int *dd;
if(cudaMallocHost(&dd,sizeof(short int)*n)!=cudaSuccess)
printf("cuda malloc error on dd.\n");
#endif
memset(dd,0,sizeof(short int)*n);
for (int i=0;i<n;i++)
dd[i]=1000*dat[i];
double t1=omp_get_wtime();
for (int i=0;i<niter;i++) {
//pfb_gpu(dat,pfb_sum,pfbplan);// this is the float version
pfb_gpu16(dd,pfb_sum,pfbplan);
}
double t2=omp_get_wtime();
double throughput=1.0*nchan*pfbplan->nchunk*niter/(t2-t1)/1e6;
printf("pfb[0] is now %12.4g, with time per iteration %12.4e and throughput %12.4f Msamp/s\n",pfb_sum[0],(t2-t1)/niter,throughput);
float *tmpv=(float *)malloc(sizeof(float)*n);
if (cudaMemcpy(tmpv,pfbplan->dat_gpu,n*sizeof(float),cudaMemcpyDeviceToHost)!=cudaSuccess)
printf("Error copying temp data back to memory.\n");
printf("vals are %12.4f %d\n",tmpv[0],dd[0]);
destroy_pfb_gpu_plan(pfbplan);
#if 0 //this will do the pfb via fftw on the cpu
int rank=1;
//fftwf_complex *crap=fftwf_malloc(nchunk*(nchan/2+1)*sizeof(fftwf_complex));
fftwf_complex *crap=fftwf_alloc_complex(nchunk*(nchan/2+1));
//fftwf_plan plan=fftwf_plan_many_dft_r2c(rank,&nchan,nchunk,dat_out,NULL,1,nchan,crap,NULL,1,nchan/2+1,FFTW_ESTIMATE);
fftwf_plan plan=fftwf_plan_many_dft_r2c(rank,&nchan,nchunk,dat_out,NULL,1,nchan,crap,NULL,1,nchan/2+1,FFTW_ESTIMATE);
fftwf_execute(plan);
outfile=fopen("out_dat.raw","w");
fwrite(&nchan,1,sizeof(int),outfile);
fwrite(&nchunk,1,sizeof(int),outfile);
fwrite(dat_out,nchan*nchunk,sizeof(float),outfile);
fclose(outfile);
outfile=fopen("out_trans.raw","w");
int asdf=nchan/2+1;
fwrite(&asdf,1,sizeof(int),outfile);
fwrite(&nchunk,1,sizeof(int),outfile);
fwrite(crap,asdf*nchunk,sizeof(fftwf_complex),outfile);
fclose(outfile);
#endif
}
#endif
|
cb8a0f49769a5f0ce81ec125f9d3067017ff6ac6.hip | // !!! This is a file automatically generated by hipify!!!
/**
* atax.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "./polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size. */
#define NX 16384
#define NY 16384
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 1024
#define DIM_THREAD_BLOCK_Y 1
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE *x, DATA_TYPE *A)
{
int i, j;
for (i = 0; i < NX; i++)
{
x[i] = i * M_PI;
for (j = 0; j < NY; j++)
{
A[i*NY + j] = ((DATA_TYPE) i*(j)) / NX;
}
}
}
void compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu)
{
int i, fail;
fail = 0;
for (i=0; i<NY; i++)
{
if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
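//each thread i accumulates one row dot product into tmp[i] (tmp = A * x)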
__global__ void atax_kernel1(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NX)
{
int j;
for(j=0; j < NY; j++)
{
tmp[i] += A[i * NY + j] * x[j];
}
}
}
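//each thread j accumulates one column dot product into y[j] (y = A^T * tmp)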
__global__ void atax_kernel2(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < NY)
{
int i;
for(i=0; i < NX; i++)
{
y[j] += A[i * NY + j] * tmp[i];
}
}
}
void atax_cpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp)
{
int i,j;
for (i= 0; i < NY; i++)
{
y[i] = 0;
}
for (i = 0; i < NX; i++)
{
tmp[i] = 0;
for (j = 0; j < NY; j++)
{
tmp[i] = tmp[i] + A[i*NY + j] * x[j];
}
for (j = 0; j < NY; j++)
{
y[j] = y[j] + A[i*NY + j] * tmp[i];
}
}
}
void ataxGpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu)
{
DATA_TYPE *A_gpu;
DATA_TYPE *x_gpu;
DATA_TYPE *y_gpu;
DATA_TYPE *tmp_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
hipMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * NY);
hipMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * NY);
hipMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * NX);
hipEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, hipMemcpyHostToDevice);
hipMemcpy(x_gpu, x, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
hipMemcpy(y_gpu, y, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
hipMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * NX, hipMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NX) / ((float)block.x) )), 1);
dim3 grid2((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1);
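	//grid1 launches one thread per row (NX) for kernel 1; grid2 one thread per column (NY) for kernel 2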
hipLaunchKernelGGL(( atax_kernel1), dim3(grid1), dim3(block) , 0, 0, A_gpu,x_gpu,tmp_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( atax_kernel2), dim3(grid2), dim3(block) , 0, 0, A_gpu,y_gpu,tmp_gpu);
hipDeviceSynchronize();
	hipMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NY, hipMemcpyDeviceToHost);
hipEventRecord(stop);
hipDeviceSynchronize();
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs);
//hipFree(A_gpu);
//hipFree(x_gpu);
//hipFree(y_gpu);
//hipFree(tmp_gpu);
}
int main(int argc, char** argv)
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* x;
DATA_TYPE* y;
DATA_TYPE* y_outputFromGpu;
DATA_TYPE* tmp;
/*
A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
x = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
tmp = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
*/
hipHostMalloc((void **)&A, sizeof(DATA_TYPE) * NX * NY, hipHostMallocPortable);
hipHostMalloc((void **)&x, sizeof(DATA_TYPE) * NY, hipHostMallocPortable);
hipHostMalloc((void **)&y, sizeof(DATA_TYPE) * NY, hipHostMallocPortable);
hipHostMalloc((void **)&y_outputFromGpu, sizeof(DATA_TYPE) * NY, hipHostMallocPortable);
hipHostMalloc((void **)&tmp, sizeof(DATA_TYPE) * NX, hipHostMallocPortable);
init_array(x, A);
GPU_argv_init();
ataxGpu(A, x, y, tmp, y_outputFromGpu);
t_start = rtclock();
atax_cpu(A, x, y, tmp);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(y, y_outputFromGpu);
hipFree(A);
hipFree(x);
hipFree(y);
hipFree(y_outputFromGpu);
hipFree(tmp);
return 0;
}
| cb8a0f49769a5f0ce81ec125f9d3067017ff6ac6.cu | /**
* atax.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include "./polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size. */
#define NX 16384
#define NY 16384
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 1024
#define DIM_THREAD_BLOCK_Y 1
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE *x, DATA_TYPE *A)
{
int i, j;
for (i = 0; i < NX; i++)
{
x[i] = i * M_PI;
for (j = 0; j < NY; j++)
{
A[i*NY + j] = ((DATA_TYPE) i*(j)) / NX;
}
}
}
void compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu)
{
int i, fail;
fail = 0;
for (i=0; i<NY; i++)
{
if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
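//each thread i accumulates one row dot product into tmp[i] (tmp = A * x)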
__global__ void atax_kernel1(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NX)
{
int j;
for(j=0; j < NY; j++)
{
tmp[i] += A[i * NY + j] * x[j];
}
}
}
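//each thread j accumulates one column dot product into y[j] (y = A^T * tmp)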
__global__ void atax_kernel2(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < NY)
{
int i;
for(i=0; i < NX; i++)
{
y[j] += A[i * NY + j] * tmp[i];
}
}
}
void atax_cpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp)
{
int i,j;
for (i= 0; i < NY; i++)
{
y[i] = 0;
}
for (i = 0; i < NX; i++)
{
tmp[i] = 0;
for (j = 0; j < NY; j++)
{
tmp[i] = tmp[i] + A[i*NY + j] * x[j];
}
for (j = 0; j < NY; j++)
{
y[j] = y[j] + A[i*NY + j] * tmp[i];
}
}
}
void ataxGpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu)
{
DATA_TYPE *A_gpu;
DATA_TYPE *x_gpu;
DATA_TYPE *y_gpu;
DATA_TYPE *tmp_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
cudaMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * NY);
cudaMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * NY);
cudaMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * NX);
cudaEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyHostToDevice);
cudaMemcpy(x_gpu, x, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
cudaMemcpy(y_gpu, y, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
cudaMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * NX, cudaMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NX) / ((float)block.x) )), 1);
dim3 grid2((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1);
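	//grid1 launches one thread per row (NX) for kernel 1; grid2 one thread per column (NY) for kernel 2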
atax_kernel1<<< grid1, block >>>(A_gpu,x_gpu,tmp_gpu);
cudaThreadSynchronize();
atax_kernel2<<< grid2, block >>>(A_gpu,y_gpu,tmp_gpu);
cudaThreadSynchronize();
	cudaMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NY, cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
cudaDeviceSynchronize();
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs);
//cudaFree(A_gpu);
//cudaFree(x_gpu);
//cudaFree(y_gpu);
//cudaFree(tmp_gpu);
}
int main(int argc, char** argv)
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* x;
DATA_TYPE* y;
DATA_TYPE* y_outputFromGpu;
DATA_TYPE* tmp;
/*
A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
x = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
tmp = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
*/
cudaHostAlloc((void **)&A, sizeof(DATA_TYPE) * NX * NY, cudaHostAllocPortable);
cudaHostAlloc((void **)&x, sizeof(DATA_TYPE) * NY, cudaHostAllocPortable);
cudaHostAlloc((void **)&y, sizeof(DATA_TYPE) * NY, cudaHostAllocPortable);
cudaHostAlloc((void **)&y_outputFromGpu, sizeof(DATA_TYPE) * NY, cudaHostAllocPortable);
cudaHostAlloc((void **)&tmp, sizeof(DATA_TYPE) * NX, cudaHostAllocPortable);
init_array(x, A);
GPU_argv_init();
ataxGpu(A, x, y, tmp, y_outputFromGpu);
t_start = rtclock();
atax_cpu(A, x, y, tmp);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(y, y_outputFromGpu);
cudaFree(A);
cudaFree(x);
cudaFree(y);
cudaFree(y_outputFromGpu);
cudaFree(tmp);
return 0;
}
|
242e7fc7d1794355ff8b94a3e0f648e575890f5b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
__global__ void vecAddKernel(float* A, float* B, float* C, int n) {
// Calculate global thread index based on the block and thread indices ----
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    // Use global index to determine which elements to read, add, and write ---
    if (i < n)
        C[i] = A[i] + B[i];
}
| 242e7fc7d1794355ff8b94a3e0f648e575890f5b.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
__global__ void vecAddKernel(float* A, float* B, float* C, int n) {
// Calculate global thread index based on the block and thread indices ----
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    // Use global index to determine which elements to read, add, and write ---
    if (i < n)
        C[i] = A[i] + B[i];
}
|
58b36ea91dfdf5c16e1403568b3dd062ca61963b.hip | // !!! This is a file automatically generated by hipify!!!
/* A Bison parser, made by GNU Bison 2.4.1. */
/* Skeleton implementation for Bison's Yacc-like parsers in C
Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* As a special exception, you may create a larger work that contains
part or all of the Bison parser skeleton and distribute that work
under terms of your choice, so long as that work isn't itself a
parser generator using the skeleton or a modified version thereof
as a parser skeleton. Alternatively, if you modify or redistribute
the parser skeleton itself, you may (at your option) remove this
special exception, which will cause the skeleton and the resulting
Bison output files to be licensed under the GNU General Public
License without this special exception.
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
/* C LALR(1) parser skeleton written by Richard Stallman, by
simplifying the original so-called "semantic" parser. */
/* All symbols defined below should begin with yy or YY, to avoid
infringing on user name space. This should be done even for local
variables, as they might otherwise be expanded by user macros.
There are some unavoidable exceptions within include files to
define necessary library symbols; they are noted "INFRINGES ON
USER NAME SPACE" below. */
/* Identify Bison output. */
#define YYBISON 1
/* Bison version. */
#define YYBISON_VERSION "2.4.1"
/* Skeleton name. */
#define YYSKELETON_NAME "yacc.c"
/* Pure parsers. */
#define YYPURE 0
/* Push parsers. */
#define YYPUSH 0
/* Pull parsers. */
#define YYPULL 1
/* Using locations. */
#define YYLSP_NEEDED 0
/* Copy the first part of user declarations. */
/* Line 189 of yacc.c */
#line 17 "bison.y"
#include "lex.yy.c"
#include "cm.cu"
void clean_queues();
void order_inplace(CudaSet* a, stack<string> exe_type);
void yyerror(char *s, ...);
void emit(char *s, ...);
void emit_mul();
void emit_add();
void emit_minus();
void emit_div();
void emit_and();
void emit_eq();
void emit_or();
void emit_cmp(int val);
void emit_var(char *s, int c, char *f);
void emit_var_asc(char *s);
void emit_var_desc(char *s);
void emit_name(char *name);
void emit_count();
void emit_sum();
void emit_average();
void emit_min();
void emit_max();
void emit_string(char *str);
void emit_number(int_type val);
void emit_float(float_type val);
void emit_decimal(float_type val);
void emit_sel_name(char* name);
void emit_limit(int val);
void emit_union(char *s, char *f1, char *f2);
void emit_varchar(char *s, int c, char *f, int d);
void emit_load(char *s, char *f, int d, char* sep);
void emit_load_binary(char *s, char *f, int d);
void emit_store(char *s, char *f, char* sep);
void emit_store_binary(char *s, char *f, char* sep);
void emit_store_binary(char *s, char *f);
void emit_filter(char *s, char *f, int e);
void emit_order(char *s, char *f, int e, int ll = 0);
void emit_group(char *s, char *f, int e);
void emit_select(char *s, char *f, int ll);
void emit_join(char *s, char *j1);
void emit_join_tab(char *s);
void emit_distinct(char *s, char *f);
/* Line 189 of yacc.c */
#line 124 "bison.cu"
/* Enabling traces. */
#ifndef YYDEBUG
# define YYDEBUG 0
#endif
/* Enabling verbose error messages. */
#ifdef YYERROR_VERBOSE
# undef YYERROR_VERBOSE
# define YYERROR_VERBOSE 1
#else
# define YYERROR_VERBOSE 0
#endif
/* Enabling the token table. */
#ifndef YYTOKEN_TABLE
# define YYTOKEN_TABLE 0
#endif
/* Tokens. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
/* Put the tokens into the symbol table, so that GDB and other debuggers
know about them. */
enum yytokentype {
FILENAME = 258,
NAME = 259,
STRING = 260,
INTNUM = 261,
DECIMAL1 = 262,
BOOL1 = 263,
APPROXNUM = 264,
USERVAR = 265,
ASSIGN = 266,
EQUAL = 267,
OR = 268,
XOR = 269,
AND = 270,
REGEXP = 271,
LIKE = 272,
IS = 273,
IN = 274,
NOT = 275,
BETWEEN = 276,
COMPARISON = 277,
SHIFT = 278,
MOD = 279,
UMINUS = 280,
LOAD = 281,
STREAM = 282,
FILTER = 283,
BY = 284,
JOIN = 285,
STORE = 286,
INTO = 287,
GROUP = 288,
FROM = 289,
SELECT = 290,
AS = 291,
ORDER = 292,
ASC = 293,
DESC = 294,
COUNT = 295,
USING = 296,
SUM = 297,
AVG = 298,
MIN = 299,
MAX = 300,
LIMIT = 301,
ON = 302,
BINARY = 303
};
#endif
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
typedef union YYSTYPE
{
/* Line 214 of yacc.c */
#line 67 "bison.y"
int intval;
float floatval;
char *strval;
int subtok;
/* Line 214 of yacc.c */
#line 217 "bison.cu"
} YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define yystype YYSTYPE /* obsolescent; will be withdrawn */
# define YYSTYPE_IS_DECLARED 1
#endif
/* Copy the second part of user declarations. */
/* Line 264 of yacc.c */
#line 229 "bison.cu"
#ifdef short
# undef short
#endif
#ifdef YYTYPE_UINT8
typedef YYTYPE_UINT8 yytype_uint8;
#else
typedef unsigned char yytype_uint8;
#endif
#ifdef YYTYPE_INT8
typedef YYTYPE_INT8 yytype_int8;
#elif (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
typedef signed char yytype_int8;
#else
typedef short int yytype_int8;
#endif
#ifdef YYTYPE_UINT16
typedef YYTYPE_UINT16 yytype_uint16;
#else
typedef unsigned short int yytype_uint16;
#endif
#ifdef YYTYPE_INT16
typedef YYTYPE_INT16 yytype_int16;
#else
typedef short int yytype_int16;
#endif
#ifndef YYSIZE_T
# ifdef __SIZE_TYPE__
# define YYSIZE_T __SIZE_TYPE__
# elif defined size_t
# define YYSIZE_T size_t
# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
# define YYSIZE_T size_t
# else
# define YYSIZE_T unsigned int
# endif
#endif
#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
#ifndef YY_
# if YYENABLE_NLS
# if ENABLE_NLS
# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
# define YY_(msgid) dgettext ("bison-runtime", msgid)
# endif
# endif
# ifndef YY_
# define YY_(msgid) msgid
# endif
#endif
/* Suppress unused-variable warnings by "using" E. */
#if ! defined lint || defined __GNUC__
# define YYUSE(e) ((void) (e))
#else
# define YYUSE(e) /* empty */
#endif
/* Identity function, used to suppress warnings about constant conditions. */
#ifndef lint
# define YYID(n) (n)
#else
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static int
YYID (int yyi)
#else
static int
YYID (yyi)
int yyi;
#endif
{
return yyi;
}
#endif
#if ! defined yyoverflow || YYERROR_VERBOSE
/* The parser invokes alloca or malloc; define the necessary symbols. */
# ifdef YYSTACK_USE_ALLOCA
# if YYSTACK_USE_ALLOCA
# ifdef __GNUC__
# define YYSTACK_ALLOC __builtin_alloca
# elif defined __BUILTIN_VA_ARG_INCR
# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
# elif defined _AIX
# define YYSTACK_ALLOC __alloca
# elif defined _MSC_VER
# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
# define alloca _alloca
# else
# define YYSTACK_ALLOC alloca
# if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
# ifndef _STDLIB_H
# define _STDLIB_H 1
# endif
# endif
# endif
# endif
# endif
# ifdef YYSTACK_ALLOC
/* Pacify GCC's `empty if-body' warning. */
# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))
# ifndef YYSTACK_ALLOC_MAXIMUM
/* The OS might guarantee only one guard page at the bottom of the stack,
and a page size can be as small as 4096 bytes. So we cannot safely
invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
to allow for a few compiler-allocated temporary stack slots. */
# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
# endif
# else
# define YYSTACK_ALLOC YYMALLOC
# define YYSTACK_FREE YYFREE
# ifndef YYSTACK_ALLOC_MAXIMUM
# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
# endif
# if (defined __cplusplus && ! defined _STDLIB_H \
&& ! ((defined YYMALLOC || defined malloc) \
&& (defined YYFREE || defined free)))
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
# ifndef _STDLIB_H
# define _STDLIB_H 1
# endif
# endif
# ifndef YYMALLOC
# define YYMALLOC malloc
# if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# ifndef YYFREE
# define YYFREE free
# if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
void free (void *); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# endif
#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
#if (! defined yyoverflow \
&& (! defined __cplusplus \
|| (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
/* A type that is properly aligned for any stack member. */
union yyalloc
{
yytype_int16 yyss_alloc;
YYSTYPE yyvs_alloc;
};
/* The size of the maximum gap between one aligned stack and the next. */
# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
/* The size of an array large enough to hold all stacks, each with
   N elements.  */
# define YYSTACK_BYTES(N) \
((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ YYSTACK_GAP_MAXIMUM)
/* Copy COUNT objects from FROM to TO. The source and destination do
not overlap. */
# ifndef YYCOPY
# if defined __GNUC__ && 1 < __GNUC__
# define YYCOPY(To, From, Count) \
__builtin_memcpy (To, From, (Count) * sizeof (*(From)))
# else
# define YYCOPY(To, From, Count) \
do \
{ \
YYSIZE_T yyi; \
for (yyi = 0; yyi < (Count); yyi++) \
(To)[yyi] = (From)[yyi]; \
} \
while (YYID (0))
# endif
# endif
/* Relocate STACK from its old location to the new one. The
local variables YYSIZE and YYSTACKSIZE give the old and new number of
elements in the stack, and YYPTR gives the new location of the
stack. Advance YYPTR to a properly aligned location for the next
stack. */
# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
do \
{ \
YYSIZE_T yynewbytes; \
YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
Stack = &yyptr->Stack_alloc; \
yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
yyptr += yynewbytes / sizeof (*yyptr); \
} \
while (YYID (0))
#endif
/* YYFINAL -- State number of the termination state. */
#define YYFINAL 8
/* YYLAST -- Last index in YYTABLE. */
#define YYLAST 446
/* YYNTOKENS -- Number of terminals. */
#define YYNTOKENS 66
/* YYNNTS -- Number of nonterminals. */
#define YYNNTS 13
/* YYNRULES -- Number of rules. */
#define YYNRULES 62
/* YYNRULES -- Number of states. */
#define YYNSTATES 153
/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
#define YYUNDEFTOK 2
#define YYMAXUTOK 303
#define YYTRANSLATE(YYX) \
((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
static const yytype_uint8 yytranslate[] =
{
0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 20, 2, 2, 2, 31, 25, 2,
59, 60, 29, 27, 65, 28, 61, 30, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 64, 58,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 33, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 62, 24, 63, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 21, 22, 23, 26, 32,
34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57
};
#if YYDEBUG
/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
YYRHS. */
static const yytype_uint16 yyprhs[] =
{
0, 0, 3, 6, 10, 12, 20, 33, 43, 49,
56, 64, 74, 81, 83, 87, 89, 91, 93, 95,
97, 99, 109, 116, 119, 122, 127, 132, 137, 142,
147, 151, 155, 159, 163, 167, 171, 175, 179, 183,
187, 191, 194, 197, 201, 207, 211, 215, 220, 221,
225, 229, 235, 237, 241, 243, 247, 248, 250, 253,
258, 264, 265
};
/* YYRHS -- A `-1'-separated list of the rules' RHS. */
static const yytype_int8 yyrhs[] =
{
67, 0, -1, 68, 58, -1, 67, 68, 58, -1,
69, -1, 4, 11, 44, 72, 43, 4, 71, -1,
4, 11, 35, 3, 50, 59, 3, 60, 45, 59,
73, 60, -1, 4, 11, 35, 3, 57, 45, 59,
73, 60, -1, 4, 11, 37, 4, 76, -1, 4,
11, 46, 4, 38, 75, -1, 4, 11, 44, 72,
43, 4, 77, -1, 40, 4, 41, 3, 50, 59,
3, 60, 78, -1, 40, 4, 41, 3, 78, 57,
-1, 4, -1, 4, 61, 4, -1, 10, -1, 5,
-1, 6, -1, 9, -1, 7, -1, 8, -1, 4,
62, 6, 63, 64, 4, 59, 6, 60, -1, 4,
62, 6, 63, 64, 4, -1, 4, 47, -1, 4,
48, -1, 49, 59, 70, 60, -1, 51, 59, 70,
60, -1, 52, 59, 70, 60, -1, 53, 59, 70,
60, -1, 54, 59, 70, 60, -1, 70, 27, 70,
-1, 70, 28, 70, -1, 70, 29, 70, -1, 70,
30, 70, -1, 70, 31, 70, -1, 70, 32, 70,
-1, 70, 15, 70, -1, 70, 12, 70, -1, 70,
13, 70, -1, 70, 14, 70, -1, 70, 26, 70,
-1, 21, 70, -1, 20, 70, -1, 70, 23, 70,
-1, 70, 23, 59, 69, 60, -1, 59, 70, 60,
-1, 70, 18, 8, -1, 70, 18, 21, 8, -1,
-1, 42, 38, 74, -1, 70, 45, 4, -1, 72,
65, 70, 45, 4, -1, 70, -1, 73, 65, 70,
-1, 70, -1, 70, 65, 74, -1, -1, 74, -1,
38, 70, -1, 39, 4, 56, 70, -1, 39, 4,
56, 70, 77, -1, -1, 55, 6, -1
};
/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
static const yytype_uint8 yyrline[] =
{
0, 137, 137, 138, 142, 145, 147, 149, 151, 153,
155, 157, 159, 164, 165, 166, 167, 168, 169, 170,
171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
184, 185, 186, 187, 188, 189, 191, 192, 193, 194,
195, 196, 197, 198, 200, 201, 205, 206, 209, 212,
216, 217, 221, 222, 226, 227, 230, 232, 235, 238,
239, 241, 244
};
#endif
#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE
/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
First, the terminals, then, starting at YYNTOKENS, nonterminals. */
static const char *const yytname[] =
{
"$end", "error", "$undefined", "FILENAME", "NAME", "STRING", "INTNUM",
"DECIMAL1", "BOOL1", "APPROXNUM", "USERVAR", "ASSIGN", "EQUAL", "OR",
"XOR", "AND", "REGEXP", "LIKE", "IS", "IN", "'!'", "NOT", "BETWEEN",
"COMPARISON", "'|'", "'&'", "SHIFT", "'+'", "'-'", "'*'", "'/'", "'%'",
"MOD", "'^'", "UMINUS", "LOAD", "STREAM", "FILTER", "BY", "JOIN",
"STORE", "INTO", "GROUP", "FROM", "SELECT", "AS", "ORDER", "ASC", "DESC",
"COUNT", "USING", "SUM", "AVG", "MIN", "MAX", "LIMIT", "ON", "BINARY",
"';'", "'('", "')'", "'.'", "'{'", "'}'", "':'", "','", "$accept",
"stmt_list", "stmt", "select_stmt", "expr", "opt_group_list",
"expr_list", "load_list", "val_list", "opt_val_list", "opt_where",
"join_list", "opt_limit", 0
};
#endif
# ifdef YYPRINT
/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
token YYLEX-NUM. */
static const yytype_uint16 yytoknum[] =
{
0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
33, 275, 276, 277, 124, 38, 278, 43, 45, 42,
47, 37, 279, 94, 280, 281, 282, 283, 284, 285,
286, 287, 288, 289, 290, 291, 292, 293, 294, 295,
296, 297, 298, 299, 300, 301, 302, 303, 59, 40,
41, 46, 123, 125, 58, 44
};
# endif
/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
static const yytype_uint8 yyr1[] =
{
0, 66, 67, 67, 68, 69, 69, 69, 69, 69,
69, 69, 69, 70, 70, 70, 70, 70, 70, 70,
70, 70, 70, 70, 70, 70, 70, 70, 70, 70,
70, 70, 70, 70, 70, 70, 70, 70, 70, 70,
70, 70, 70, 70, 70, 70, 70, 70, 71, 71,
72, 72, 73, 73, 74, 74, 75, 75, 76, 77,
77, 78, 78
};
/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
static const yytype_uint8 yyr2[] =
{
0, 2, 2, 3, 1, 7, 12, 9, 5, 6,
7, 9, 6, 1, 3, 1, 1, 1, 1, 1,
1, 9, 6, 2, 2, 4, 4, 4, 4, 4,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 2, 2, 3, 5, 3, 3, 4, 0, 3,
3, 5, 1, 3, 1, 3, 0, 1, 2, 4,
5, 0, 2
};
/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
STATE-NUM when YYTABLE doesn't specify something else to do. Zero
means the default is an error. */
static const yytype_uint8 yydefact[] =
{
0, 0, 0, 0, 0, 4, 0, 0, 1, 0,
2, 0, 0, 0, 0, 0, 3, 0, 0, 13,
16, 17, 19, 20, 18, 15, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 61, 0, 0,
0, 8, 23, 24, 0, 0, 42, 41, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
56, 0, 0, 0, 0, 0, 58, 14, 0, 0,
0, 0, 0, 0, 45, 37, 38, 39, 36, 46,
0, 0, 43, 40, 30, 31, 32, 33, 34, 35,
50, 48, 0, 54, 57, 9, 0, 62, 12, 0,
0, 0, 25, 26, 27, 28, 29, 47, 13, 0,
0, 0, 5, 10, 0, 0, 0, 0, 52, 0,
0, 44, 0, 0, 51, 55, 61, 0, 7, 0,
22, 0, 49, 11, 0, 53, 0, 59, 0, 0,
60, 6, 21
};
/* YYDEFGOTO[NTERM-NUM]. */
static const yytype_int16 yydefgoto[] =
{
-1, 3, 4, 5, 103, 122, 35, 129, 104, 105,
41, 123, 73
};
/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
STATE-NUM. */
#define YYPACT_NINF -122
static const yytype_int16 yypact[] =
{
14, -3, 7, 5, -34, -122, 50, 23, -122, 28,
-122, 52, 61, 62, 77, 85, -122, -35, 51, -45,
-122, -122, -122, -122, -122, -122, 62, 62, 33, 36,
44, 49, 58, 62, 300, -42, 71, -29, 59, 65,
62, -122, -122, -122, 115, 114, 2, 2, 62, 62,
62, 62, 62, 171, 62, 62, 62, 62, -2, 128,
62, 62, 62, 62, 62, 62, 62, 118, 119, 62,
62, 66, 121, 67, 126, 84, 364, -122, 81, 192,
214, 235, 257, 278, -122, 364, 383, 401, 142, -122,
122, 53, 408, 414, 69, 69, -122, -122, -122, -122,
-122, -32, 321, 127, -122, -122, 143, -122, -122, 87,
62, 88, -122, -122, -122, -122, -122, -122, 29, 91,
157, 124, -122, -122, 159, 62, 104, 130, 364, 15,
162, -122, 111, 62, -122, -122, 123, 117, -122, 62,
129, 62, -122, -122, 62, 364, 184, 342, 19, 131,
-122, -122, -122
};
/* YYPGOTO[NTERM-NUM]. */
static const yytype_int16 yypgoto[] =
{
-122, -122, 190, 105, -13, -122, -122, 64, -121, -122,
-122, 48, 73
};
/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
positive, shift that token. If negative, reduce the rule which
number is the opposite. If zero, do what YYDEFACT says.
If YYTABLE_NINF, syntax error. */
#define YYTABLE_NINF -1
static const yytype_uint8 yytable[] =
{
34, 68, 42, 43, 135, 8, 89, 120, 6, 1,
121, 7, 142, 46, 47, 38, 44, 45, 1, 90,
53, 71, 39, 69, 10, 59, 72, 76, 60, 61,
62, 63, 64, 65, 66, 79, 80, 81, 82, 83,
6, 85, 86, 87, 88, 2, 92, 93, 94, 95,
96, 97, 98, 99, 2, 17, 102, 118, 20, 21,
22, 23, 24, 25, 15, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 138, 42, 43, 53, 151,
139, 36, 26, 27, 139, 11, 16, 12, 37, 40,
44, 45, 48, 2, 13, 49, 14, 128, 63, 64,
65, 66, 28, 50, 29, 30, 31, 32, 51, 70,
75, 28, 33, 29, 30, 31, 32, 52, 74, 77,
78, 33, 100, 101, 108, 106, 145, 107, 147, 109,
117, 128, 19, 20, 21, 22, 23, 24, 25, 54,
55, 56, 57, 110, 111, 58, 126, 127, 26, 27,
59, 131, 130, 60, 61, 62, 63, 64, 65, 66,
58, 132, 133, 134, 136, 59, 140, 141, 60, 61,
62, 63, 64, 65, 66, 137, 144, 28, 72, 29,
30, 31, 32, 54, 55, 56, 57, 91, 146, 58,
149, 152, 125, 9, 59, 150, 119, 60, 61, 62,
63, 64, 65, 66, 54, 55, 56, 57, 148, 143,
58, 0, 0, 0, 0, 59, 0, 0, 60, 61,
62, 63, 64, 65, 66, 0, 54, 55, 56, 57,
0, 84, 58, 0, 0, 0, 0, 59, 0, 0,
60, 61, 62, 63, 64, 65, 66, 54, 55, 56,
57, 0, 112, 58, 0, 0, 0, 0, 59, 0,
0, 60, 61, 62, 63, 64, 65, 66, 0, 54,
55, 56, 57, 0, 113, 58, 0, 0, 0, 0,
59, 0, 0, 60, 61, 62, 63, 64, 65, 66,
54, 55, 56, 57, 0, 114, 58, 0, 0, 0,
0, 59, 0, 0, 60, 61, 62, 63, 64, 65,
66, 0, 54, 55, 56, 57, 0, 115, 58, 0,
0, 0, 0, 59, 0, 0, 60, 61, 62, 63,
64, 65, 66, 54, 55, 56, 57, 0, 116, 58,
0, 0, 0, 0, 59, 67, 0, 60, 61, 62,
63, 64, 65, 66, 54, 55, 56, 57, 0, 0,
58, 0, 0, 0, 0, 59, 124, 0, 60, 61,
62, 63, 64, 65, 66, 0, 54, 55, 56, 57,
0, 120, 58, 0, 0, 0, 0, 59, 0, 0,
60, 61, 62, 63, 64, 65, 66, 56, 57, 0,
0, 58, 0, 0, 0, 0, 59, 0, 0, 60,
61, 62, 63, 64, 65, 66, 57, 0, 0, 58,
0, 0, 0, 0, 59, 0, 0, 60, 61, 62,
63, 64, 65, 66, 60, 61, 62, 63, 64, 65,
66, 61, 62, 63, 64, 65, 66
};
static const yytype_int16 yycheck[] =
{
13, 43, 47, 48, 125, 0, 8, 39, 11, 4,
42, 4, 133, 26, 27, 50, 61, 62, 4, 21,
33, 50, 57, 65, 58, 23, 55, 40, 26, 27,
28, 29, 30, 31, 32, 48, 49, 50, 51, 52,
11, 54, 55, 56, 57, 40, 59, 60, 61, 62,
63, 64, 65, 66, 40, 3, 69, 4, 5, 6,
7, 8, 9, 10, 41, 4, 4, 5, 6, 7,
8, 9, 10, 20, 21, 60, 47, 48, 91, 60,
65, 4, 20, 21, 65, 35, 58, 37, 3, 38,
61, 62, 59, 40, 44, 59, 46, 110, 29, 30,
31, 32, 49, 59, 51, 52, 53, 54, 59, 38,
45, 49, 59, 51, 52, 53, 54, 59, 59, 4,
6, 59, 4, 4, 57, 59, 139, 6, 141, 3,
8, 144, 4, 5, 6, 7, 8, 9, 10, 12,
13, 14, 15, 59, 63, 18, 3, 60, 20, 21,
23, 60, 64, 26, 27, 28, 29, 30, 31, 32,
18, 4, 38, 4, 60, 23, 4, 56, 26, 27,
28, 29, 30, 31, 32, 45, 59, 49, 55, 51,
52, 53, 54, 12, 13, 14, 15, 59, 59, 18,
6, 60, 65, 3, 23, 147, 91, 26, 27, 28,
29, 30, 31, 32, 12, 13, 14, 15, 144, 136,
18, -1, -1, -1, -1, 23, -1, -1, 26, 27,
28, 29, 30, 31, 32, -1, 12, 13, 14, 15,
-1, 60, 18, -1, -1, -1, -1, 23, -1, -1,
26, 27, 28, 29, 30, 31, 32, 12, 13, 14,
15, -1, 60, 18, -1, -1, -1, -1, 23, -1,
-1, 26, 27, 28, 29, 30, 31, 32, -1, 12,
13, 14, 15, -1, 60, 18, -1, -1, -1, -1,
23, -1, -1, 26, 27, 28, 29, 30, 31, 32,
12, 13, 14, 15, -1, 60, 18, -1, -1, -1,
-1, 23, -1, -1, 26, 27, 28, 29, 30, 31,
32, -1, 12, 13, 14, 15, -1, 60, 18, -1,
-1, -1, -1, 23, -1, -1, 26, 27, 28, 29,
30, 31, 32, 12, 13, 14, 15, -1, 60, 18,
-1, -1, -1, -1, 23, 45, -1, 26, 27, 28,
29, 30, 31, 32, 12, 13, 14, 15, -1, -1,
18, -1, -1, -1, -1, 23, 45, -1, 26, 27,
28, 29, 30, 31, 32, -1, 12, 13, 14, 15,
-1, 39, 18, -1, -1, -1, -1, 23, -1, -1,
26, 27, 28, 29, 30, 31, 32, 14, 15, -1,
-1, 18, -1, -1, -1, -1, 23, -1, -1, 26,
27, 28, 29, 30, 31, 32, 15, -1, -1, 18,
-1, -1, -1, -1, 23, -1, -1, 26, 27, 28,
29, 30, 31, 32, 26, 27, 28, 29, 30, 31,
32, 27, 28, 29, 30, 31, 32
};
/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
symbol of state STATE-NUM. */
static const yytype_uint8 yystos[] =
{
0, 4, 40, 67, 68, 69, 11, 4, 0, 68,
58, 35, 37, 44, 46, 41, 58, 3, 4, 4,
5, 6, 7, 8, 9, 10, 20, 21, 49, 51,
52, 53, 54, 59, 70, 72, 4, 3, 50, 57,
38, 76, 47, 48, 61, 62, 70, 70, 59, 59,
59, 59, 59, 70, 12, 13, 14, 15, 18, 23,
26, 27, 28, 29, 30, 31, 32, 45, 43, 65,
38, 50, 55, 78, 59, 45, 70, 4, 6, 70,
70, 70, 70, 70, 60, 70, 70, 70, 70, 8,
21, 59, 70, 70, 70, 70, 70, 70, 70, 70,
4, 4, 70, 70, 74, 75, 59, 6, 57, 3,
59, 63, 60, 60, 60, 60, 60, 8, 4, 69,
39, 42, 71, 77, 45, 65, 3, 60, 70, 73,
64, 60, 4, 38, 4, 74, 60, 45, 60, 65,
4, 56, 74, 78, 59, 70, 59, 70, 73, 6,
77, 60, 60
};
#define yyerrok (yyerrstatus = 0)
#define yyclearin (yychar = YYEMPTY)
#define YYEMPTY (-2)
#define YYEOF 0
#define YYACCEPT goto yyacceptlab
#define YYABORT goto yyabortlab
#define YYERROR goto yyerrorlab
/* Like YYERROR except do call yyerror. This remains here temporarily
to ease the transition to the new meaning of YYERROR, for GCC.
Once GCC version 2 has supplanted version 1, this can go. */
#define YYFAIL goto yyerrlab
#define YYRECOVERING() (!!yyerrstatus)
#define YYBACKUP(Token, Value) \
do \
if (yychar == YYEMPTY && yylen == 1) \
{ \
yychar = (Token); \
yylval = (Value); \
yytoken = YYTRANSLATE (yychar); \
YYPOPSTACK (1); \
goto yybackup; \
} \
else \
{ \
yyerror (YY_("syntax error: cannot back up")); \
YYERROR; \
} \
while (YYID (0))
#define YYTERROR 1
#define YYERRCODE 256
/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
If N is 0, then set CURRENT to the empty location which ends
the previous symbol: RHS[0] (always defined). */
#define YYRHSLOC(Rhs, K) ((Rhs)[K])
#ifndef YYLLOC_DEFAULT
# define YYLLOC_DEFAULT(Current, Rhs, N) \
do \
if (YYID (N)) \
{ \
(Current).first_line = YYRHSLOC (Rhs, 1).first_line; \
(Current).first_column = YYRHSLOC (Rhs, 1).first_column; \
(Current).last_line = YYRHSLOC (Rhs, N).last_line; \
(Current).last_column = YYRHSLOC (Rhs, N).last_column; \
} \
else \
{ \
(Current).first_line = (Current).last_line = \
YYRHSLOC (Rhs, 0).last_line; \
(Current).first_column = (Current).last_column = \
YYRHSLOC (Rhs, 0).last_column; \
} \
while (YYID (0))
#endif
/* YY_LOCATION_PRINT -- Print the location on the stream.
This macro was not mandated originally: define only if we know
we won't break user code: when these are the locations we know. */
#ifndef YY_LOCATION_PRINT
# if YYLTYPE_IS_TRIVIAL
# define YY_LOCATION_PRINT(File, Loc) \
fprintf (File, "%d.%d-%d.%d", \
(Loc).first_line, (Loc).first_column, \
(Loc).last_line, (Loc).last_column)
# else
# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
# endif
#endif
/* YYLEX -- calling `yylex' with the right arguments. */
#ifdef YYLEX_PARAM
# define YYLEX yylex (YYLEX_PARAM)
#else
# define YYLEX yylex ()
#endif
/* Enable debugging if requested. */
#if YYDEBUG
# ifndef YYFPRINTF
# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
# define YYFPRINTF fprintf
# endif
# define YYDPRINTF(Args) \
do { \
if (yydebug) \
YYFPRINTF Args; \
} while (YYID (0))
# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
do { \
if (yydebug) \
{ \
YYFPRINTF (stderr, "%s ", Title); \
yy_symbol_print (stderr, \
Type, Value); \
YYFPRINTF (stderr, "\n"); \
} \
} while (YYID (0))
/*--------------------------------.
| Print this symbol on YYOUTPUT. |
`--------------------------------*/
/*ARGSUSED*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
#else
static void
yy_symbol_value_print (yyoutput, yytype, yyvaluep)
FILE *yyoutput;
int yytype;
YYSTYPE const * const yyvaluep;
#endif
{
if (!yyvaluep)
return;
# ifdef YYPRINT
if (yytype < YYNTOKENS)
YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
# else
YYUSE (yyoutput);
# endif
switch (yytype)
{
default:
break;
}
}
/*--------------------------------.
| Print this symbol on YYOUTPUT. |
`--------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
#else
static void
yy_symbol_print (yyoutput, yytype, yyvaluep)
FILE *yyoutput;
int yytype;
YYSTYPE const * const yyvaluep;
#endif
{
if (yytype < YYNTOKENS)
YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
else
YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
yy_symbol_value_print (yyoutput, yytype, yyvaluep);
YYFPRINTF (yyoutput, ")");
}
/*------------------------------------------------------------------.
| yy_stack_print -- Print the state stack from its BOTTOM up to its |
| TOP (included). |
`------------------------------------------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
#else
static void
yy_stack_print (yybottom, yytop)
yytype_int16 *yybottom;
yytype_int16 *yytop;
#endif
{
YYFPRINTF (stderr, "Stack now");
for (; yybottom <= yytop; yybottom++)
{
int yybot = *yybottom;
YYFPRINTF (stderr, " %d", yybot);
}
YYFPRINTF (stderr, "\n");
}
# define YY_STACK_PRINT(Bottom, Top) \
do { \
if (yydebug) \
yy_stack_print ((Bottom), (Top)); \
} while (YYID (0))
/*------------------------------------------------.
| Report that the YYRULE is going to be reduced. |
`------------------------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_reduce_print (YYSTYPE *yyvsp, int yyrule)
#else
static void
yy_reduce_print (yyvsp, yyrule)
YYSTYPE *yyvsp;
int yyrule;
#endif
{
int yynrhs = yyr2[yyrule];
int yyi;
unsigned long int yylno = yyrline[yyrule];
YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
yyrule - 1, yylno);
/* The symbols being reduced. */
for (yyi = 0; yyi < yynrhs; yyi++)
{
YYFPRINTF (stderr, " $%d = ", yyi + 1);
yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
&(yyvsp[(yyi + 1) - (yynrhs)])
);
YYFPRINTF (stderr, "\n");
}
}
# define YY_REDUCE_PRINT(Rule) \
do { \
if (yydebug) \
yy_reduce_print (yyvsp, Rule); \
} while (YYID (0))
/* Nonzero means print parse trace. It is left uninitialized so that
multiple parsers can coexist. */
int yydebug;
#else /* !YYDEBUG */
# define YYDPRINTF(Args)
# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
# define YY_STACK_PRINT(Bottom, Top)
# define YY_REDUCE_PRINT(Rule)
#endif /* !YYDEBUG */
/* YYINITDEPTH -- initial size of the parser's stacks. */
#ifndef YYINITDEPTH
# define YYINITDEPTH 200
#endif
/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
if the built-in stack extension method is used).
Do not make this value too large; the results are undefined if
YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
evaluated with infinite-precision integer arithmetic. */
#ifndef YYMAXDEPTH
# define YYMAXDEPTH 10000
#endif
#if YYERROR_VERBOSE
# ifndef yystrlen
# if defined __GLIBC__ && defined _STRING_H
# define yystrlen strlen
# else
/* Return the length of YYSTR. */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static YYSIZE_T
yystrlen (const char *yystr)
#else
static YYSIZE_T
yystrlen (yystr)
const char *yystr;
#endif
{
YYSIZE_T yylen;
for (yylen = 0; yystr[yylen]; yylen++)
continue;
return yylen;
}
# endif
# endif
# ifndef yystpcpy
# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
# define yystpcpy stpcpy
# else
/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
YYDEST. */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static char *
yystpcpy (char *yydest, const char *yysrc)
#else
static char *
yystpcpy (yydest, yysrc)
char *yydest;
const char *yysrc;
#endif
{
char *yyd = yydest;
const char *yys = yysrc;
while ((*yyd++ = *yys++) != '\0')
continue;
return yyd - 1;
}
# endif
# endif
# ifndef yytnamerr
/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
quotes and backslashes, so that it's suitable for yyerror. The
heuristic is that double-quoting is unnecessary unless the string
contains an apostrophe, a comma, or backslash (other than
backslash-backslash). YYSTR is taken from yytname. If YYRES is
null, do not copy; instead, return the length of what the result
would have been. */
static YYSIZE_T
yytnamerr (char *yyres, const char *yystr)
{
if (*yystr == '"')
{
YYSIZE_T yyn = 0;
char const *yyp = yystr;
for (;;)
switch (*++yyp)
{
case '\'':
case ',':
goto do_not_strip_quotes;
case '\\':
if (*++yyp != '\\')
goto do_not_strip_quotes;
/* Fall through. */
default:
if (yyres)
yyres[yyn] = *yyp;
yyn++;
break;
case '"':
if (yyres)
yyres[yyn] = '\0';
return yyn;
}
do_not_strip_quotes: ;
}
if (! yyres)
return yystrlen (yystr);
return yystpcpy (yyres, yystr) - yyres;
}
# endif
/* Copy into YYRESULT an error message about the unexpected token
YYCHAR while in state YYSTATE. Return the number of bytes copied,
including the terminating null byte. If YYRESULT is null, do not
copy anything; just return the number of bytes that would be
copied. As a special case, return 0 if an ordinary "syntax error"
message will do. Return YYSIZE_MAXIMUM if overflow occurs during
size calculation. */
static YYSIZE_T
yysyntax_error (char *yyresult, int yystate, int yychar)
{
int yyn = yypact[yystate];
if (! (YYPACT_NINF < yyn && yyn <= YYLAST))
return 0;
else
{
int yytype = YYTRANSLATE (yychar);
YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
YYSIZE_T yysize = yysize0;
YYSIZE_T yysize1;
int yysize_overflow = 0;
enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
int yyx;
# if 0
/* This is so xgettext sees the translatable formats that are
constructed on the fly. */
YY_("syntax error, unexpected %s");
YY_("syntax error, unexpected %s, expecting %s");
YY_("syntax error, unexpected %s, expecting %s or %s");
YY_("syntax error, unexpected %s, expecting %s or %s or %s");
YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
# endif
char *yyfmt;
char const *yyf;
static char const yyunexpected[] = "syntax error, unexpected %s";
static char const yyexpecting[] = ", expecting %s";
static char const yyor[] = " or %s";
char yyformat[sizeof yyunexpected
+ sizeof yyexpecting - 1
+ ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
* (sizeof yyor - 1))];
char const *yyprefix = yyexpecting;
/* Start YYX at -YYN if negative to avoid negative indexes in
YYCHECK. */
int yyxbegin = yyn < 0 ? -yyn : 0;
/* Stay within bounds of both yycheck and yytname. */
int yychecklim = YYLAST - yyn + 1;
int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
int yycount = 1;
yyarg[0] = yytname[yytype];
yyfmt = yystpcpy (yyformat, yyunexpected);
for (yyx = yyxbegin; yyx < yyxend; ++yyx)
if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
{
if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
{
yycount = 1;
yysize = yysize0;
yyformat[sizeof yyunexpected - 1] = '\0';
break;
}
yyarg[yycount++] = yytname[yyx];
yysize1 = yysize + yytnamerr (0, yytname[yyx]);
yysize_overflow |= (yysize1 < yysize);
yysize = yysize1;
yyfmt = yystpcpy (yyfmt, yyprefix);
yyprefix = yyor;
}
yyf = YY_(yyformat);
yysize1 = yysize + yystrlen (yyf);
yysize_overflow |= (yysize1 < yysize);
yysize = yysize1;
if (yysize_overflow)
return YYSIZE_MAXIMUM;
if (yyresult)
{
/* Avoid sprintf, as that infringes on the user's name space.
Don't have undefined behavior even if the translation
produced a string with the wrong number of "%s"s. */
char *yyp = yyresult;
int yyi = 0;
while ((*yyp = *yyf) != '\0')
{
if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
{
yyp += yytnamerr (yyp, yyarg[yyi++]);
yyf += 2;
}
else
{
yyp++;
yyf++;
}
}
}
return yysize;
}
}
#endif /* YYERROR_VERBOSE */
/*-----------------------------------------------.
| Release the memory associated to this symbol. |
`-----------------------------------------------*/
/*ARGSUSED*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
#else
static void
yydestruct (yymsg, yytype, yyvaluep)
const char *yymsg;
int yytype;
YYSTYPE *yyvaluep;
#endif
{
YYUSE (yyvaluep);
if (!yymsg)
yymsg = "Deleting";
YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
switch (yytype)
{
default:
break;
}
}
/* Prevent warnings from -Wmissing-prototypes. */
#ifdef YYPARSE_PARAM
#if defined __STDC__ || defined __cplusplus
int yyparse (void *YYPARSE_PARAM);
#else
int yyparse ();
#endif
#else /* ! YYPARSE_PARAM */
#if defined __STDC__ || defined __cplusplus
int yyparse (void);
#else
int yyparse ();
#endif
#endif /* ! YYPARSE_PARAM */
/* The lookahead symbol. */
int yychar;
/* The semantic value of the lookahead symbol. */
YYSTYPE yylval;
/* Number of syntax errors so far. */
int yynerrs;
/*-------------------------.
| yyparse or yypush_parse. |
`-------------------------*/
#ifdef YYPARSE_PARAM
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
int
yyparse (void *YYPARSE_PARAM)
#else
int
yyparse (YYPARSE_PARAM)
void *YYPARSE_PARAM;
#endif
#else /* ! YYPARSE_PARAM */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
int
yyparse (void)
#else
int
yyparse ()
#endif
#endif
{
int yystate;
/* Number of tokens to shift before error messages enabled. */
int yyerrstatus;
/* The stacks and their tools:
`yyss': related to states.
`yyvs': related to semantic values.
Refer to the stacks thru separate pointers, to allow yyoverflow
to reallocate them elsewhere. */
/* The state stack. */
yytype_int16 yyssa[YYINITDEPTH];
yytype_int16 *yyss;
yytype_int16 *yyssp;
/* The semantic value stack. */
YYSTYPE yyvsa[YYINITDEPTH];
YYSTYPE *yyvs;
YYSTYPE *yyvsp;
YYSIZE_T yystacksize;
int yyn;
int yyresult;
/* Lookahead token as an internal (translated) token number. */
int yytoken;
/* The variables used to return semantic value and location from the
action routines. */
YYSTYPE yyval;
#if YYERROR_VERBOSE
/* Buffer for error messages, and its allocated size. */
char yymsgbuf[128];
char *yymsg = yymsgbuf;
YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
#endif
#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
/* The number of symbols on the RHS of the reduced rule.
Keep to zero when no symbol should be popped. */
int yylen = 0;
yytoken = 0;
yyss = yyssa;
yyvs = yyvsa;
yystacksize = YYINITDEPTH;
YYDPRINTF ((stderr, "Starting parse\n"));
yystate = 0;
yyerrstatus = 0;
yynerrs = 0;
yychar = YYEMPTY; /* Cause a token to be read. */
/* Initialize stack pointers.
Waste one element of value and location stack
so that they stay on the same level as the state stack.
The wasted elements are never initialized. */
yyssp = yyss;
yyvsp = yyvs;
goto yysetstate;
/*------------------------------------------------------------.
| yynewstate -- Push a new state, which is found in yystate. |
`------------------------------------------------------------*/
yynewstate:
/* In all cases, when you get here, the value and location stacks
have just been pushed. So pushing a state here evens the stacks. */
yyssp++;
yysetstate:
*yyssp = yystate;
if (yyss + yystacksize - 1 <= yyssp)
{
/* Get the current used size of the three stacks, in elements. */
YYSIZE_T yysize = yyssp - yyss + 1;
#ifdef yyoverflow
{
/* Give user a chance to reallocate the stack. Use copies of
these so that the &'s don't force the real ones into
memory. */
YYSTYPE *yyvs1 = yyvs;
yytype_int16 *yyss1 = yyss;
/* Each stack pointer address is followed by the size of the
data in use in that stack, in bytes. This used to be a
conditional around just the two extra args, but that might
be undefined if yyoverflow is a macro. */
yyoverflow (YY_("memory exhausted"),
&yyss1, yysize * sizeof (*yyssp),
&yyvs1, yysize * sizeof (*yyvsp),
&yystacksize);
yyss = yyss1;
yyvs = yyvs1;
}
#else /* no yyoverflow */
# ifndef YYSTACK_RELOCATE
goto yyexhaustedlab;
# else
/* Extend the stack our own way. */
if (YYMAXDEPTH <= yystacksize)
goto yyexhaustedlab;
yystacksize *= 2;
if (YYMAXDEPTH < yystacksize)
yystacksize = YYMAXDEPTH;
{
yytype_int16 *yyss1 = yyss;
union yyalloc *yyptr =
(union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
if (! yyptr)
goto yyexhaustedlab;
YYSTACK_RELOCATE (yyss_alloc, yyss);
YYSTACK_RELOCATE (yyvs_alloc, yyvs);
# undef YYSTACK_RELOCATE
if (yyss1 != yyssa)
YYSTACK_FREE (yyss1);
}
# endif
#endif /* no yyoverflow */
yyssp = yyss + yysize - 1;
yyvsp = yyvs + yysize - 1;
YYDPRINTF ((stderr, "Stack size increased to %lu\n",
(unsigned long int) yystacksize));
if (yyss + yystacksize - 1 <= yyssp)
YYABORT;
}
YYDPRINTF ((stderr, "Entering state %d\n", yystate));
if (yystate == YYFINAL)
YYACCEPT;
goto yybackup;
/*-----------.
| yybackup. |
`-----------*/
yybackup:
/* Do appropriate processing given the current state. Read a
lookahead token if we need one and don't already have one. */
/* First try to decide what to do without reference to lookahead token. */
yyn = yypact[yystate];
if (yyn == YYPACT_NINF)
goto yydefault;
/* Not known => get a lookahead token if don't already have one. */
/* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
if (yychar == YYEMPTY)
{
YYDPRINTF ((stderr, "Reading a token: "));
yychar = YYLEX;
}
if (yychar <= YYEOF)
{
yychar = yytoken = YYEOF;
YYDPRINTF ((stderr, "Now at end of input.\n"));
}
else
{
yytoken = YYTRANSLATE (yychar);
YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
}
/* If the proper action on seeing token YYTOKEN is to reduce or to
detect an error, take that action. */
yyn += yytoken;
if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
goto yydefault;
yyn = yytable[yyn];
if (yyn <= 0)
{
if (yyn == 0 || yyn == YYTABLE_NINF)
goto yyerrlab;
yyn = -yyn;
goto yyreduce;
}
/* Count tokens shifted since error; after three, turn off error
status. */
if (yyerrstatus)
yyerrstatus--;
/* Shift the lookahead token. */
YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
/* Discard the shifted token. */
yychar = YYEMPTY;
yystate = yyn;
*++yyvsp = yylval;
goto yynewstate;
/*-----------------------------------------------------------.
| yydefault -- do the default action for the current state. |
`-----------------------------------------------------------*/
yydefault:
yyn = yydefact[yystate];
if (yyn == 0)
goto yyerrlab;
goto yyreduce;
/*-----------------------------.
| yyreduce -- Do a reduction. |
`-----------------------------*/
yyreduce:
/* yyn is the number of a rule to reduce with. */
yylen = yyr2[yyn];
/* If YYLEN is nonzero, implement the default value of the action:
`$$ = $1'.
Otherwise, the following line sets YYVAL to garbage.
This behavior is undocumented and Bison
users should not rely upon it. Assigning to YYVAL
unconditionally makes the parser a bit smaller, and it avoids a
GCC warning that YYVAL may be used uninitialized. */
yyval = yyvsp[1-yylen];
YY_REDUCE_PRINT (yyn);
switch (yyn)
{
case 4:
/* Line 1455 of yacc.c */
#line 142 "bison.y"
{ emit("STMT"); ;}
break;
case 5:
/* Line 1455 of yacc.c */
#line 146 "bison.y"
{ emit_select((yyvsp[(1) - (7)].strval), (yyvsp[(6) - (7)].strval), (yyvsp[(7) - (7)].intval)); ;}
break;
case 6:
/* Line 1455 of yacc.c */
#line 148 "bison.y"
{ emit_load((yyvsp[(1) - (12)].strval), (yyvsp[(4) - (12)].strval), (yyvsp[(11) - (12)].intval), (yyvsp[(7) - (12)].strval)); ;}
break;
case 7:
/* Line 1455 of yacc.c */
#line 150 "bison.y"
{ emit_load_binary((yyvsp[(1) - (9)].strval), (yyvsp[(4) - (9)].strval), (yyvsp[(8) - (9)].intval)); ;}
break;
case 8:
/* Line 1455 of yacc.c */
#line 152 "bison.y"
{ emit_filter((yyvsp[(1) - (5)].strval), (yyvsp[(4) - (5)].strval), (yyvsp[(5) - (5)].intval));;}
break;
case 9:
/* Line 1455 of yacc.c */
#line 154 "bison.y"
{ emit_order((yyvsp[(1) - (6)].strval), (yyvsp[(4) - (6)].strval), (yyvsp[(6) - (6)].intval));;}
break;
case 10:
/* Line 1455 of yacc.c */
#line 156 "bison.y"
{ emit_join((yyvsp[(1) - (7)].strval),(yyvsp[(6) - (7)].strval)); ;}
break;
case 11:
/* Line 1455 of yacc.c */
#line 158 "bison.y"
{ emit_store((yyvsp[(2) - (9)].strval),(yyvsp[(4) - (9)].strval),(yyvsp[(7) - (9)].strval)); ;}
break;
case 12:
/* Line 1455 of yacc.c */
#line 160 "bison.y"
{ emit_store_binary((yyvsp[(2) - (6)].strval),(yyvsp[(4) - (6)].strval)); ;}
break;
case 13:
/* Line 1455 of yacc.c */
#line 164 "bison.y"
{ emit_name((yyvsp[(1) - (1)].strval)); ;}
break;
case 14:
/* Line 1455 of yacc.c */
#line 165 "bison.y"
{ emit("FIELDNAME %s.%s", (yyvsp[(1) - (3)].strval), (yyvsp[(3) - (3)].strval)); ;}
break;
case 15:
/* Line 1455 of yacc.c */
#line 166 "bison.y"
{ emit("USERVAR %s", (yyvsp[(1) - (1)].strval)); ;}
break;
case 16:
/* Line 1455 of yacc.c */
#line 167 "bison.y"
{ emit_string((yyvsp[(1) - (1)].strval)); ;}
break;
case 17:
/* Line 1455 of yacc.c */
#line 168 "bison.y"
{ emit_number((yyvsp[(1) - (1)].intval)); ;}
break;
case 18:
/* Line 1455 of yacc.c */
#line 169 "bison.y"
{ emit_float((yyvsp[(1) - (1)].floatval)); ;}
break;
case 19:
/* Line 1455 of yacc.c */
#line 170 "bison.y"
{ emit_decimal((yyvsp[(1) - (1)].intval)); ;}
break;
case 20:
/* Line 1455 of yacc.c */
#line 171 "bison.y"
{ emit("BOOL %d", (yyvsp[(1) - (1)].intval)); ;}
break;
case 21:
/* Line 1455 of yacc.c */
#line 172 "bison.y"
{ emit_varchar((yyvsp[(1) - (9)].strval), (yyvsp[(3) - (9)].intval), (yyvsp[(6) - (9)].strval), (yyvsp[(8) - (9)].intval));;}
break;
case 22:
/* Line 1455 of yacc.c */
#line 173 "bison.y"
{ emit_var((yyvsp[(1) - (6)].strval), (yyvsp[(3) - (6)].intval), (yyvsp[(6) - (6)].strval));;}
break;
case 23:
/* Line 1455 of yacc.c */
#line 174 "bison.y"
{ emit_var_asc((yyvsp[(1) - (2)].strval));;}
break;
case 24:
/* Line 1455 of yacc.c */
#line 175 "bison.y"
{ emit_var_desc((yyvsp[(1) - (2)].strval));;}
break;
case 25:
/* Line 1455 of yacc.c */
#line 176 "bison.y"
{ emit_count(); ;}
break;
case 26:
/* Line 1455 of yacc.c */
#line 177 "bison.y"
{ emit_sum(); ;}
break;
case 27:
/* Line 1455 of yacc.c */
#line 178 "bison.y"
{ emit_average(); ;}
break;
case 28:
/* Line 1455 of yacc.c */
#line 179 "bison.y"
{ emit_min(); ;}
break;
case 29:
/* Line 1455 of yacc.c */
#line 180 "bison.y"
{ emit_max(); ;}
break;
case 30:
/* Line 1455 of yacc.c */
#line 184 "bison.y"
{ emit_add(); ;}
break;
case 31:
/* Line 1455 of yacc.c */
#line 185 "bison.y"
{ emit_minus(); ;}
break;
case 32:
/* Line 1455 of yacc.c */
#line 186 "bison.y"
{ emit_mul(); ;}
break;
case 33:
/* Line 1455 of yacc.c */
#line 187 "bison.y"
{ emit_div(); ;}
break;
case 34:
/* Line 1455 of yacc.c */
#line 188 "bison.y"
{ emit("MOD"); ;}
break;
case 35:
/* Line 1455 of yacc.c */
#line 189 "bison.y"
{ emit("MOD"); ;}
break;
case 36:
/* Line 1455 of yacc.c */
#line 191 "bison.y"
{ emit_and(); ;}
break;
case 37:
/* Line 1455 of yacc.c */
#line 192 "bison.y"
{ emit_eq(); ;}
break;
case 38:
/* Line 1455 of yacc.c */
#line 193 "bison.y"
{ emit_or(); ;}
break;
case 39:
/* Line 1455 of yacc.c */
#line 194 "bison.y"
{ emit("XOR"); ;}
break;
case 40:
/* Line 1455 of yacc.c */
#line 195 "bison.y"
{ emit("SHIFT %s", (yyvsp[(2) - (3)].subtok)==1?"left":"right"); ;}
break;
case 41:
/* Line 1455 of yacc.c */
#line 196 "bison.y"
{ emit("NOT"); ;}
break;
case 42:
/* Line 1455 of yacc.c */
#line 197 "bison.y"
{ emit("NOT"); ;}
break;
case 43:
/* Line 1455 of yacc.c */
#line 198 "bison.y"
{ emit_cmp((yyvsp[(2) - (3)].subtok)); ;}
break;
case 44:
/* Line 1455 of yacc.c */
#line 200 "bison.y"
{ emit("CMPSELECT %d", (yyvsp[(2) - (5)].subtok)); ;}
break;
case 45:
/* Line 1455 of yacc.c */
#line 201 "bison.y"
{emit("EXPR");;}
break;
case 46:
/* Line 1455 of yacc.c */
#line 205 "bison.y"
{ emit("ISBOOL %d", (yyvsp[(3) - (3)].intval)); ;}
break;
case 47:
/* Line 1455 of yacc.c */
#line 206 "bison.y"
{ emit("ISBOOL %d", (yyvsp[(4) - (4)].intval)); emit("NOT"); ;}
break;
case 48:
/* Line 1455 of yacc.c */
#line 209 "bison.y"
{ /* nil */
(yyval.intval) = 0;
;}
break;
case 49:
/* Line 1455 of yacc.c */
#line 212 "bison.y"
{ (yyval.intval) = (yyvsp[(3) - (3)].intval);}
break;
case 50:
/* Line 1455 of yacc.c */
#line 216 "bison.y"
{ (yyval.intval) = 1; emit_sel_name((yyvsp[(3) - (3)].strval));;}
break;
case 51:
/* Line 1455 of yacc.c */
#line 217 "bison.y"
{ (yyval.intval) = (yyvsp[(1) - (5)].intval) + 1; emit_sel_name((yyvsp[(5) - (5)].strval));;}
break;
case 52:
/* Line 1455 of yacc.c */
#line 221 "bison.y"
{ (yyval.intval) = 1; ;}
break;
case 53:
/* Line 1455 of yacc.c */
#line 222 "bison.y"
{(yyval.intval) = (yyvsp[(1) - (3)].intval) + 1; ;}
break;
case 54:
/* Line 1455 of yacc.c */
#line 226 "bison.y"
{ (yyval.intval) = 1; ;}
break;
case 55:
/* Line 1455 of yacc.c */
#line 227 "bison.y"
{ (yyval.intval) = 1 + (yyvsp[(3) - (3)].intval); ;}
break;
case 56:
/* Line 1455 of yacc.c */
#line 230 "bison.y"
{ /* nil */
(yyval.intval) = 0
;}
break;
case 58:
/* Line 1455 of yacc.c */
#line 235 "bison.y"
{ emit("FILTER BY"); ;}
break;
case 59:
/* Line 1455 of yacc.c */
#line 238 "bison.y"
{ (yyval.intval) = 1; emit_join_tab((yyvsp[(2) - (4)].strval));;}
break;
case 60:
/* Line 1455 of yacc.c */
#line 239 "bison.y"
{ (yyval.intval) = 1; emit_join_tab((yyvsp[(2) - (5)].strval)); ;}
break;
case 61:
/* Line 1455 of yacc.c */
#line 241 "bison.y"
{ /* nil */
(yyval.intval) = 0
;}
break;
case 62:
/* Line 1455 of yacc.c */
#line 244 "bison.y"
{ emit_limit((yyvsp[(2) - (2)].intval)); ;}
break;
/* Line 1455 of yacc.c */
#line 2023 "bison.cu"
default: break;
}
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
*++yyvsp = yyval;
/* Now `shift' the result of the reduction. Determine what state
that goes to, based on the state we popped back to and the rule
number reduced by. */
yyn = yyr1[yyn];
yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
yystate = yytable[yystate];
else
yystate = yydefgoto[yyn - YYNTOKENS];
goto yynewstate;
/*------------------------------------.
| yyerrlab -- here on detecting error |
`------------------------------------*/
yyerrlab:
/* If not already recovering from an error, report this error. */
if (!yyerrstatus)
{
++yynerrs;
#if ! YYERROR_VERBOSE
yyerror (YY_("syntax error"));
#else
{
YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
{
YYSIZE_T yyalloc = 2 * yysize;
if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
yyalloc = YYSTACK_ALLOC_MAXIMUM;
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
yymsg = (char *) YYSTACK_ALLOC (yyalloc);
if (yymsg)
yymsg_alloc = yyalloc;
else
{
yymsg = yymsgbuf;
yymsg_alloc = sizeof yymsgbuf;
}
}
if (0 < yysize && yysize <= yymsg_alloc)
{
(void) yysyntax_error (yymsg, yystate, yychar);
yyerror (yymsg);
}
else
{
yyerror (YY_("syntax error"));
if (yysize != 0)
goto yyexhaustedlab;
}
}
#endif
}
if (yyerrstatus == 3)
{
/* If just tried and failed to reuse lookahead token after an
error, discard it. */
if (yychar <= YYEOF)
{
/* Return failure if at end of input. */
if (yychar == YYEOF)
YYABORT;
}
else
{
yydestruct ("Error: discarding",
yytoken, &yylval);
yychar = YYEMPTY;
}
}
/* Else will try to reuse lookahead token after shifting the error
token. */
goto yyerrlab1;
/*---------------------------------------------------.
| yyerrorlab -- error raised explicitly by YYERROR. |
`---------------------------------------------------*/
yyerrorlab:
/* Pacify compilers like GCC when the user code never invokes
YYERROR and the label yyerrorlab therefore never appears in user
code. */
if (/*CONSTCOND*/ 0)
goto yyerrorlab;
/* Do not reclaim the symbols of the rule which action triggered
this YYERROR. */
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
yystate = *yyssp;
goto yyerrlab1;
/*-------------------------------------------------------------.
| yyerrlab1 -- common code for both syntax error and YYERROR. |
`-------------------------------------------------------------*/
yyerrlab1:
yyerrstatus = 3; /* Each real token shifted decrements this. */
for (;;)
{
yyn = yypact[yystate];
if (yyn != YYPACT_NINF)
{
yyn += YYTERROR;
if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
{
yyn = yytable[yyn];
if (0 < yyn)
break;
}
}
/* Pop the current state because it cannot handle the error token. */
if (yyssp == yyss)
YYABORT;
yydestruct ("Error: popping",
yystos[yystate], yyvsp);
YYPOPSTACK (1);
yystate = *yyssp;
YY_STACK_PRINT (yyss, yyssp);
}
*++yyvsp = yylval;
/* Shift the error token. */
YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
yystate = yyn;
goto yynewstate;
/*-------------------------------------.
| yyacceptlab -- YYACCEPT comes here. |
`-------------------------------------*/
yyacceptlab:
yyresult = 0;
goto yyreturn;
/*-----------------------------------.
| yyabortlab -- YYABORT comes here. |
`-----------------------------------*/
yyabortlab:
yyresult = 1;
goto yyreturn;
#if !defined(yyoverflow) || YYERROR_VERBOSE
/*-------------------------------------------------.
| yyexhaustedlab -- memory exhaustion comes here. |
`-------------------------------------------------*/
yyexhaustedlab:
yyerror (YY_("memory exhausted"));
yyresult = 2;
/* Fall through. */
#endif
yyreturn:
if (yychar != YYEMPTY)
yydestruct ("Cleanup: discarding lookahead",
yytoken, &yylval);
/* Do not reclaim the symbols of the rule which action triggered
this YYABORT or YYACCEPT. */
YYPOPSTACK (yylen);
YY_STACK_PRINT (yyss, yyssp);
while (yyssp != yyss)
{
yydestruct ("Cleanup: popping",
yystos[*yyssp], yyvsp);
YYPOPSTACK (1);
}
#ifndef yyoverflow
if (yyss != yyssa)
YYSTACK_FREE (yyss);
#endif
#if YYERROR_VERBOSE
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
#endif
/* Make sure YYID is used. */
return YYID (yyresult);
}
/* Line 1675 of yacc.c */
#line 247 "bison.y"
#include "filter.hip"
#include "select.cu"
#include "merge.cu"
#include "zone_map.cu"
FILE *file_pointer;
queue<string> namevars;
queue<string> typevars;
queue<int> sizevars;
queue<int> cols;
queue<string> op_type;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<unsigned int> j_col_count;
unsigned int sel_count = 0;
unsigned int join_cnt = 0;
int join_col_cnt = 0;
unsigned int eqq = 0;
stack<string> op_join;
unsigned int orig_recCount;
unsigned int statement_count = 0;
map<string,unsigned int> stat;
bool scan_state = 0;
string separator, f_file;
CUDPPHandle theCudpp;
using namespace thrust::placeholders;
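// The emit_* callbacks below are invoked from the grammar actions in bison.y. The small ones
// simply push operands and operators onto the op_* queues declared above; the statement-level
// ones (emit_select, emit_filter, emit_join, ...) consume those queues and either record
// variable usage on the scan pass (scan_state == 0) or execute the operation on the GPU.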
void emit_name(char *name)
{
op_type.push("NAME");
op_value.push(name);
}
void emit_limit(int val)
{
op_nums.push(val);
}
void emit_string(char *str)
{ // strip the enclosing quotes from the string literal
string sss(str,1, strlen(str)-2);
op_type.push("STRING");
op_value.push(sss);
}
void emit_number(int_type val)
{
op_type.push("NUMBER");
op_nums.push(val);
}
void emit_float(float_type val)
{
op_type.push("FLOAT");
op_nums_f.push(val);
}
void emit_decimal(float_type val)
{
op_type.push("DECIMAL");
op_nums_f.push(val);
}
void emit_mul()
{
op_type.push("MUL");
}
void emit_add()
{
op_type.push("ADD");
}
void emit_div()
{
op_type.push("DIV");
}
void emit_and()
{
op_type.push("AND");
if (join_col_cnt == -1)
join_col_cnt++;
join_col_cnt++;
eqq = 0;
}
void emit_eq()
{
//op_type.push("JOIN");
eqq++;
join_cnt++;
if(eqq == join_col_cnt+1) {
j_col_count.push(join_col_cnt+1);
join_col_cnt = -1;
}
else if (join_col_cnt == -1 )
j_col_count.push(1);
}
void emit_or()
{
op_type.push("OR");
}
void emit_minus()
{
op_type.push("MINUS");
}
void emit_cmp(int val)
{
op_type.push("CMP");
op_nums.push(val);
}
void emit(char *s, ...)
{
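// no-op: generic emit() calls from the grammar actions are ignored by the execution engine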
}
void emit_var(char *s, int c, char *f)
{
namevars.push(s);
typevars.push(f);
sizevars.push(0);
cols.push(c);
}
void emit_var_asc(char *s)
{
op_type.push(s);
op_value.push("ASC");
}
void emit_var_desc(char *s)
{
op_type.push(s);
op_value.push("DESC");
}
void emit_varchar(char *s, int c, char *f, int d)
{
namevars.push(s);
typevars.push(f);
sizevars.push(d);
cols.push(c);
}
void emit_sel_name(char *s)
{
op_type.push("emit sel_name");
op_value.push(s);
sel_count++;
}
void emit_count()
{
op_type.push("COUNT");
}
void emit_sum()
{
op_type.push("SUM");
}
void emit_average()
{
op_type.push("AVG");
}
void emit_min()
{
op_type.push("MIN");
}
void emit_max()
{
op_type.push("MAX");
}
void emit_join_tab(char *s)
{
op_join.push(s);
};
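// Sorts the rows of CudaSet a by the columns in exe_type (ascending): builds a permutation
// vector on the device, applies it to every column listed in field_names, and returns the
// permutation so the caller can release it.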
thrust::device_ptr<unsigned int> order_inplace(CudaSet* a, stack<string> exe_type, set<string> field_names, unsigned int segment)
{
unsigned int sz = a->mRecCount;
thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(sz);
thrust::sequence(permutation, permutation+sz,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
void* temp;
// find the largest mRecSize of all data sources
unsigned int maxSize = 0;
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
CudaSet *t = varNames[setMap[*it]];
//cout << "MAX of " << setMap[*it] << " = " << t->mRecCount << endl;
if(t->mRecCount > maxSize)
maxSize = t->mRecCount;
};
//cout << "max size " << maxSize << endl;
//cout << "sort alloc " << maxSize << endl;
//cout << "order mem " << getFreeMem() << endl;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, maxSize*float_size));
for(int i=0; !exe_type.empty(); ++i, exe_type.pop()) {
int colInd = (a->columnNames).find(exe_type.top())->second;
if ((a->type)[colInd] == 0)
update_permutation(a->d_columns_int[a->type_index[colInd]], raw_ptr, sz, "ASC", (int_type*)temp);
else if ((a->type)[colInd] == 1)
update_permutation(a->d_columns_float[a->type_index[colInd]], raw_ptr, sz,"ASC", (float_type*)temp);
else {
CudaChar* c = a->h_columns_cuda_char[a->type_index[colInd]];
for(int j=(c->mColumnCount)-1; j>=0 ; j--)
update_permutation((c->d_columns)[j], raw_ptr, sz, "ASC", (char*)temp);
};
};
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
int i = a->columnNames[*it];
if ((a->type)[i] == 0)
apply_permutation(a->d_columns_int[a->type_index[i]], raw_ptr, sz, (int_type*)temp);
else if ((a->type)[i] == 1)
apply_permutation(a->d_columns_float[a->type_index[i]], raw_ptr, sz, (float_type*)temp);
else {
CudaChar* c = a->h_columns_cuda_char[a->type_index[i]];
for(int j=(c->mColumnCount)-1; j>=0 ; j--)
apply_permutation((c->d_columns)[j], raw_ptr, sz, (char*)temp);
};
};
hipFree(temp);
return permutation;
}
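// JOIN execution. The right-hand join column is loaded into a CUDPP multivalue hash table and
// probed with the left-hand column; both key columns travel through the mapped host buffers
// A and B allocated below. The matching row numbers (d_res1/d_res2) are then used to build the
// permutation (prm) vectors of the joined set instead of materialising the joined columns.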
void emit_join(char *s, char *j1)
{
string j2 = op_join.top();
op_join.pop();
statement_count++;
if (scan_state == 0) {
if (stat.find(j1) == stat.end()) {
cout << "Join : couldn't find variable " << j1 << endl;
exit(1);
};
if (stat.find(j2) == stat.end()) {
cout << "Join : couldn't find variable " << j2 << endl;
exit(1);
};
stat[s] = statement_count;
stat[j1] = statement_count;
stat[j2] = statement_count;
return;
};
if(varNames.find(j1) == varNames.end() || varNames.find(j2) == varNames.end()) {
clean_queues();
return;
};
CudaSet* left = varNames.find(j1)->second;
CudaSet* right = varNames.find(j2)->second;
queue<string> op_sel;
queue<string> op_sel_as;
for(int i=0; i < sel_count; i++) {
op_sel.push(op_value.front());
op_value.pop();
op_sel_as.push(op_value.front());
op_value.pop();
};
string f1 = op_value.front();
op_value.pop();
string f2 = op_value.front();
op_value.pop();
cout << "JOIN " << s << " " << getFreeMem() << endl;
std::clock_t start1 = std::clock();
CudaSet* c;
if (left->mRecCount == 0 || right->mRecCount == 0) {
c = new CudaSet(left,right,0, op_sel, op_sel_as);
if (left->fact_table == 1 || right->fact_table == 1)
c->fact_table = 1;
varNames[s] = c;
clean_queues();
return;
};
unsigned int colInd1 = (left->columnNames).find(f1)->second;
unsigned int colInd2 = (right->columnNames).find(f2)->second;
set<string> field_names;
stack<string> exe_type;
exe_type.push(f2);
field_names.insert(f2);
unsigned int *A, *B;
uint2* R;
unsigned int *devPtrA, *devPtrB;
uint2* *devPtrR;
size_t memsize;
unsigned int rcount = 0;
if(!left->prm.empty())
for(int i = 0; i < left->segCount; i ++) {
rcount = rcount + left->prm_count[setMap[f1]][i];
}
else
rcount = varNames[setMap[f1]]->mRecCount;
memsize = rcount * sizeof(unsigned int);
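// A, B and R are page-aligned host buffers registered as mapped (zero-copy) memory so the GPU
// can read and write them directly: A receives the gathered left join keys, B the right join
// keys, and R the uint2 results of the CUDPP hash-table probe for each left key.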
#ifdef _WIN64
A = (unsigned int*) VirtualAlloc(NULL, memsize, MEM_COMMIT, PAGE_READWRITE);
#else
A = (unsigned int*) valloc(memsize);
#endif
hipError_t err = hipHostRegister(A, memsize, hipHostRegisterMapped);
if (hipSuccess != err)
cout << hipGetErrorString( err ) << endl;
err = hipHostGetDevicePointer((void **) &devPtrA, (void *) A, 0);
if (hipSuccess != err)
cout << hipGetErrorString( err ) << endl;
thrust::device_ptr<unsigned int> ll((unsigned int*)devPtrA);
rcount = 0;
if(!right->prm.empty())
for(int i = 0; i < right->segCount; i ++) {
rcount = rcount + right->prm_count[setMap[f2]][i];
}
else
rcount = varNames[setMap[f2]]->mRecCount;
memsize = rcount * sizeof(unsigned int); // does not have to be aligned anymore
#ifdef _WIN64
B = (unsigned int*) VirtualAlloc(NULL, memsize, MEM_COMMIT, PAGE_READWRITE);
#else
B = (unsigned int*) valloc(memsize);
#endif
err = hipHostRegister(B, memsize, hipHostRegisterMapped);
if (hipSuccess != err)
cout << hipGetErrorString( err ) << endl;
err = hipHostGetDevicePointer((void **) &devPtrB, (void *) B, 0);
if (hipSuccess != err)
cout << hipGetErrorString( err ) << endl;
thrust::device_ptr<unsigned int> rr((unsigned int*)devPtrB);
cout << "A and B regged " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
queue<string> cc;
cc.push(f2);
unsigned int cnt_r = 0;
varNames[setMap[f2]]->oldRecCount = varNames[setMap[f2]]->mRecCount;
allocColumns(right, cc);
for(int i = 0; i < right->segCount; i++) {
// copy every segment to gpu then copy to host using host mapped memory
copyGatherJoin(right, rr, cc.front(), i, cnt_r);
};
//here we need to make sure that rr is ordered. If not then we order it and keep the permutation
bool sorted = thrust::is_sorted(rr,rr+cnt_r);
thrust::device_vector<unsigned int> v(cnt_r);
thrust::sequence(v.begin(),v.end(),0,1);
if(!sorted) {
thrust::sort_by_key(rr,rr+cnt_r, v.begin());
};
varNames[setMap[f2]]->mRecCount = varNames[setMap[f2]]->oldRecCount;
cc.pop();
cc.push(f1);
unsigned int cnt_l = 0;
varNames[setMap[f1]]->oldRecCount = varNames[setMap[f1]]->mRecCount;
allocColumns(left, cc);
for(int i = 0; i < left->segCount; i++) {
// copy every segment to gpu then copy to host using host mapped memory
copyGatherJoin(left, ll, cc.front(), i, cnt_l);
};
varNames[setMap[f1]]->mRecCount = varNames[setMap[f1]]->oldRecCount;
cout << "successfully loaded l && r " << cnt_l << " " << cnt_r << " " << getFreeMem() << endl;
thrust::device_vector<unsigned int> d_res1;
thrust::device_vector<unsigned int> d_res2;
std::clock_t start2 = std::clock();
memsize = cnt_l * sizeof(uint2);
#ifdef _WIN64
R = (uint2*) VirtualAlloc(NULL, memsize, MEM_COMMIT, PAGE_READWRITE);
#else
R = (uint2*) valloc(memsize);
#endif
err = hipHostRegister(R, memsize, hipHostRegisterMapped);
if (hipSuccess != err)
cout << hipGetErrorString( err ) << endl;
err = hipHostGetDevicePointer((void **) &devPtrR, (void *) R, 0);
if (hipSuccess != err)
cout << hipGetErrorString( err ) << endl;
thrust::device_ptr<uint2> res((uint2*)devPtrR);
std::cout<< "join reg time " << ( ( std::clock() - start2 ) / (double)CLOCKS_PER_SEC ) <<'\n';
std::clock_t start3 = std::clock();
//thrust::device_vector<uint2> res(left->mRecCount);
if ((left->type)[colInd1] == 0 && (right->type)[colInd2] == 0) {
CUDPPHandle hash_table_handle;
//cout << "creating hash table " << cnt_r << endl;
CUDPPHashTableConfig config;
config.type = CUDPP_MULTIVALUE_HASH_TABLE;
//config.kInputSize = right->mRecCount;
config.kInputSize = 150000000;
config.space_usage = 1.1f;
cout << "creating table with " << right->mRecCount << endl;
cout << "MEM " << getFreeMem() << endl;
CUDPPResult result = cudppHashTable(theCudpp, &hash_table_handle, &config);
std::cout<< "table creation time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
if (result == CUDPP_SUCCESS)
cout << "hash table created " << getFreeMem() << endl;
//exit(0); // debug leftover: exiting here would end the process before the hash insert/retrieve below ever runs
cout << "INSERT " << cnt_r << " " << right->mRecCount << " " << getFreeMem() << endl;
result = cudppHashInsert(hash_table_handle, thrust::raw_pointer_cast(rr),
thrust::raw_pointer_cast(v.data()), cnt_r);
std::cout<< "table insert time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
if (result == CUDPP_SUCCESS)
cout << "hash table inserted " << getFreeMem() << endl;
for(int i = 0; i < 30;i++)
cudppHashRetrieve(hash_table_handle, thrust::raw_pointer_cast(ll),
thrust::raw_pointer_cast(res), cnt_l);
std::cout<< "table retrieve time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
cudppDestroyHashTable(theCudpp, hash_table_handle);
std::cout<< "table destroy time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
if (result == CUDPP_SUCCESS)
cout << "hash table destroyed " << getFreeMem() << endl;
cout << "reducing " << cnt_l << endl;
std::cout<< "111 " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
uint2 rr = thrust::reduce(res, res+cnt_l, make_uint2(0,0), Uint2Sum()); // shadows the device_ptr rr above; rr.y holds the total number of join matches
std::cout<< "reduce time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
if(rr.y) {
//thrust::device_vector<unsigned int> d_r(cnt_l);
thrust::device_ptr<unsigned int> d_r = thrust::device_malloc<unsigned int>(cnt_l);
std::cout<< "table malloc time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0);
uint2_split ff(thrust::raw_pointer_cast(res),thrust::raw_pointer_cast(d_r));
thrust::for_each(begin, begin + cnt_l, ff);
std::cout<< "table split time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
thrust::exclusive_scan(d_r, d_r+cnt_l, d_r ); // addresses
std::cout<< "table scan time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
d_res1.resize(rr.y);
d_res2.resize(rr.y);
std::cout<< "table resize time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
join_functor ff1(thrust::raw_pointer_cast(res),
thrust::raw_pointer_cast(d_r),
thrust::raw_pointer_cast(d_res1.data()),
thrust::raw_pointer_cast(d_res2.data()));
thrust::for_each(begin, begin + cnt_l, ff1);
std::cout<< "table foreach time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
thrust::device_free(d_r);
};
}
hipHostUnregister(A);
hipHostUnregister(B);
hipHostUnregister(R);
std::cout<< "unregged time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
#ifdef _WIN64
VirtualFree(A, 0, MEM_RELEASE);
VirtualFree(B, 0, MEM_RELEASE);
VirtualFree(R, 0, MEM_RELEASE);
#else
free(A);
free(B);
free(R);
#endif
std::cout<< "table unreg time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
c = new CudaSet(right,left,d_res1.size(),op_sel, op_sel_as);
bool left_check;
thrust::device_vector<unsigned int> p(d_res1.size());
thrust::device_vector<unsigned int> res_tmp(left->mRecCount);
std::cout<< "bad malloc time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
//gather prm of left and right vectors
while(!op_sel.empty()) {
if(c->prm.count(setMap[op_sel.front()]) == 0) {
CudaSet *t = varNames[setMap[op_sel.front()]];
CudaSet *lr;
if(left->columnNames.find(op_sel.front()) != left->columnNames.end()) {
lr = left;
left_check = 1;
}
else {
lr = right;
left_check = 0;
};
c->prm[setMap[op_sel.front()]].push_back(new unsigned int[d_res1.size()]);
c->prm_count[setMap[op_sel.front()]].push_back(d_res1.size());
if(lr->prm.size() != 0) {
// join prm segments, add seg_num*maxRecs to values and gather the result
unsigned int curr_count = 0;
for(unsigned int i = 0; i < lr->prm[setMap[op_sel.front()]].size(); i++) {
//lr->prm_d = lr->prm[setMap[op_sel.front()]][i];
if(lr->prm_d.size() == 0) // find the largest prm segment
lr->prm_d.resize(largest_prm(lr, op_sel.front()));
unsigned int g_size = lr->prm_count[setMap[op_sel.front()]][i];
hipMemcpy((void**)(thrust::raw_pointer_cast(lr->prm_d.data())), (void**)lr->prm[setMap[op_sel.front()]][i], 4*g_size, hipMemcpyHostToDevice);
thrust::transform(lr->prm_d.begin(), lr->prm_d.begin() + g_size,
res_tmp.begin() + curr_count, _1+(i*t->maxRecs));
curr_count = curr_count + lr->prm_count[setMap[op_sel.front()]][i];
};
if(left_check)
thrust::gather(d_res1.begin(), d_res1.end(), res_tmp.begin(), p.begin());
else
thrust::gather(d_res2.begin(), d_res2.end(), res_tmp.begin(), p.begin());
hipMemcpy((void**)c->prm[setMap[op_sel.front()]][0], (void**)(thrust::raw_pointer_cast(p.data())), 4*d_res1.size(), hipMemcpyDeviceToHost);
}
else { // copy d_res2 into prm[setMap[op_sel.front()]]
if(left_check)
thrust::copy(d_res1.begin(), d_res1.end(), p.begin());
else
thrust::copy(d_res2.begin(), d_res2.end(), p.begin());
hipMemcpy((void**)c->prm[setMap[op_sel.front()]][0], (void**)(thrust::raw_pointer_cast(p.data())), 4*d_res1.size(), hipMemcpyDeviceToHost);
}
};
op_sel.pop();
};
cout << "join final end " << d_res1.size() << " " << getFreeMem() << endl;
std::cout<< "table unreg time1 " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
//right->d_res1
//left->d_res2
// modify left's and right's perm tables using d_res1 and d_res2
left->deAllocOnDevice();
right->deAllocOnDevice();
for (map<string, std::vector<unsigned int*> >::iterator it=left->prm.begin() ; it != left->prm.end(); ++it ) {
varNames[(*it).first]->deAllocOnDevice();
};
for (map<string, std::vector<unsigned int*> >::iterator it=right->prm.begin() ; it != right->prm.end(); ++it ) {
varNames[(*it).first]->deAllocOnDevice();
};
varNames[s] = c;
c->maxRecs = c->mRecCount;
c->segCount = 1;
clean_queues();
if(stat[s] == statement_count) {
c->free();
varNames.erase(s);
};
if(stat[j1] == statement_count) {
left->free();
varNames.erase(j1);
};
if(stat[j2] == statement_count && (strcmp(j1,j2.c_str()) != 0)) {
right->free();
varNames.erase(j2);
};
std::cout<< "join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
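// ORDER BY execution. Builds a device permutation over the rows of the source set (one
// update_permutation pass per sort key), then stores the permuted row numbers in the result
// set's prm vectors; the column data itself is not physically reordered here.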
void emit_order(char *s, char *f, int e, int ll)
{
if(ll == 0)
statement_count++;
if (scan_state == 0 && ll == 0) {
if (stat.find(f) == stat.end()) {
cout << "Order : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
return;
};
if(varNames.find(f) == varNames.end() ) {
clean_queues();
return;
};
CudaSet* a = varNames.find(f)->second;
if (a->mRecCount == 0) {
if(varNames.find(s) == varNames.end())
varNames[s] = new CudaSet(0,1);
else {
CudaSet* c = varNames.find(s)->second;
c->mRecCount = 0;
};
return;
};
stack<string> exe_type, exe_value;
cout << "order: " << s << " " << f << endl;
for(int i=0; !op_type.empty(); ++i, op_type.pop(),op_value.pop()) {
if ((op_type.front()).compare("NAME") == 0) {
exe_type.push(op_value.front());
exe_value.push("ASC");
}
else {
exe_type.push(op_type.front());
exe_value.push(op_value.front());
};
};
// initialize permutation to [0, 1, 2, ... ,N-1]
thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(a->mRecCount);
thrust::sequence(permutation, permutation+(a->mRecCount));
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
CudaSet *b = a->copyStruct(a->mRecCount);
b->isJoined = a->isJoined;
// find the largest mRecSize of all data sources
unsigned int maxSize = 0;
stack<string> tp(exe_type);
queue<string> op_vx;
while (!tp.empty()) {
op_vx.push(tp.top());
CudaSet *t = varNames[setMap[tp.top()]];
if(t->mRecCount > maxSize)
maxSize = t->mRecCount;
tp.pop();
};
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, maxSize*float_size));
varNames[setMap[exe_type.top()]]->oldRecCount = varNames[setMap[exe_type.top()]]->mRecCount;
allocColumns(a, op_vx);
copyColumns(a, op_vx, 0);
varNames[setMap[exe_type.top()]]->mRecCount = varNames[setMap[exe_type.top()]]->oldRecCount;
for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) {
int colInd = (a->columnNames).find(exe_type.top())->second;
if ((a->type)[colInd] == 0)
update_permutation(a->d_columns_int[a->type_index[colInd]], raw_ptr, a->mRecCount, exe_value.top(), (int_type*)temp);
else if ((a->type)[colInd] == 1)
update_permutation(a->d_columns_float[a->type_index[colInd]], raw_ptr, a->mRecCount,exe_value.top(), (float_type*)temp);
else {
CudaChar* c = a->h_columns_cuda_char[a->type_index[colInd]];
for(int j=(c->mColumnCount)-1; j>=0 ; j--)
update_permutation((c->d_columns)[j], raw_ptr, a->mRecCount, exe_value.top(), (char*)temp);
};
};
// gather a's prm to b's prm
if(a->prm.size() != 0) {
thrust::device_vector<unsigned int> p(a->mRecCount);
thrust::device_vector<unsigned int> p_a(a->mRecCount);
for ( map<string, std::vector<unsigned int*> >::iterator it=a->prm.begin() ; it != a->prm.end(); ++it ) {
b->prm[(*it).first].push_back(new unsigned int[a->mRecCount]);
b->prm_count[(*it).first].push_back(a->mRecCount);
//p_a = (*it).second[0];
hipMemcpy((void**)(thrust::raw_pointer_cast(p_a.data())), (void**)(*it).second[0], 4*a->mRecCount, hipMemcpyHostToDevice);
thrust::gather(permutation, permutation+a->mRecCount, p_a.begin(), p.begin());
//b->prm[(*it).first][0] = p;
hipMemcpy((void**)b->prm[(*it).first][0], (void**)(thrust::raw_pointer_cast(p.data())), 4*a->mRecCount, hipMemcpyDeviceToHost);
};
}
else {
thrust::device_vector<unsigned int> p(a->mRecCount);
b->prm[a->name].push_back(new unsigned int[a->mRecCount]);
b->prm_count[a->name].push_back(a->mRecCount);
thrust::copy(permutation, permutation+a->mRecCount, p.begin());
//b->prm[a->name][0] = p;
hipMemcpy((void**)b->prm[a->name][0], (void**)(thrust::raw_pointer_cast(p.data())), 4*a->mRecCount, hipMemcpyDeviceToHost);
};
b->deAllocOnDevice();
a->deAllocOnDevice();
thrust::device_free(permutation);
hipFree(temp);
varNames[s] = b;
b->segCount = 1;
if (a->fact_table == 1)
b->fact_table = 1;
else
b->fact_table = 0;
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(f);
};
}
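// SELECT execution. The referenced columns are processed segment by segment: each segment is
// copied to the GPU, optionally sorted and grouped when a GROUP BY list is present (ll != 0),
// and run through select() to evaluate the expressions; for GROUP BY queries the per-segment
// partial aggregates are accumulated with add() and combined with merge() at the end.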
void emit_select(char *s, char *f, int ll)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(f) == stat.end()) {
cout << "Select : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
return;
};
if(varNames.find(f) == varNames.end()) {
clean_queues();
return;
};
queue<string> op_v1(op_value);
while(op_v1.size() > ll)
op_v1.pop();
stack<string> op_v2;
queue<string> op_v3;
for(int i=0; i < ll; ++i) {
op_v2.push(op_v1.front());
op_v3.push(op_v1.front());
op_v1.pop();
};
CudaSet *a;
a = varNames.find(f)->second;
if(a->mRecCount == 0) {
CudaSet *c;
c = new CudaSet(0,1);
varNames[s] = c;
clean_queues();
return;
};
cout << "SELECT " << s << " " << f << endl;
std::clock_t start1 = std::clock();
// here we need to determine the column count and composition
queue<string> op_v(op_value);
queue<string> op_vx;
set<string> field_names;
map<string,string> aliases;
string tt;
for(int i=0; !op_v.empty(); ++i, op_v.pop()) {
if(a->columnNames.find(op_v.front()) != a->columnNames.end()) {
field_names.insert(op_v.front());
if(aliases.count(op_v.front()) == 0 && aliases.size() < ll) {
tt = op_v.front();
op_v.pop();
aliases[tt] = op_v.front();
};
};
};
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
op_vx.push(*it);
};
// find out how many columns a new set will have
queue<string> op_t(op_type);
int_type col_count = 0;
for(int i=0; !op_t.empty(); ++i, op_t.pop())
if((op_t.front()).compare("emit sel_name") == 0)
col_count++;
CudaSet* b, *c;
allocColumns(a, op_vx);
unsigned int cycle_count = 1;
curr_segment = 100;
if(a->prm.size() <= 1)
cycle_count = varNames[setMap[op_value.front()]]->segCount;
unsigned int ol_count = a->mRecCount;
varNames[setMap[op_value.front()]]->oldRecCount = varNames[setMap[op_value.front()]]->mRecCount;
//bck = a;
for(unsigned int i = 0; i < cycle_count; i++) { // MAIN CYCLE
cout << "cycle " << i << " select mem " << getFreeMem() << endl;
std::clock_t start2 = std::clock();
if(i == 0)
b = new CudaSet(0, col_count);
copyColumns(a, op_vx, i);
std::cout<< "cpy time " << ( ( std::clock() - start2 ) / (double)CLOCKS_PER_SEC ) <<'\n';
CudaSet *t = varNames[setMap[op_vx.front()]];
if (ll != 0) {
thrust::device_ptr<unsigned int> perm = order_inplace(a,op_v2,field_names,i);
std::cout<< "order time " << ( ( std::clock() - start2 ) / (double)CLOCKS_PER_SEC ) <<'\n';
thrust::device_free(perm);
a->GroupBy(op_v3);
std::cout<< "grp time " << ( ( std::clock() - start2 ) / (double)CLOCKS_PER_SEC ) <<'\n';
};
select(op_type,op_value,op_nums, op_nums_f,a,b, a->mRecCount);
std::cout<< "select time " << ( ( std::clock() - start2 ) / (double)CLOCKS_PER_SEC ) <<'\n';
if(i == 1) {
for ( map<string,int>::iterator it=b->columnNames.begin() ; it != b->columnNames.end(); ++it )
setMap[(*it).first] = s;
};
if (ll != 0) {
if (i == 0) {
c = new CudaSet(b->mRecCount, col_count);
c->fact_table = 1;
c->segCount = 1;
}
else {
c->resize(b->mRecCount);
};
add(c,b,op_v3);
};
};
a->mRecCount = ol_count;
varNames[setMap[op_value.front()]]->mRecCount = varNames[setMap[op_value.front()]]->oldRecCount;
if(stat[f] == statement_count) {
a->deAllocOnDevice();
};
if (ll != 0) {
CudaSet *r = merge(c,op_v3, op_v2, aliases);
c->free();
c = r;
};
c->maxRecs = c->mRecCount;
c->name = s;
c->keep = 1;
for ( map<string,int>::iterator it=c->columnNames.begin() ; it != c->columnNames.end(); ++it ) {
setMap[(*it).first] = s;
};
cout << "final select " << c->mRecCount << endl;
clean_queues();
if (ll != 0) {
varNames[s] = c;
b->free();
}
else
varNames[s] = b;
varNames[s]->keep = 1;
if(stat[s] == statement_count) {
varNames[s]->free();
varNames.erase(s);
};
if(stat[f] == statement_count && a->keep == 0) {
a->free();
varNames.erase(f);
};
std::cout<< "select time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
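// FILTER (WHERE) execution. During the scan pass the predicate is only saved for possible
// zone-map filtering; during the execution pass each segment is copied to the GPU and run
// through filter(), producing a result set that references just the qualifying rows.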
void emit_filter(char *s, char *f, int e)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(f) == stat.end()) {
cout << "Filter : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
// save the filter parameters for possible later zone map filtering
top_type[f] = op_type;
top_value[f] = op_value;
top_nums[f] = op_nums;
top_nums_f[f] = op_nums_f;
clean_queues();
return;
};
if(varNames.find(f) == varNames.end()) {
clean_queues();
return;
};
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
std::clock_t start1 = std::clock();
if(a->mRecCount == 0) {
b = new CudaSet(0,1);
}
else {
cout << "FILTER " << s << " " << f << endl;
std::clock_t start1 = std::clock();
b = a->copyDeviceStruct();
b->name = s;
b->isJoined = a->isJoined;
// if prm.size() <= 1 then process segment by segment
// else copy entire column to gpu
unsigned int cycle_count = 1;
allocColumns(a, op_value);
varNames[setMap[op_value.front()]]->oldRecCount = varNames[setMap[op_value.front()]]->mRecCount;
if(!a->isJoined)
cycle_count = varNames[setMap[op_value.front()]]->segCount;
thrust::device_vector<unsigned int> p(a->maxRecs);
curr_segment = 100;
for(unsigned int i = 0; i < cycle_count; i++) {
copyColumns(a, op_value, i);
filter(op_type,op_value,op_nums, op_nums_f,a, b, i, p);
};
varNames[setMap[op_value.front()]]->mRecCount = varNames[setMap[op_value.front()]]->oldRecCount;
cout << "filter is finished " << b->mRecCount << " " << getFreeMem() << endl;
//dealloc sources
for (map<string, std::vector<unsigned int*> >::iterator it=a->prm.begin() ; it != a->prm.end(); ++it ) {
varNames[(*it).first]->deAllocOnDevice();
};
a->deAllocOnDevice();
};
clean_queues();
if (varNames.count(s) > 0)
varNames[s]->free();
varNames[s] = b;
if(stat[s] == statement_count) {
b->free();
varNames.erase(s);
};
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(f);
};
std::cout<< "filter time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
}
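// STORE var INTO 'file' USING (separator): writes the named result set to a delimited text
// file, honouring an optional LIMIT.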
void emit_store(char *s, char *f, char* sep)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(s) == stat.end()) {
cout << "Store : couldn't find variable " << s << endl;
exit(1);
};
stat[s] = statement_count;
return;
};
if(varNames.find(s) == varNames.end())
return;
CudaSet* a = varNames.find(s)->second;
cout << "STORE: " << s << " " << f << " " << sep << endl;
int limit = 0;
if(!op_nums.empty()) {
limit = op_nums.front();
op_nums.pop();
};
a->Store(f,sep, limit, 0);
if(stat[s] == statement_count && a->keep == 0) {
a->free();
varNames.erase(s);
};
};
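// STORE var INTO 'file' BINARY: converts the text file named in the preceding LOAD statement
// to the binary column format, reading and storing it chunk by chunk until LoadBigFile
// reports that the whole file has been consumed.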
void emit_store_binary(char *s, char *f)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(s) == stat.end()) {
cout << "Store : couldn't find variable " << s << endl;
exit(1);
};
stat[s] = statement_count;
return;
};
if(varNames.find(s) == varNames.end())
return;
CudaSet* a = varNames.find(s)->second;
if(stat[f] == statement_count)
a->deAllocOnDevice();
printf("STORE: %s %s \n", s, f);
int limit = 0;
if(!op_nums.empty()) {
limit = op_nums.front();
op_nums.pop();
};
fact_file_loaded = 0;
while(!fact_file_loaded) {
cout << "LOADING " << f_file << " " << separator << endl;
fact_file_loaded = a->LoadBigFile(f_file.c_str(), separator.c_str());
cout << "STORING " << f << " " << limit << endl;
a->Store(f,"", limit, 1);
};
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(s);
};
};
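// LOAD 'file' BINARY: reads the total record count, segment count and maximum segment size
// from the 16-byte footer of the first column file and creates a CudaSet describing the
// binary columns; the column data itself is read later, one segment at a time.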
void emit_load_binary(char *s, char *f, int d)
{
statement_count++;
if (scan_state == 0) {
stat[s] = statement_count;
return;
};
printf("BINARY LOAD: %s %s \n", s, f);
std::clock_t start1 = std::clock();
CudaSet *a;
unsigned int segCount, maxRecs;
char f1[100];
strcpy(f1, f);
strcat(f1,".");
char col_pos[3];
itoaa(cols.front(),col_pos);
strcat(f1,col_pos);
FILE* ff = fopen(f1, "rb");
fseeko(ff, -16, SEEK_END);
fread((char *)&totalRecs, 8, 1, ff);
fread((char *)&segCount, 4, 1, ff);
fread((char *)&maxRecs, 4, 1, ff);
fclose(ff);
queue<string> names(namevars);
while(!names.empty()) {
setMap[names.front()] = s;
names.pop();
};
a = new CudaSet(namevars, typevars, sizevars, cols,totalRecs, f);
a->segCount = segCount;
a->maxRecs = maxRecs;
a->keep = 1;
varNames[s] = a;
if(stat[s] == statement_count ) {
a->free();
varNames.erase(s);
};
std::cout<< "load time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
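// LOAD 'file' USING (separator) AS (...): creates an empty CudaSet sized for process_count
// rows and records the file name and separator; the text file is actually read later
// (see the fact_file_loaded loop in emit_store_binary).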
void emit_load(char *s, char *f, int d, char* sep)
{
statement_count++;
if (scan_state == 0) {
stat[s] = statement_count;
return;
};
printf("LOAD: %s %s %d %s \n", s, f, d, sep);
CudaSet *a;
a = new CudaSet(namevars, typevars, sizevars, cols, process_count);
a->mRecCount = 0;
a->resize(process_count);
a->keep = true;
a->fact_table = 1;
//a->LoadBigFile(f, sep);
string separator1(sep);
separator = separator1;
string ff(f);
f_file = ff;
a->maxRecs = a->mRecCount;
a->segCount = 0;
varNames[s] = a;
if(stat[s] == statement_count) {
a->free();
varNames.erase(s);
};
}
void yyerror(char *s, ...)
{
extern int yylineno;
va_list ap;
va_start(ap, s);
fprintf(stderr, "%d: error: ", yylineno);
vfprintf(stderr, s, ap);
fprintf(stderr, "\n");
}
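// Clears all operand/operator queues and per-statement counters between statements.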
void clean_queues()
{
while(!op_type.empty()) op_type.pop();
while(!op_value.empty()) op_value.pop();
while(!op_join.empty()) op_join.pop();
while(!op_nums.empty()) op_nums.pop();
while(!op_nums_f.empty()) op_nums_f.pop();
while(!j_col_count.empty()) j_col_count.pop();
while(!namevars.empty()) namevars.pop();
while(!typevars.empty()) typevars.pop();
while(!sizevars.empty()) sizevars.pop();
while(!cols.empty()) cols.pop();
sel_count = 0;
join_cnt = 0;
join_col_cnt = -1;
eqq = 0;
}
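// Entry point. Enables mapped (zero-copy) host memory, initialises CUDPP, then parses the
// script twice: the first (scan) pass records in which statement each variable is last used
// so it can be freed as early as possible, and the second pass executes the statements.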
int main(int ac, char **av)
{
extern FILE *yyin;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
if (!deviceProp.canMapHostMemory)
cout << "Device 0 cannot map host memory" << endl;
hipSetDeviceFlags(hipDeviceMapHost);
cudppCreate(&theCudpp);
if (ac == 1) {
cout << "Usage : alenka -l process_count script.sql" << endl;
exit(1);
};
if(strcmp(av[1],"-l") == 0) {
process_count = atoff(av[2]);
cout << "Process count = " << process_count << endl;
}
else {
process_count = 6200000;
cout << "Process count = 6200000 " << endl;
};
if((yyin = fopen(av[ac-1], "r")) == NULL) {
perror(av[ac-1]);
exit(1);
};
if(yyparse()) {
printf("SQL scan parse failed\n");
exit(1);
};
fclose(yyin);
scan_state = 1;
std::clock_t start1 = std::clock();
statement_count = 0;
clean_queues();
if(ac > 1 && (yyin = fopen(av[ac-1], "r")) == NULL) {
perror(av[ac-1]);
exit(1);
}
PROC_FLUSH_BUF ( yyin );
statement_count = 0;
if(!yyparse())
cout << "SQL scan parse worked" << endl;
else
cout << "SQL scan parse failed" << endl;
fclose(yyin);
std::cout<< "cycle time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
cudppDestroy(theCudpp);
}
| 58b36ea91dfdf5c16e1403568b3dd062ca61963b.cu |
/* A Bison parser, made by GNU Bison 2.4.1. */
/* Skeleton implementation for Bison's Yacc-like parsers in C
Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* As a special exception, you may create a larger work that contains
part or all of the Bison parser skeleton and distribute that work
under terms of your choice, so long as that work isn't itself a
parser generator using the skeleton or a modified version thereof
as a parser skeleton. Alternatively, if you modify or redistribute
the parser skeleton itself, you may (at your option) remove this
special exception, which will cause the skeleton and the resulting
Bison output files to be licensed under the GNU General Public
License without this special exception.
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
/* C LALR(1) parser skeleton written by Richard Stallman, by
simplifying the original so-called "semantic" parser. */
/* All symbols defined below should begin with yy or YY, to avoid
infringing on user name space. This should be done even for local
variables, as they might otherwise be expanded by user macros.
There are some unavoidable exceptions within include files to
define necessary library symbols; they are noted "INFRINGES ON
USER NAME SPACE" below. */
/* Identify Bison output. */
#define YYBISON 1
/* Bison version. */
#define YYBISON_VERSION "2.4.1"
/* Skeleton name. */
#define YYSKELETON_NAME "yacc.c"
/* Pure parsers. */
#define YYPURE 0
/* Push parsers. */
#define YYPUSH 0
/* Pull parsers. */
#define YYPULL 1
/* Using locations. */
#define YYLSP_NEEDED 0
/* Copy the first part of user declarations. */
/* Line 189 of yacc.c */
#line 17 "bison.y"
#include "lex.yy.c"
#include "cm.cu"
void clean_queues();
void order_inplace(CudaSet* a, stack<string> exe_type);
void yyerror(char *s, ...);
void emit(char *s, ...);
void emit_mul();
void emit_add();
void emit_minus();
void emit_div();
void emit_and();
void emit_eq();
void emit_or();
void emit_cmp(int val);
void emit_var(char *s, int c, char *f);
void emit_var_asc(char *s);
void emit_var_desc(char *s);
void emit_name(char *name);
void emit_count();
void emit_sum();
void emit_average();
void emit_min();
void emit_max();
void emit_string(char *str);
void emit_number(int_type val);
void emit_float(float_type val);
void emit_decimal(float_type val);
void emit_sel_name(char* name);
void emit_limit(int val);
void emit_union(char *s, char *f1, char *f2);
void emit_varchar(char *s, int c, char *f, int d);
void emit_load(char *s, char *f, int d, char* sep);
void emit_load_binary(char *s, char *f, int d);
void emit_store(char *s, char *f, char* sep);
void emit_store_binary(char *s, char *f, char* sep);
void emit_store_binary(char *s, char *f);
void emit_filter(char *s, char *f, int e);
void emit_order(char *s, char *f, int e, int ll = 0);
void emit_group(char *s, char *f, int e);
void emit_select(char *s, char *f, int ll);
void emit_join(char *s, char *j1);
void emit_join_tab(char *s);
void emit_distinct(char *s, char *f);
/* Line 189 of yacc.c */
#line 124 "bison.cu"
/* Enabling traces. */
#ifndef YYDEBUG
# define YYDEBUG 0
#endif
/* Enabling verbose error messages. */
#ifdef YYERROR_VERBOSE
# undef YYERROR_VERBOSE
# define YYERROR_VERBOSE 1
#else
# define YYERROR_VERBOSE 0
#endif
/* Enabling the token table. */
#ifndef YYTOKEN_TABLE
# define YYTOKEN_TABLE 0
#endif
/* Tokens. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
/* Put the tokens into the symbol table, so that GDB and other debuggers
know about them. */
enum yytokentype {
FILENAME = 258,
NAME = 259,
STRING = 260,
INTNUM = 261,
DECIMAL1 = 262,
BOOL1 = 263,
APPROXNUM = 264,
USERVAR = 265,
ASSIGN = 266,
EQUAL = 267,
OR = 268,
XOR = 269,
AND = 270,
REGEXP = 271,
LIKE = 272,
IS = 273,
IN = 274,
NOT = 275,
BETWEEN = 276,
COMPARISON = 277,
SHIFT = 278,
MOD = 279,
UMINUS = 280,
LOAD = 281,
STREAM = 282,
FILTER = 283,
BY = 284,
JOIN = 285,
STORE = 286,
INTO = 287,
GROUP = 288,
FROM = 289,
SELECT = 290,
AS = 291,
ORDER = 292,
ASC = 293,
DESC = 294,
COUNT = 295,
USING = 296,
SUM = 297,
AVG = 298,
MIN = 299,
MAX = 300,
LIMIT = 301,
ON = 302,
BINARY = 303
};
#endif
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
typedef union YYSTYPE
{
/* Line 214 of yacc.c */
#line 67 "bison.y"
int intval;
float floatval;
char *strval;
int subtok;
/* Line 214 of yacc.c */
#line 217 "bison.cu"
} YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define yystype YYSTYPE /* obsolescent; will be withdrawn */
# define YYSTYPE_IS_DECLARED 1
#endif
/* Copy the second part of user declarations. */
/* Line 264 of yacc.c */
#line 229 "bison.cu"
#ifdef short
# undef short
#endif
#ifdef YYTYPE_UINT8
typedef YYTYPE_UINT8 yytype_uint8;
#else
typedef unsigned char yytype_uint8;
#endif
#ifdef YYTYPE_INT8
typedef YYTYPE_INT8 yytype_int8;
#elif (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
typedef signed char yytype_int8;
#else
typedef short int yytype_int8;
#endif
#ifdef YYTYPE_UINT16
typedef YYTYPE_UINT16 yytype_uint16;
#else
typedef unsigned short int yytype_uint16;
#endif
#ifdef YYTYPE_INT16
typedef YYTYPE_INT16 yytype_int16;
#else
typedef short int yytype_int16;
#endif
#ifndef YYSIZE_T
# ifdef __SIZE_TYPE__
# define YYSIZE_T __SIZE_TYPE__
# elif defined size_t
# define YYSIZE_T size_t
# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
# define YYSIZE_T size_t
# else
# define YYSIZE_T unsigned int
# endif
#endif
#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
#ifndef YY_
# if YYENABLE_NLS
# if ENABLE_NLS
# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
# define YY_(msgid) dgettext ("bison-runtime", msgid)
# endif
# endif
# ifndef YY_
# define YY_(msgid) msgid
# endif
#endif
/* Suppress unused-variable warnings by "using" E. */
#if ! defined lint || defined __GNUC__
# define YYUSE(e) ((void) (e))
#else
# define YYUSE(e) /* empty */
#endif
/* Identity function, used to suppress warnings about constant conditions. */
#ifndef lint
# define YYID(n) (n)
#else
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static int
YYID (int yyi)
#else
static int
YYID (yyi)
int yyi;
#endif
{
return yyi;
}
#endif
#if ! defined yyoverflow || YYERROR_VERBOSE
/* The parser invokes alloca or malloc; define the necessary symbols. */
# ifdef YYSTACK_USE_ALLOCA
# if YYSTACK_USE_ALLOCA
# ifdef __GNUC__
# define YYSTACK_ALLOC __builtin_alloca
# elif defined __BUILTIN_VA_ARG_INCR
# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
# elif defined _AIX
# define YYSTACK_ALLOC __alloca
# elif defined _MSC_VER
# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
# define alloca _alloca
# else
# define YYSTACK_ALLOC alloca
# if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
# ifndef _STDLIB_H
# define _STDLIB_H 1
# endif
# endif
# endif
# endif
# endif
# ifdef YYSTACK_ALLOC
/* Pacify GCC's `empty if-body' warning. */
# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))
# ifndef YYSTACK_ALLOC_MAXIMUM
/* The OS might guarantee only one guard page at the bottom of the stack,
and a page size can be as small as 4096 bytes. So we cannot safely
invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
to allow for a few compiler-allocated temporary stack slots. */
# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
# endif
# else
# define YYSTACK_ALLOC YYMALLOC
# define YYSTACK_FREE YYFREE
# ifndef YYSTACK_ALLOC_MAXIMUM
# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
# endif
# if (defined __cplusplus && ! defined _STDLIB_H \
&& ! ((defined YYMALLOC || defined malloc) \
&& (defined YYFREE || defined free)))
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
# ifndef _STDLIB_H
# define _STDLIB_H 1
# endif
# endif
# ifndef YYMALLOC
# define YYMALLOC malloc
# if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# ifndef YYFREE
# define YYFREE free
# if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
void free (void *); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# endif
#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
#if (! defined yyoverflow \
&& (! defined __cplusplus \
|| (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
/* A type that is properly aligned for any stack member. */
union yyalloc
{
yytype_int16 yyss_alloc;
YYSTYPE yyvs_alloc;
};
/* The size of the maximum gap between one aligned stack and the next. */
# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
/* The size of an array large enough to hold all stacks, each with
N elements. */
# define YYSTACK_BYTES(N) \
((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ YYSTACK_GAP_MAXIMUM)
/* Copy COUNT objects from FROM to TO. The source and destination do
not overlap. */
# ifndef YYCOPY
# if defined __GNUC__ && 1 < __GNUC__
# define YYCOPY(To, From, Count) \
__builtin_memcpy (To, From, (Count) * sizeof (*(From)))
# else
# define YYCOPY(To, From, Count) \
do \
{ \
YYSIZE_T yyi; \
for (yyi = 0; yyi < (Count); yyi++) \
(To)[yyi] = (From)[yyi]; \
} \
while (YYID (0))
# endif
# endif
/* Relocate STACK from its old location to the new one. The
local variables YYSIZE and YYSTACKSIZE give the old and new number of
elements in the stack, and YYPTR gives the new location of the
stack. Advance YYPTR to a properly aligned location for the next
stack. */
# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
do \
{ \
YYSIZE_T yynewbytes; \
YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
Stack = &yyptr->Stack_alloc; \
yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
yyptr += yynewbytes / sizeof (*yyptr); \
} \
while (YYID (0))
#endif
/* YYFINAL -- State number of the termination state. */
#define YYFINAL 8
/* YYLAST -- Last index in YYTABLE. */
#define YYLAST 446
/* YYNTOKENS -- Number of terminals. */
#define YYNTOKENS 66
/* YYNNTS -- Number of nonterminals. */
#define YYNNTS 13
/* YYNRULES -- Number of rules. */
#define YYNRULES 62
/* YYNRULES -- Number of states. */
#define YYNSTATES 153
/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
#define YYUNDEFTOK 2
#define YYMAXUTOK 303
#define YYTRANSLATE(YYX) \
((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
static const yytype_uint8 yytranslate[] =
{
0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 20, 2, 2, 2, 31, 25, 2,
59, 60, 29, 27, 65, 28, 61, 30, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 64, 58,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 33, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 62, 24, 63, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 21, 22, 23, 26, 32,
34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57
};
#if YYDEBUG
/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
YYRHS. */
static const yytype_uint16 yyprhs[] =
{
0, 0, 3, 6, 10, 12, 20, 33, 43, 49,
56, 64, 74, 81, 83, 87, 89, 91, 93, 95,
97, 99, 109, 116, 119, 122, 127, 132, 137, 142,
147, 151, 155, 159, 163, 167, 171, 175, 179, 183,
187, 191, 194, 197, 201, 207, 211, 215, 220, 221,
225, 229, 235, 237, 241, 243, 247, 248, 250, 253,
258, 264, 265
};
/* YYRHS -- A `-1'-separated list of the rules' RHS. */
static const yytype_int8 yyrhs[] =
{
67, 0, -1, 68, 58, -1, 67, 68, 58, -1,
69, -1, 4, 11, 44, 72, 43, 4, 71, -1,
4, 11, 35, 3, 50, 59, 3, 60, 45, 59,
73, 60, -1, 4, 11, 35, 3, 57, 45, 59,
73, 60, -1, 4, 11, 37, 4, 76, -1, 4,
11, 46, 4, 38, 75, -1, 4, 11, 44, 72,
43, 4, 77, -1, 40, 4, 41, 3, 50, 59,
3, 60, 78, -1, 40, 4, 41, 3, 78, 57,
-1, 4, -1, 4, 61, 4, -1, 10, -1, 5,
-1, 6, -1, 9, -1, 7, -1, 8, -1, 4,
62, 6, 63, 64, 4, 59, 6, 60, -1, 4,
62, 6, 63, 64, 4, -1, 4, 47, -1, 4,
48, -1, 49, 59, 70, 60, -1, 51, 59, 70,
60, -1, 52, 59, 70, 60, -1, 53, 59, 70,
60, -1, 54, 59, 70, 60, -1, 70, 27, 70,
-1, 70, 28, 70, -1, 70, 29, 70, -1, 70,
30, 70, -1, 70, 31, 70, -1, 70, 32, 70,
-1, 70, 15, 70, -1, 70, 12, 70, -1, 70,
13, 70, -1, 70, 14, 70, -1, 70, 26, 70,
-1, 21, 70, -1, 20, 70, -1, 70, 23, 70,
-1, 70, 23, 59, 69, 60, -1, 59, 70, 60,
-1, 70, 18, 8, -1, 70, 18, 21, 8, -1,
-1, 42, 38, 74, -1, 70, 45, 4, -1, 72,
65, 70, 45, 4, -1, 70, -1, 73, 65, 70,
-1, 70, -1, 70, 65, 74, -1, -1, 74, -1,
38, 70, -1, 39, 4, 56, 70, -1, 39, 4,
56, 70, 77, -1, -1, 55, 6, -1
};
/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
static const yytype_uint8 yyrline[] =
{
0, 137, 137, 138, 142, 145, 147, 149, 151, 153,
155, 157, 159, 164, 165, 166, 167, 168, 169, 170,
171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
184, 185, 186, 187, 188, 189, 191, 192, 193, 194,
195, 196, 197, 198, 200, 201, 205, 206, 209, 212,
216, 217, 221, 222, 226, 227, 230, 232, 235, 238,
239, 241, 244
};
#endif
#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE
/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
First, the terminals, then, starting at YYNTOKENS, nonterminals. */
static const char *const yytname[] =
{
"$end", "error", "$undefined", "FILENAME", "NAME", "STRING", "INTNUM",
"DECIMAL1", "BOOL1", "APPROXNUM", "USERVAR", "ASSIGN", "EQUAL", "OR",
"XOR", "AND", "REGEXP", "LIKE", "IS", "IN", "'!'", "NOT", "BETWEEN",
"COMPARISON", "'|'", "'&'", "SHIFT", "'+'", "'-'", "'*'", "'/'", "'%'",
"MOD", "'^'", "UMINUS", "LOAD", "STREAM", "FILTER", "BY", "JOIN",
"STORE", "INTO", "GROUP", "FROM", "SELECT", "AS", "ORDER", "ASC", "DESC",
"COUNT", "USING", "SUM", "AVG", "MIN", "MAX", "LIMIT", "ON", "BINARY",
"';'", "'('", "')'", "'.'", "'{'", "'}'", "':'", "','", "$accept",
"stmt_list", "stmt", "select_stmt", "expr", "opt_group_list",
"expr_list", "load_list", "val_list", "opt_val_list", "opt_where",
"join_list", "opt_limit", 0
};
#endif
# ifdef YYPRINT
/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
token YYLEX-NUM. */
static const yytype_uint16 yytoknum[] =
{
0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
33, 275, 276, 277, 124, 38, 278, 43, 45, 42,
47, 37, 279, 94, 280, 281, 282, 283, 284, 285,
286, 287, 288, 289, 290, 291, 292, 293, 294, 295,
296, 297, 298, 299, 300, 301, 302, 303, 59, 40,
41, 46, 123, 125, 58, 44
};
# endif
/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
static const yytype_uint8 yyr1[] =
{
0, 66, 67, 67, 68, 69, 69, 69, 69, 69,
69, 69, 69, 70, 70, 70, 70, 70, 70, 70,
70, 70, 70, 70, 70, 70, 70, 70, 70, 70,
70, 70, 70, 70, 70, 70, 70, 70, 70, 70,
70, 70, 70, 70, 70, 70, 70, 70, 71, 71,
72, 72, 73, 73, 74, 74, 75, 75, 76, 77,
77, 78, 78
};
/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
static const yytype_uint8 yyr2[] =
{
0, 2, 2, 3, 1, 7, 12, 9, 5, 6,
7, 9, 6, 1, 3, 1, 1, 1, 1, 1,
1, 9, 6, 2, 2, 4, 4, 4, 4, 4,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 2, 2, 3, 5, 3, 3, 4, 0, 3,
3, 5, 1, 3, 1, 3, 0, 1, 2, 4,
5, 0, 2
};
/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
STATE-NUM when YYTABLE doesn't specify something else to do. Zero
means the default is an error. */
static const yytype_uint8 yydefact[] =
{
0, 0, 0, 0, 0, 4, 0, 0, 1, 0,
2, 0, 0, 0, 0, 0, 3, 0, 0, 13,
16, 17, 19, 20, 18, 15, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 61, 0, 0,
0, 8, 23, 24, 0, 0, 42, 41, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
56, 0, 0, 0, 0, 0, 58, 14, 0, 0,
0, 0, 0, 0, 45, 37, 38, 39, 36, 46,
0, 0, 43, 40, 30, 31, 32, 33, 34, 35,
50, 48, 0, 54, 57, 9, 0, 62, 12, 0,
0, 0, 25, 26, 27, 28, 29, 47, 13, 0,
0, 0, 5, 10, 0, 0, 0, 0, 52, 0,
0, 44, 0, 0, 51, 55, 61, 0, 7, 0,
22, 0, 49, 11, 0, 53, 0, 59, 0, 0,
60, 6, 21
};
/* YYDEFGOTO[NTERM-NUM]. */
static const yytype_int16 yydefgoto[] =
{
-1, 3, 4, 5, 103, 122, 35, 129, 104, 105,
41, 123, 73
};
/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
STATE-NUM. */
#define YYPACT_NINF -122
static const yytype_int16 yypact[] =
{
14, -3, 7, 5, -34, -122, 50, 23, -122, 28,
-122, 52, 61, 62, 77, 85, -122, -35, 51, -45,
-122, -122, -122, -122, -122, -122, 62, 62, 33, 36,
44, 49, 58, 62, 300, -42, 71, -29, 59, 65,
62, -122, -122, -122, 115, 114, 2, 2, 62, 62,
62, 62, 62, 171, 62, 62, 62, 62, -2, 128,
62, 62, 62, 62, 62, 62, 62, 118, 119, 62,
62, 66, 121, 67, 126, 84, 364, -122, 81, 192,
214, 235, 257, 278, -122, 364, 383, 401, 142, -122,
122, 53, 408, 414, 69, 69, -122, -122, -122, -122,
-122, -32, 321, 127, -122, -122, 143, -122, -122, 87,
62, 88, -122, -122, -122, -122, -122, -122, 29, 91,
157, 124, -122, -122, 159, 62, 104, 130, 364, 15,
162, -122, 111, 62, -122, -122, 123, 117, -122, 62,
129, 62, -122, -122, 62, 364, 184, 342, 19, 131,
-122, -122, -122
};
/* YYPGOTO[NTERM-NUM]. */
static const yytype_int16 yypgoto[] =
{
-122, -122, 190, 105, -13, -122, -122, 64, -121, -122,
-122, 48, 73
};
/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
positive, shift that token. If negative, reduce the rule which
number is the opposite. If zero, do what YYDEFACT says.
If YYTABLE_NINF, syntax error. */
#define YYTABLE_NINF -1
static const yytype_uint8 yytable[] =
{
34, 68, 42, 43, 135, 8, 89, 120, 6, 1,
121, 7, 142, 46, 47, 38, 44, 45, 1, 90,
53, 71, 39, 69, 10, 59, 72, 76, 60, 61,
62, 63, 64, 65, 66, 79, 80, 81, 82, 83,
6, 85, 86, 87, 88, 2, 92, 93, 94, 95,
96, 97, 98, 99, 2, 17, 102, 118, 20, 21,
22, 23, 24, 25, 15, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 138, 42, 43, 53, 151,
139, 36, 26, 27, 139, 11, 16, 12, 37, 40,
44, 45, 48, 2, 13, 49, 14, 128, 63, 64,
65, 66, 28, 50, 29, 30, 31, 32, 51, 70,
75, 28, 33, 29, 30, 31, 32, 52, 74, 77,
78, 33, 100, 101, 108, 106, 145, 107, 147, 109,
117, 128, 19, 20, 21, 22, 23, 24, 25, 54,
55, 56, 57, 110, 111, 58, 126, 127, 26, 27,
59, 131, 130, 60, 61, 62, 63, 64, 65, 66,
58, 132, 133, 134, 136, 59, 140, 141, 60, 61,
62, 63, 64, 65, 66, 137, 144, 28, 72, 29,
30, 31, 32, 54, 55, 56, 57, 91, 146, 58,
149, 152, 125, 9, 59, 150, 119, 60, 61, 62,
63, 64, 65, 66, 54, 55, 56, 57, 148, 143,
58, 0, 0, 0, 0, 59, 0, 0, 60, 61,
62, 63, 64, 65, 66, 0, 54, 55, 56, 57,
0, 84, 58, 0, 0, 0, 0, 59, 0, 0,
60, 61, 62, 63, 64, 65, 66, 54, 55, 56,
57, 0, 112, 58, 0, 0, 0, 0, 59, 0,
0, 60, 61, 62, 63, 64, 65, 66, 0, 54,
55, 56, 57, 0, 113, 58, 0, 0, 0, 0,
59, 0, 0, 60, 61, 62, 63, 64, 65, 66,
54, 55, 56, 57, 0, 114, 58, 0, 0, 0,
0, 59, 0, 0, 60, 61, 62, 63, 64, 65,
66, 0, 54, 55, 56, 57, 0, 115, 58, 0,
0, 0, 0, 59, 0, 0, 60, 61, 62, 63,
64, 65, 66, 54, 55, 56, 57, 0, 116, 58,
0, 0, 0, 0, 59, 67, 0, 60, 61, 62,
63, 64, 65, 66, 54, 55, 56, 57, 0, 0,
58, 0, 0, 0, 0, 59, 124, 0, 60, 61,
62, 63, 64, 65, 66, 0, 54, 55, 56, 57,
0, 120, 58, 0, 0, 0, 0, 59, 0, 0,
60, 61, 62, 63, 64, 65, 66, 56, 57, 0,
0, 58, 0, 0, 0, 0, 59, 0, 0, 60,
61, 62, 63, 64, 65, 66, 57, 0, 0, 58,
0, 0, 0, 0, 59, 0, 0, 60, 61, 62,
63, 64, 65, 66, 60, 61, 62, 63, 64, 65,
66, 61, 62, 63, 64, 65, 66
};
static const yytype_int16 yycheck[] =
{
13, 43, 47, 48, 125, 0, 8, 39, 11, 4,
42, 4, 133, 26, 27, 50, 61, 62, 4, 21,
33, 50, 57, 65, 58, 23, 55, 40, 26, 27,
28, 29, 30, 31, 32, 48, 49, 50, 51, 52,
11, 54, 55, 56, 57, 40, 59, 60, 61, 62,
63, 64, 65, 66, 40, 3, 69, 4, 5, 6,
7, 8, 9, 10, 41, 4, 4, 5, 6, 7,
8, 9, 10, 20, 21, 60, 47, 48, 91, 60,
65, 4, 20, 21, 65, 35, 58, 37, 3, 38,
61, 62, 59, 40, 44, 59, 46, 110, 29, 30,
31, 32, 49, 59, 51, 52, 53, 54, 59, 38,
45, 49, 59, 51, 52, 53, 54, 59, 59, 4,
6, 59, 4, 4, 57, 59, 139, 6, 141, 3,
8, 144, 4, 5, 6, 7, 8, 9, 10, 12,
13, 14, 15, 59, 63, 18, 3, 60, 20, 21,
23, 60, 64, 26, 27, 28, 29, 30, 31, 32,
18, 4, 38, 4, 60, 23, 4, 56, 26, 27,
28, 29, 30, 31, 32, 45, 59, 49, 55, 51,
52, 53, 54, 12, 13, 14, 15, 59, 59, 18,
6, 60, 65, 3, 23, 147, 91, 26, 27, 28,
29, 30, 31, 32, 12, 13, 14, 15, 144, 136,
18, -1, -1, -1, -1, 23, -1, -1, 26, 27,
28, 29, 30, 31, 32, -1, 12, 13, 14, 15,
-1, 60, 18, -1, -1, -1, -1, 23, -1, -1,
26, 27, 28, 29, 30, 31, 32, 12, 13, 14,
15, -1, 60, 18, -1, -1, -1, -1, 23, -1,
-1, 26, 27, 28, 29, 30, 31, 32, -1, 12,
13, 14, 15, -1, 60, 18, -1, -1, -1, -1,
23, -1, -1, 26, 27, 28, 29, 30, 31, 32,
12, 13, 14, 15, -1, 60, 18, -1, -1, -1,
-1, 23, -1, -1, 26, 27, 28, 29, 30, 31,
32, -1, 12, 13, 14, 15, -1, 60, 18, -1,
-1, -1, -1, 23, -1, -1, 26, 27, 28, 29,
30, 31, 32, 12, 13, 14, 15, -1, 60, 18,
-1, -1, -1, -1, 23, 45, -1, 26, 27, 28,
29, 30, 31, 32, 12, 13, 14, 15, -1, -1,
18, -1, -1, -1, -1, 23, 45, -1, 26, 27,
28, 29, 30, 31, 32, -1, 12, 13, 14, 15,
-1, 39, 18, -1, -1, -1, -1, 23, -1, -1,
26, 27, 28, 29, 30, 31, 32, 14, 15, -1,
-1, 18, -1, -1, -1, -1, 23, -1, -1, 26,
27, 28, 29, 30, 31, 32, 15, -1, -1, 18,
-1, -1, -1, -1, 23, -1, -1, 26, 27, 28,
29, 30, 31, 32, 26, 27, 28, 29, 30, 31,
32, 27, 28, 29, 30, 31, 32
};
/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
symbol of state STATE-NUM. */
static const yytype_uint8 yystos[] =
{
0, 4, 40, 67, 68, 69, 11, 4, 0, 68,
58, 35, 37, 44, 46, 41, 58, 3, 4, 4,
5, 6, 7, 8, 9, 10, 20, 21, 49, 51,
52, 53, 54, 59, 70, 72, 4, 3, 50, 57,
38, 76, 47, 48, 61, 62, 70, 70, 59, 59,
59, 59, 59, 70, 12, 13, 14, 15, 18, 23,
26, 27, 28, 29, 30, 31, 32, 45, 43, 65,
38, 50, 55, 78, 59, 45, 70, 4, 6, 70,
70, 70, 70, 70, 60, 70, 70, 70, 70, 8,
21, 59, 70, 70, 70, 70, 70, 70, 70, 70,
4, 4, 70, 70, 74, 75, 59, 6, 57, 3,
59, 63, 60, 60, 60, 60, 60, 8, 4, 69,
39, 42, 71, 77, 45, 65, 3, 60, 70, 73,
64, 60, 4, 38, 4, 74, 60, 45, 60, 65,
4, 56, 74, 78, 59, 70, 59, 70, 73, 6,
77, 60, 60
};
#define yyerrok (yyerrstatus = 0)
#define yyclearin (yychar = YYEMPTY)
#define YYEMPTY (-2)
#define YYEOF 0
#define YYACCEPT goto yyacceptlab
#define YYABORT goto yyabortlab
#define YYERROR goto yyerrorlab
/* Like YYERROR except do call yyerror. This remains here temporarily
to ease the transition to the new meaning of YYERROR, for GCC.
Once GCC version 2 has supplanted version 1, this can go. */
#define YYFAIL goto yyerrlab
#define YYRECOVERING() (!!yyerrstatus)
#define YYBACKUP(Token, Value) \
do \
if (yychar == YYEMPTY && yylen == 1) \
{ \
yychar = (Token); \
yylval = (Value); \
yytoken = YYTRANSLATE (yychar); \
YYPOPSTACK (1); \
goto yybackup; \
} \
else \
{ \
yyerror (YY_("syntax error: cannot back up")); \
YYERROR; \
} \
while (YYID (0))
#define YYTERROR 1
#define YYERRCODE 256
/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
If N is 0, then set CURRENT to the empty location which ends
the previous symbol: RHS[0] (always defined). */
#define YYRHSLOC(Rhs, K) ((Rhs)[K])
#ifndef YYLLOC_DEFAULT
# define YYLLOC_DEFAULT(Current, Rhs, N) \
do \
if (YYID (N)) \
{ \
(Current).first_line = YYRHSLOC (Rhs, 1).first_line; \
(Current).first_column = YYRHSLOC (Rhs, 1).first_column; \
(Current).last_line = YYRHSLOC (Rhs, N).last_line; \
(Current).last_column = YYRHSLOC (Rhs, N).last_column; \
} \
else \
{ \
(Current).first_line = (Current).last_line = \
YYRHSLOC (Rhs, 0).last_line; \
(Current).first_column = (Current).last_column = \
YYRHSLOC (Rhs, 0).last_column; \
} \
while (YYID (0))
#endif
/* YY_LOCATION_PRINT -- Print the location on the stream.
This macro was not mandated originally: define only if we know
we won't break user code: when these are the locations we know. */
#ifndef YY_LOCATION_PRINT
# if YYLTYPE_IS_TRIVIAL
# define YY_LOCATION_PRINT(File, Loc) \
fprintf (File, "%d.%d-%d.%d", \
(Loc).first_line, (Loc).first_column, \
(Loc).last_line, (Loc).last_column)
# else
# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
# endif
#endif
/* YYLEX -- calling `yylex' with the right arguments. */
#ifdef YYLEX_PARAM
# define YYLEX yylex (YYLEX_PARAM)
#else
# define YYLEX yylex ()
#endif
/* Enable debugging if requested. */
#if YYDEBUG
# ifndef YYFPRINTF
# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
# define YYFPRINTF fprintf
# endif
# define YYDPRINTF(Args) \
do { \
if (yydebug) \
YYFPRINTF Args; \
} while (YYID (0))
# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
do { \
if (yydebug) \
{ \
YYFPRINTF (stderr, "%s ", Title); \
yy_symbol_print (stderr, \
Type, Value); \
YYFPRINTF (stderr, "\n"); \
} \
} while (YYID (0))
/*--------------------------------.
| Print this symbol on YYOUTPUT. |
`--------------------------------*/
/*ARGSUSED*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
#else
static void
yy_symbol_value_print (yyoutput, yytype, yyvaluep)
FILE *yyoutput;
int yytype;
YYSTYPE const * const yyvaluep;
#endif
{
if (!yyvaluep)
return;
# ifdef YYPRINT
if (yytype < YYNTOKENS)
YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
# else
YYUSE (yyoutput);
# endif
switch (yytype)
{
default:
break;
}
}
/*--------------------------------.
| Print this symbol on YYOUTPUT. |
`--------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
#else
static void
yy_symbol_print (yyoutput, yytype, yyvaluep)
FILE *yyoutput;
int yytype;
YYSTYPE const * const yyvaluep;
#endif
{
if (yytype < YYNTOKENS)
YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
else
YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
yy_symbol_value_print (yyoutput, yytype, yyvaluep);
YYFPRINTF (yyoutput, ")");
}
/*------------------------------------------------------------------.
| yy_stack_print -- Print the state stack from its BOTTOM up to its |
| TOP (included). |
`------------------------------------------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
#else
static void
yy_stack_print (yybottom, yytop)
yytype_int16 *yybottom;
yytype_int16 *yytop;
#endif
{
YYFPRINTF (stderr, "Stack now");
for (; yybottom <= yytop; yybottom++)
{
int yybot = *yybottom;
YYFPRINTF (stderr, " %d", yybot);
}
YYFPRINTF (stderr, "\n");
}
# define YY_STACK_PRINT(Bottom, Top) \
do { \
if (yydebug) \
yy_stack_print ((Bottom), (Top)); \
} while (YYID (0))
/*------------------------------------------------.
| Report that the YYRULE is going to be reduced. |
`------------------------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_reduce_print (YYSTYPE *yyvsp, int yyrule)
#else
static void
yy_reduce_print (yyvsp, yyrule)
YYSTYPE *yyvsp;
int yyrule;
#endif
{
int yynrhs = yyr2[yyrule];
int yyi;
unsigned long int yylno = yyrline[yyrule];
YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
yyrule - 1, yylno);
/* The symbols being reduced. */
for (yyi = 0; yyi < yynrhs; yyi++)
{
YYFPRINTF (stderr, " $%d = ", yyi + 1);
yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
&(yyvsp[(yyi + 1) - (yynrhs)])
);
YYFPRINTF (stderr, "\n");
}
}
# define YY_REDUCE_PRINT(Rule) \
do { \
if (yydebug) \
yy_reduce_print (yyvsp, Rule); \
} while (YYID (0))
/* Nonzero means print parse trace. It is left uninitialized so that
multiple parsers can coexist. */
int yydebug;
#else /* !YYDEBUG */
# define YYDPRINTF(Args)
# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
# define YY_STACK_PRINT(Bottom, Top)
# define YY_REDUCE_PRINT(Rule)
#endif /* !YYDEBUG */
/* YYINITDEPTH -- initial size of the parser's stacks. */
#ifndef YYINITDEPTH
# define YYINITDEPTH 200
#endif
/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
if the built-in stack extension method is used).
Do not make this value too large; the results are undefined if
YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
evaluated with infinite-precision integer arithmetic. */
#ifndef YYMAXDEPTH
# define YYMAXDEPTH 10000
#endif
#if YYERROR_VERBOSE
# ifndef yystrlen
# if defined __GLIBC__ && defined _STRING_H
# define yystrlen strlen
# else
/* Return the length of YYSTR. */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static YYSIZE_T
yystrlen (const char *yystr)
#else
static YYSIZE_T
yystrlen (yystr)
const char *yystr;
#endif
{
YYSIZE_T yylen;
for (yylen = 0; yystr[yylen]; yylen++)
continue;
return yylen;
}
# endif
# endif
# ifndef yystpcpy
# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
# define yystpcpy stpcpy
# else
/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
YYDEST. */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static char *
yystpcpy (char *yydest, const char *yysrc)
#else
static char *
yystpcpy (yydest, yysrc)
char *yydest;
const char *yysrc;
#endif
{
char *yyd = yydest;
const char *yys = yysrc;
while ((*yyd++ = *yys++) != '\0')
continue;
return yyd - 1;
}
# endif
# endif
# ifndef yytnamerr
/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
quotes and backslashes, so that it's suitable for yyerror. The
heuristic is that double-quoting is unnecessary unless the string
contains an apostrophe, a comma, or backslash (other than
backslash-backslash). YYSTR is taken from yytname. If YYRES is
null, do not copy; instead, return the length of what the result
would have been. */
static YYSIZE_T
yytnamerr (char *yyres, const char *yystr)
{
if (*yystr == '"')
{
YYSIZE_T yyn = 0;
char const *yyp = yystr;
for (;;)
switch (*++yyp)
{
case '\'':
case ',':
goto do_not_strip_quotes;
case '\\':
if (*++yyp != '\\')
goto do_not_strip_quotes;
/* Fall through. */
default:
if (yyres)
yyres[yyn] = *yyp;
yyn++;
break;
case '"':
if (yyres)
yyres[yyn] = '\0';
return yyn;
}
do_not_strip_quotes: ;
}
if (! yyres)
return yystrlen (yystr);
return yystpcpy (yyres, yystr) - yyres;
}
# endif
/* Copy into YYRESULT an error message about the unexpected token
YYCHAR while in state YYSTATE. Return the number of bytes copied,
including the terminating null byte. If YYRESULT is null, do not
copy anything; just return the number of bytes that would be
copied. As a special case, return 0 if an ordinary "syntax error"
message will do. Return YYSIZE_MAXIMUM if overflow occurs during
size calculation. */
static YYSIZE_T
yysyntax_error (char *yyresult, int yystate, int yychar)
{
int yyn = yypact[yystate];
if (! (YYPACT_NINF < yyn && yyn <= YYLAST))
return 0;
else
{
int yytype = YYTRANSLATE (yychar);
YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
YYSIZE_T yysize = yysize0;
YYSIZE_T yysize1;
int yysize_overflow = 0;
enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
int yyx;
# if 0
/* This is so xgettext sees the translatable formats that are
constructed on the fly. */
YY_("syntax error, unexpected %s");
YY_("syntax error, unexpected %s, expecting %s");
YY_("syntax error, unexpected %s, expecting %s or %s");
YY_("syntax error, unexpected %s, expecting %s or %s or %s");
YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
# endif
char *yyfmt;
char const *yyf;
static char const yyunexpected[] = "syntax error, unexpected %s";
static char const yyexpecting[] = ", expecting %s";
static char const yyor[] = " or %s";
char yyformat[sizeof yyunexpected
+ sizeof yyexpecting - 1
+ ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
* (sizeof yyor - 1))];
char const *yyprefix = yyexpecting;
/* Start YYX at -YYN if negative to avoid negative indexes in
YYCHECK. */
int yyxbegin = yyn < 0 ? -yyn : 0;
/* Stay within bounds of both yycheck and yytname. */
int yychecklim = YYLAST - yyn + 1;
int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
int yycount = 1;
yyarg[0] = yytname[yytype];
yyfmt = yystpcpy (yyformat, yyunexpected);
for (yyx = yyxbegin; yyx < yyxend; ++yyx)
if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
{
if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
{
yycount = 1;
yysize = yysize0;
yyformat[sizeof yyunexpected - 1] = '\0';
break;
}
yyarg[yycount++] = yytname[yyx];
yysize1 = yysize + yytnamerr (0, yytname[yyx]);
yysize_overflow |= (yysize1 < yysize);
yysize = yysize1;
yyfmt = yystpcpy (yyfmt, yyprefix);
yyprefix = yyor;
}
yyf = YY_(yyformat);
yysize1 = yysize + yystrlen (yyf);
yysize_overflow |= (yysize1 < yysize);
yysize = yysize1;
if (yysize_overflow)
return YYSIZE_MAXIMUM;
if (yyresult)
{
/* Avoid sprintf, as that infringes on the user's name space.
Don't have undefined behavior even if the translation
produced a string with the wrong number of "%s"s. */
char *yyp = yyresult;
int yyi = 0;
while ((*yyp = *yyf) != '\0')
{
if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
{
yyp += yytnamerr (yyp, yyarg[yyi++]);
yyf += 2;
}
else
{
yyp++;
yyf++;
}
}
}
return yysize;
}
}
#endif /* YYERROR_VERBOSE */
/*-----------------------------------------------.
| Release the memory associated to this symbol. |
`-----------------------------------------------*/
/*ARGSUSED*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
#else
static void
yydestruct (yymsg, yytype, yyvaluep)
const char *yymsg;
int yytype;
YYSTYPE *yyvaluep;
#endif
{
YYUSE (yyvaluep);
if (!yymsg)
yymsg = "Deleting";
YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
switch (yytype)
{
default:
break;
}
}
/* Prevent warnings from -Wmissing-prototypes. */
#ifdef YYPARSE_PARAM
#if defined __STDC__ || defined __cplusplus
int yyparse (void *YYPARSE_PARAM);
#else
int yyparse ();
#endif
#else /* ! YYPARSE_PARAM */
#if defined __STDC__ || defined __cplusplus
int yyparse (void);
#else
int yyparse ();
#endif
#endif /* ! YYPARSE_PARAM */
/* The lookahead symbol. */
int yychar;
/* The semantic value of the lookahead symbol. */
YYSTYPE yylval;
/* Number of syntax errors so far. */
int yynerrs;
/*-------------------------.
| yyparse or yypush_parse. |
`-------------------------*/
#ifdef YYPARSE_PARAM
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
int
yyparse (void *YYPARSE_PARAM)
#else
int
yyparse (YYPARSE_PARAM)
void *YYPARSE_PARAM;
#endif
#else /* ! YYPARSE_PARAM */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
int
yyparse (void)
#else
int
yyparse ()
#endif
#endif
{
int yystate;
/* Number of tokens to shift before error messages enabled. */
int yyerrstatus;
/* The stacks and their tools:
`yyss': related to states.
`yyvs': related to semantic values.
Refer to the stacks thru separate pointers, to allow yyoverflow
to reallocate them elsewhere. */
/* The state stack. */
yytype_int16 yyssa[YYINITDEPTH];
yytype_int16 *yyss;
yytype_int16 *yyssp;
/* The semantic value stack. */
YYSTYPE yyvsa[YYINITDEPTH];
YYSTYPE *yyvs;
YYSTYPE *yyvsp;
YYSIZE_T yystacksize;
int yyn;
int yyresult;
/* Lookahead token as an internal (translated) token number. */
int yytoken;
/* The variables used to return semantic value and location from the
action routines. */
YYSTYPE yyval;
#if YYERROR_VERBOSE
/* Buffer for error messages, and its allocated size. */
char yymsgbuf[128];
char *yymsg = yymsgbuf;
YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
#endif
#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
/* The number of symbols on the RHS of the reduced rule.
Keep to zero when no symbol should be popped. */
int yylen = 0;
yytoken = 0;
yyss = yyssa;
yyvs = yyvsa;
yystacksize = YYINITDEPTH;
YYDPRINTF ((stderr, "Starting parse\n"));
yystate = 0;
yyerrstatus = 0;
yynerrs = 0;
yychar = YYEMPTY; /* Cause a token to be read. */
/* Initialize stack pointers.
Waste one element of value and location stack
so that they stay on the same level as the state stack.
The wasted elements are never initialized. */
yyssp = yyss;
yyvsp = yyvs;
goto yysetstate;
/*------------------------------------------------------------.
| yynewstate -- Push a new state, which is found in yystate. |
`------------------------------------------------------------*/
yynewstate:
/* In all cases, when you get here, the value and location stacks
have just been pushed. So pushing a state here evens the stacks. */
yyssp++;
yysetstate:
*yyssp = yystate;
if (yyss + yystacksize - 1 <= yyssp)
{
/* Get the current used size of the three stacks, in elements. */
YYSIZE_T yysize = yyssp - yyss + 1;
#ifdef yyoverflow
{
/* Give user a chance to reallocate the stack. Use copies of
these so that the &'s don't force the real ones into
memory. */
YYSTYPE *yyvs1 = yyvs;
yytype_int16 *yyss1 = yyss;
/* Each stack pointer address is followed by the size of the
data in use in that stack, in bytes. This used to be a
conditional around just the two extra args, but that might
be undefined if yyoverflow is a macro. */
yyoverflow (YY_("memory exhausted"),
&yyss1, yysize * sizeof (*yyssp),
&yyvs1, yysize * sizeof (*yyvsp),
&yystacksize);
yyss = yyss1;
yyvs = yyvs1;
}
#else /* no yyoverflow */
# ifndef YYSTACK_RELOCATE
goto yyexhaustedlab;
# else
/* Extend the stack our own way. */
if (YYMAXDEPTH <= yystacksize)
goto yyexhaustedlab;
yystacksize *= 2;
if (YYMAXDEPTH < yystacksize)
yystacksize = YYMAXDEPTH;
{
yytype_int16 *yyss1 = yyss;
union yyalloc *yyptr =
(union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
if (! yyptr)
goto yyexhaustedlab;
YYSTACK_RELOCATE (yyss_alloc, yyss);
YYSTACK_RELOCATE (yyvs_alloc, yyvs);
# undef YYSTACK_RELOCATE
if (yyss1 != yyssa)
YYSTACK_FREE (yyss1);
}
# endif
#endif /* no yyoverflow */
yyssp = yyss + yysize - 1;
yyvsp = yyvs + yysize - 1;
YYDPRINTF ((stderr, "Stack size increased to %lu\n",
(unsigned long int) yystacksize));
if (yyss + yystacksize - 1 <= yyssp)
YYABORT;
}
YYDPRINTF ((stderr, "Entering state %d\n", yystate));
if (yystate == YYFINAL)
YYACCEPT;
goto yybackup;
/*-----------.
| yybackup. |
`-----------*/
yybackup:
/* Do appropriate processing given the current state. Read a
lookahead token if we need one and don't already have one. */
/* First try to decide what to do without reference to lookahead token. */
yyn = yypact[yystate];
if (yyn == YYPACT_NINF)
goto yydefault;
/* Not known => get a lookahead token if don't already have one. */
/* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
if (yychar == YYEMPTY)
{
YYDPRINTF ((stderr, "Reading a token: "));
yychar = YYLEX;
}
if (yychar <= YYEOF)
{
yychar = yytoken = YYEOF;
YYDPRINTF ((stderr, "Now at end of input.\n"));
}
else
{
yytoken = YYTRANSLATE (yychar);
YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
}
/* If the proper action on seeing token YYTOKEN is to reduce or to
detect an error, take that action. */
yyn += yytoken;
if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
goto yydefault;
yyn = yytable[yyn];
if (yyn <= 0)
{
if (yyn == 0 || yyn == YYTABLE_NINF)
goto yyerrlab;
yyn = -yyn;
goto yyreduce;
}
/* Count tokens shifted since error; after three, turn off error
status. */
if (yyerrstatus)
yyerrstatus--;
/* Shift the lookahead token. */
YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
/* Discard the shifted token. */
yychar = YYEMPTY;
yystate = yyn;
*++yyvsp = yylval;
goto yynewstate;
/*-----------------------------------------------------------.
| yydefault -- do the default action for the current state. |
`-----------------------------------------------------------*/
yydefault:
yyn = yydefact[yystate];
if (yyn == 0)
goto yyerrlab;
goto yyreduce;
/*-----------------------------.
| yyreduce -- Do a reduction. |
`-----------------------------*/
yyreduce:
/* yyn is the number of a rule to reduce with. */
yylen = yyr2[yyn];
/* If YYLEN is nonzero, implement the default value of the action:
`$$ = $1'.
Otherwise, the following line sets YYVAL to garbage.
This behavior is undocumented and Bison
users should not rely upon it. Assigning to YYVAL
unconditionally makes the parser a bit smaller, and it avoids a
GCC warning that YYVAL may be used uninitialized. */
yyval = yyvsp[1-yylen];
YY_REDUCE_PRINT (yyn);
switch (yyn)
{
case 4:
/* Line 1455 of yacc.c */
#line 142 "bison.y"
{ emit("STMT"); ;}
break;
case 5:
/* Line 1455 of yacc.c */
#line 146 "bison.y"
{ emit_select((yyvsp[(1) - (7)].strval), (yyvsp[(6) - (7)].strval), (yyvsp[(7) - (7)].intval)); ;}
break;
case 6:
/* Line 1455 of yacc.c */
#line 148 "bison.y"
{ emit_load((yyvsp[(1) - (12)].strval), (yyvsp[(4) - (12)].strval), (yyvsp[(11) - (12)].intval), (yyvsp[(7) - (12)].strval)); ;}
break;
case 7:
/* Line 1455 of yacc.c */
#line 150 "bison.y"
{ emit_load_binary((yyvsp[(1) - (9)].strval), (yyvsp[(4) - (9)].strval), (yyvsp[(8) - (9)].intval)); ;}
break;
case 8:
/* Line 1455 of yacc.c */
#line 152 "bison.y"
{ emit_filter((yyvsp[(1) - (5)].strval), (yyvsp[(4) - (5)].strval), (yyvsp[(5) - (5)].intval));;}
break;
case 9:
/* Line 1455 of yacc.c */
#line 154 "bison.y"
{ emit_order((yyvsp[(1) - (6)].strval), (yyvsp[(4) - (6)].strval), (yyvsp[(6) - (6)].intval));;}
break;
case 10:
/* Line 1455 of yacc.c */
#line 156 "bison.y"
{ emit_join((yyvsp[(1) - (7)].strval),(yyvsp[(6) - (7)].strval)); ;}
break;
case 11:
/* Line 1455 of yacc.c */
#line 158 "bison.y"
{ emit_store((yyvsp[(2) - (9)].strval),(yyvsp[(4) - (9)].strval),(yyvsp[(7) - (9)].strval)); ;}
break;
case 12:
/* Line 1455 of yacc.c */
#line 160 "bison.y"
{ emit_store_binary((yyvsp[(2) - (6)].strval),(yyvsp[(4) - (6)].strval)); ;}
break;
case 13:
/* Line 1455 of yacc.c */
#line 164 "bison.y"
{ emit_name((yyvsp[(1) - (1)].strval)); ;}
break;
case 14:
/* Line 1455 of yacc.c */
#line 165 "bison.y"
{ emit("FIELDNAME %s.%s", (yyvsp[(1) - (3)].strval), (yyvsp[(3) - (3)].strval)); ;}
break;
case 15:
/* Line 1455 of yacc.c */
#line 166 "bison.y"
{ emit("USERVAR %s", (yyvsp[(1) - (1)].strval)); ;}
break;
case 16:
/* Line 1455 of yacc.c */
#line 167 "bison.y"
{ emit_string((yyvsp[(1) - (1)].strval)); ;}
break;
case 17:
/* Line 1455 of yacc.c */
#line 168 "bison.y"
{ emit_number((yyvsp[(1) - (1)].intval)); ;}
break;
case 18:
/* Line 1455 of yacc.c */
#line 169 "bison.y"
{ emit_float((yyvsp[(1) - (1)].floatval)); ;}
break;
case 19:
/* Line 1455 of yacc.c */
#line 170 "bison.y"
{ emit_decimal((yyvsp[(1) - (1)].intval)); ;}
break;
case 20:
/* Line 1455 of yacc.c */
#line 171 "bison.y"
{ emit("BOOL %d", (yyvsp[(1) - (1)].intval)); ;}
break;
case 21:
/* Line 1455 of yacc.c */
#line 172 "bison.y"
{ emit_varchar((yyvsp[(1) - (9)].strval), (yyvsp[(3) - (9)].intval), (yyvsp[(6) - (9)].strval), (yyvsp[(8) - (9)].intval));;}
break;
case 22:
/* Line 1455 of yacc.c */
#line 173 "bison.y"
{ emit_var((yyvsp[(1) - (6)].strval), (yyvsp[(3) - (6)].intval), (yyvsp[(6) - (6)].strval));;}
break;
case 23:
/* Line 1455 of yacc.c */
#line 174 "bison.y"
{ emit_var_asc((yyvsp[(1) - (2)].strval));;}
break;
case 24:
/* Line 1455 of yacc.c */
#line 175 "bison.y"
{ emit_var_desc((yyvsp[(1) - (2)].strval));;}
break;
case 25:
/* Line 1455 of yacc.c */
#line 176 "bison.y"
{ emit_count(); ;}
break;
case 26:
/* Line 1455 of yacc.c */
#line 177 "bison.y"
{ emit_sum(); ;}
break;
case 27:
/* Line 1455 of yacc.c */
#line 178 "bison.y"
{ emit_average(); ;}
break;
case 28:
/* Line 1455 of yacc.c */
#line 179 "bison.y"
{ emit_min(); ;}
break;
case 29:
/* Line 1455 of yacc.c */
#line 180 "bison.y"
{ emit_max(); ;}
break;
case 30:
/* Line 1455 of yacc.c */
#line 184 "bison.y"
{ emit_add(); ;}
break;
case 31:
/* Line 1455 of yacc.c */
#line 185 "bison.y"
{ emit_minus(); ;}
break;
case 32:
/* Line 1455 of yacc.c */
#line 186 "bison.y"
{ emit_mul(); ;}
break;
case 33:
/* Line 1455 of yacc.c */
#line 187 "bison.y"
{ emit_div(); ;}
break;
case 34:
/* Line 1455 of yacc.c */
#line 188 "bison.y"
{ emit("MOD"); ;}
break;
case 35:
/* Line 1455 of yacc.c */
#line 189 "bison.y"
{ emit("MOD"); ;}
break;
case 36:
/* Line 1455 of yacc.c */
#line 191 "bison.y"
{ emit_and(); ;}
break;
case 37:
/* Line 1455 of yacc.c */
#line 192 "bison.y"
{ emit_eq(); ;}
break;
case 38:
/* Line 1455 of yacc.c */
#line 193 "bison.y"
{ emit_or(); ;}
break;
case 39:
/* Line 1455 of yacc.c */
#line 194 "bison.y"
{ emit("XOR"); ;}
break;
case 40:
/* Line 1455 of yacc.c */
#line 195 "bison.y"
{ emit("SHIFT %s", (yyvsp[(2) - (3)].subtok)==1?"left":"right"); ;}
break;
case 41:
/* Line 1455 of yacc.c */
#line 196 "bison.y"
{ emit("NOT"); ;}
break;
case 42:
/* Line 1455 of yacc.c */
#line 197 "bison.y"
{ emit("NOT"); ;}
break;
case 43:
/* Line 1455 of yacc.c */
#line 198 "bison.y"
{ emit_cmp((yyvsp[(2) - (3)].subtok)); ;}
break;
case 44:
/* Line 1455 of yacc.c */
#line 200 "bison.y"
{ emit("CMPSELECT %d", (yyvsp[(2) - (5)].subtok)); ;}
break;
case 45:
/* Line 1455 of yacc.c */
#line 201 "bison.y"
{emit("EXPR");;}
break;
case 46:
/* Line 1455 of yacc.c */
#line 205 "bison.y"
{ emit("ISBOOL %d", (yyvsp[(3) - (3)].intval)); ;}
break;
case 47:
/* Line 1455 of yacc.c */
#line 206 "bison.y"
{ emit("ISBOOL %d", (yyvsp[(4) - (4)].intval)); emit("NOT"); ;}
break;
case 48:
/* Line 1455 of yacc.c */
#line 209 "bison.y"
{ /* nil */
(yyval.intval) = 0;
;}
break;
case 49:
/* Line 1455 of yacc.c */
#line 212 "bison.y"
{ (yyval.intval) = (yyvsp[(3) - (3)].intval);}
break;
case 50:
/* Line 1455 of yacc.c */
#line 216 "bison.y"
{ (yyval.intval) = 1; emit_sel_name((yyvsp[(3) - (3)].strval));;}
break;
case 51:
/* Line 1455 of yacc.c */
#line 217 "bison.y"
{ (yyval.intval) = (yyvsp[(1) - (5)].intval) + 1; emit_sel_name((yyvsp[(5) - (5)].strval));;}
break;
case 52:
/* Line 1455 of yacc.c */
#line 221 "bison.y"
{ (yyval.intval) = 1; ;}
break;
case 53:
/* Line 1455 of yacc.c */
#line 222 "bison.y"
{(yyval.intval) = (yyvsp[(1) - (3)].intval) + 1; ;}
break;
case 54:
/* Line 1455 of yacc.c */
#line 226 "bison.y"
{ (yyval.intval) = 1; ;}
break;
case 55:
/* Line 1455 of yacc.c */
#line 227 "bison.y"
{ (yyval.intval) = 1 + (yyvsp[(3) - (3)].intval); ;}
break;
case 56:
/* Line 1455 of yacc.c */
#line 230 "bison.y"
{ /* nil */
(yyval.intval) = 0
;}
break;
case 58:
/* Line 1455 of yacc.c */
#line 235 "bison.y"
{ emit("FILTER BY"); ;}
break;
case 59:
/* Line 1455 of yacc.c */
#line 238 "bison.y"
{ (yyval.intval) = 1; emit_join_tab((yyvsp[(2) - (4)].strval));;}
break;
case 60:
/* Line 1455 of yacc.c */
#line 239 "bison.y"
{ (yyval.intval) = 1; emit_join_tab((yyvsp[(2) - (5)].strval)); ;}
break;
case 61:
/* Line 1455 of yacc.c */
#line 241 "bison.y"
{ /* nil */
(yyval.intval) = 0
;}
break;
case 62:
/* Line 1455 of yacc.c */
#line 244 "bison.y"
{ emit_limit((yyvsp[(2) - (2)].intval)); ;}
break;
/* Line 1455 of yacc.c */
#line 2023 "bison.cu"
default: break;
}
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
*++yyvsp = yyval;
/* Now `shift' the result of the reduction. Determine what state
that goes to, based on the state we popped back to and the rule
number reduced by. */
yyn = yyr1[yyn];
yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
yystate = yytable[yystate];
else
yystate = yydefgoto[yyn - YYNTOKENS];
goto yynewstate;
/*------------------------------------.
| yyerrlab -- here on detecting error |
`------------------------------------*/
yyerrlab:
/* If not already recovering from an error, report this error. */
if (!yyerrstatus)
{
++yynerrs;
#if ! YYERROR_VERBOSE
yyerror (YY_("syntax error"));
#else
{
YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
{
YYSIZE_T yyalloc = 2 * yysize;
if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
yyalloc = YYSTACK_ALLOC_MAXIMUM;
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
yymsg = (char *) YYSTACK_ALLOC (yyalloc);
if (yymsg)
yymsg_alloc = yyalloc;
else
{
yymsg = yymsgbuf;
yymsg_alloc = sizeof yymsgbuf;
}
}
if (0 < yysize && yysize <= yymsg_alloc)
{
(void) yysyntax_error (yymsg, yystate, yychar);
yyerror (yymsg);
}
else
{
yyerror (YY_("syntax error"));
if (yysize != 0)
goto yyexhaustedlab;
}
}
#endif
}
if (yyerrstatus == 3)
{
/* If just tried and failed to reuse lookahead token after an
error, discard it. */
if (yychar <= YYEOF)
{
/* Return failure if at end of input. */
if (yychar == YYEOF)
YYABORT;
}
else
{
yydestruct ("Error: discarding",
yytoken, &yylval);
yychar = YYEMPTY;
}
}
/* Else will try to reuse lookahead token after shifting the error
token. */
goto yyerrlab1;
/*---------------------------------------------------.
| yyerrorlab -- error raised explicitly by YYERROR. |
`---------------------------------------------------*/
yyerrorlab:
/* Pacify compilers like GCC when the user code never invokes
YYERROR and the label yyerrorlab therefore never appears in user
code. */
if (/*CONSTCOND*/ 0)
goto yyerrorlab;
/* Do not reclaim the symbols of the rule which action triggered
this YYERROR. */
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
yystate = *yyssp;
goto yyerrlab1;
/*-------------------------------------------------------------.
| yyerrlab1 -- common code for both syntax error and YYERROR. |
`-------------------------------------------------------------*/
yyerrlab1:
yyerrstatus = 3; /* Each real token shifted decrements this. */
for (;;)
{
yyn = yypact[yystate];
if (yyn != YYPACT_NINF)
{
yyn += YYTERROR;
if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
{
yyn = yytable[yyn];
if (0 < yyn)
break;
}
}
/* Pop the current state because it cannot handle the error token. */
if (yyssp == yyss)
YYABORT;
yydestruct ("Error: popping",
yystos[yystate], yyvsp);
YYPOPSTACK (1);
yystate = *yyssp;
YY_STACK_PRINT (yyss, yyssp);
}
*++yyvsp = yylval;
/* Shift the error token. */
YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
yystate = yyn;
goto yynewstate;
/*-------------------------------------.
| yyacceptlab -- YYACCEPT comes here. |
`-------------------------------------*/
yyacceptlab:
yyresult = 0;
goto yyreturn;
/*-----------------------------------.
| yyabortlab -- YYABORT comes here. |
`-----------------------------------*/
yyabortlab:
yyresult = 1;
goto yyreturn;
#if !defined(yyoverflow) || YYERROR_VERBOSE
/*-------------------------------------------------.
| yyexhaustedlab -- memory exhaustion comes here. |
`-------------------------------------------------*/
yyexhaustedlab:
yyerror (YY_("memory exhausted"));
yyresult = 2;
/* Fall through. */
#endif
yyreturn:
if (yychar != YYEMPTY)
yydestruct ("Cleanup: discarding lookahead",
yytoken, &yylval);
/* Do not reclaim the symbols of the rule which action triggered
this YYABORT or YYACCEPT. */
YYPOPSTACK (yylen);
YY_STACK_PRINT (yyss, yyssp);
while (yyssp != yyss)
{
yydestruct ("Cleanup: popping",
yystos[*yyssp], yyvsp);
YYPOPSTACK (1);
}
#ifndef yyoverflow
if (yyss != yyssa)
YYSTACK_FREE (yyss);
#endif
#if YYERROR_VERBOSE
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
#endif
/* Make sure YYID is used. */
return YYID (yyresult);
}
/* Line 1675 of yacc.c */
#line 247 "bison.y"
#include "filter.cu"
#include "select.cu"
#include "merge.cu"
#include "zone_map.cu"
FILE *file_pointer;
queue<string> namevars;
queue<string> typevars;
queue<int> sizevars;
queue<int> cols;
queue<string> op_type;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<unsigned int> j_col_count;
unsigned int sel_count = 0;
unsigned int join_cnt = 0;
int join_col_cnt = 0;
unsigned int eqq = 0;
stack<string> op_join;
unsigned int orig_recCount;
unsigned int statement_count = 0;
map<string,unsigned int> stat;
bool scan_state = 0;
string separator, f_file;
CUDPPHandle theCudpp;
using namespace thrust::placeholders;
void emit_name(char *name)
{
op_type.push("NAME");
op_value.push(name);
}
void emit_limit(int val)
{
op_nums.push(val);
}
void emit_string(char *str)
{ // strip the enclosing quotes from the string literal
string sss(str,1, strlen(str)-2);
op_type.push("STRING");
op_value.push(sss);
}
void emit_number(int_type val)
{
op_type.push("NUMBER");
op_nums.push(val);
}
void emit_float(float_type val)
{
op_type.push("FLOAT");
op_nums_f.push(val);
}
void emit_decimal(float_type val)
{
op_type.push("DECIMAL");
op_nums_f.push(val);
}
void emit_mul()
{
op_type.push("MUL");
}
void emit_add()
{
op_type.push("ADD");
}
void emit_div()
{
op_type.push("DIV");
}
void emit_and()
{
op_type.push("AND");
if (join_col_cnt == -1)
join_col_cnt++;
join_col_cnt++;
eqq = 0;
}
void emit_eq()
{
//op_type.push("JOIN");
eqq++;
join_cnt++;
if(eqq == join_col_cnt+1) {
j_col_count.push(join_col_cnt+1);
join_col_cnt = -1;
}
else if (join_col_cnt == -1 )
j_col_count.push(1);
}
void emit_or()
{
op_type.push("OR");
}
void emit_minus()
{
op_type.push("MINUS");
}
void emit_cmp(int val)
{
op_type.push("CMP");
op_nums.push(val);
}
void emit(char *s, ...)
{
}
void emit_var(char *s, int c, char *f)
{
namevars.push(s);
typevars.push(f);
sizevars.push(0);
cols.push(c);
}
void emit_var_asc(char *s)
{
op_type.push(s);
op_value.push("ASC");
}
void emit_var_desc(char *s)
{
op_type.push(s);
op_value.push("DESC");
}
void emit_varchar(char *s, int c, char *f, int d)
{
namevars.push(s);
typevars.push(f);
sizevars.push(d);
cols.push(c);
}
void emit_sel_name(char *s)
{
op_type.push("emit sel_name");
op_value.push(s);
sel_count++;
}
void emit_count()
{
op_type.push("COUNT");
}
void emit_sum()
{
op_type.push("SUM");
}
void emit_average()
{
op_type.push("AVG");
}
void emit_min()
{
op_type.push("MIN");
}
void emit_max()
{
op_type.push("MAX");
}
void emit_join_tab(char *s)
{
op_join.push(s);
};
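/* Builds a permutation that sorts CudaSet 'a' by the columns in exe_type
   (ascending) and applies it in place to every column listed in field_names.
   A single scratch buffer, sized for the largest participating column, is
   reused by update_permutation/apply_permutation; the permutation itself is
   returned so the caller can reuse or free it. */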
thrust::device_ptr<unsigned int> order_inplace(CudaSet* a, stack<string> exe_type, set<string> field_names, unsigned int segment)
{
unsigned int sz = a->mRecCount;
thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(sz);
thrust::sequence(permutation, permutation+sz,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
void* temp;
// find the largest mRecSize of all data sources
unsigned int maxSize = 0;
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
CudaSet *t = varNames[setMap[*it]];
//cout << "MAX of " << setMap[*it] << " = " << t->mRecCount << endl;
if(t->mRecCount > maxSize)
maxSize = t->mRecCount;
};
//cout << "max size " << maxSize << endl;
//cout << "sort alloc " << maxSize << endl;
//cout << "order mem " << getFreeMem() << endl;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, maxSize*float_size));
for(int i=0; !exe_type.empty(); ++i, exe_type.pop()) {
int colInd = (a->columnNames).find(exe_type.top())->second;
if ((a->type)[colInd] == 0)
update_permutation(a->d_columns_int[a->type_index[colInd]], raw_ptr, sz, "ASC", (int_type*)temp);
else if ((a->type)[colInd] == 1)
update_permutation(a->d_columns_float[a->type_index[colInd]], raw_ptr, sz,"ASC", (float_type*)temp);
else {
CudaChar* c = a->h_columns_cuda_char[a->type_index[colInd]];
for(int j=(c->mColumnCount)-1; j>=0 ; j--)
update_permutation((c->d_columns)[j], raw_ptr, sz, "ASC", (char*)temp);
};
};
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
int i = a->columnNames[*it];
if ((a->type)[i] == 0)
apply_permutation(a->d_columns_int[a->type_index[i]], raw_ptr, sz, (int_type*)temp);
else if ((a->type)[i] == 1)
apply_permutation(a->d_columns_float[a->type_index[i]], raw_ptr, sz, (float_type*)temp);
else {
CudaChar* c = a->h_columns_cuda_char[a->type_index[i]];
for(int j=(c->mColumnCount)-1; j>=0 ; j--)
apply_permutation((c->d_columns)[j], raw_ptr, sz, (char*)temp);
};
};
cudaFree(temp);
return permutation;
}
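/* JOIN handler. Gathers each side's join key into page-locked, device-mapped
   host arrays (A for the left keys, B for the right keys), sorts the right
   keys if they are not already ordered, builds a CUDPP multivalue hash table
   over them, probes it with the left keys, and expands the returned
   (offset, count) pairs into the d_res1/d_res2 row-index vectors that become
   the permutation maps of the joined set. */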
void emit_join(char *s, char *j1)
{
string j2 = op_join.top();
op_join.pop();
statement_count++;
if (scan_state == 0) {
if (stat.find(j1) == stat.end()) {
cout << "Join : couldn't find variable " << j1 << endl;
exit(1);
};
if (stat.find(j2) == stat.end()) {
cout << "Join : couldn't find variable " << j2 << endl;
exit(1);
};
stat[s] = statement_count;
stat[j1] = statement_count;
stat[j2] = statement_count;
return;
};
if(varNames.find(j1) == varNames.end() || varNames.find(j2) == varNames.end()) {
clean_queues();
return;
};
CudaSet* left = varNames.find(j1)->second;
CudaSet* right = varNames.find(j2)->second;
queue<string> op_sel;
queue<string> op_sel_as;
for(int i=0; i < sel_count; i++) {
op_sel.push(op_value.front());
op_value.pop();
op_sel_as.push(op_value.front());
op_value.pop();
};
string f1 = op_value.front();
op_value.pop();
string f2 = op_value.front();
op_value.pop();
cout << "JOIN " << s << " " << getFreeMem() << endl;;
std::clock_t start1 = std::clock();
CudaSet* c;
if (left->mRecCount == 0 || right->mRecCount == 0) {
c = new CudaSet(left,right,0, op_sel, op_sel_as);
if (left->fact_table == 1 || right->fact_table == 1)
c->fact_table = 1;
varNames[s] = c;
clean_queues();
return;
};
unsigned int colInd1 = (left->columnNames).find(f1)->second;
unsigned int colInd2 = (right->columnNames).find(f2)->second;
set<string> field_names;
stack<string> exe_type;
exe_type.push(f2);
field_names.insert(f2);
unsigned int *A, *B;
uint2* R;
unsigned int *devPtrA, *devPtrB;
uint2 *devPtrR;
size_t memsize;
unsigned int rcount = 0;
if(!left->prm.empty())
for(int i = 0; i < left->segCount; i ++) {
rcount = rcount + left->prm_count[setMap[f1]][i];
}
else
rcount = varNames[setMap[f1]]->mRecCount;
memsize = rcount * sizeof(unsigned int);
#ifdef _WIN64
A = (unsigned int*) VirtualAlloc(NULL, memsize, MEM_COMMIT, PAGE_READWRITE);
#else
A = (unsigned int*) valloc(memsize);
#endif
cudaError_t err = cudaHostRegister(A, memsize, cudaHostRegisterMapped);
if (cudaSuccess != err)
cout << cudaGetErrorString( err ) << endl;
err = cudaHostGetDevicePointer((void **) &devPtrA, (void *) A, 0);
if (cudaSuccess != err)
cout << cudaGetErrorString( err ) << endl;
thrust::device_ptr<unsigned int> ll((unsigned int*)devPtrA);
rcount = 0;
if(!right->prm.empty())
for(int i = 0; i < right->segCount; i ++) {
rcount = rcount + right->prm_count[setMap[f2]][i];
}
else
rcount = varNames[setMap[f2]]->mRecCount;
memsize = rcount * sizeof(unsigned int); // does not have to be aligned anymore
#ifdef _WIN64
B = (unsigned int*) VirtualAlloc(NULL, memsize, MEM_COMMIT, PAGE_READWRITE);
#else
B = (unsigned int*) valloc(memsize);
#endif
err = cudaHostRegister(B, memsize, cudaHostRegisterMapped);
if (cudaSuccess != err)
cout << cudaGetErrorString( err ) << endl;
err = cudaHostGetDevicePointer((void **) &devPtrB, (void *) B, 0);
if (cudaSuccess != err)
cout << cudaGetErrorString( err ) << endl;
thrust::device_ptr<unsigned int> rr((unsigned int*)devPtrB);
cout << "A and B regged " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
queue<string> cc;
cc.push(f2);
unsigned int cnt_r = 0;
varNames[setMap[f2]]->oldRecCount = varNames[setMap[f2]]->mRecCount;
allocColumns(right, cc);
for(int i = 0; i < right->segCount; i++) {
// copy every segment to gpu then copy to host using host mapped memory
copyGatherJoin(right, rr, cc.front(), i, cnt_r);
};
//here we need to make sure that rr is ordered. If not then we order it and keep the permutation
bool sorted = thrust::is_sorted(rr,rr+cnt_r);
thrust::device_vector<unsigned int> v(cnt_r);
thrust::sequence(v.begin(),v.end(),0,1);
if(!sorted) {
thrust::sort_by_key(rr,rr+cnt_r, v.begin());
};
varNames[setMap[f2]]->mRecCount = varNames[setMap[f2]]->oldRecCount;
cc.pop();
cc.push(f1);
unsigned int cnt_l = 0;
varNames[setMap[f1]]->oldRecCount = varNames[setMap[f1]]->mRecCount;
allocColumns(left, cc);
for(int i = 0; i < left->segCount; i++) {
// copy every segment to gpu then copy to host using host mapped memory
copyGatherJoin(left, ll, cc.front(), i, cnt_l);
};
varNames[setMap[f1]]->mRecCount = varNames[setMap[f1]]->oldRecCount;
cout << "successfully loaded l && r " << cnt_l << " " << cnt_r << " " << getFreeMem() << endl;
thrust::device_vector<unsigned int> d_res1;
thrust::device_vector<unsigned int> d_res2;
std::clock_t start2 = std::clock();
memsize = cnt_l * sizeof(uint2);
#ifdef _WIN64
R = (uint2*) VirtualAlloc(NULL, memsize, MEM_COMMIT, PAGE_READWRITE);
#else
R = (uint2*) valloc(memsize);
#endif
err = cudaHostRegister(R, memsize, cudaHostRegisterMapped);
if (cudaSuccess != err)
cout << cudaGetErrorString( err ) << endl;
err = cudaHostGetDevicePointer((void **) &devPtrR, (void *) R, 0);
if (cudaSuccess != err)
cout << cudaGetErrorString( err ) << endl;
thrust::device_ptr<uint2> res((uint2*)devPtrR);
std::cout<< "join reg time " << ( ( std::clock() - start2 ) / (double)CLOCKS_PER_SEC ) <<'\n';
std::clock_t start3 = std::clock();
//thrust::device_vector<uint2> res(left->mRecCount);
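// Integer keys only: build a CUDPP multivalue hash table over the right keys
// (the stored value is the key's row position), probe it with every left key,
// reduce the returned (offset, count) pairs to size the output, then expand
// them into explicit row indexes with join_functor.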
if ((left->type)[colInd1] == 0 && (right->type)[colInd2] == 0) {
CUDPPHandle hash_table_handle;
//cout << "creating hash table " << cnt_r << endl;
CUDPPHashTableConfig config;
config.type = CUDPP_MULTIVALUE_HASH_TABLE;
//config.kInputSize = right->mRecCount;
config.kInputSize = 150000000;
config.space_usage = 1.1f;
cout << "creating table with " << right->mRecCount << endl;
cout << "MEM " << getFreeMem() << endl;
CUDPPResult result = cudppHashTable(theCudpp, &hash_table_handle, &config);
std::cout<< "table creation time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
if (result == CUDPP_SUCCESS)
cout << "hash table created " << getFreeMem() << endl;
cout << "INSERT " << cnt_r << " " << right->mRecCount << " " << getFreeMem() << endl;
result = cudppHashInsert(hash_table_handle, thrust::raw_pointer_cast(rr),
thrust::raw_pointer_cast(v.data()), cnt_r);
std::cout<< "table insert time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
if (result == CUDPP_SUCCESS)
cout << "hash table inserted " << getFreeMem() << endl;
for(int i = 0; i < 30;i++)
cudppHashRetrieve(hash_table_handle, thrust::raw_pointer_cast(ll),
thrust::raw_pointer_cast(res), cnt_l);
std::cout<< "table retrieve time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
cudppDestroyHashTable(theCudpp, hash_table_handle);
std::cout<< "table destroy time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
if (result == CUDPP_SUCCESS)
cout << "hash table destroyed " << getFreeMem() << endl;
cout << "reducing " << cnt_l << endl;
std::cout<< "111 " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
uint2 rr = thrust::reduce(res, res+cnt_l, make_uint2(0,0), Uint2Sum());
std::cout<< "reduce time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
if(rr.y) {
//thrust::device_vector<unsigned int> d_r(cnt_l);
thrust::device_ptr<unsigned int> d_r = thrust::device_malloc<unsigned int>(cnt_l);
std::cout<< "table malloc time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0);
uint2_split ff(thrust::raw_pointer_cast(res),thrust::raw_pointer_cast(d_r));
thrust::for_each(begin, begin + cnt_l, ff);
std::cout<< "table split time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
thrust::exclusive_scan(d_r, d_r+cnt_l, d_r ); // addresses
std::cout<< "table scan time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
d_res1.resize(rr.y);
d_res2.resize(rr.y);
std::cout<< "table resize time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
join_functor ff1(thrust::raw_pointer_cast(res),
thrust::raw_pointer_cast(d_r),
thrust::raw_pointer_cast(d_res1.data()),
thrust::raw_pointer_cast(d_res2.data()));
thrust::for_each(begin, begin + cnt_l, ff1);
std::cout<< "table foreach time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
thrust::device_free(d_r);
};
}
cudaHostUnregister(A);
cudaHostUnregister(B);
cudaHostUnregister(R);
std::cout<< "unregged time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
#ifdef _WIN64
VirtualFree(A, 0, MEM_RELEASE);
VirtualFree(B, 0, MEM_RELEASE);
VirtualFree(R, 0, MEM_RELEASE);
#else
free(A);
free(B);
free(R);
#endif
std::cout<< "table unreg time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
c = new CudaSet(right,left,d_res1.size(),op_sel, op_sel_as);
bool left_check;
thrust::device_vector<unsigned int> p(d_res1.size());
thrust::device_vector<unsigned int> res_tmp(left->mRecCount);
std::cout<< "bad malloc time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
//gather prm of left and right vectors
while(!op_sel.empty()) {
if(c->prm.count(setMap[op_sel.front()]) == 0) {
CudaSet *t = varNames[setMap[op_sel.front()]];
CudaSet *lr;
if(left->columnNames.find(op_sel.front()) != left->columnNames.end()) {
lr = left;
left_check = 1;
}
else {
lr = right;
left_check = 0;
};
c->prm[setMap[op_sel.front()]].push_back(new unsigned int[d_res1.size()]);
c->prm_count[setMap[op_sel.front()]].push_back(d_res1.size());
if(lr->prm.size() != 0) {
// join prm segments, add seg_num*maxRecs to values and gather the result
unsigned int curr_count = 0;
for(unsigned int i = 0; i < lr->prm[setMap[op_sel.front()]].size(); i++) {
//lr->prm_d = lr->prm[setMap[op_sel.front()]][i];
if(lr->prm_d.size() == 0) // find the largest prm segment
lr->prm_d.resize(largest_prm(lr, op_sel.front()));
unsigned int g_size = lr->prm_count[setMap[op_sel.front()]][i];
cudaMemcpy((void**)(thrust::raw_pointer_cast(lr->prm_d.data())), (void**)lr->prm[setMap[op_sel.front()]][i], 4*g_size, cudaMemcpyHostToDevice);
thrust::transform(lr->prm_d.begin(), lr->prm_d.begin() + g_size,
res_tmp.begin() + curr_count, _1+(i*t->maxRecs));
curr_count = curr_count + lr->prm_count[setMap[op_sel.front()]][i];
};
if(left_check)
thrust::gather(d_res1.begin(), d_res1.end(), res_tmp.begin(), p.begin());
else
thrust::gather(d_res2.begin(), d_res2.end(), res_tmp.begin(), p.begin());
cudaMemcpy((void**)c->prm[setMap[op_sel.front()]][0], (void**)(thrust::raw_pointer_cast(p.data())), 4*d_res1.size(), cudaMemcpyDeviceToHost);
}
else { // copy d_res2 into prm[setMap[op_sel.front()]]
if(left_check)
thrust::copy(d_res1.begin(), d_res1.end(), p.begin());
else
thrust::copy(d_res2.begin(), d_res2.end(), p.begin());
cudaMemcpy((void**)c->prm[setMap[op_sel.front()]][0], (void**)(thrust::raw_pointer_cast(p.data())), 4*d_res1.size(), cudaMemcpyDeviceToHost);
}
};
op_sel.pop();
};
cout << "join final end " << d_res1.size() << " " << getFreeMem() << endl;
std::cout<< "table unreg time1 " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) <<'\n';
//right->d_res1
//left->d_res2
// modify left's and right's perm tables using D_res1 and d_res2
left->deAllocOnDevice();
right->deAllocOnDevice();
for (map<string, std::vector<unsigned int*> >::iterator it=left->prm.begin() ; it != left->prm.end(); ++it ) {
varNames[(*it).first]->deAllocOnDevice();
};
for (map<string, std::vector<unsigned int*> >::iterator it=right->prm.begin() ; it != right->prm.end(); ++it ) {
varNames[(*it).first]->deAllocOnDevice();
};
varNames[s] = c;
c->maxRecs = c->mRecCount;
c->segCount = 1;
clean_queues();
if(stat[s] == statement_count) {
c->free();
varNames.erase(s);
};
if(stat[j1] == statement_count) {
left->free();
varNames.erase(j1);
};
if(stat[j2] == statement_count && (strcmp(j1,j2.c_str()) != 0)) {
right->free();
varNames.erase(j2);
};
std::cout<< "join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
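/* ORDER handler, e.g. "R := ORDER T BY col;" (syntax sketched from the
   grammar above, names illustrative). Copies the ordering columns to the
   device, builds a permutation with update_permutation per column, and
   stores the resulting row order in the new set's prm map instead of
   physically reordering the data. */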
void emit_order(char *s, char *f, int e, int ll)
{
if(ll == 0)
statement_count++;
if (scan_state == 0 && ll == 0) {
if (stat.find(f) == stat.end()) {
cout << "Order : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
return;
};
if(varNames.find(f) == varNames.end() ) {
clean_queues();
return;
};
CudaSet* a = varNames.find(f)->second;
if (a->mRecCount == 0) {
if(varNames.find(s) == varNames.end())
varNames[s] = new CudaSet(0,1);
else {
CudaSet* c = varNames.find(s)->second;
c->mRecCount = 0;
};
return;
};
stack<string> exe_type, exe_value;
cout << "order: " << s << " " << f << endl;;
for(int i=0; !op_type.empty(); ++i, op_type.pop(),op_value.pop()) {
if ((op_type.front()).compare("NAME") == 0) {
exe_type.push(op_value.front());
exe_value.push("ASC");
}
else {
exe_type.push(op_type.front());
exe_value.push(op_value.front());
};
};
// initialize permutation to [0, 1, 2, ... ,N-1]
thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(a->mRecCount);
thrust::sequence(permutation, permutation+(a->mRecCount));
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
CudaSet *b = a->copyStruct(a->mRecCount);
b->isJoined = a->isJoined;
// find the largest mRecSize of all data sources
unsigned int maxSize = 0;
stack<string> tp(exe_type);
queue<string> op_vx;
while (!tp.empty()) {
op_vx.push(tp.top());
CudaSet *t = varNames[setMap[tp.top()]];
if(t->mRecCount > maxSize)
maxSize = t->mRecCount;
tp.pop();
};
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, maxSize*float_size));
varNames[setMap[exe_type.top()]]->oldRecCount = varNames[setMap[exe_type.top()]]->mRecCount;
allocColumns(a, op_vx);
copyColumns(a, op_vx, 0);
varNames[setMap[exe_type.top()]]->mRecCount = varNames[setMap[exe_type.top()]]->oldRecCount;
for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) {
int colInd = (a->columnNames).find(exe_type.top())->second;
if ((a->type)[colInd] == 0)
update_permutation(a->d_columns_int[a->type_index[colInd]], raw_ptr, a->mRecCount, exe_value.top(), (int_type*)temp);
else if ((a->type)[colInd] == 1)
update_permutation(a->d_columns_float[a->type_index[colInd]], raw_ptr, a->mRecCount,exe_value.top(), (float_type*)temp);
else {
CudaChar* c = a->h_columns_cuda_char[a->type_index[colInd]];
for(int j=(c->mColumnCount)-1; j>=0 ; j--)
update_permutation((c->d_columns)[j], raw_ptr, a->mRecCount, exe_value.top(), (char*)temp);
};
};
// gather a's prm to b's prm
if(a->prm.size() != 0) {
thrust::device_vector<unsigned int> p(a->mRecCount);
thrust::device_vector<unsigned int> p_a(a->mRecCount);
for ( map<string, std::vector<unsigned int*> >::iterator it=a->prm.begin() ; it != a->prm.end(); ++it ) {
b->prm[(*it).first].push_back(new unsigned int[a->mRecCount]);
b->prm_count[(*it).first].push_back(a->mRecCount);
//p_a = (*it).second[0];
cudaMemcpy((void**)(thrust::raw_pointer_cast(p_a.data())), (void**)(*it).second[0], 4*a->mRecCount, cudaMemcpyHostToDevice);
thrust::gather(permutation, permutation+a->mRecCount, p_a.begin(), p.begin());
//b->prm[(*it).first][0] = p;
cudaMemcpy((void**)b->prm[(*it).first][0], (void**)(thrust::raw_pointer_cast(p.data())), 4*a->mRecCount, cudaMemcpyDeviceToHost);
};
}
else {
thrust::device_vector<unsigned int> p(a->mRecCount);
b->prm[a->name].push_back(new unsigned int[a->mRecCount]);
b->prm_count[a->name].push_back(a->mRecCount);
thrust::copy(permutation, permutation+a->mRecCount, p.begin());
//b->prm[a->name][0] = p;
cudaMemcpy((void**)b->prm[a->name][0], (void**)(thrust::raw_pointer_cast(p.data())), 4*a->mRecCount, cudaMemcpyDeviceToHost);
};
b->deAllocOnDevice();
a->deAllocOnDevice();
thrust::device_free(permutation);
cudaFree(temp);
varNames[s] = b;
b->segCount = 1;
if (a->fact_table == 1)
b->fact_table = 1;
else
b->fact_table = 0;
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(f);
};
}
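/* SELECT handler, e.g. "R := SELECT n1 AS key, SUM(n2) AS total FROM T
   GROUP BY n1;" (syntax sketched from the grammar above, names illustrative).
   Processes the source a segment at a time: copy the referenced columns,
   optionally order and group them, evaluate the select expressions into a
   temporary set b, and for grouped queries accumulate the per-segment
   results into c, which is merged into the final answer after the loop;
   ungrouped queries return b directly. */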
void emit_select(char *s, char *f, int ll)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(f) == stat.end()) {
cout << "Select : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
return;
};
if(varNames.find(f) == varNames.end()) {
clean_queues();
return;
};
queue<string> op_v1(op_value);
while(op_v1.size() > ll)
op_v1.pop();
stack<string> op_v2;
queue<string> op_v3;
for(int i=0; i < ll; ++i) {
op_v2.push(op_v1.front());
op_v3.push(op_v1.front());
op_v1.pop();
};
CudaSet *a;
a = varNames.find(f)->second;
if(a->mRecCount == 0) {
CudaSet *c;
c = new CudaSet(0,1);
varNames[s] = c;
clean_queues();
return;
};
cout << "SELECT " << s << " " << f << endl;
std::clock_t start1 = std::clock();
// here we need to determine the column count and composition
queue<string> op_v(op_value);
queue<string> op_vx;
set<string> field_names;
map<string,string> aliases;
string tt;
for(int i=0; !op_v.empty(); ++i, op_v.pop()) {
if(a->columnNames.find(op_v.front()) != a->columnNames.end()) {
field_names.insert(op_v.front());
if(aliases.count(op_v.front()) == 0 && aliases.size() < ll) {
tt = op_v.front();
op_v.pop();
aliases[tt] = op_v.front();
};
};
};
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
op_vx.push(*it);
};
// find out how many columns a new set will have
queue<string> op_t(op_type);
int_type col_count = 0;
for(int i=0; !op_t.empty(); ++i, op_t.pop())
if((op_t.front()).compare("emit sel_name") == 0)
col_count++;
CudaSet* b, *c;
allocColumns(a, op_vx);
unsigned int cycle_count = 1;
curr_segment = 100;
if(a->prm.size() <= 1)
cycle_count = varNames[setMap[op_value.front()]]->segCount;
unsigned int ol_count = a->mRecCount;
varNames[setMap[op_value.front()]]->oldRecCount = varNames[setMap[op_value.front()]]->mRecCount;
//bck = a;
for(unsigned int i = 0; i < cycle_count; i++) { // MAIN CYCLE
cout << "cycle " << i << " select mem " << getFreeMem() << endl;
std::clock_t start2 = std::clock();
if(i == 0)
b = new CudaSet(0, col_count);
copyColumns(a, op_vx, i);
std::cout<< "cpy time " << ( ( std::clock() - start2 ) / (double)CLOCKS_PER_SEC ) <<'\n';
CudaSet *t = varNames[setMap[op_vx.front()]];
if (ll != 0) {
thrust::device_ptr<unsigned int> perm = order_inplace(a,op_v2,field_names,i);
std::cout<< "order time " << ( ( std::clock() - start2 ) / (double)CLOCKS_PER_SEC ) <<'\n';
thrust::device_free(perm);
a->GroupBy(op_v3);
std::cout<< "grp time " << ( ( std::clock() - start2 ) / (double)CLOCKS_PER_SEC ) <<'\n';
};
select(op_type,op_value,op_nums, op_nums_f,a,b, a->mRecCount);
std::cout<< "select time " << ( ( std::clock() - start2 ) / (double)CLOCKS_PER_SEC ) <<'\n';
if(i == 1) {
for ( map<string,int>::iterator it=b->columnNames.begin() ; it != b->columnNames.end(); ++it )
setMap[(*it).first] = s;
};
if (ll != 0) {
if (i == 0) {
c = new CudaSet(b->mRecCount, col_count);
c->fact_table = 1;
c->segCount = 1;
}
else {
c->resize(b->mRecCount);
};
add(c,b,op_v3);
};
};
a->mRecCount = ol_count;
varNames[setMap[op_value.front()]]->mRecCount = varNames[setMap[op_value.front()]]->oldRecCount;
if(stat[f] == statement_count) {
a->deAllocOnDevice();
};
if (ll != 0) {
CudaSet *r = merge(c,op_v3, op_v2, aliases);
c->free();
c = r;
};
c->maxRecs = c->mRecCount;
c->name = s;
c->keep = 1;
for ( map<string,int>::iterator it=c->columnNames.begin() ; it != c->columnNames.end(); ++it ) {
setMap[(*it).first] = s;
};
cout << "final select " << c->mRecCount << endl;
clean_queues();
if (ll != 0) {
varNames[s] = c;
b->free();
}
else
varNames[s] = b;
varNames[s]->keep = 1;
if(stat[s] == statement_count) {
varNames[s]->free();
varNames.erase(s);
};
if(stat[f] == statement_count && a->keep == 0) {
a->free();
varNames.erase(f);
};
std::cout<< "select time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
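// Executes a FILTER statement. On the scan pass the filter expression is saved for
// possible zone-map filtering; on the execution pass the source set's structure is
// copied and the predicate is evaluated segment by segment, storing the surviving
// row permutation in the new variable s.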
void emit_filter(char *s, char *f, int e)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(f) == stat.end()) {
cout << "Filter : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
// save the filter parameters for possible later zone map filtering
top_type[f] = op_type;
top_value[f] = op_value;
top_nums[f] = op_nums;
top_nums_f[f] = op_nums_f;
clean_queues();
return;
};
if(varNames.find(f) == varNames.end()) {
clean_queues();
return;
};
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
std::clock_t start1 = std::clock();
if(a->mRecCount == 0) {
b = new CudaSet(0,1);
}
else {
cout << "FILTER " << s << " " << f << endl;
std::clock_t start1 = std::clock();
b = a->copyDeviceStruct();
b->name = s;
b->isJoined = a->isJoined;
// if prm.size() <= 1 then process segment by segment
// else copy entire column to gpu
unsigned int cycle_count = 1;
allocColumns(a, op_value);
varNames[setMap[op_value.front()]]->oldRecCount = varNames[setMap[op_value.front()]]->mRecCount;
if(!a->isJoined)
cycle_count = varNames[setMap[op_value.front()]]->segCount;
thrust::device_vector<unsigned int> p(a->maxRecs);
curr_segment = 100;
for(unsigned int i = 0; i < cycle_count; i++) {
copyColumns(a, op_value, i);
filter(op_type,op_value,op_nums, op_nums_f,a, b, i, p);
};
varNames[setMap[op_value.front()]]->mRecCount = varNames[setMap[op_value.front()]]->oldRecCount;
cout << "filter is finished " << b->mRecCount << " " << getFreeMem() << endl;
//dealloc sources
for (map<string, std::vector<unsigned int*> >::iterator it=a->prm.begin() ; it != a->prm.end(); ++it ) {
varNames[(*it).first]->deAllocOnDevice();
};
a->deAllocOnDevice();
};
clean_queues();
if (varNames.count(s) > 0)
varNames[s]->free();
varNames[s] = b;
if(stat[s] == statement_count) {
b->free();
varNames.erase(s);
};
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(f);
};
std::cout<< "filter time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
}
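// Executes a STORE statement: writes variable s to the text file f using the given
// separator, honoring an optional row limit.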
void emit_store(char *s, char *f, char* sep)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(s) == stat.end()) {
cout << "Store : couldn't find variable " << s << endl;
exit(1);
};
stat[s] = statement_count;
return;
};
if(varNames.find(s) == varNames.end())
return;
CudaSet* a = varNames.find(s)->second;
cout << "STORE: " << s << " " << f << " " << sep << endl;
int limit = 0;
if(!op_nums.empty()) {
limit = op_nums.front();
op_nums.pop();
};
a->Store(f,sep, limit, 0);
if(stat[s] == statement_count && a->keep == 0) {
a->free();
varNames.erase(s);
};
};
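// Executes a STORE ... BINARY statement: loads the source text file chunk by chunk
// via LoadBigFile and writes each chunk out in binary form until the whole file
// has been consumed.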
void emit_store_binary(char *s, char *f)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(s) == stat.end()) {
cout << "Store : couldn't find variable " << s << endl;
exit(1);
};
stat[s] = statement_count;
return;
};
if(varNames.find(s) == varNames.end())
return;
CudaSet* a = varNames.find(s)->second;
if(stat[f] == statement_count)
a->deAllocOnDevice();
printf("STORE: %s %s \n", s, f);
int limit = 0;
if(!op_nums.empty()) {
limit = op_nums.front();
op_nums.pop();
};
fact_file_loaded = 0;
while(!fact_file_loaded) {
cout << "LOADING " << f_file << " " << separator << endl;
fact_file_loaded = a->LoadBigFile(f_file.c_str(), separator.c_str());
cout << "STORING " << f << " " << limit << endl;
a->Store(f,"", limit, 1);
};
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(s);
};
};
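// Executes a LOAD ... BINARY statement: reads the total record count, segment count
// and maximum segment size from the footer of the first binary column file and
// builds the CudaSet metadata; the column data itself is loaded later on demand.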
void emit_load_binary(char *s, char *f, int d)
{
statement_count++;
if (scan_state == 0) {
stat[s] = statement_count;
return;
};
printf("BINARY LOAD: %s %s \n", s, f);
std::clock_t start1 = std::clock();
CudaSet *a;
unsigned int segCount, maxRecs;
char f1[100];
strcpy(f1, f);
strcat(f1,".");
char col_pos[3];
itoaa(cols.front(),col_pos);
strcat(f1,col_pos);
FILE* ff = fopen(f1, "rb");
fseeko(ff, -16, SEEK_END);
fread((char *)&totalRecs, 8, 1, ff);
fread((char *)&segCount, 4, 1, ff);
fread((char *)&maxRecs, 4, 1, ff);
fclose(ff);
queue<string> names(namevars);
while(!names.empty()) {
setMap[names.front()] = s;
names.pop();
};
a = new CudaSet(namevars, typevars, sizevars, cols,totalRecs, f);
a->segCount = segCount;
a->maxRecs = maxRecs;
a->keep = 1;
varNames[s] = a;
if(stat[s] == statement_count ) {
a->free();
varNames.erase(s);
};
std::cout<< "load time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
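// Executes a LOAD statement for a delimited text file: allocates the CudaSet and
// records the file name and separator; the actual parsing is deferred (LoadBigFile
// is commented out here and invoked later, see emit_store_binary).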
void emit_load(char *s, char *f, int d, char* sep)
{
statement_count++;
if (scan_state == 0) {
stat[s] = statement_count;
return;
};
printf("LOAD: %s %s %d %s \n", s, f, d, sep);
CudaSet *a;
a = new CudaSet(namevars, typevars, sizevars, cols, process_count);
a->mRecCount = 0;
a->resize(process_count);
a->keep = true;
a->fact_table = 1;
//a->LoadBigFile(f, sep);
string separator1(sep);
separator = separator1;
string ff(f);
f_file = ff;
a->maxRecs = a->mRecCount;
a->segCount = 0;
varNames[s] = a;
if(stat[s] == statement_count) {
a->free();
varNames.erase(s);
};
}
void yyerror(char *s, ...)
{
extern int yylineno;
va_list ap;
va_start(ap, s);
fprintf(stderr, "%d: error: ", yylineno);
vfprintf(stderr, s, ap);
fprintf(stderr, "\n");
}
void clean_queues()
{
while(!op_type.empty()) op_type.pop();
while(!op_value.empty()) op_value.pop();
while(!op_join.empty()) op_join.pop();
while(!op_nums.empty()) op_nums.pop();
while(!op_nums_f.empty()) op_nums_f.pop();
while(!j_col_count.empty()) j_col_count.pop();
while(!namevars.empty()) namevars.pop();
while(!typevars.empty()) typevars.pop();
while(!sizevars.empty()) sizevars.pop();
while(!cols.empty()) cols.pop();
sel_count = 0;
join_cnt = 0;
join_col_cnt = -1;
eqq = 0;
}
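// Entry point: checks that device 0 can map host memory, enables host-memory mapping,
// initializes CUDPP, then runs the parser twice over the SQL script -- a scan pass
// to collect statement information followed by the execution pass.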
int main(int ac, char **av)
{
extern FILE *yyin;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
if (!deviceProp.canMapHostMemory)
cout << "Device 0 cannot map host memory" << endl;
cudaSetDeviceFlags(cudaDeviceMapHost);
cudppCreate(&theCudpp);
if (ac == 1) {
cout << "Usage : alenka -l process_count script.sql" << endl;
exit(1);
};
if(strcmp(av[1],"-l") == 0) {
process_count = atoff(av[2]);
cout << "Process count = " << process_count << endl;
}
else {
process_count = 6200000;
cout << "Process count = 6200000 " << endl;
};
if((yyin = fopen(av[ac-1], "r")) == NULL) {
perror(av[ac-1]);
exit(1);
};
if(yyparse()) {
printf("SQL scan parse failed\n");
exit(1);
};
fclose(yyin);
scan_state = 1;
std::clock_t start1 = std::clock();
statement_count = 0;
clean_queues();
if(ac > 1 && (yyin = fopen(av[ac-1], "r")) == NULL) {
perror(av[1]);
exit(1);
}
PROC_FLUSH_BUF ( yyin );
statement_count = 0;
if(!yyparse())
cout << "SQL scan parse worked" << endl;
else
cout << "SQL scan parse failed" << endl;
fclose(yyin);
std::cout<< "cycle time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
cudppDestroy(theCudpp);
}
|
78cb2ceb139274fa4e86f3a833ecee827e635116.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void yMinDeltaIntegralKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMin) {
int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const int xMaxInt = (int)floor(xMax[windowIdx]);
// const int yMaxInt = (int)floor(yMax[windowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt , w ))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w-1))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt , w ))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w-1))];
delta *= (y+yMinInt >= 1 and y+yMinInt < w);
tmpArray[(x-1)*w + (y-1)] *= -delta;
}
} | 78cb2ceb139274fa4e86f3a833ecee827e635116.cu | #include "includes.h"
__global__ void yMinDeltaIntegralKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMin) {
int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const int xMaxInt = (int)floor(xMax[windowIdx]);
// const int yMaxInt = (int)floor(yMax[windowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt , w ))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w-1))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt , w ))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w-1))];
delta *= (y+yMinInt >= 1 and y+yMinInt < w);
tmpArray[(x-1)*w + (y-1)] *= -delta;
}
} |
204f523cee87b4301d7b907b022e9ea5e98e2424.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <list>
#include <math.h>
#include <stdlib.h>
#include "cudacommon.h"
#include "MD.h"
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "Utility.h"
using namespace std;
// Forward Declarations
template <class T, class forceVecType, class posVecType, bool useTexture,
typename texReader>
void runTest(const string& testName, ResultDatabase& resultDB,
OptionParser& op);
template <class T, class posVecType>
inline T distance(const posVecType* position, const int i, const int j);
template <class T>
inline void insertInOrder(std::list<T>& currDist, std::list<int>& currList,
const int j, const T distIJ, const int maxNeighbors);
template <class T, class posVecType>
inline int buildNeighborList(const int nAtom, const posVecType* position,
int* neighborList);
template <class T>
inline int populateNeighborList(std::list<T>& currDist,
std::list<int>& currList, const int j, const int nAtom,
int* neighborList);
// Texture caches for position info
texture<float4, 1, hipReadModeElementType> posTexture;
texture<int4, 1, hipReadModeElementType> posTexture_dp;
struct texReader_sp {
__device__ __forceinline__ float4 operator()(int idx) const
{
return tex1Dfetch(posTexture, idx);
}
};
// CUDA doesn't support double4 textures, so we have to do some conversion
// here, resulting in a bit of overhead, but it's still faster than
// an uncoalesced read
struct texReader_dp {
__device__ __forceinline__ double4 operator()(int idx) const
{
#if (__CUDA_ARCH__ < 130)
// Devices before arch 130 don't support DP, and having the
// __hiloint2double() intrinsic will cause compilation to fail.
// This return statement added as a workaround -- it will compile,
// but since the arch doesn't support DP, it will never be called
return make_double4(0., 0., 0., 0.);
#else
int4 v = tex1Dfetch(posTexture_dp, idx*2);
double2 a = make_double2(__hiloint2double(v.y, v.x),
__hiloint2double(v.w, v.z));
v = tex1Dfetch(posTexture_dp, idx*2 + 1);
double2 b = make_double2(__hiloint2double(v.y, v.x),
__hiloint2double(v.w, v.z));
return make_double4(a.x, a.y, b.x, b.y);
#endif
}
};
// ****************************************************************************
// Function: compute_lj_force
//
// Purpose:
// GPU kernel to calculate Lennard Jones force
//
// Arguments:
// force3: array to store the calculated forces
// position: positions of atoms
// neighCount: number of neighbors for each atom to consider
// neighList: atom neighbor list
// cutsq: cutoff distance squared
// lj1, lj2: LJ force constants
// inum: total number of atoms
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ****************************************************************************
template <class T, class forceVecType, class posVecType, bool useTexture,
typename texReader>
__global__ void compute_lj_force(forceVecType* __restrict__ force3,
const posVecType* __restrict__ position,
const int neighCount,
const int* __restrict__ neighList,
const T cutsq,
const T lj1,
const T lj2,
const int inum)
{
// Global ID - one thread per atom
int idx = blockIdx.x*blockDim.x + threadIdx.x;
// Position of this thread's atom
posVecType ipos = position[idx];
// Force accumulator
forceVecType f = {0.0f, 0.0f, 0.0f};
texReader positionTexReader;
int j = 0;
while (j < neighCount)
{
int jidx = neighList[j*inum + idx];
posVecType jpos;
if (useTexture)
{
// Use texture mem as a cache
jpos = positionTexReader(jidx);
}
else
{
jpos = position[jidx];
}
// Calculate distance
T delx = ipos.x - jpos.x;
T dely = ipos.y - jpos.y;
T delz = ipos.z - jpos.z;
T r2inv = delx*delx + dely*dely + delz*delz;
// If distance is less than cutoff, calculate force
// and add to accumulator
if (r2inv < cutsq)
{
r2inv = 1.0f/r2inv;
T r6inv = r2inv * r2inv * r2inv;
T force = r2inv*r6inv*(lj1*r6inv - lj2);
f.x += delx * force;
f.y += dely * force;
f.z += delz * force;
}
j++;
}
// store the results
force3[idx] = f;
}
// ****************************************************************************
// Function: checkResults
//
// Purpose:
// Check device results against cpu results -- this is the CPU equivalent of the compute_lj_force kernel
//
// Arguments:
// d_force: forces calculated on the device
// position: positions of atoms
// neighList: atom neighbor list
// nAtom: number of atoms
// Returns: true if results match, false otherwise
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ****************************************************************************
template <class T, class forceVecType, class posVecType>
bool checkResults(forceVecType* d_force, posVecType *position,
int *neighList, int nAtom)
{
for (int i = 0; i < nAtom; i++)
{
posVecType ipos = position[i];
forceVecType f = {0.0f, 0.0f, 0.0f};
int j = 0;
while (j < maxNeighbors)
{
int jidx = neighList[j*nAtom + i];
posVecType jpos = position[jidx];
// Calculate distance
T delx = ipos.x - jpos.x;
T dely = ipos.y - jpos.y;
T delz = ipos.z - jpos.z;
T r2inv = delx*delx + dely*dely + delz*delz;
// If distance is less than cutoff, calculate force
if (r2inv < cutsq) {
r2inv = 1.0f/r2inv;
T r6inv = r2inv * r2inv * r2inv;
T force = r2inv*r6inv*(lj1*r6inv - lj2);
f.x += delx * force;
f.y += dely * force;
f.z += delz * force;
}
j++;
}
// Check the results
T diffx = (d_force[i].x - f.x) / d_force[i].x;
T diffy = (d_force[i].y - f.y) / d_force[i].y;
T diffz = (d_force[i].z - f.z) / d_force[i].z;
T err = sqrt(diffx*diffx) + sqrt(diffy*diffy) + sqrt(diffz*diffz);
if (err > (3.0 * EPSILON))
{
cout << "Test Failed, idx: " << i << " diff: " << err << "\n";
cout << "f.x: " << f.x << " df.x: " << d_force[i].x << "\n";
cout << "f.y: " << f.y << " df.y: " << d_force[i].y << "\n";
cout << "f.z: " << f.z << " df.z: " << d_force[i].z << "\n";
cout << "Test FAILED\n";
return false;
}
}
cout << "Test Passed\n";
return true;
}
// ********************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
// Add benchmark specific options parsing
//
// Arguments:
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
//
// ********************************************************
void
addBenchmarkSpecOptions(OptionParser &op)
{
op.addOption("iterations", OPT_INT, "1",
"specify MD kernel iterations", 'r');
}
// ********************************************************
// Function: RunBenchmark
//
// Purpose:
// Executes the md benchmark
//
// Arguments:
// resultDB: results from the benchmark are stored in this db
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
//
// ********************************************************
void
RunBenchmark(ResultDatabase &resultDB, OptionParser &op)
{
// Test to see if this device supports double precision
int device;
hipGetDevice(&device);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
cout << "Running single precision test" << endl;
runTest<float, float3, float4, true, texReader_sp>("MD-LJ", resultDB, op);
if ((deviceProp.major == 1 && deviceProp.minor >= 3) ||
(deviceProp.major >= 2))
{
cout << "Running double precision test" << endl;
runTest<double, double3, double4, true, texReader_dp>
("MD-LJ-DP", resultDB, op);
} else {
cout << "Skipping double precision test" << endl;
char atts[32] = "DP_Not_Supported";
// resultDB requires neg entry for every possible result
int passes = op.getOptionInt("passes");
for (int i = 0; i < passes; i++) {
resultDB.AddResult("MD-LJ-DP" , atts, "GB/s", FLT_MAX);
resultDB.AddResult("MD-LJ-DP_PCIe" , atts, "GB/s", FLT_MAX);
resultDB.AddResult("MD-LJ-DP-Bandwidth", atts, "GB/s", FLT_MAX);
resultDB.AddResult("MD-LJ-DP-Bandwidth_PCIe", atts, "GB/s", FLT_MAX);
resultDB.AddResult("MD-LJ-DP_Parity" , atts, "GB/s", FLT_MAX);
}
}
}
template <class T, class forceVecType, class posVecType, bool useTexture,
typename texReader>
void runTest(const string& testName, ResultDatabase& resultDB, OptionParser& op)
{
// Problem Parameters
const int probSizes[4] = { 12288, 24576, 36864, 73728 };
int sizeClass = op.getOptionInt("size");
assert(sizeClass >= 0 && sizeClass < 5);
int nAtom = probSizes[sizeClass - 1];
// Allocate problem data on host
posVecType* position;
forceVecType* force;
int* neighborList;
CUDA_SAFE_CALL(hipHostMalloc((void**)&position, nAtom*sizeof(posVecType)));
CUDA_SAFE_CALL(hipHostMalloc((void**)&force, nAtom*sizeof(forceVecType)));
CUDA_SAFE_CALL(hipHostMalloc((void**)&neighborList,
nAtom*maxNeighbors*sizeof(int)));
// Allocate device memory for position and force
forceVecType* d_force;
posVecType* d_position;
CUDA_SAFE_CALL(hipMalloc((void**)&d_force, nAtom*sizeof(forceVecType)));
CUDA_SAFE_CALL(hipMalloc((void**)&d_position, nAtom*sizeof(posVecType)));
// Allocate device memory for neighbor list
int* d_neighborList;
CUDA_SAFE_CALL(hipMalloc((void**)&d_neighborList,
nAtom*maxNeighbors*sizeof(int)));
cout << "Initializing test problem (this can take several "
"minutes for large problems)\n";
// Seed random number generator
srand48(8650341L);
// Initialize positions -- random distribution in cubic domain
// domainEdge constant specifies edge length
for (int i = 0; i < nAtom; i++)
{
position[i].x = (T)(drand48() * domainEdge);
position[i].y = (T)(drand48() * domainEdge);
position[i].z = (T)(drand48() * domainEdge);
}
if (useTexture)
{
// Set up 1D texture to cache position info
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float4>();
// Bind a 1D texture to the position array
CUDA_SAFE_CALL(hipBindTexture(0, posTexture, d_position, channelDesc,
nAtom*sizeof(float4)));
hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<int4>();
// Bind a 1D texture to the position array
CUDA_SAFE_CALL(hipBindTexture(0, posTexture_dp, d_position,
channelDesc2, nAtom*sizeof(double4)));
}
// Keep track of how many atoms are within the cutoff distance to
// accurately calculate FLOPS later
int totalPairs = buildNeighborList<T, posVecType>(nAtom, position,
neighborList);
cout << "Finished.\n";
cout << totalPairs << " of " << nAtom*maxNeighbors <<
" pairs within cutoff distance = " <<
100.0 * ((double)totalPairs / (nAtom*maxNeighbors)) << " %" << endl;
// Time the transfer of input data to the GPU
hipEvent_t inputTransfer_start, inputTransfer_stop;
hipEventCreate(&inputTransfer_start);
hipEventCreate(&inputTransfer_stop);
hipEventRecord(inputTransfer_start, 0);
// Copy neighbor list data to GPU
CUDA_SAFE_CALL(hipMemcpy(d_neighborList, neighborList,
maxNeighbors*nAtom*sizeof(int), hipMemcpyHostToDevice));
// Copy position to GPU
CUDA_SAFE_CALL(hipMemcpy(d_position, position, nAtom*sizeof(posVecType),
hipMemcpyHostToDevice));
hipEventRecord(inputTransfer_stop, 0);
CUDA_SAFE_CALL(hipEventSynchronize(inputTransfer_stop));
// Get elapsed time
float inputTransfer_time = 0.0f;
hipEventElapsedTime(&inputTransfer_time, inputTransfer_start,
inputTransfer_stop);
inputTransfer_time *= 1.e-3;
int blockSize = 256;
int gridSize = nAtom / blockSize;
// Warm up the kernel and check correctness
hipLaunchKernelGGL(( compute_lj_force<T, forceVecType, posVecType, useTexture, texReader>)
, dim3(gridSize), dim3(blockSize), 0, 0,
d_force, d_position, maxNeighbors, d_neighborList,
cutsq, lj1, lj2, nAtom);
CUDA_SAFE_CALL(hipDeviceSynchronize());
// Copy back forces
hipEvent_t outputTransfer_start, outputTransfer_stop;
hipEventCreate(&outputTransfer_start);
hipEventCreate(&outputTransfer_stop);
hipEventRecord(outputTransfer_start, 0);
CUDA_SAFE_CALL(hipMemcpy(force, d_force, nAtom*sizeof(forceVecType),
hipMemcpyDeviceToHost));
hipEventRecord(outputTransfer_stop, 0);
CUDA_SAFE_CALL(hipEventSynchronize(outputTransfer_stop));
// Get elapsed time
float outputTransfer_time = 0.0f;
hipEventElapsedTime(&outputTransfer_time, outputTransfer_start,
outputTransfer_stop);
outputTransfer_time *= 1.e-3;
// If results are incorrect, skip the performance tests
cout << "Performing Correctness Check (can take several minutes)\n";
if (!checkResults<T, forceVecType, posVecType>
(force, position, neighborList, nAtom))
{
return;
}
// Begin performance tests
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
int passes = op.getOptionInt("passes");
int iter = op.getOptionInt("iterations");
for (int i = 0; i < passes; i++)
{
// Other kernels will be involved in true parallel versions
hipEventRecord(kernel_start, 0);
for (int j = 0; j < iter; j++)
{
hipLaunchKernelGGL(( compute_lj_force<T, forceVecType, posVecType, useTexture, texReader>)
, dim3(gridSize), dim3(blockSize), 0, 0,
d_force, d_position, maxNeighbors, d_neighborList, cutsq,
lj1, lj2, nAtom);
}
hipEventRecord(kernel_stop, 0);
CUDA_SAFE_CALL(hipEventSynchronize(kernel_stop));
// get elapsed time
float kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time /= (float)iter;
kernel_time *= 1.e-3; // Convert to seconds
// Total number of flops
// Every pair of atoms compute distance - 8 flops
// totalPairs with distance < cutsq perform an additional 13
// for force calculation
double gflops = ((8 * nAtom * maxNeighbors) + (totalPairs * 13)) * 1e-9;
char atts[64];
sprintf(atts, "%d_atoms", nAtom);;
resultDB.AddResult(testName, atts, "GFLOPS", gflops / kernel_time);
resultDB.AddResult(testName+"_PCIe", atts, "GFLOPS",
gflops / (kernel_time+inputTransfer_time+outputTransfer_time));
int numPairs = nAtom * maxNeighbors;
long int nbytes = (3 * sizeof(T) * (1+numPairs)) + // position data
(3 * sizeof(T) * nAtom) + // force for each atom
(sizeof(int) * numPairs); // neighbor list
double gbytes = (double)nbytes / (1000. * 1000. * 1000.);
resultDB.AddResult(testName + "-Bandwidth", atts, "GB/s", gbytes /
kernel_time);
resultDB.AddResult(testName + "-Bandwidth_PCIe", atts, "GB/s",
gbytes / (kernel_time+inputTransfer_time+outputTransfer_time));
resultDB.AddResult(testName+"_Parity", atts, "N",
(inputTransfer_time+outputTransfer_time) / kernel_time);
}
// Clean up
// Host
CUDA_SAFE_CALL(hipHostFree(position));
CUDA_SAFE_CALL(hipHostFree(force));
CUDA_SAFE_CALL(hipHostFree(neighborList));
// Device
CUDA_SAFE_CALL(hipUnbindTexture(posTexture));
CUDA_SAFE_CALL(hipFree(d_position));
CUDA_SAFE_CALL(hipFree(d_force));
CUDA_SAFE_CALL(hipFree(d_neighborList));
CUDA_SAFE_CALL(hipEventDestroy(inputTransfer_start));
CUDA_SAFE_CALL(hipEventDestroy(inputTransfer_stop));
CUDA_SAFE_CALL(hipEventDestroy(outputTransfer_start));
CUDA_SAFE_CALL(hipEventDestroy(outputTransfer_stop));
CUDA_SAFE_CALL(hipEventDestroy(kernel_start));
CUDA_SAFE_CALL(hipEventDestroy(kernel_stop));
}
// ********************************************************
// Function: distance
//
// Purpose:
// Calculates distance squared between two atoms
//
// Arguments:
// position: atom position information
// i, j: indexes of the two atoms
//
// Returns: the computed distance
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T, class posVecType>
inline T distance(const posVecType* position, const int i, const int j)
{
posVecType ipos = position[i];
posVecType jpos = position[j];
T delx = ipos.x - jpos.x;
T dely = ipos.y - jpos.y;
T delz = ipos.z - jpos.z;
T r2inv = delx * delx + dely * dely + delz * delz;
return r2inv;
}
// ********************************************************
// Function: insertInOrder
//
// Purpose:
// Adds atom j to current neighbor list and distance list
// if its distance is low enough.
//
// Arguments:
// currDist: distance between current atom and each of its neighbors in the
// current list, sorted in ascending order
// currList: neighbor list for current atom, sorted by distance in asc. order
// j: atom to insert into neighbor list
// distIJ: distance between current atom and atom J
// maxNeighbors: max length of neighbor list
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T>
inline void insertInOrder(list<T>& currDist, list<int>& currList,
const int j, const T distIJ, const int maxNeighbors)
{
typename list<T>::iterator it;
typename list<int>::iterator it2;
it2 = currList.begin();
T currMax = currDist.back();
if (distIJ > currMax) return;
for (it=currDist.begin(); it!=currDist.end(); it++)
{
if (distIJ < (*it))
{
// Insert into appropriate place in list
currDist.insert(it,distIJ);
currList.insert(it2, j);
// Trim end of list
currList.resize(maxNeighbors);
currDist.resize(maxNeighbors);
return;
}
it2++;
}
}
// ********************************************************
// Function: buildNeighborList
//
// Purpose:
// Builds the neighbor list structure for all atoms for GPU coalesced reads
// and counts the number of pairs within the cutoff distance, so
// the benchmark gets an accurate FLOPS count
//
// Arguments:
// nAtom: total number of atoms
// position: pointer to the atom's position information
// neighborList: pointer to neighbor list data structure
//
// Returns: number of pairs of atoms within cutoff distance
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
// Jeremy Meredith, Tue Oct 9 17:35:16 EDT 2012
// On some slow systems and without optimization, this
// could take a while. Give users a rough completion
// percentage so they don't give up.
//
// ********************************************************
template <class T, class posVecType>
inline int buildNeighborList(const int nAtom, const posVecType* position,
int* neighborList)
{
int totalPairs = 0;
// Build Neighbor List
// Find the nearest N atoms to each other atom, where N = maxNeighbors
for (int i = 0; i < nAtom; i++)
{
// Print progress every 10% completion.
if (int((i+1)/(nAtom/10)) > int(i/(nAtom/10)))
cout << " " << 10*int((i+1)/(nAtom/10)) << "% done\n";
// Current neighbor list for atom i, initialized to -1
list<int> currList(maxNeighbors, -1);
// Distance to those neighbors. We're populating this with the
// closest neighbors, so initialize to FLT_MAX
list<T> currDist(maxNeighbors, FLT_MAX);
for (int j = 0; j < nAtom; j++)
{
if (i == j) continue; // An atom cannot be its own neighbor
// Calculate distance and insert in order into the current lists
T distIJ = distance<T, posVecType>(position, i, j);
insertInOrder<T>(currDist, currList, j, distIJ, maxNeighbors);
}
// We should now have the closest maxNeighbors neighbors and their
// distances to atom i. Populate the neighbor list data structure
// for GPU coalesced reads.
// The populate method returns how many of the maxNeighbors closest
// neighbors are within the cutoff distance. This will be used to
// calculate GFLOPS later.
totalPairs += populateNeighborList<T>(currDist, currList, i, nAtom,
neighborList);
}
return totalPairs;
}
// ********************************************************
// Function: populateNeighborList
//
// Purpose:
// Populates the neighbor list structure for a *single* atom for
// GPU coalesced reads and counts the number of pairs within the cutoff
// distance, (for current atom) so the benchmark gets an accurate FLOPS count
//
// Arguments:
// currDist: distance between current atom and each of its maxNeighbors
// neighbors
// currList: current list of neighbors
// i: current atom
// nAtom: total number of atoms
// neighborList: pointer to neighbor list data structure
//
// Returns: number of pairs of atoms within cutoff distance
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T>
inline int populateNeighborList(list<T>& currDist,
list<int>& currList, const int i, const int nAtom,
int* neighborList)
{
int idx = 0;
int validPairs = 0; // Pairs of atoms closer together than the cutoff
// Iterate across distance and neighbor list
typename list<T>::iterator distanceIter = currDist.begin();
for (list<int>::iterator neighborIter = currList.begin();
neighborIter != currList.end(); neighborIter++)
{
// Populate packed neighbor list
neighborList[(idx * nAtom) + i] = *neighborIter;
// If the distance is less than cutoff, increment valid counter
if (*distanceIter < cutsq)
validPairs++;
// Increment idx and distance iterator
idx++;
distanceIter++;
}
return validPairs;
}
| 204f523cee87b4301d7b907b022e9ea5e98e2424.cu | #include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <list>
#include <math.h>
#include <stdlib.h>
#include "cudacommon.h"
#include "MD.h"
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "Utility.h"
using namespace std;
// Forward Declarations
template <class T, class forceVecType, class posVecType, bool useTexture,
typename texReader>
void runTest(const string& testName, ResultDatabase& resultDB,
OptionParser& op);
template <class T, class posVecType>
inline T distance(const posVecType* position, const int i, const int j);
template <class T>
inline void insertInOrder(std::list<T>& currDist, std::list<int>& currList,
const int j, const T distIJ, const int maxNeighbors);
template <class T, class posVecType>
inline int buildNeighborList(const int nAtom, const posVecType* position,
int* neighborList);
template <class T>
inline int populateNeighborList(std::list<T>& currDist,
std::list<int>& currList, const int j, const int nAtom,
int* neighborList);
// Texture caches for position info
texture<float4, 1, cudaReadModeElementType> posTexture;
texture<int4, 1, cudaReadModeElementType> posTexture_dp;
struct texReader_sp {
__device__ __forceinline__ float4 operator()(int idx) const
{
return tex1Dfetch(posTexture, idx);
}
};
// CUDA doesn't support double4 textures, so we have to do some conversion
// here, resulting in a bit of overhead, but it's still faster than
// an uncoalesced read
struct texReader_dp {
__device__ __forceinline__ double4 operator()(int idx) const
{
#if (__CUDA_ARCH__ < 130)
// Devices before arch 130 don't support DP, and having the
// __hiloint2double() intrinsic will cause compilation to fail.
// This return statement added as a workaround -- it will compile,
// but since the arch doesn't support DP, it will never be called
return make_double4(0., 0., 0., 0.);
#else
int4 v = tex1Dfetch(posTexture_dp, idx*2);
double2 a = make_double2(__hiloint2double(v.y, v.x),
__hiloint2double(v.w, v.z));
v = tex1Dfetch(posTexture_dp, idx*2 + 1);
double2 b = make_double2(__hiloint2double(v.y, v.x),
__hiloint2double(v.w, v.z));
return make_double4(a.x, a.y, b.x, b.y);
#endif
}
};
// ****************************************************************************
// Function: compute_lj_force
//
// Purpose:
// GPU kernel to calculate Lennard Jones force
//
// Arguments:
// force3: array to store the calculated forces
// position: positions of atoms
// neighCount: number of neighbors for each atom to consider
// neighList: atom neighbor list
// cutsq: cutoff distance squared
// lj1, lj2: LJ force constants
// inum: total number of atoms
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ****************************************************************************
template <class T, class forceVecType, class posVecType, bool useTexture,
typename texReader>
__global__ void compute_lj_force(forceVecType* __restrict__ force3,
const posVecType* __restrict__ position,
const int neighCount,
const int* __restrict__ neighList,
const T cutsq,
const T lj1,
const T lj2,
const int inum)
{
// Global ID - one thread per atom
int idx = blockIdx.x*blockDim.x + threadIdx.x;
// Position of this thread's atom
posVecType ipos = position[idx];
// Force accumulator
forceVecType f = {0.0f, 0.0f, 0.0f};
texReader positionTexReader;
int j = 0;
while (j < neighCount)
{
int jidx = neighList[j*inum + idx];
posVecType jpos;
if (useTexture)
{
// Use texture mem as a cache
jpos = positionTexReader(jidx);
}
else
{
jpos = position[jidx];
}
// Calculate distance
T delx = ipos.x - jpos.x;
T dely = ipos.y - jpos.y;
T delz = ipos.z - jpos.z;
T r2inv = delx*delx + dely*dely + delz*delz;
// If distance is less than cutoff, calculate force
// and add to accumulator
if (r2inv < cutsq)
{
r2inv = 1.0f/r2inv;
T r6inv = r2inv * r2inv * r2inv;
T force = r2inv*r6inv*(lj1*r6inv - lj2);
f.x += delx * force;
f.y += dely * force;
f.z += delz * force;
}
j++;
}
// store the results
force3[idx] = f;
}
// ****************************************************************************
// Function: checkResults
//
// Purpose:
// Check device results against cpu results -- this is the CPU equivalent of the compute_lj_force kernel
//
// Arguments:
// d_force: forces calculated on the device
// position: positions of atoms
// neighList: atom neighbor list
// nAtom: number of atoms
// Returns: true if results match, false otherwise
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ****************************************************************************
template <class T, class forceVecType, class posVecType>
bool checkResults(forceVecType* d_force, posVecType *position,
int *neighList, int nAtom)
{
for (int i = 0; i < nAtom; i++)
{
posVecType ipos = position[i];
forceVecType f = {0.0f, 0.0f, 0.0f};
int j = 0;
while (j < maxNeighbors)
{
int jidx = neighList[j*nAtom + i];
posVecType jpos = position[jidx];
// Calculate distance
T delx = ipos.x - jpos.x;
T dely = ipos.y - jpos.y;
T delz = ipos.z - jpos.z;
T r2inv = delx*delx + dely*dely + delz*delz;
// If distance is less than cutoff, calculate force
if (r2inv < cutsq) {
r2inv = 1.0f/r2inv;
T r6inv = r2inv * r2inv * r2inv;
T force = r2inv*r6inv*(lj1*r6inv - lj2);
f.x += delx * force;
f.y += dely * force;
f.z += delz * force;
}
j++;
}
// Check the results
T diffx = (d_force[i].x - f.x) / d_force[i].x;
T diffy = (d_force[i].y - f.y) / d_force[i].y;
T diffz = (d_force[i].z - f.z) / d_force[i].z;
T err = sqrt(diffx*diffx) + sqrt(diffy*diffy) + sqrt(diffz*diffz);
if (err > (3.0 * EPSILON))
{
cout << "Test Failed, idx: " << i << " diff: " << err << "\n";
cout << "f.x: " << f.x << " df.x: " << d_force[i].x << "\n";
cout << "f.y: " << f.y << " df.y: " << d_force[i].y << "\n";
cout << "f.z: " << f.z << " df.z: " << d_force[i].z << "\n";
cout << "Test FAILED\n";
return false;
}
}
cout << "Test Passed\n";
return true;
}
// ********************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
// Add benchmark specific options parsing
//
// Arguments:
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
//
// ********************************************************
void
addBenchmarkSpecOptions(OptionParser &op)
{
op.addOption("iterations", OPT_INT, "1",
"specify MD kernel iterations", 'r');
}
// ********************************************************
// Function: RunBenchmark
//
// Purpose:
// Executes the md benchmark
//
// Arguments:
// resultDB: results from the benchmark are stored in this db
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: August 13, 2009
//
// Modifications:
//
// ********************************************************
void
RunBenchmark(ResultDatabase &resultDB, OptionParser &op)
{
// Test to see if this device supports double precision
int device;
cudaGetDevice(&device);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
cout << "Running single precision test" << endl;
runTest<float, float3, float4, true, texReader_sp>("MD-LJ", resultDB, op);
if ((deviceProp.major == 1 && deviceProp.minor >= 3) ||
(deviceProp.major >= 2))
{
cout << "Running double precision test" << endl;
runTest<double, double3, double4, true, texReader_dp>
("MD-LJ-DP", resultDB, op);
} else {
cout << "Skipping double precision test" << endl;
char atts[32] = "DP_Not_Supported";
// resultDB requires neg entry for every possible result
int passes = op.getOptionInt("passes");
for (int i = 0; i < passes; i++) {
resultDB.AddResult("MD-LJ-DP" , atts, "GB/s", FLT_MAX);
resultDB.AddResult("MD-LJ-DP_PCIe" , atts, "GB/s", FLT_MAX);
resultDB.AddResult("MD-LJ-DP-Bandwidth", atts, "GB/s", FLT_MAX);
resultDB.AddResult("MD-LJ-DP-Bandwidth_PCIe", atts, "GB/s", FLT_MAX);
resultDB.AddResult("MD-LJ-DP_Parity" , atts, "GB/s", FLT_MAX);
}
}
}
template <class T, class forceVecType, class posVecType, bool useTexture,
typename texReader>
void runTest(const string& testName, ResultDatabase& resultDB, OptionParser& op)
{
// Problem Parameters
const int probSizes[4] = { 12288, 24576, 36864, 73728 };
int sizeClass = op.getOptionInt("size");
assert(sizeClass >= 0 && sizeClass < 5);
int nAtom = probSizes[sizeClass - 1];
// Allocate problem data on host
posVecType* position;
forceVecType* force;
int* neighborList;
CUDA_SAFE_CALL(cudaMallocHost((void**)&position, nAtom*sizeof(posVecType)));
CUDA_SAFE_CALL(cudaMallocHost((void**)&force, nAtom*sizeof(forceVecType)));
CUDA_SAFE_CALL(cudaMallocHost((void**)&neighborList,
nAtom*maxNeighbors*sizeof(int)));
// Allocate device memory for position and force
forceVecType* d_force;
posVecType* d_position;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_force, nAtom*sizeof(forceVecType)));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_position, nAtom*sizeof(posVecType)));
// Allocate device memory for neighbor list
int* d_neighborList;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_neighborList,
nAtom*maxNeighbors*sizeof(int)));
cout << "Initializing test problem (this can take several "
"minutes for large problems)\n";
// Seed random number generator
srand48(8650341L);
// Initialize positions -- random distribution in cubic domain
// domainEdge constant specifies edge length
for (int i = 0; i < nAtom; i++)
{
position[i].x = (T)(drand48() * domainEdge);
position[i].y = (T)(drand48() * domainEdge);
position[i].z = (T)(drand48() * domainEdge);
}
if (useTexture)
{
// Set up 1D texture to cache position info
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float4>();
// Bind a 1D texture to the position array
CUDA_SAFE_CALL(cudaBindTexture(0, posTexture, d_position, channelDesc,
nAtom*sizeof(float4)));
cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<int4>();
// Bind a 1D texture to the position array
CUDA_SAFE_CALL(cudaBindTexture(0, posTexture_dp, d_position,
channelDesc2, nAtom*sizeof(double4)));
}
// Keep track of how many atoms are within the cutoff distance to
// accurately calculate FLOPS later
int totalPairs = buildNeighborList<T, posVecType>(nAtom, position,
neighborList);
cout << "Finished.\n";
cout << totalPairs << " of " << nAtom*maxNeighbors <<
" pairs within cutoff distance = " <<
100.0 * ((double)totalPairs / (nAtom*maxNeighbors)) << " %" << endl;
// Time the transfer of input data to the GPU
cudaEvent_t inputTransfer_start, inputTransfer_stop;
cudaEventCreate(&inputTransfer_start);
cudaEventCreate(&inputTransfer_stop);
cudaEventRecord(inputTransfer_start, 0);
// Copy neighbor list data to GPU
CUDA_SAFE_CALL(cudaMemcpy(d_neighborList, neighborList,
maxNeighbors*nAtom*sizeof(int), cudaMemcpyHostToDevice));
// Copy position to GPU
CUDA_SAFE_CALL(cudaMemcpy(d_position, position, nAtom*sizeof(posVecType),
cudaMemcpyHostToDevice));
cudaEventRecord(inputTransfer_stop, 0);
CUDA_SAFE_CALL(cudaEventSynchronize(inputTransfer_stop));
// Get elapsed time
float inputTransfer_time = 0.0f;
cudaEventElapsedTime(&inputTransfer_time, inputTransfer_start,
inputTransfer_stop);
inputTransfer_time *= 1.e-3;
int blockSize = 256;
int gridSize = nAtom / blockSize;
// Warm up the kernel and check correctness
compute_lj_force<T, forceVecType, posVecType, useTexture, texReader>
<<<gridSize, blockSize>>>
(d_force, d_position, maxNeighbors, d_neighborList,
cutsq, lj1, lj2, nAtom);
CUDA_SAFE_CALL(cudaThreadSynchronize());
// Copy back forces
cudaEvent_t outputTransfer_start, outputTransfer_stop;
cudaEventCreate(&outputTransfer_start);
cudaEventCreate(&outputTransfer_stop);
cudaEventRecord(outputTransfer_start, 0);
CUDA_SAFE_CALL(cudaMemcpy(force, d_force, nAtom*sizeof(forceVecType),
cudaMemcpyDeviceToHost));
cudaEventRecord(outputTransfer_stop, 0);
CUDA_SAFE_CALL(cudaEventSynchronize(outputTransfer_stop));
// Get elapsed time
float outputTransfer_time = 0.0f;
cudaEventElapsedTime(&outputTransfer_time, outputTransfer_start,
outputTransfer_stop);
outputTransfer_time *= 1.e-3;
// If results are incorrect, skip the performance tests
cout << "Performing Correctness Check (can take several minutes)\n";
if (!checkResults<T, forceVecType, posVecType>
(force, position, neighborList, nAtom))
{
return;
}
// Begin performance tests
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
int passes = op.getOptionInt("passes");
int iter = op.getOptionInt("iterations");
for (int i = 0; i < passes; i++)
{
// Other kernels will be involved in true parallel versions
cudaEventRecord(kernel_start, 0);
for (int j = 0; j < iter; j++)
{
compute_lj_force<T, forceVecType, posVecType, useTexture, texReader>
<<<gridSize, blockSize>>>
(d_force, d_position, maxNeighbors, d_neighborList, cutsq,
lj1, lj2, nAtom);
}
cudaEventRecord(kernel_stop, 0);
CUDA_SAFE_CALL(cudaEventSynchronize(kernel_stop));
// get elapsed time
float kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time /= (float)iter;
kernel_time *= 1.e-3; // Convert to seconds
// Total number of flops
// Every pair of atoms compute distance - 8 flops
// totalPairs with distance < cutsq perform an additional 13
// for force calculation
double gflops = ((8 * nAtom * maxNeighbors) + (totalPairs * 13)) * 1e-9;
char atts[64];
sprintf(atts, "%d_atoms", nAtom);;
resultDB.AddResult(testName, atts, "GFLOPS", gflops / kernel_time);
resultDB.AddResult(testName+"_PCIe", atts, "GFLOPS",
gflops / (kernel_time+inputTransfer_time+outputTransfer_time));
int numPairs = nAtom * maxNeighbors;
long int nbytes = (3 * sizeof(T) * (1+numPairs)) + // position data
(3 * sizeof(T) * nAtom) + // force for each atom
(sizeof(int) * numPairs); // neighbor list
double gbytes = (double)nbytes / (1000. * 1000. * 1000.);
resultDB.AddResult(testName + "-Bandwidth", atts, "GB/s", gbytes /
kernel_time);
resultDB.AddResult(testName + "-Bandwidth_PCIe", atts, "GB/s",
gbytes / (kernel_time+inputTransfer_time+outputTransfer_time));
resultDB.AddResult(testName+"_Parity", atts, "N",
(inputTransfer_time+outputTransfer_time) / kernel_time);
}
// Clean up
// Host
CUDA_SAFE_CALL(cudaFreeHost(position));
CUDA_SAFE_CALL(cudaFreeHost(force));
CUDA_SAFE_CALL(cudaFreeHost(neighborList));
// Device
CUDA_SAFE_CALL(cudaUnbindTexture(posTexture));
CUDA_SAFE_CALL(cudaFree(d_position));
CUDA_SAFE_CALL(cudaFree(d_force));
CUDA_SAFE_CALL(cudaFree(d_neighborList));
CUDA_SAFE_CALL(cudaEventDestroy(inputTransfer_start));
CUDA_SAFE_CALL(cudaEventDestroy(inputTransfer_stop));
CUDA_SAFE_CALL(cudaEventDestroy(outputTransfer_start));
CUDA_SAFE_CALL(cudaEventDestroy(outputTransfer_stop));
CUDA_SAFE_CALL(cudaEventDestroy(kernel_start));
CUDA_SAFE_CALL(cudaEventDestroy(kernel_stop));
}
// ********************************************************
// Function: distance
//
// Purpose:
// Calculates distance squared between two atoms
//
// Arguments:
// position: atom position information
// i, j: indexes of the two atoms
//
// Returns: the computed distance
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T, class posVecType>
inline T distance(const posVecType* position, const int i, const int j)
{
posVecType ipos = position[i];
posVecType jpos = position[j];
T delx = ipos.x - jpos.x;
T dely = ipos.y - jpos.y;
T delz = ipos.z - jpos.z;
T r2inv = delx * delx + dely * dely + delz * delz;
return r2inv;
}
// ********************************************************
// Function: insertInOrder
//
// Purpose:
// Adds atom j to current neighbor list and distance list
// if its distance is low enough.
//
// Arguments:
// currDist: distance between current atom and each of its neighbors in the
// current list, sorted in ascending order
// currList: neighbor list for current atom, sorted by distance in asc. order
// j: atom to insert into neighbor list
// distIJ: distance between current atom and atom J
// maxNeighbors: max length of neighbor list
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T>
inline void insertInOrder(list<T>& currDist, list<int>& currList,
const int j, const T distIJ, const int maxNeighbors)
{
typename list<T>::iterator it;
typename list<int>::iterator it2;
it2 = currList.begin();
T currMax = currDist.back();
if (distIJ > currMax) return;
for (it=currDist.begin(); it!=currDist.end(); it++)
{
if (distIJ < (*it))
{
// Insert into appropriate place in list
currDist.insert(it,distIJ);
currList.insert(it2, j);
// Trim end of list
currList.resize(maxNeighbors);
currDist.resize(maxNeighbors);
return;
}
it2++;
}
}
// ********************************************************
// Function: buildNeighborList
//
// Purpose:
// Builds the neighbor list structure for all atoms for GPU coalesced reads
// and counts the number of pairs within the cutoff distance, so
// the benchmark gets an accurate FLOPS count
//
// Arguments:
// nAtom: total number of atoms
// position: pointer to the atom's position information
// neighborList: pointer to neighbor list data structure
//
// Returns: number of pairs of atoms within cutoff distance
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
// Jeremy Meredith, Tue Oct 9 17:35:16 EDT 2012
// On some slow systems and without optimization, this
// could take a while. Give users a rough completion
// percentage so they don't give up.
//
// ********************************************************
template <class T, class posVecType>
inline int buildNeighborList(const int nAtom, const posVecType* position,
int* neighborList)
{
int totalPairs = 0;
// Build Neighbor List
// Find the nearest N atoms to each other atom, where N = maxNeighbors
for (int i = 0; i < nAtom; i++)
{
// Print progress every 10% completion.
if (int((i+1)/(nAtom/10)) > int(i/(nAtom/10)))
cout << " " << 10*int((i+1)/(nAtom/10)) << "% done\n";
// Current neighbor list for atom i, initialized to -1
list<int> currList(maxNeighbors, -1);
// Distance to those neighbors. We're populating this with the
// closest neighbors, so initialize to FLT_MAX
list<T> currDist(maxNeighbors, FLT_MAX);
for (int j = 0; j < nAtom; j++)
{
if (i == j) continue; // An atom cannot be its own neighbor
// Calculate distance and insert in order into the current lists
T distIJ = distance<T, posVecType>(position, i, j);
insertInOrder<T>(currDist, currList, j, distIJ, maxNeighbors);
}
// We should now have the closest maxNeighbors neighbors and their
// distances to atom i. Populate the neighbor list data structure
// for GPU coalesced reads.
// The populate method returns how many of the maxNeighbors closest
// neighbors are within the cutoff distance. This will be used to
// calculate GFLOPS later.
totalPairs += populateNeighborList<T>(currDist, currList, i, nAtom,
neighborList);
}
return totalPairs;
}
// ********************************************************
// Function: populateNeighborList
//
// Purpose:
// Populates the neighbor list structure for a *single* atom for
// GPU coalesced reads and counts the number of pairs within the cutoff
// distance, (for current atom) so the benchmark gets an accurate FLOPS count
//
// Arguments:
// currDist: distance between current atom and each of its maxNeighbors
// neighbors
// currList: current list of neighbors
// i: current atom
// nAtom: total number of atoms
// neighborList: pointer to neighbor list data structure
//
// Returns: number of pairs of atoms within cutoff distance
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T>
inline int populateNeighborList(list<T>& currDist,
list<int>& currList, const int i, const int nAtom,
int* neighborList)
{
int idx = 0;
int validPairs = 0; // Pairs of atoms closer together than the cutoff
// Iterate across distance and neighbor list
typename list<T>::iterator distanceIter = currDist.begin();
for (list<int>::iterator neighborIter = currList.begin();
neighborIter != currList.end(); neighborIter++)
{
// Populate packed neighbor list
neighborList[(idx * nAtom) + i] = *neighborIter;
// If the distance is less than cutoff, increment valid counter
if (*distanceIter < cutsq)
validPairs++;
// Increment idx and distance iterator
idx++;
distanceIter++;
}
return validPairs;
}
|